| source | python |
|---|---|
assistant_library_with_button_demo.py
|
#!/usr/bin/env python3
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Activates the Google Assistant with either a hotword or a button press, using the
Google Assistant Library.
The Google Assistant Library has direct access to the audio API, so this Python
code doesn't need to record audio.
.. note:
Hotword detection (such as "Okay Google") is supported only with Raspberry Pi 2/3.
If you're using a Pi Zero, this code works but you must press the button to activate
the Google Assistant.
"""
import logging
import platform
import sys
import threading
from google.assistant.library.event import EventType
from aiy.assistant import auth_helpers
from aiy.assistant.library import Assistant
from aiy.board import Board, Led
class MyAssistant:
"""An assistant that runs in the background.
The Google Assistant Library event loop blocks the running thread entirely.
To support the button trigger, we need to run the event loop in a separate
thread. Otherwise, the on_button_pressed() method will never get a chance to
be invoked.
"""
def __init__(self):
self._task = threading.Thread(target=self._run_task)
self._can_start_conversation = False
self._assistant = None
self._board = Board()
self._board.button.when_pressed = self._on_button_pressed
def start(self):
"""
Starts the assistant event loop and begins processing events.
"""
self._task.start()
def _run_task(self):
credentials = auth_helpers.get_assistant_credentials()
with Assistant(credentials) as assistant:
self._assistant = assistant
for event in assistant.start():
self._process_event(event)
def _process_event(self, event):
logging.info(event)
if event.type == EventType.ON_START_FINISHED:
self._board.led.state = Led.BEACON_DARK # Ready.
self._can_start_conversation = True
# Start the voicehat button trigger.
logging.info('Say "OK, Google" or press the button, then speak. '
'Press Ctrl+C to quit...')
elif event.type == EventType.ON_CONVERSATION_TURN_STARTED:
self._can_start_conversation = False
self._board.led.state = Led.ON # Listening.
elif event.type == EventType.ON_END_OF_UTTERANCE:
self._board.led.state = Led.PULSE_QUICK # Thinking.
elif (event.type == EventType.ON_CONVERSATION_TURN_FINISHED
or event.type == EventType.ON_CONVERSATION_TURN_TIMEOUT
or event.type == EventType.ON_NO_RESPONSE):
self._board.led.state = Led.BEACON_DARK # Ready.
self._can_start_conversation = True
elif event.type == EventType.ON_ASSISTANT_ERROR and event.args and event.args['is_fatal']:
sys.exit(1)
def _on_button_pressed(self):
# Check if we can start a conversation. 'self._can_start_conversation'
# is False when either:
# 1. The assistant library is not yet ready; OR
# 2. The assistant library is already in a conversation.
if self._can_start_conversation:
self._assistant.start_conversation()
def main():
logging.basicConfig(level=logging.INFO)
MyAssistant().start()
if __name__ == '__main__':
main()
|
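The demo above works because the blocking Assistant event loop is pushed onto its own thread, leaving the main thread free to service the button callback. Below is a minimal, library-free sketch of the same pattern; the queue, the event names and the functions are illustrative and not part of the AIY or Assistant APIs.
import queue
import threading
import time
events = queue.Queue()
def event_loop():
    # Stand-in for the blocking `for event in assistant.start()` loop above.
    while True:
        event = events.get()
        if event is None:
            break
        print("processing", event)
loop_thread = threading.Thread(target=event_loop)
loop_thread.start()
# The main thread plays the role of the button callback.
events.put("ON_CONVERSATION_TURN_STARTED")
time.sleep(0.1)
events.put(None)  # tell the loop to exit
loop_thread.join()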
New_Data_Extractor.py
|
from collections import Counter
import numpy as np
import heapq
import csv
import time as time
import multiprocessing as mp
import os
import sys
def TransData(NumCustomer):
#Pull Data from Retail CSV
TransDict = {}
TransList= {}
#2011 Retail Data for TrainX
with open('C:\Users\Luke Farrell\Google Drive\Luke - Shared\Jaeger\JAEG_2011_AdhocTransactionHeaders_DT20160304_T1059_CT365501.csv', 'rb') as retailCsv:
retailDict = csv.DictReader(retailCsv)
for row in retailDict:
if row['CustomerId'] not in TransDict:
TransDict[row['CustomerId']] = {}
transaction = "transaction1"
TransDict[row['CustomerId']][transaction] = [row['TotalTransValue'], row['Quantity'], row['SalesAssociateName'], row['TransDateTime']]
TransList[row['CustomerId']] = 0
else:
TransList[row['CustomerId']] += 1
transaction = "transaction" + str(TransList[row['CustomerId']])
TransDict[row['CustomerId']][transaction]= [row['TotalTransValue'], row['Quantity'], row['SalesAssociateName'], row['TransDateTime']]
if len(TransDict) > NumCustomer/2:
break
else:
pass
print TransDict
retailCsv.close()
print "2011 Retail Extracted"
#2010 Retail Data for TrainX
with open('C:\Users\Luke Farrell\Google Drive\Luke - Shared\Jaeger\JAEG_2010_AdhocTransactionHeaders_DT20160304_T1136_CT388935.csv', 'rb') as retailCsv:
retailDict = csv.DictReader(retailCsv)
for row in retailDict:
if row['CustomerId'] not in TransDict:
TransDict[row['CustomerId']] = {}
transaction = "transaction1"
TransDict[row['CustomerId']][transaction] = [row['TotalTransValue'], row['Quantity'], row['SalesAssociateName'], row['TransDateTime']]
TransList[row['CustomerId']] = 0
else:
TransList[row['CustomerId']] += 1
transaction = "transaction" + str(TransList[row['CustomerId']])
TransDict[row['CustomerId']][transaction]= [row['TotalTransValue'], row['Quantity'], row['SalesAssociateName'], row['TransDateTime']]
if len(TransDict) > NumCustomer:
break
else:
pass
retailCsv.close()
print "2010 Retail Extracted"
return TransDict
def ProductData(NumCustomer):
ProductDict = {}
RepeatCounter = []
#2011 Product Data
with open('C:\Users\Luke Farrell\Google Drive\Luke - Shared\Jaeger\JAEG_2011_AdhocTransactionDetails_DT20160304_T1105_CT805591.csv', 'rb') as productCsv:
DetailDict = csv.DictReader(productCsv)
for row in DetailDict:
if row['CustomerId'] not in ProductDict:
ProductDict[row['CustomerId']] = {}
product = "product1"
ProductDict[row['CustomerId']][product] = [row['Product'], row['AspGBP'], row['Colour'], row['Size'], row['Type']]
else:
RepeatCounter.append(row['CustomerId'])
RepeatDict=Counter(RepeatCounter)
product = "product" + str(RepeatDict[row['CustomerId']])
ProductDict[row['CustomerId']][product] = [row['Product'], row['AspGBP'], row['Colour'], row['Size'], row['Type']]
if len(ProductDict) > NumCustomer/2:
break
else:
pass
productCsv.close()
print "2011 Product Extracted"
#2010 Product Data
with open('C:\Users\Luke Farrell\Google Drive\Luke - Shared\Jaeger\JAEG_2010_AdhocTransactionDetails_DT20160304_T1143_CT891360.csv', 'rb') as productCsv:
DetailDict = csv.DictReader(productCsv)
for row in DetailDict:
if row['CustomerId'] not in ProductDict:
ProductDict[row['CustomerId']] = {}
product = "product1"
ProductDict[row['CustomerId']][product] = [row['Product'], row['AspGBP'], row['Colour'], row['Size'], row['Type']]
else:
RepeatCounter.append(row['CustomerId'])
RepeatDict=Counter(RepeatCounter)
product = "product" + str(RepeatDict[row['CustomerId']])
ProductDict[row['CustomerId']][product] = [row['Product'], row['AspGBP'], row['Colour'], row['Size'], row['Type']]
if len(ProductDict) > NumCustomer:
break
else:
pass
productCsv.close()
print "2010 Product Extracted"
# results.put(ProductDict)  # only needed by the commented-out multiprocessing variant, which passes a Queue in; undefined here
return ProductDict
def TargetData(NumCustomer):
customerFutureDict = {}
with open("C:\Users\Luke Farrell\Google Drive\Luke - Shared\Jaeger\JAEG_2012_AdhocTransactionHeaders_DT20160304_T1041_CT329923.csv", 'rb') as newRetailCsv:
newRetailDict = csv.DictReader(newRetailCsv)
for row in newRetailDict:
if row['CustomerId'] not in customerFutureDict:
customerFutureDict[row['CustomerId']] = [float(row['TotalTransValue'])]
else:
customerFutureDict[row['CustomerId']].append(float(row['TotalTransValue']))
if len(customerFutureDict) > NumCustomer:
break
else:
pass
customerFuture = {k:sum(v) for k,v in customerFutureDict.items()}
newRetailCsv.close()
print "2012 Target Extracted"
return customerFuture
def MasterData(TransDict, ProductDict, customerFuture):
#Create Customer Lifetime Value Dictionary
CLVDict = {}
for customer in TransDict:
LifeTime = 0
TransAvgList = []
CustomerAvgList = []
NumProducts = []
for trans in TransDict[customer]:
#Calculate Avg Transaction Price
TransAvgList.append(float(TransDict[customer][trans][0]))
#Calculate Avg Number of Products per Purchase
NumProducts.append(int(TransDict[customer][trans][1]))
#Calculate LifeTime
FirstDate = (int(TransDict[customer][trans][-1][0:4]),int(TransDict[customer][trans][-1][5:7]), int(TransDict[customer][trans][-1][8:10]))
RecentDate = (int(TransDict[customer]["transaction1"][-1][0:4]),int(TransDict[customer]["transaction1"][-1][5:7]), int(TransDict[customer]["transaction1"][-1][8:10]))
if len(TransDict[customer]) > 1:
LifeTime += (RecentDate[0]-FirstDate[0])*365
LifeTime += (RecentDate[1]- FirstDate[1])*30
LifeTime += RecentDate[2] - FirstDate[2]
#Calculate Avg Price per Product
prodPrice = []
for product in ProductDict[customer]:
prodPrice.append(float(ProductDict[customer][product][1]))
#Calculate total future spending for Train Y
if customer not in customerFuture:
FutureSpending = 0
else:
FutureSpending = customerFuture[customer]
NumVisits = len(TransDict[customer])
AverageSpent = round(np.average(TransAvgList),4)
AvgNumProducts = np.average(NumProducts)
AvgProdPrice = np.average(prodPrice)
CLV = NumVisits * AverageSpent
CustomerAvgList.append(FutureSpending)
CustomerAvgList.append(CLV)
CustomerAvgList.append(LifeTime)
CustomerAvgList.append(NumVisits)
CustomerAvgList.append(AverageSpent)
CustomerAvgList.append(AvgNumProducts)
CustomerAvgList.append(AvgProdPrice)
CLVDict[customer] = CustomerAvgList
print ""
#Create Master Searchable Dictionary with all Information
MasterDict = {}
for customer in TransDict:
MasterDict[customer] = {}
MasterDict[customer]["Product History"] = ProductDict[customer]
MasterDict[customer]["Transaction History"] = TransDict[customer]
MasterDict[customer]["CLV"] = CLVDict[customer]
#Return The Master Dictionary with all the Information
print "Data Extraction Finished"
np.save('MasterDict.npy', MasterDict)
return MasterDict
def Data(NumCustomers):
# start = time.time()
# results = mp.Queue()
# jobs = []
# p1 = mp.Process(target = TransData, args = (NumCustomers,results))
# jobs.append(p1)
# p1.start()
# p2 = mp.Process(target = ProductData, args = (NumCustomers,results))
# jobs.append(p2)
# p2.start()
# p3 = mp.Process(target = TargetData, args = (NumCustomers,results))
# jobs.append(p3)
# p3.start()
#
# resultsList = []
# for x in range(len(jobs)):
# resultsList.append(results.get())
#
# for process in jobs:
# process.join()
#
# end = time.time()
# print end-start
a = TransData(NumCustomers)
b = ProductData(NumCustomers)
c = TargetData(NumCustomers)
return MasterData(a,b,c)
#################################################################################
if __name__ == '__main__':
#Initialize the number of customers you want to extract
# start = time.time()
NumCustomers = 100
Data(NumCustomers)
# pool = mp.Pool()
#
# p1 = pool.map_async(TransData1, (NumCustomers,))
# p2 = pool.map_async(ProductData1, (NumCustomers,))
# p3 = pool.map_async(TargetData1, (NumCustomers,))
#
# MasterData(p1.get()[0],p2.get()[0],p3.get()[0])
# end = time.time()
# print end- start
#
# results = mp.Queue()
# start = time.time()
# a = TransData(NumCustomers,results)
# b = ProductData(NumCustomers,results)
# c = TargetData(NumCustomers,results)
# MasterData(a,b,c)
# end = time.time()
# print end - start
##
# start = time.time()
# results = mp.Queue()
# jobs = []
# p1 = mp.Process(target = TransData, args = (NumCustomers,results))
# jobs.append(p1)
# p1.start()
# p2 = mp.Process(target = ProductData, args = (NumCustomers,results))
# jobs.append(p2)
# p2.start()
# p3 = mp.Process(target = TargetData, args = (NumCustomers,results))
# jobs.append(p3)
# p3.start()
#
# resultsList = []
# for x in range(len(jobs)):
# resultsList.append(results.get())
#
# for process in jobs:
# process.join()
#
# MasterData(resultsList[1], resultsList[2], resultsList[0])
#
# end = time.time()
# print end-start
|
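Since MasterData() above persists the nested dictionary with np.save, it ends up pickled inside a 0-d object array; a short sketch of reading it back in a later session (allow_pickle is required on recent NumPy releases):
import numpy as np
master = np.load('MasterDict.npy', allow_pickle=True).item()  # unwrap the 0-d object array into a dict
some_customer = next(iter(master))
# CLV entry layout from MasterData(): [FutureSpending, CLV, LifeTime, NumVisits, AverageSpent, AvgNumProducts, AvgProdPrice]
print(master[some_customer]['CLV'])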
introspection_container.py
|
import roslib;
import rospy
from std_msgs.msg import Header
import pickle
import threading
import rostopic
from smacc_msgs.msg import SmaccContainerStatus,SmaccContainerInitialStatusCmd,SmaccContainerStructure
__all__ = ['IntrospectionClient']
# Topic names
STATUS_TOPIC = '/smacc/status'
STRUCTURE_TOPIC = '/smacc/container_structure'  # assumed value; ContainerProxy below publishes on this topic
INIT_TOPIC = '/smacc/container_init'
from smacc_viewer.smacc_user_data import UserData
class IntrospectionClient():
def get_servers(self):
"""Get the base names that are broadcasting smacc states."""
# Get the currently broadcasted smacc introspection topics
topics = rostopic.find_by_type('smacc_msgs/SmaccStatus')
rootservernames= [t[:t.rfind(STATUS_TOPIC)] for t in topics]
return rootservernames
#return [t[:t.rfind(STATUS_TOPIC)] for t in topics]
def set_initial_state(self,
server,
path,
initial_states,
initial_userdata = UserData(),
timeout = None):
"""Set the initial state of a smacc server.
@type server: string
@param server: The name of the introspection server to which this client
should connect.
@type path: string
@param path: The path to the target container in the state machine.
@type initial_states: list of string
@param initial_states: The state the target container should take when it
starts. This is a list of at least one state label.
@type initial_userdata: UserData
@param initial_userdata: The userdata to inject into the target container.
@type timeout: rospy.Duration
@param timeout: Timeout for this call. If this is set to None, it will not
block, and the initial state may not be set before the target state machine
goes active.
"""
# Construct initial state command
initial_status_msg = SmaccContainerInitialStatusCmd(
path = path,
initial_states = initial_states,
local_data = pickle.dumps(initial_userdata._data,2))
# A status message to receive confirmation that the state was set properly
msg_response = SmaccContainerStatus()
# Define a local callback to just stuff a local message
def local_cb(msg, msg_response):
rospy.logdebug("Received status response: "+str(msg))
msg_response.path = msg.path
msg_response.initial_states = msg.initial_states
msg_response.local_data = msg.local_data
# Create a subscriber to verify the request went through
state_sub = rospy.Subscriber(server+STATUS_TOPIC, SmaccContainerStatus,
callback=local_cb, callback_args=msg_response)
# Create a publisher to send the command
rospy.logdebug("Sending initial state command: "+str(initial_status_msg.path)+" on topic '"+server+INIT_TOPIC+"'")
init_pub = rospy.Publisher(server+INIT_TOPIC,
SmaccContainerInitialStatusCmd, queue_size=1)
init_pub.publish(initial_status_msg)
start_time = rospy.Time.now()
# Block until we get a new state back
if timeout is not None:
while rospy.Time.now() - start_time < timeout:
# Send the initial state command
init_pub.publish(initial_status_msg)
# Filter messages that are from other containers
if msg_response.path == path:
# Check if the heartbeat came back to match
state_match = all([s in msg_response.initial_states for s in initial_states])
local_data = UserData()
local_data._data = pickle.loads(msg_response.local_data)
ud_match = all([\
(key in local_data and local_data._data[key] == initial_userdata._data[key])\
for key in initial_userdata._data])
rospy.logdebug("STATE MATCH: "+str(state_match)+", UD_MATCH: "+str(ud_match))
if state_match and ud_match:
return True
rospy.sleep(0.3)
return False
class ContainerProxy():
"""smacc Container Introspection proxy.
This class is used as a container for introspection and debugging.
"""
def __init__(self, server_name, container, path, update_rate=rospy.Duration(2.0)):
"""Constructor for tree-wide data structure.
"""
self._path = path
self._container = container
self._update_rate = update_rate
self._status_pub_lock = threading.Lock()
# Advertise init service
self._init_cmd = rospy.Subscriber(
server_name + INIT_TOPIC,
SmaccContainerInitialStatusCmd,
self._init_cmd_cb)
# Advertise structure publisher
self._structure_pub = rospy.Publisher(
name=server_name + STRUCTURE_TOPIC,
data_class=SmaccContainerStructure,
queue_size=1)
# Advertise status publisher
self._status_pub = rospy.Publisher(
name=server_name + STATUS_TOPIC,
data_class=SmaccContainerStatus,
queue_size=1)
# Set transition callback
container.register_transition_cb(self._transition_cb)
# Create thread to constantly publish
self._status_pub_thread = threading.Thread(name=server_name+':status_publisher',target=self._status_pub_loop)
self._structure_pub_thread = threading.Thread(name=server_name+':structure_publisher',target=self._structure_pub_loop)
self._keep_running = False
def start(self):
self._keep_running = True
self._status_pub_thread.start()
self._structure_pub_thread.start()
def stop(self):
self._keep_running = False
def _status_pub_loop(self):
"""Loop to publish the status and structure heartbeats."""
while not rospy.is_shutdown() and self._keep_running:
#TODO
self._publish_status('HEARTBEAT')
try:
end_time = rospy.Time.now() + self._update_rate
while not rospy.is_shutdown() and rospy.Time.now() < end_time:
rospy.sleep(0.1)
except:
pass
def _structure_pub_loop(self):
"""Loop to publish the status and structure heartbeats."""
while not rospy.is_shutdown() and self._keep_running:
self._publish_structure('HEARTBEAT')
try:
end_time = rospy.Time.now() + self._update_rate
while not rospy.is_shutdown() and rospy.Time.now() < end_time:
rospy.sleep(0.1)
except:
pass
def _publish_structure(self, info_str=''):
path = self._path
children = list(self._container.get_children().keys())
internal_outcomes = []
outcomes_from = []
outcomes_to = []
for (outcome, from_label, to_label) in self._container.get_internal_edges():
internal_outcomes.append(str(outcome))
outcomes_from.append(str(from_label))
outcomes_to.append(str(to_label))
container_outcomes = self._container.get_registered_outcomes()
# Construct structure message
structure_msg = SmaccContainerStructure(
Header(stamp = rospy.Time.now()),
path,
children,
internal_outcomes,
outcomes_from,
outcomes_to,
container_outcomes)
try:
self._structure_pub.publish(structure_msg)
except:
if not rospy.is_shutdown():
rospy.logerr("Publishing smacc introspection structure message failed.")
def _publish_status(self, info_str=''):
"""Publish current state of this container."""
# Construct messages
with self._status_pub_lock:
path = self._path
#print str(structure_msg)
# Construct status message
#print self._container.get_active_states()
state_msg = SmaccContainerStatus(
Header(stamp = rospy.Time.now()),
path,
self._container.get_initial_states(),
self._container.get_active_states(),
pickle.dumps(self._container.userdata._data,2),
info_str)
# Publish message
self._status_pub.publish(state_msg)
### Transition reporting
def _transition_cb(self, *args, **kwargs):
"""Transition callback, passed to all internal nodes in the tree.
This callback locks an internal mutex, preventing any hooked transitions
from occurring while we're walking the tree.
"""
info_str = (str(args) + ', ' + str(kwargs))
rospy.logdebug("Transitioning: "+info_str)
self._publish_status(info_str)
def _init_cmd_cb(self, msg):
"""Initialize a tree's state and userdata."""
initial_states = msg.initial_states
local_data = msg.local_data
# Check if this init message is directed at this path
rospy.logdebug('Received init message for path: '+msg.path+' to '+str(initial_states))
if msg.path == self._path:
if all(s in self._container.get_children() for s in initial_states):
ud = UserData()
ud._data = pickle.loads(msg.local_data)
rospy.logdebug("Setting initial state in smacc path: '"+self._path+"' to '"+str(initial_states)+"' with userdata: "+str(ud._data))
# Set the initial state
self._container.set_initial_state(
initial_states,
ud)
# Publish initial state
self._publish_status("REMOTE_INIT")
else:
rospy.logerr("Attempting to set initial state in container '"+self._path+"' to '"+str(initial_states)+"', but this container only has states: "+str(self._container.get_children()))
|
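A sketch of how the IntrospectionClient defined above might be used from a ROS node, assuming this module is importable; the node name, the container path '/SM_ROOT' and the state label 'IDLE' are placeholders for whatever the target state machine defines.
import rospy
rospy.init_node('smacc_introspection_example')
client = IntrospectionClient()
for server in client.get_servers():
    # Block up to 5 seconds waiting for the server to confirm the new initial state.
    ok = client.set_initial_state(server, '/SM_ROOT', ['IDLE'], timeout=rospy.Duration(5.0))
    rospy.loginfo('initial state set on %s: %s', server, ok)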
q2_single_agent_eval.py
|
import time
import threading
import pyglet
from argparse import Namespace
from reinforcement_learning.dddqn_policy import DDDQNPolicy
import numpy as np
import torch
from flatland.utils.rendertools import RenderTool
from flatland.utils.graphics_pgl import RailViewWindow
from utils.observation_utils import normalize_observation
from utils.environment_utils import create_default_single_agent_environment
def load_policy(filename, state_size=231, hidden_layer_size=256, seed=None):
# Training parameters
training_parameters = {
'buffer_size': int(1e5),
'batch_size': 32,
'update_every': 8,
'learning_rate': 0.5e-4,
'tau': 1e-3,
'gamma': 0.99,
'buffer_min_size': 0,
'hidden_size': hidden_layer_size,
'use_gpu': False
}
# The action space of flatland is 5 discrete actions
action_size = 5
# Create Double DQN Policy object by loading the network weights from file.
policy = DDDQNPolicy(state_size, action_size, Namespace(**training_parameters), seed=seed)
policy.qnetwork_local = torch.load(filename)
return policy
def evaluate(seed=37429879, timed=False, filename="./rl-weights.pth", debug=False, refresh=1):
# Attempt to load policy from disk.
policy = load_policy(filename, seed=seed)
# Create environment with given seeding.
env, max_steps, _, _, observation_tree_depth, _ = create_default_single_agent_environment(seed+1, timed)
# Fixed environment parameters (note, these must correspond with the training parameters!)
observation_radius = 10
env_renderer = None
if (debug):
env_renderer = RenderTool(env, screen_width=1920, screen_height=1080)
# Create container for the agent actions and observations.
action_dict = dict()
agent_obs = [None] * env.number_of_agents
num_maps = 100
scores = []
successes = 0
for _ in range(0, num_maps):
# Create a new map.
obs, info = env.reset(True, True)
score = 0
if debug:
env_renderer.reset()
env_renderer.render_env(show=True, frames=False, show_observations=False)
time.sleep(refresh)
# Run episode
for _ in range(max_steps - 1):
# Build agent specific observations
for agent in env.get_agent_handles():
if obs[agent]:
agent_obs[agent] = normalize_observation(obs[agent], observation_tree_depth, observation_radius=observation_radius)
# If an action is required, select the action.
for agent in env.get_agent_handles():
action = 0
if info['action_required'][agent]:
action = policy.act(agent_obs[agent], eps=0.08)
#print("Required " + str(action))
action_dict.update({agent: action})
# Environment step
obs, all_rewards, done, info = env.step(action_dict)
if debug:
env_renderer.render_env(show=True, frames=False, show_observations=False)
time.sleep(refresh)
# Track rewards.
score = score + all_rewards[agent]
if done[agent]:
successes = successes + 1
break
# Record scores.
scores.append(score)
print("Successful: %8.2f%%" % (100 * successes / num_maps))
print("Mean reward: %8.2f" % (np.mean(scores)))
print("Median reward: %8.2f" % (np.median(scores)))
def main():
#seed = 37429879
seed = 32617879
timed = True
filename = "./rl-weights-withtimed.pth"
_debug = False
_refresh = 0.05
if (_debug):
window = RailViewWindow()
evalthread = threading.Thread(target=evaluate, args=(seed, timed, filename, _debug, _refresh,))
evalthread.start()
if (_debug):
pyglet.clock.schedule_interval(window.update_texture, 1/120.0)
pyglet.app.run()
evalthread.join()
if __name__ == "__main__":
main()
|
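load_policy() above hands the checkpoint path straight to torch.load(), which expects the device the weights were saved from to be available; a hedged sketch of a CPU-only variant (load_qnetwork_cpu is a hypothetical helper, not part of this script):
import torch
def load_qnetwork_cpu(filename="./rl-weights.pth"):
    # map_location remaps every tensor in the checkpoint onto the CPU.
    return torch.load(filename, map_location=torch.device('cpu'))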
buzzlevel.py
|
# -*- coding: utf-8 -*-
# buzzlevel.py
# Source: https://github.com/DrGFreeman/MineField
#
# MIT License
#
# Copyright (c) 2017 Julien de la Bruere-Terreault <drgfreeman@tuta.io>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import threading
import time
from gpiozero import Buzzer
class BuzzLevel:
"""A buzzer class for use in MineField Minecraft game for Raspbery Pi. The
buzzer emits a number of short beeps corresponding to the specified level
(0 to 4). An active buzzer must be connected to GPIO pin 4."""
def __init__(self):
"""Constructor. Returns a BuzzLevel object instance"""
self._buzzer = Buzzer(4) # gpiozero Buzzer object on pin 4.
self._onTime = .01 # beep duration
self._offTime = .19 # beep silence duration
self._level = 0 # beep level initialized to 0
self._active = False # object active state initialized to False
self.run() # activate object
def _beep(self, on):
"""Beeps the buzzer once followed by a silence.
Keyword arguments:
on: Produces a beep if True, produces a silence if False.
"""
if on:
self._buzzer.on()
time.sleep(self._onTime)
self._buzzer.off()
time.sleep(self._offTime)
else:
time.sleep(self._onTime + self._offTime)
def _beepLevel(self):
"""Beeps the buzzer a number of times set by the level attribute followed
by a number of silences so that the total duration is always constant."""
for i in range(self._level):
self._beep(True)
for i in range(5 - self._level):
self._beep(False)
def run(self):
"""Launches the _run method in a dedicated thread so it can run in the
background while the calling program continues.
"""
if not self._active:
thread1 = threading.Thread(target = self._run, args = [])
thread1.start()
def _run(self):
"""Executes the beepLevel method as long as the _active attribute is
True."""
self._active = True
while self._active:
self._beepLevel()
def setLevel(self, level):
"""Sets the buzzer _level attribute.
Keyword arguments:
level: the number of beeps to be produced (0 to 4)
"""
try:
if type(level) != int: # check that level is an integer
raise TypeError("level must be an integer.")
elif level >=0 and level <= 4: # check that level is between 0 and 4
self._level = level # set _level attribute
else:
raise ValueError("level must be between 0 and 4.")
except ValueError:
raise
except TypeError:
raise
def stop(self):
"""Sets the _active attribute to False. The background thread will stop
automatically at the end of the current loop."""
self._active = False
|
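A small usage sketch for the BuzzLevel class above; it assumes an active buzzer is wired to GPIO pin 4 of a Raspberry Pi, as the class docstring requires.
import time
buzz = BuzzLevel()  # the constructor starts the background beep loop (level 0 is silent)
buzz.setLevel(3)    # three beeps per cycle, padded with silences to a constant cycle length
time.sleep(5)
buzz.stop()         # the background thread exits at the end of its current cycle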
decode_wavernn_dualgru_compact_lpc_mband_16bit.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020 Patrick Lumban Tobing (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division
from distutils.util import strtobool
import argparse
import logging
import math
import os
import sys
import time
import numpy as np
import soundfile as sf
from scipy.io import wavfile
import torch
import torch.multiprocessing as mp
import matplotlib.pyplot as plt
from utils import find_files
from utils import read_txt, read_hdf5, shape_hdf5
from vcneuvoco import GRU_WAVE_DECODER_DUALGRU_COMPACT_MBAND_CF
import torch.nn.functional as F
from pqmf import PQMF
#import warnings
#warnings.filterwarnings('ignore')
#from torch.distributions.one_hot_categorical import OneHotCategorical
#import torch.nn.functional as F
def pad_list(batch_list, pad_value=0.0):
"""FUNCTION TO PAD VALUE
Args:
batch_list (list): list of batch, where the shape of i-th batch (T_i, C)
pad_value (float): value to pad
Return:
(ndarray): padded batch with the shape (B, T_max, C)
"""
batch_size = len(batch_list)
maxlen = max([batch.shape[0] for batch in batch_list])
#if len(batch_list[0].shape) > 1:
# n_feats = batch_list[0].shape[-1]
# batch_pad = np.zeros((batch_size, maxlen, n_feats))
#else:
# batch_pad = np.zeros((batch_size, maxlen))
#for idx, batch in enumerate(batch_list):
# batch_pad[idx, :batch.shape[0]] = batch
#logging.info(maxlen)
for idx, batch in enumerate(batch_list):
if idx > 0:
batch_pad = np.r_[batch_pad, np.expand_dims(np.pad(batch_list[idx], ((0, maxlen-batch_list[idx].shape[0]), (0, 0)), 'edge'), axis=0)]
else:
batch_pad = np.expand_dims(np.pad(batch_list[idx], ((0, maxlen-batch_list[idx].shape[0]), (0, 0)), 'edge'), axis=0)
# logging.info(batch_list[idx].shape)
# logging.info(np.expand_dims(np.pad(batch_list[idx], ((0, maxlen-batch_list[idx].shape[0]), (0, 0)), 'edge'), axis=0).shape)
# logging.info(batch_pad.shape)
return batch_pad
def decode_generator(feat_list, upsampling_factor=120, string_path='/feat_mceplf0cap', batch_size=1, excit_dim=0):
"""DECODE BATCH GENERATOR
Args:
feat_list (list): list of feature files (hdf5)
batch_size (int): batch size in decoding
upsampling_factor (int): upsampling factor
Return:
(object): generator instance
"""
with torch.no_grad():
shape_list = [shape_hdf5(f, string_path)[0] for f in feat_list]
idx = np.argsort(shape_list)
feat_list = [feat_list[i] for i in idx]
# divide into batch list
n_batch = math.ceil(len(feat_list) / batch_size)
batch_feat_lists = np.array_split(feat_list, n_batch)
batch_feat_lists = [f.tolist() for f in batch_feat_lists]
for batch_feat_list in batch_feat_lists:
batch_feat = []
n_samples_list = []
feat_ids = []
for featfile in batch_feat_list:
## load waveform
if 'mel' in string_path:
if excit_dim > 0:
feat = np.c_[read_hdf5(featfile, '/feat_mceplf0cap')[:,:excit_dim], read_hdf5(featfile, string_path)]
else:
feat = read_hdf5(featfile, string_path)
else:
feat = read_hdf5(featfile, string_path)
# append to list
batch_feat += [feat]
n_samples_list += [feat.shape[0]*upsampling_factor]
feat_ids += [os.path.basename(featfile).replace(".h5", "")]
# convert list to ndarray
batch_feat = pad_list(batch_feat)
# convert to torch variable
batch_feat = torch.FloatTensor(batch_feat)
if torch.cuda.is_available():
batch_feat = batch_feat.cuda()
yield feat_ids, (batch_feat, n_samples_list)
def main():
parser = argparse.ArgumentParser()
# decode setting
parser.add_argument("--feats", required=True,
type=str, help="list or directory of wav files")
parser.add_argument("--checkpoint", required=True,
type=str, help="model file")
parser.add_argument("--config", required=True,
type=str, help="configure file")
parser.add_argument("--outdir", required=True,
type=str, help="directory to save generated samples")
parser.add_argument("--fs", default=22050,
type=int, help="sampling rate")
parser.add_argument("--batch_size", default=1,
type=int, help="number of batch size in decoding")
parser.add_argument("--n_gpus", default=1,
type=int, help="number of gpus")
# other setting
parser.add_argument("--string_path", default=None,
type=str, help="log interval")
parser.add_argument("--intervals", default=4410,
type=int, help="log interval")
parser.add_argument("--seed", default=1,
type=int, help="seed number")
parser.add_argument("--GPU_device", default=None,
type=int, help="selection of GPU device")
parser.add_argument("--GPU_device_str", default=None,
type=str, help="selection of GPU device")
parser.add_argument("--verbose", default=1,
type=int, help="log level")
args = parser.parse_args()
if args.GPU_device is not None or args.GPU_device_str is not None:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
if args.GPU_device_str is None:
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.GPU_device)
else:
os.environ["CUDA_VISIBLE_DEVICES"] = args.GPU_device_str
# check directory existence
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
# set log level
if args.verbose > 0:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.outdir + "/decode.log")
logging.getLogger().addHandler(logging.StreamHandler())
elif args.verbose > 1:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.outdir + "/decode.log")
logging.getLogger().addHandler(logging.StreamHandler())
else:
logging.basicConfig(level=logging.WARN,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.outdir + "/decode.log")
logging.getLogger().addHandler(logging.StreamHandler())
logging.warn("logging is disabled.")
# fix seed
os.environ['PYTHONHASHSEED'] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# load config
config = torch.load(args.config)
logging.info(config)
# get file list
if os.path.isdir(args.feats):
feat_list = sorted(find_files(args.feats, "*.h5"))
elif os.path.isfile(args.feats):
feat_list = read_txt(args.feats)
else:
logging.error("--feats should be directory or list.")
sys.exit(1)
# prepare the file list for parallel decoding
feat_lists = np.array_split(feat_list, args.n_gpus)
feat_lists = [f_list.tolist() for f_list in feat_lists]
# define gpu decode function
def gpu_decode(feat_list, gpu):
with torch.cuda.device(gpu):
with torch.no_grad():
model_waveform = GRU_WAVE_DECODER_DUALGRU_COMPACT_MBAND_CF(
feat_dim=config.mcep_dim+config.excit_dim,
upsampling_factor=config.upsampling_factor,
hidden_units=config.hidden_units_wave,
hidden_units_2=config.hidden_units_wave_2,
kernel_size=config.kernel_size_wave,
dilation_size=config.dilation_size_wave,
n_quantize=config.n_quantize,
causal_conv=config.causal_conv_wave,
right_size=config.right_size,
n_bands=config.n_bands,
pad_first=True,
lpc=config.lpc)
logging.info(model_waveform)
model_waveform.cuda()
model_waveform.load_state_dict(torch.load(args.checkpoint)["model_waveform"])
model_waveform.remove_weight_norm()
model_waveform.eval()
for param in model_waveform.parameters():
param.requires_grad = False
torch.backends.cudnn.benchmark = True
# define generator
if args.string_path is None:
string_path = config.string_path
else:
string_path = args.string_path
logging.info(string_path)
generator = decode_generator(
feat_list,
batch_size=args.batch_size,
upsampling_factor=config.upsampling_factor,
excit_dim=config.excit_dim,
string_path=string_path)
# decode
time_sample = []
n_samples = []
n_samples_t = []
count = 0
pqmf = PQMF(config.n_bands).cuda()
print(f'{pqmf.subbands} {pqmf.A} {pqmf.taps} {pqmf.cutoff_ratio} {pqmf.beta}')
for feat_ids, (batch_feat, n_samples_list) in generator:
logging.info("decoding start")
start = time.time()
logging.info(batch_feat.shape)
#batch_feat = F.pad(batch_feat.transpose(1,2), (model_waveform.pad_left,model_waveform.pad_right), "replicate").transpose(1,2)
samples = model_waveform.generate(batch_feat)
logging.info(samples.shape) # B x n_bands x T//n_bands
samples = pqmf.synthesis(samples)[:,0].cpu().data.numpy() # B x 1 x T --> B x T
logging.info(samples.shape)
samples_list = samples
time_sample.append(time.time()-start)
n_samples.append(max(n_samples_list))
n_samples_t.append(max(n_samples_list)*len(n_samples_list))
for feat_id, samples, samples_len in zip(feat_ids, samples_list, n_samples_list):
#wav = np.clip(samples[:samples_len], -1, 1)
wav = np.clip(samples[:samples_len], -1, 0.999969482421875)
outpath = os.path.join(args.outdir, feat_id+".wav")
sf.write(outpath, wav, args.fs, "PCM_16")
logging.info("wrote %s." % (outpath))
#break
#figname = os.path.join(args.outdir, feat_id+"_wav.png")
#plt.subplot(2, 1, 1)
#plt.plot(wav_src)
#plt.title("source wave")
#plt.subplot(2, 1, 2)
#plt.plot(wav)
#plt.title("generated wave")
#plt.tight_layout()
#plt.savefig(figname)
#plt.close()
count += 1
#if count >= 3:
#if count >= 6:
#if count >= 1:
# break
logging.info("average time / sample = %.6f sec (%ld samples) [%.3f kHz/s]" % (\
sum(time_sample)/sum(n_samples), sum(n_samples), sum(n_samples)/(1000*sum(time_sample))))
logging.info("average throughput / sample = %.6f sec (%ld samples) [%.3f kHz/s]" % (\
sum(time_sample)/sum(n_samples_t), sum(n_samples_t), sum(n_samples_t)/(1000*sum(time_sample))))
# parallel decode
processes = []
gpu = 0
for i, feat_list in enumerate(feat_lists):
p = mp.Process(target=gpu_decode, args=(feat_list, gpu,))
p.start()
processes.append(p)
gpu += 1
if (i + 1) % args.n_gpus == 0:
gpu = 0
# wait for all process
for p in processes:
p.join()
if __name__ == "__main__":
main()
|
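For reference, a sketch of what pad_list() defined above produces for two feature matrices of unequal length; the toy arrays are illustrative.
import numpy as np
a = np.ones((3, 2))       # (T=3, C=2)
b = np.zeros((5, 2))      # (T=5, C=2)
batch = pad_list([a, b])  # the shorter sequence is edge-padded up to the longest one
print(batch.shape)        # -> (2, 5, 2)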
school_bell.py
|
#!/usr/bin/python3
# absolute imports
import argparse
import calendar
import json
import logging
import os
import schedule
import sys
import tempfile
from gpiozero import Buzzer
from subprocess import Popen, PIPE
from time import sleep
from threading import Thread
# Relative imports
try:
from .version import version
except (ValueError, ModuleNotFoundError):
version = "VERSION-NOT-FOUND"
# Check platform and set wav player
if sys.platform in ("linux", "linux2"):
_play = "/usr/bin/aplay"
_play_test = [_play, '-d', '1']
elif sys.platform == "darwin":
_play = "/usr/bin/afplay"
_play_test = [_play, '-t', '1']
elif sys.platform in ("win32", "win64"):
raise NotImplementedError('school_bell.py does not work on Windows')
def is_raspberry_pi():
"""Checks if the device is a Rasperry Pi
"""
if not os.path.exists("/proc/device-tree/model"):
return False
with open("/proc/device-tree/model") as f:
model = f.read()
return model.startswith("Raspberry Pi")
def init_logger(prog=None, debug=False):
"""Create the logger object
"""
# create logger
logger = logging.getLogger(prog or 'school-bell')
# log to stdout
streamHandler = logging.StreamHandler(sys.stdout)
streamHandler.setFormatter(logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
))
logger.addHandler(streamHandler)
# set logger level
logger.setLevel(logging.DEBUG if debug else logging.INFO)
return logger
def system_call(command: list, log: logging.Logger = None, **kwargs):
"""Execute a system call. Returns `True` on success.
"""
if not isinstance(command, list):
raise TypeError("command should be a list!")
log = log if isinstance(log, logging.Logger) else init_logger(debug=True)
log.debug(' '.join(command))
p = Popen(command, stdout=PIPE, stderr=PIPE, **kwargs)
output, error = p.communicate()
log.debug(output.decode("utf-8"))
if p.returncode != 0:
log.error(error.decode("utf-8"))
return p.returncode == 0
def play(wav: str, log: logging.Logger, test: bool = False):
"""Play the school bell. Returns `True` on success.
"""
return system_call(_play_test + [wav] if test else [_play, wav], log)
def ring(wav, buzzer, trigger, log):
"""Ring the school bell
"""
log.info("ring!")
threads = []
for remote, command in trigger.items():
threads.append(Thread(target=remote_ring,
args=(remote, [command, wav, '&'], log)))
threads.append(Thread(target=play, args=(wav, log)))
if buzzer:
buzzer.on()
for t in threads:
t.start()
for t in threads:
t.join()
if buzzer:
buzzer.off()
def remote_ring(host: str, command: list, log: logging.Logger):
"""Remote ring over ssh. Returns `True` on success.
"""
ssh = ["/usr/bin/ssh",
"-t",
"-o", "ConnectTimeout=1",
"-o", "StrictHostKeyChecking=no",
host]
return system_call(ssh + command, log)
def test_remote_trigger(trigger, log):
"""Test remote ring triggers. Returns the filtered trigger dictionary.
"""
for remote in list(trigger.keys()):
if remote_ring(remote, [trigger[remote], "--help"], log):
log.info(f" remote ring {remote}")
else:
log.warning(f"remote ring test for {remote} failed!")
trigger.pop(remote)
return trigger
class DemoConfig(argparse.Action):
"""Argparse action to print a demo JSON configuration
"""
def __call__(self, parser, namespace, values, option_string=None):
demo = os.path.join(
sys.exec_prefix, 'share', 'school-bell', 'config.json'
)
with open(demo, "r") as demo_config:
print(json.dumps(json.load(demo_config), indent=4))
sys.exit()
class SelfUpdate(argparse.Action):
"""Argparse action to self-update the school-bell code from git.
"""
def __call__(self, parser, namespace, values, option_string=None):
with tempfile.TemporaryDirectory() as tmp:
log = init_logger(debug=True)
git = 'https://github.com/psmsmets/school-bell.git'
src = os.path.join(tmp, 'school-bell')
if system_call(['git', 'clone', git, tmp], log):
system_call(['pip', 'install', '--src', src, '.'], log)
log.info('school-bell updated.')
sys.exit()
def main():
"""Main script function.
"""
prog = 'school-bell'
info = 'Python scheduled ringing of the school bell.'
# arguments
parser = argparse.ArgumentParser(prog=prog, description=info)
parser.add_argument(
'-b', '--buzz', metavar='..', type=int, nargs='?',
default=False, const=17,
help=('Buzz via RPi GPIO while the WAV audio file plays '
'(default: %(default)s)')
)
parser.add_argument(
'--debug', action='store_true', default=False,
help='Make the operation a lot more talkative'
)
parser.add_argument(
'--demo', action=DemoConfig, nargs=0,
help='Print the demo JSON configuration and exit'
)
parser.add_argument(
'--update', action=SelfUpdate, nargs=0,
help='Update %(prog)s from git.'
)
parser.add_argument(
'--version', action='version', version=version,
help='Print the version and exit'
)
parser.add_argument(
'config', type=str, help='JSON configuration (string or file)'
)
# parse arguments
args = parser.parse_args()
# create logger object
log = init_logger(prog, args.debug)
# header
log.info(info)
log.info(f"version = {version}")
# parse json config
log.info(f"config = {args.config}")
if os.path.isfile(os.path.expandvars(args.config)):
with open(os.path.expandvars(args.config)) as f:
args.config = json.load(f)
else:
try:
args.config = json.loads(args.config)
except json.decoder.JSONDecodeError:
err = "JSON configuration should be a string or file!"
log.error(err)
raise RuntimeError(err)
# check if all arguments are present
for key in ('schedule', 'trigger', 'wav'):
if key not in args.config:
err = f"JSON config should contain the dictionary '{key}'!"
log.error(err)
raise KeyError(err)
if not isinstance(args.config[key], dict):
err = f"JSON config '{key}' should be a dictionary!"
log.error(err)
raise TypeError(err)
# get root
root = args.config['root'] if 'root' in args.config else ''
log.info(f"root = {root}")
# verify wav
log.info("wav =")
for key, wav in args.config['wav'].items():
log.info(f" {key}: {wav}")
root_wav = os.path.expandvars(os.path.join(root, wav))
if not os.path.isfile(root_wav):
err = f"File '{root_wav}' not found!"
log.error(err)
raise FileNotFoundError(err)
if not play(root_wav, log, test=True):
err = f"Could not play {root_wav}!"
log.error(err)
raise RuntimeError(err)
# verify remote triggers
log.info("trigger =")
trigger = test_remote_trigger(
args.config['trigger'] if 'trigger' in args.config else dict(), log
)
# buzz?
buzzer = False
log.info(f"buzzer = {args.buzz}")
if args.buzz:
if is_raspberry_pi():
buzzer = Buzzer(args.buzz)
else:
log.warning("Host is not a Raspberry Pi: buzzer disabled!")
# ring wrapper
def _ring(wav):
ring(wav, buzzer, trigger, log)
# create schedule
log.info("schedule =")
for day, times in args.config['schedule'].items():
day_num = list(calendar.day_abbr).index(day)
day_name = calendar.day_name[day_num].lower()
for time, wav_key in times.items():
log.info(f" ring every {day} at {time} with {wav_key}")
try:
wav = os.path.join(root, args.config['wav'][f"{wav_key}"])
except KeyError:
err = f"wav key {wav_key} is not related to any sample!"
log.error(err)
raise KeyError(err)
eval(f"schedule.every().{day_name}.at(\"{time}\").do(_ring, wav)")
# run schedule
log.info('Schedule started')
while True:
schedule.run_pending()
sleep(.5)
if __name__ == "__main__":
main()
|
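For reference, a sketch of a configuration the parser above accepts, written as the equivalent Python dict: day keys come from calendar.day_abbr ('Mon'..'Sun'), times are 'HH:MM' strings, and every wav key used in the schedule must exist in the 'wav' mapping. All paths, hosts and commands are placeholders.
example_config = {
    "root": "/home/pi/school-bell",                      # optional prefix for wav paths
    "wav": {"default": "sounds/bell.wav"},
    "trigger": {},                                       # e.g. {"pi@remote-host": "remote-play-command"}
    "schedule": {
        "Mon": {"08:30": "default", "12:00": "default"},
        "Fri": {"15:00": "default"},
    },
}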
Pic_Spider.py
|
# -*- coding: utf-8 -*-
"""
Author : Lamadog
Time : 2019/12/10
Email : xwen.xi@icloud.com
"""
import os
import re
import threading
import time
from urllib import request, parse
import multiprocessing
from config import *
class Spider(object):
def __init__(self, word):
self.word = word
if not os.path.exists(f'./{self.word}'):
os.makedirs(f'./{self.word}')
def start_img(self):
urls = []
k = 0
req = request.Request(url.format(parse.quote(self.word)), headers=headers)
print(url.format(self.word))
while True:
if k == 0:
try:
html = request.urlopen(req, timeout=30).read().decode('utf-8')
except ConnectionError as ex:
raise ex
for item in [url1, url2]:
urls.extend(re.findall(item, html))
urls = list(set(urls))
yield urls[k]
k += 1
k = 0 if k >= len(urls) else k
def down(self, pre_down, who):
page = 0
pic_url = pre_down.send(None)
while True:
print(f'Thread-{who}', pic_url)
req = request.Request(pic_url, headers=headers)
try:
res = request.urlopen(req, timeout=30)
time.sleep(2)
except ConnectionError as ex:
pre_down.close()
print(ex)
raise
with open(f'./{self.word}/{page}.jpg', 'wb') as fp:
fp.write(res.read())
fp.flush()
fp.close()
page += 1
pic_url = pre_down.send('finish!')
if __name__ == "__main__":
spider = Spider('刘亦菲')
for i in range(multiprocessing.cpu_count()):
t = threading.Thread(target=spider.down, args=(spider.start_img(), i))
t.start()
|
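Spider.down() above drives Spider.start_img() through the generator send() protocol: the consumer primes the generator with send(None) and then asks for the next URL after each download. A minimal, self-contained sketch of that hand-off (url_source and the URLs are illustrative):
def url_source(urls):
    k = 0
    while True:
        yield urls[k % len(urls)]  # hand out the next URL, wrapping around like start_img()
        k += 1
source = url_source(['http://example.com/a.jpg', 'http://example.com/b.jpg'])
first = source.send(None)        # priming call, equivalent to next(source)
second = source.send('finish!')  # the value sent back in is ignored, as in Spider.down()
print(first, second)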
playlist.py
|
from flask import Blueprint, request, jsonify
from jukebox.src.Track import Track
from jukebox.src.util import *
import threading
playlist = Blueprint('playlist', __name__)
@playlist.route("/add", methods=['POST'])
@requires_auth
def add():
"""
Adds a song to the playlist. Song information is stored in request.form.to_dict(). This dict generally comes from
the search.
"""
track_dict = request.form.to_dict()
app.logger.info("Adding track %s", track_dict["url"])
# track["user"] = session["user"]
with app.database_lock:
if not Track.does_track_exist(app.config["DATABASE_PATH"], track_dict["url"]):
Track.insert_track(app.config["DATABASE_PATH"], track_dict)
track = Track.import_from_url(app.config["DATABASE_PATH"], track_dict["url"])
track.insert_track_log(app.config["DATABASE_PATH"], session['user'])
else:
track = Track.import_from_url(app.config["DATABASE_PATH"], track_dict["url"])
track.insert_track_log(app.config["DATABASE_PATH"], session['user'])
# we refresh the track in database
track = Track.refresh_by_url(app.config["DATABASE_PATH"], track_dict["url"], obsolete=0)
track.user = session['user']
app.logger.info(track)
with app.playlist_lock:
app.playlist.append(track.serialize())
if len(app.playlist) == 1:
threading.Thread(target=app.player_worker).start()
return "ok"
@playlist.route("/remove", methods=['POST'])
@requires_auth
def remove():
"""supprime la track de la playlist"""
track = request.form
with app.playlist_lock:
for track_p in app.playlist:
if track_p["randomid"] == int(track["randomid"]):
if app.playlist.index(track_p) == 0:
app.logger.info("Removing currently playing track")
with app.mpv_lock:
app.mpv.quit()
else:
app.playlist.remove(track_p)
return "ok"
app.logger.info("Track " + track["url"] + " not found !")
return "nok"
@playlist.route("/volume", methods=['POST'])
@requires_auth
def volume():
if request.method == 'POST':
if hasattr(app, 'mpv') and app.mpv is not None:
app.logger.info("request volume: " + str(request.form["volume"]))
app.mpv.volume = request.form["volume"]
# set_volume(request.form["volume"])
return "ok"
@playlist.route("/suggest")
def suggest():
n = 5 # number of songs to display in the suggestions
if "n" in request.args:
n = int(request.args.get("n"))
result = []
nbr = 0
while nbr < n: # we use a while to be able not to add a song
# if it is blacklisted
with app.database_lock:
track = Track.get_random_track(app.config["DATABASE_PATH"])
if track is None:
nbr += 1
elif track.blacklisted == 0 and track.obsolete == 0 and track.source in app.config["SEARCH_BACKENDS"]:
result.append(track.serialize())
nbr += 1
return jsonify(result)
|
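A hedged sketch of calling the /add endpoint above from an external client; the host and port, the session cookie and the track URL are all placeholders, and 'url' is the only form field the handler reads explicitly (the rest of the form dict is passed through to Track.insert_track).
import requests
resp = requests.post(
    "http://localhost:5000/add",                      # assumed development address
    data={"url": "https://example.com/some-track"},   # placeholder track URL
    cookies={"session": "an-authenticated-session"},  # placeholder credentials for @requires_auth
)
print(resp.text)  # "ok" on success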
progress.py
|
import contextlib
import sys
import threading
import time
from timeit import default_timer
from ..callbacks import Callback
def format_time(t):
"""Format seconds into a human readable form.
>>> format_time(10.4)
'10.4s'
>>> format_time(1000.4)
'16min 40.4s'
"""
m, s = divmod(t, 60)
h, m = divmod(m, 60)
if h:
return f"{h:2.0f}hr {m:2.0f}min {s:4.1f}s"
elif m:
return f"{m:2.0f}min {s:4.1f}s"
else:
return f"{s:4.1f}s"
class ProgressBar(Callback):
"""A progress bar for dask.
Parameters
----------
minimum : int, optional
Minimum time threshold in seconds before displaying a progress bar.
Default is 0 (always display)
width : int, optional
Width of the bar
dt : float, optional
Update resolution in seconds, default is 0.1 seconds
out : file object, optional
File object to which the progress bar will be written
It can be ``sys.stdout``, ``sys.stderr`` or any other file object able to write ``str`` objects
Default is ``sys.stdout``
Examples
--------
Below we create a progress bar with a minimum threshold of 1 second before
displaying. For cheap computations nothing is shown:
>>> with ProgressBar(minimum=1.0): # doctest: +SKIP
... out = some_fast_computation.compute()
But for expensive computations a full progress bar is displayed:
>>> with ProgressBar(minimum=1.0): # doctest: +SKIP
... out = some_slow_computation.compute()
[########################################] | 100% Completed | 10.4 s
The duration of the last computation is available as an attribute
>>> pbar = ProgressBar() # doctest: +SKIP
>>> with pbar: # doctest: +SKIP
... out = some_computation.compute()
[########################################] | 100% Completed | 10.4 s
>>> pbar.last_duration # doctest: +SKIP
10.4
You can also register a progress bar so that it displays for all
computations:
>>> pbar = ProgressBar() # doctest: +SKIP
>>> pbar.register() # doctest: +SKIP
>>> some_slow_computation.compute() # doctest: +SKIP
[########################################] | 100% Completed | 10.4 s
"""
def __init__(self, minimum=0, width=40, dt=0.1, out=None):
if out is None:
# Warning, on windows, stdout can still be None if
# an application is started as GUI Application
# https://docs.python.org/3/library/sys.html#sys.__stderr__
out = sys.stdout
self._minimum = minimum
self._width = width
self._dt = dt
self._file = out
self.last_duration = 0
def _start(self, dsk):
self._state = None
self._start_time = default_timer()
# Start background thread
self._running = True
self._timer = threading.Thread(target=self._timer_func)
self._timer.daemon = True
self._timer.start()
def _pretask(self, key, dsk, state):
self._state = state
if self._file is not None:
self._file.flush()
def _finish(self, dsk, state, errored):
self._running = False
self._timer.join()
elapsed = default_timer() - self._start_time
self.last_duration = elapsed
if elapsed < self._minimum:
return
if not errored:
self._draw_bar(1, elapsed)
else:
self._update_bar(elapsed)
if self._file is not None:
self._file.write("\n")
self._file.flush()
def _timer_func(self):
"""Background thread for updating the progress bar"""
while self._running:
elapsed = default_timer() - self._start_time
if elapsed > self._minimum:
self._update_bar(elapsed)
time.sleep(self._dt)
def _update_bar(self, elapsed):
s = self._state
if not s:
self._draw_bar(0, elapsed)
return
ndone = len(s["finished"])
ntasks = sum(len(s[k]) for k in ["ready", "waiting", "running"]) + ndone
if ndone < ntasks:
self._draw_bar(ndone / ntasks if ntasks else 0, elapsed)
def _draw_bar(self, frac, elapsed):
bar = "#" * int(self._width * frac)
percent = int(100 * frac)
elapsed = format_time(elapsed)
msg = "\r[{0:<{1}}] | {2}% Completed | {3}".format(
bar, self._width, percent, elapsed
)
with contextlib.suppress(ValueError):
if self._file is not None:
self._file.write(msg)
self._file.flush()
|
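A usage sketch mirroring the docstring examples above, registering the bar for all computations; it assumes dask.array is installed.
import dask.array as da
pbar = ProgressBar(minimum=0.5, dt=0.2)
pbar.register()  # every subsequent compute() call now reports progress
x = da.random.random((2000, 2000), chunks=(500, 500))
x.mean().compute()
pbar.unregister()
print("last computation took", format_time(pbar.last_duration))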
Monitor.py
|
import os, logging
from flask import request, render_template, jsonify
from flask_socketio import emit
class MonitorSocket(object):
def __init__(self):
self.events = {'publish-sensors':self.sensors,'publish-actions':self.actions}
self.args = {'emitter':None,'namespace':'default','listen':False}
self.namespace = 'default'
self.emitter = None
def decorate(self,arguments):
from Util import decorate
return decorate(self,arguments)
def create(self,socket):
self.emitter = self.args.get('emitter')
if None == self.emitter:
logging.error('no event dispatcher set')
return self
self.external = socket
self.namespace = self.args.get('namespace')
self.external.on('connect',namespace='/%s'%self.namespace)(self.connect) # == @socketio.on('connect',namespace='/namespace')
self.external.on('request',namespace='/%s'%self.namespace)(self.request) # == @socketio.on('request',namespace='/namespace')
self.external.on('disconnect',namespace='/%s'%self.namespace)(self.disconnect) # == @socketio.on('disconnect',namespace='/namespace')
self.external.on('error',namespace='/%s'%self.namespace)(self.error) # == @socketio.on_error(/namespace')
logging.info('%s socket created'%self.namespace)
return self
def connect(self):
logging.info('connect-%s'%self.namespace)
self.external.emit('connected', {'call':'%s-connected'%self.namespace,'id':'connect-%s'%self.namespace},namespace='/%s'%self.namespace)
def request(self,data):
logging.debug('request-%s'%self.namespace)
data['call'] = data['request']
data['host'] = request.host # print dir(request)
data['sid'] = request.sid
self.external.emit('response', {'call':'%s-request'%self.namespace,'id':'response-%s'%self.namespace,'origin':data},namespace='/%s'%self.namespace)
def disconnect(self):
logging.info('%s disconnected from %s'%(request.host,self.namespace))
def error(self,error):
logging.error('%s socket error %s'%(self.namespace,str(error)))
def sensors(self,data):
if self.args.get('listen'):
self.external.emit('response', {'call':'%s-sensors'%self.namespace,'id':'response-%s'%self.namespace,'sensors':data},namespace='/%s'%self.namespace)
def actions(self,data):
if self.args.get('listen'):
self.external.emit('response', {'call':'%s-actions'%self.namespace,'id':'response-%s'%self.namespace,'actions':data},namespace='/%s'%self.namespace)
class MonitorErrors(object):
def __init__(self):
self.args = {'path':'errors','errors':[]}
# unsupported 101,102,103,200,201,202,203,204,205,206,207,208,226,300,301,302,303,304,305,306,307,308,402,407,418,421,422,423,424,426,506,507,508,510,511
self.errors = [400,401,403,404,405,406,408,409,410,411,412,413,414,415,416,417,428,429,431,451,500,501,502,503,504,505]
def decorate(self,arguments):
keys = self.args.keys()
for key in arguments:
if key in keys:
self.args[key] = arguments[key]
return self
def create(self,cgi):
custom = self.args.get('errors')
for code in custom:
cgi.register_error_handler(int(code),self.handler)
for code in self.errors:
if not code in custom:
cgi.register_error_handler(int(code),self.default)
def default(self,error):
if hasattr(error, 'errno'): # flask_login.login_required fail
return render_template('%s/%s.html'%(self.args.get('path'),500)),500
else:
return render_template('%s/default.html'%self.args.get('path'),code=error.code,name=error.name,description=error.description,message=error.message,args=error.args,response=error.response),error.code
def handler(self,error):
if hasattr(error, 'errno'): # flask_login.login_required fail error.name = template
return render_template('%s/%s.html'%(self.args.get('path'),500)),500
else: # flask
return render_template('%s/%s.html'%(self.args.get('path'),error.code)),error.code
class MonitorRoutes(object):
def __init__(self):
self.args = {}
self.routes = {'/':self.index}
def decorate(self,arguments):
from Util import decorate
return decorate(self,arguments)
def create(self,cgi):
for key in self.routes.keys():
cgi.add_url_rule(key,view_func=self.routes.get(key))
def index(self):
return render_template('monitor.html',title='driver monitor'),200
class Monitor(object):
def __init__(self,folder=os.getcwd()):
self.events = {'push-sio':self.push,'start-monitor':self.create,'monitor-options':self.decorate}
self.args = {'emitter':None,'host':'0.0.0.0','port':5000,'logger':None,'debug':False,'deamon':True,'namespace':'default'}
from flask import Flask
self.cgi = Flask(__name__,template_folder=folder,static_folder=folder)
from flask_socketio import SocketIO
# async_mode eventlet|gevent|threading
self.socket = SocketIO(self.cgi,async_mode='threading',debug=self.args.get('debug')) # eventlet is best performance, but threading works
self.socket.on_error_default(self.error) # == @socketio.on_error_default | socketio.on_error(None)(handler)
MonitorRoutes().create(self.cgi)
self.pusher = MonitorSocket().create(self.socket)
def decorate(self,arguments):
from Util import decorate
self.pusher.decorate(arguments)
return decorate(self,arguments)
def create(self,data={}):
self.cgi.config['HOST'] = self.args.get('host')
self.cgi.config['PORT'] = self.args.get('port')
self.cgi.config['DEBUG'] = self.args.get('debug')
if not None == self.args.get('logger'):
# self.cgi.logger = self.args.get('logger') # error can't set attribute
if(0 < len(self.cgi.logger.handlers)):
self.cgi.logger.handlers.pop()
self.cgi.logger.addHandler(self.args.get('logger'))
from threading import Thread
self.thread = Thread(target=self.cgi.run)
self.thread.setDaemon(self.args.get('deamon'))
self.thread.start()
self.pusher.create(self.socket)
return self
def error(self,error):
logging.error('default socket error %s'%str(error))
def push(self,data):
namespace = data.get('namespace')
self.socket.emit('response',{'call':'%s-got'%namespace,'id':'push-%s'%namespace,'data':data},namespace='/%s'%namespace)
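# Illustrative usage sketch for Monitor (a rough sketch; the exact keys accepted by
# decorate() depend on Util.decorate, and the 'driver' namespace below is hypothetical):
#
#   monitor = Monitor(folder='/path/to/templates')
#   monitor.decorate({'port': 8080, 'namespace': 'driver'})
#   monitor.create()                                     # starts Flask + SocketIO in a daemon thread
#   monitor.push({'namespace': 'driver', 'value': 42})   # emitted to clients on the /driver namespace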
|
test_channel_e2e.py
|
import unittest
import random
import string
import multiprocessing
from multiprocessing import Process
from multisock.channel import Channel
from multisock.crypter import Crypter
def get_random_string(length):
letters = string.ascii_lowercase
result_str = ''.join(random.choice(letters) for i in range(length))
return result_str
def do_send(sender, msg):
sender.send(msg)
def do_recv(receiver, result_buffer):
(msg, sender) = receiver.recv()
result_buffer.put(msg)
return result_buffer
def do_send_object(sender, obj):
print('object sent')
sender.send_object(obj)
def do_recv_object(receiver, result_buffer):
(msg, sender) = receiver.recv_object()
print('object received')
result_buffer.put(msg)
return result_buffer
class Test_Channel_E2E(unittest.TestCase):
def test_message_exchange(self):
crypto = Crypter('pwd', 'passphrase')
sender = Channel('224.1.1.1', 1234, 2048, '0.0.0.0', crypto)
receiver = Channel('224.1.1.1', 1234, 2048, '0.0.0.0', crypto)
msg_to_send = get_random_string(1024)
sender_th = Process(target=do_send, args=(sender, msg_to_send,))
# creating multiprocessing Queue
results = multiprocessing.Queue()
receiver_th = Process(target=do_recv, args=(receiver, results))
        # Start the sender and receiver processes
sender_th.start()
receiver_th.start()
        # Wait for both processes to finish
sender_th.join()
receiver_th.join()
# Close Channels
sender.close()
receiver.close()
self.assertTrue(results.qsize() == 1)
received_msg = results.get()
self.assertTrue(received_msg == msg_to_send)
def test_object_exchange(self):
crypto = Crypter('pwd', 'passphrase')
sender = Channel('224.1.1.1', 1234, 2048, '0.0.0.0', crypto)
receiver = Channel('224.1.1.1', 1234, 2048, '0.0.0.0', crypto)
msg_to_send = {'number': get_random_string(1024), 'str': 'Hello world', 'bool': True}
sender_th = Process(target=do_send_object, args=(sender, msg_to_send,))
# creating multiprocessing Queue
results = multiprocessing.Queue()
receiver_th = Process(target=do_recv_object, args=(receiver, results))
        # Start the sender and receiver processes
sender_th.start()
receiver_th.start()
        # Wait for both processes to finish
sender_th.join()
receiver_th.join()
# Close Channels
sender.close()
receiver.close()
self.assertTrue(results.qsize() >= 1)
received_msg = results.get()
self.assertTrue(received_msg == msg_to_send)
def test_object_exchange_unencrypted(self):
sender = Channel('224.1.1.1', 1234, 2048, '0.0.0.0')
receiver = Channel('224.1.1.1', 1234, 2048, '0.0.0.0')
msg_to_send = {'number': get_random_string(1024), 'str': 'Hello world', 'bool': True}
sender_th = Process(target=do_send_object, args=(sender, msg_to_send,))
# creating multiprocessing Queue
results = multiprocessing.Queue()
receiver_th = Process(target=do_recv_object, args=(receiver, results))
        # Start the sender and receiver processes
sender_th.start()
receiver_th.start()
        # Wait for both processes to finish
sender_th.join()
receiver_th.join()
# Close Channels
sender.close()
receiver.close()
self.assertTrue(results.qsize() >= 1)
received_msg = results.get()
self.assertTrue(received_msg == msg_to_send)
|
audiofeeds.py
|
# -*- coding: utf-8 -*-
# Copyright 2005 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation
import cPickle as pickle
import os
import sys
import threading
import time
from gi.repository import Gtk, GLib, Pango, Gdk
from quodlibet import config
from quodlibet import const
from quodlibet import formats
from quodlibet import qltk
from quodlibet import util
from quodlibet.browsers._base import Browser
from quodlibet.formats._audio import AudioFile
from quodlibet.formats.remote import RemoteFile
from quodlibet.qltk.downloader import DownloadWindow
from quodlibet.qltk.getstring import GetStringDialog
from quodlibet.qltk.msg import ErrorMessage
from quodlibet.qltk.songsmenu import SongsMenu
from quodlibet.qltk.views import AllTreeView
from quodlibet.qltk.x import ScrolledWindow, Alignment, Button
FEEDS = os.path.join(const.USERDIR, "feeds")
DND_URI_LIST, DND_MOZ_URL = range(2)
# Migration path for pickle
sys.modules["browsers.audiofeeds"] = sys.modules[__name__]
class InvalidFeed(ValueError):
pass
class Feed(list):
def __init__(self, uri):
self.name = _("Unknown")
self.uri = uri
self.changed = False
self.website = ""
self.__lastgot = 0
def get_age(self):
return time.time() - self.__lastgot
@staticmethod
def __fill_af(feed, af):
try:
af["title"] = feed.title or _("Unknown")
except:
af["title"] = _("Unknown")
try:
af["date"] = "%04d-%02d-%02d" % feed.modified_parsed[:3]
except (AttributeError, TypeError):
pass
for songkey, feedkey in [
("website", "link"),
("description", "tagline"),
("language", "language"),
("copyright", "copyright"),
("organization", "publisher"),
("license", "license")]:
try:
value = getattr(feed, feedkey)
except:
pass
else:
if value and value not in af.list(songkey):
af.add(songkey, value)
try:
author = feed.author_detail
except AttributeError:
try:
author = feed.author
except AttributeError:
pass
else:
if author and author not in af.list("artist"):
af.add('artist', author)
else:
try:
if author.email and author.email not in af.list("contact"):
af.add("contact", author.email)
except AttributeError:
pass
try:
if author.name and author.name not in af.list("artist"):
af.add("artist", author.name)
except AttributeError:
pass
try:
values = feed.contributors
except AttributeError:
pass
else:
for value in values:
try:
value = value.name
except AttributeError:
pass
else:
if value and value not in af.list("performer"):
af.add("performer", value)
try:
af["~#length"] = util.parse_time(feed.itunes_duration)
except (AttributeError, ValueError):
pass
try:
values = dict(feed.categories).values()
except AttributeError:
pass
else:
for value in values:
if value and value not in af.list("genre"):
af.add("genre", value)
def parse(self):
try:
doc = feedparser.parse(self.uri)
except:
return False
try:
album = doc.channel.title
except AttributeError:
return False
if album:
self.name = album
else:
self.name = _("Unknown")
defaults = AudioFile({"feed": self.uri})
try:
self.__fill_af(doc.channel, defaults)
except:
return False
entries = []
uris = set()
for entry in doc.entries:
try:
for enclosure in entry.enclosures:
try:
if ("audio" in enclosure.type or
"ogg" in enclosure.type or
formats.filter(enclosure.url)):
uri = enclosure.url.encode('ascii', 'replace')
try:
size = enclosure.length
except AttributeError:
size = 0
entries.append((uri, entry, size))
uris.add(uri)
break
except AttributeError:
pass
except AttributeError:
pass
for entry in list(self):
if entry["~uri"] not in uris:
self.remove(entry)
else:
uris.remove(entry["~uri"])
entries.reverse()
for uri, entry, size in entries:
if uri in uris:
song = RemoteFile(uri)
song["~#size"] = size
song.fill_metadata = False
song.update(defaults)
song["album"] = self.name
try:
self.__fill_af(entry, song)
except:
pass
else:
self.insert(0, song)
self.__lastgot = time.time()
return bool(uris)
class AddFeedDialog(GetStringDialog):
def __init__(self, parent):
super(AddFeedDialog, self).__init__(
qltk.get_top_parent(parent), _("New Feed"),
_("Enter the location of an audio feed:"),
okbutton=Gtk.STOCK_ADD)
def run(self):
uri = super(AddFeedDialog, self).run()
if uri:
return Feed(uri.encode('ascii', 'replace'))
else:
return None
class AudioFeeds(Browser, Gtk.VBox):
__gsignals__ = Browser.__gsignals__
__feeds = Gtk.ListStore(object) # unread
headers = ("title artist performer ~people album date website language "
"copyright organization license contact").split()
name = _("Audio Feeds")
accelerated_name = _("_Audio Feeds")
priority = 20
__last_folder = const.HOME
def pack(self, songpane):
container = qltk.RHPaned()
self.show()
container.pack1(self, True, False)
container.pack2(songpane, True, False)
return container
def unpack(self, container, songpane):
container.remove(songpane)
container.remove(self)
@staticmethod
def cell_data(col, render, model, iter, data):
if model[iter][0].changed:
render.markup = "<b>%s</b>" % util.escape(model[iter][0].name)
else:
render.markup = util.escape(model[iter][0].name)
render.set_property('markup', render.markup)
@classmethod
def changed(klass, feeds):
for row in klass.__feeds:
if row[0] in feeds:
row[0].changed = True
row[0] = row[0]
AudioFeeds.write()
@classmethod
def write(klass):
feeds = [row[0] for row in klass.__feeds]
f = file(FEEDS, "wb")
pickle.dump(feeds, f, pickle.HIGHEST_PROTOCOL)
f.close()
@classmethod
def init(klass, library):
try:
feeds = pickle.load(file(FEEDS, "rb"))
except (pickle.PickleError, EnvironmentError, EOFError):
pass
else:
for feed in feeds:
klass.__feeds.append(row=[feed])
GLib.idle_add(klass.__do_check)
@classmethod
def __do_check(klass):
thread = threading.Thread(target=klass.__check, args=())
thread.setDaemon(True)
thread.start()
@classmethod
def __check(klass):
for row in klass.__feeds:
feed = row[0]
if feed.get_age() < 2 * 60 * 60:
continue
elif feed.parse():
feed.changed = True
row[0] = feed
klass.write()
GLib.timeout_add(60 * 60 * 1000, klass.__do_check)
def Menu(self, songs, songlist, library):
menu = SongsMenu(library, songs, parent=self)
if len(songs) == 1:
item = qltk.MenuItem(_("_Download..."), Gtk.STOCK_CONNECT)
item.connect('activate', self.__download, songs[0]("~uri"))
item.set_sensitive(not songs[0].is_file)
else:
songs = filter(lambda s: not s.is_file, songs)
uris = [song("~uri") for song in songs]
item = qltk.MenuItem(_("_Download..."), Gtk.STOCK_CONNECT)
item.connect('activate', self.__download_many, uris)
item.set_sensitive(bool(songs))
menu.preseparate()
menu.prepend(item)
return menu
def __download_many(self, activator, sources):
chooser = Gtk.FileChooserDialog(
title=_("Download Files"), parent=qltk.get_top_parent(self),
action=Gtk.FileChooserAction.CREATE_FOLDER,
buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE, Gtk.ResponseType.OK))
chooser.set_current_folder(self.__last_folder)
resp = chooser.run()
if resp == Gtk.ResponseType.OK:
target = chooser.get_filename()
if target:
type(self).__last_folder = os.path.dirname(target)
for i, source in enumerate(sources):
base = os.path.basename(source)
if not base:
base = ("file%d" % i) + (
os.path.splitext(source)[1] or ".audio")
fulltarget = os.path.join(target, base)
DownloadWindow.download(source, fulltarget, self)
chooser.destroy()
def __download(self, activator, source):
chooser = Gtk.FileChooserDialog(
title=_("Download File"), parent=qltk.get_top_parent(self),
action=Gtk.FileChooserAction.SAVE,
buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE, Gtk.ResponseType.OK))
chooser.set_current_folder(self.__last_folder)
name = os.path.basename(source)
if name:
chooser.set_current_name(name)
resp = chooser.run()
if resp == Gtk.ResponseType.OK:
target = chooser.get_filename()
if target:
type(self).__last_folder = os.path.dirname(target)
DownloadWindow.download(source, target, self)
chooser.destroy()
def __init__(self, library, main):
super(AudioFeeds, self).__init__(spacing=6)
self.__view = view = AllTreeView()
self.__render = render = Gtk.CellRendererText()
render.set_property('ellipsize', Pango.EllipsizeMode.END)
col = Gtk.TreeViewColumn("Audio Feeds", render)
col.set_cell_data_func(render, AudioFeeds.cell_data)
view.append_column(col)
view.set_model(self.__feeds)
view.set_rules_hint(True)
view.set_headers_visible(False)
swin = ScrolledWindow()
swin.set_shadow_type(Gtk.ShadowType.IN)
swin.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
swin.add(view)
self.pack_start(swin, True, True, 0)
new = Button(_("_New"), Gtk.STOCK_ADD, Gtk.IconSize.MENU)
new.connect('clicked', self.__new_feed)
view.get_selection().connect('changed', self.__changed)
view.get_selection().set_mode(Gtk.SelectionMode.MULTIPLE)
view.connect('popup-menu', self.__popup_menu)
targets = [
("text/uri-list", 0, DND_URI_LIST),
("text/x-moz-url", 0, DND_MOZ_URL)
]
targets = [Gtk.TargetEntry.new(*t) for t in targets]
view.drag_dest_set(Gtk.DestDefaults.ALL, targets, Gdk.DragAction.COPY)
view.connect('drag-data-received', self.__drag_data_received)
view.connect('drag-motion', self.__drag_motion)
view.connect('drag-leave', self.__drag_leave)
self.connect_object('destroy', self.__save, view)
self.pack_start(Alignment(new, left=3, bottom=3), False, True, 0)
for child in self.get_children():
child.show_all()
def __drag_motion(self, view, ctx, x, y, time):
targets = [t.name() for t in ctx.list_targets()]
if "text/x-quodlibet-songs" not in targets:
view.get_parent().drag_highlight()
return True
return False
def __drag_leave(self, view, ctx, time):
view.get_parent().drag_unhighlight()
def __drag_data_received(self, view, ctx, x, y, sel, tid, etime):
view.emit_stop_by_name('drag-data-received')
targets = [
("text/uri-list", 0, DND_URI_LIST),
("text/x-moz-url", 0, DND_MOZ_URL)
]
targets = [Gtk.TargetEntry.new(*t) for t in targets]
view.drag_dest_set(Gtk.DestDefaults.ALL, targets, Gdk.DragAction.COPY)
if tid == DND_URI_LIST:
uri = sel.get_uris()[0]
elif tid == DND_MOZ_URL:
uri = sel.data.decode('utf16', 'replace').split('\n')[0]
else:
ctx.finish(False, False, etime)
return
ctx.finish(True, False, etime)
feed = Feed(uri.encode("ascii", "replace"))
feed.changed = feed.parse()
if feed:
self.__feeds.append(row=[feed])
AudioFeeds.write()
else:
ErrorMessage(
self, _("Unable to add feed"),
_("<b>%s</b> could not be added. The server may be down, "
"or the location may not be an audio feed.") %
util.escape(feed.uri)).run()
def __popup_menu(self, view):
model, paths = view.get_selection().get_selected_rows()
menu = Gtk.Menu()
refresh = Gtk.ImageMenuItem(Gtk.STOCK_REFRESH, use_stock=True)
delete = Gtk.ImageMenuItem(Gtk.STOCK_DELETE, use_stock=True)
refresh.connect_object(
'activate', self.__refresh, [model[p][0] for p in paths])
delete.connect_object(
'activate', map, model.remove, map(model.get_iter, paths))
menu.append(refresh)
menu.append(delete)
menu.show_all()
menu.connect('selection-done', lambda m: m.destroy())
        # XXX: keep the menu around
self.__menu = menu
return view.popup_menu(menu, 0, Gtk.get_current_event_time())
def __save(self, view):
AudioFeeds.write()
def __refresh(self, feeds):
changed = filter(Feed.parse, feeds)
AudioFeeds.changed(changed)
def activate(self):
self.__changed(self.__view.get_selection())
def __changed(self, selection):
model, paths = selection.get_selected_rows()
if model and paths:
songs = []
for path in paths:
model[path][0].changed = False
songs.extend(model[path][0])
self.emit('songs-selected', songs, True)
config.set("browsers", "audiofeeds",
"\t".join([model[path][0].name for path in paths]))
def __new_feed(self, activator):
feed = AddFeedDialog(self).run()
if feed is not None:
feed.changed = feed.parse()
if feed:
self.__feeds.append(row=[feed])
AudioFeeds.write()
else:
ErrorMessage(
self, _("Unable to add feed"),
_("<b>%s</b> could not be added. The server may be down, "
"or the location may not be an audio feed.") %
util.escape(feed.uri)).run()
def restore(self):
try:
names = config.get("browsers", "audiofeeds").split("\t")
except:
pass
else:
self.__view.select_by_func(lambda r: r[0].name in names)
browsers = []
try:
import feedparser
except ImportError:
print_w(_("Could not import %s. Audio Feeds browser disabled.")
% "python-feedparser")
else:
from quodlibet import app
if not app.player or app.player.can_play_uri("http://"):
browsers = [AudioFeeds]
else:
print_w(_("The current audio backend does not support URLs, "
"Audio Feeds browser disabled."))
|
test_run_example.py
|
'''
This test checks that a `run_ogcore_example.py` run of the model does not break
down (i.e., is still running) after five minutes (300 seconds).
'''
import multiprocessing
import time
import os
import sys
import importlib.util
import shutil
from pathlib import Path
import pytest
def call_run_ogcore_example():
cur_path = os.path.split(os.path.abspath(__file__))[0]
path = Path(cur_path)
roe_fldr = os.path.join(path.parent.parent, "run_examples")
roe_file_path = os.path.join(roe_fldr, "run_ogcore_example.py")
spec = importlib.util.spec_from_file_location(
'run_ogcore_example.py', roe_file_path)
roe_module = importlib.util.module_from_spec(spec)
sys.modules['run_ogcore_example.py'] = roe_module
spec.loader.exec_module(roe_module)
roe_module.main()
@pytest.mark.local
def test_run_ogcore_example(f=call_run_ogcore_example):
p = multiprocessing.Process(
target=f, name="run_ogcore_example", args=())
p.start()
time.sleep(300)
if p.is_alive():
p.terminate()
p.join()
timetest = True
else:
print("run_ogcore_example did not run for minimum time")
timetest = False
print('timetest ==', timetest)
# Delete directory created by run_ogcore_example.py
cur_path = os.path.split(os.path.abspath(__file__))[0]
path = Path(cur_path)
roe_output_dir = os.path.join(path.parent.parent, "run_examples",
"OUTPUT_BASELINE")
shutil.rmtree(roe_output_dir)
assert timetest
|
CycleModelServing.py
|
from http.server import BaseHTTPRequestHandler
import os
import subprocess
import time
from typing import ClassVar, Dict, List, Optional, Tuple
from onceml.components.base import BaseComponent, BaseExecutor
from onceml.orchestration.Workflow.types import PodContainer
from onceml.types.artifact import Artifact
from onceml.types.channel import Channels
from onceml.types.state import State
import onceml.types.channel as channel
import threading
import shutil
from onceml.templates import ModelServing
import json
from onceml.utils.logger import logger
from onceml.thirdParty.PyTorchServing import run_ts_serving, outputMar
from onceml.thirdParty.PyTorchServing import TS_PROPERTIES_PATH, TS_INFERENCE_PORT, TS_MANAGEMENT_PORT
from .Utils import generate_onceml_config_json, queryModelIsExist, registerModelJob, get_handler_path, get_handler_module
import pathlib
from onceml.utils.json_utils import objectDumps
import onceml.global_config as global_config
import onceml.utils.pipeline_utils as pipeline_utils
from deprecated.sphinx import deprecated
from onceml.types.component_msg import Component_Data_URL
class _executor(BaseExecutor):
def __init__(self):
super().__init__()
self.ensemble_models = []
'''
model serving template class
'''
self.model_serving_cls = None
self.lock = threading.Lock()
        # the ModelServing instance currently being served
self.model_serving_instance: ModelServing = None
self._ts_process = None
self.model_timer_thread = None
@property
def ts_process(self) -> subprocess.Popen:
"""获得当前的ts serving的Popen实例
"""
return self._ts_process
@ts_process.setter
def ts_process(self, process: subprocess.Popen):
"""设置ts serving的Popen
"""
self._ts_process = process
def Cycle(self, state: State, params: dict, data_dir, input_channels: Optional[Dict[str, Channels]] = None, input_artifacts: Optional[Dict[str, Artifact]] = None) -> Channels:
training_channels = list(input_channels.values())[0]
latest_checkpoint = state["model_checkpoint"]
if training_channels["checkpoint"] <= latest_checkpoint:
            # try to register the latest model if it is not registered yet
if not queryModelIsExist(self.component_msg["model_name"]):
mar_file = os.path.join(
data_dir, "{}-{}.mar".format(self.component_msg["model_name"], str(state["model_checkpoint"])))
if os.path.exists(mar_file):
if not registerModelJob(
url=os.path.abspath(mar_file),
handler=get_handler_module()
):
logger.error("register failed")
return None
to_use_model_dir = os.path.join(list(input_artifacts.values())[
0].url, "checkpoints", str(training_channels["checkpoint"]))
os.makedirs(data_dir, exist_ok=True)
        # package the .mar file
if not outputMar(
model_name=self.component_msg["model_name"],
handler=get_handler_path(),
extra_file="{},{}".format(to_use_model_dir, os.path.join(
data_dir, "onceml_config.json")),
export_path=data_dir,
version=str(training_channels["checkpoint"])):
logger.error("outputMar failed")
return None
        # rename the .mar file to include the checkpoint version
os.rename(os.path.join(data_dir, "{}.mar".format(
self.component_msg["model_name"])), os.path.join(data_dir, "{}-{}.mar".format(
self.component_msg["model_name"], str(training_channels["checkpoint"]))))
        # register the .mar file
if not registerModelJob(
url=os.path.abspath(
os.path.join(data_dir, "{}-{}.mar".format(self.component_msg["model_name"], str(training_channels["checkpoint"])))),
handler=get_handler_module()):
logger.error("register failed")
return None
state["model_checkpoint"] = training_channels["checkpoint"]
return None
@deprecated(reason="not use", version="0.0.1")
def register_model_timer(self, state, data_dir):
        '''Register a timer that re-registers the model periodically.
        The TorchServe process occasionally exits for no clear reason; the corresponding container in the pod then restarts automatically, so the framework in the main container has to re-register the model on a timer.
'''
def put_model():
            # keep trying to register the .mar file
while True:
                # first query whether the model is already registered
if not queryModelIsExist(self.component_msg["model_name"]):
mar_file = os.path.join(
data_dir, "{}-{}.mar".format(self.component_msg["model_name"], str(state["model_checkpoint"])))
if os.path.exists(mar_file):
if not registerModelJob(
url=os.path.abspath(mar_file),
handler=get_handler_module()
):
logger.error("register failed")
time.sleep(2)
        # create the timer thread
self.model_timer_thread = threading.Thread(target=put_model)
def pre_execute(self, state: State, params: dict, data_dir: str):
self.ensemble_models = params["ensemble_models"]
self.model_serving_cls: type = params["model_serving_cls"]
        # start the TorchServe (ts) serving process
# self.ts_process = run_ts_serving(
# TS_PROPERTIES_PATH, model_store=os.path.abspath(data_dir))
initial_mar_file = os.path.join(
data_dir,
"{}-{}.mar".format(self.component_msg["model_name"], str(state["model_checkpoint"])))
if os.path.exists(initial_mar_file):
            # register the existing .mar file with TorchServe
if not registerModelJob(url=initial_mar_file, handler=get_handler_module(), maxtry=10):
logger.error("register failed")
        # generate onceml_config.json, the runtime configuration for the handler
with open(os.path.join(data_dir, "onceml_config.json"), "w") as f:
f.write(objectDumps(generate_onceml_config_json(
working_dir=global_config.PROJECTDIR,
module=self.model_serving_cls.__module__,
cls_name=self.model_serving_cls.__name__,
task=self.component_msg["task_name"],
model=self.component_msg["model_name"],
project_name=self.component_msg["project"])))
pipeline_utils.update_pipeline_model_serving_component_id(
self.component_msg["project"],
self.component_msg["task_name"],
self.component_msg["model_name"],
self.component_msg['component_id'])
@deprecated(reason="not use", version="0.0.1")
def exit_execute(self):
"""结束时,也停止响应的ts serving进程
"""
if self.ts_process is not None:
self.ts_process.terminate()
self.ts_process.wait()
self.ts_process.kill()
@deprecated(reason="not use", version="0.0.1")
def _POST_predict(self, req_handler: BaseHTTPRequestHandler):
content_length = int(req_handler.headers['Content-Length'])
post_data = req_handler.rfile.read(content_length).decode(
'utf-8') # <--- Gets the data itself
'''todo:add ensemble
'''
logger.info("收到predict请求")
self.lock.acquire()
use_instance = self.model_serving_instance
self.lock.release()
logger.info("获得use_instance")
if use_instance is None:
req_handler.send_response(200)
req_handler.send_header('Content-type', 'application/json')
req_handler.end_headers()
req_handler.wfile.write("no available model".encode('utf-8'))
else:
res = self.model_serving_instance.serving(post_data, None)
req_handler.send_response(200)
req_handler.send_header('Content-type', 'application/json')
req_handler.end_headers()
req_handler.wfile.write(json.dumps(res).encode('utf-8'))
class CycleModelServing(BaseComponent):
def __init__(self, model_generator_component: BaseComponent, model_serving_cls, ensemble_models: list = [], **args):
"""部署模型
接收modelGenerator的更新的模型的消息,从而对部署的模型进行更新
"""
super().__init__(executor=_executor,
inputs=[model_generator_component],
model_serving_cls=model_serving_cls,
ensemble_models=ensemble_models, **args)
self.state = {
"model_checkpoint": -1, # 当前使用的模型的版本号(用模型的时间戳来辨别)
}
def extra_svc_port_internal(self) -> List[Tuple[str, str, int]]:
return [("ts", "TCP", TS_INFERENCE_PORT)]
def extra_pod_containers_internal(self) -> List[PodContainer]:
'''
        Register the TorchServe serving sidecar container
'''
frameworks = super().extra_pod_containers_internal()
ts = PodContainer("ts")
ts.command = ['python']
ts.args = ["-m", "onceml.thirdParty.PyTorchServing.initProcess",
"--model_store",
"{}".format(os.path.join(
global_config.OUTPUTSDIR,
self.artifact.url,
Component_Data_URL.ARTIFACTS.value))]
ts.SetReadinessProbe([str(TS_INFERENCE_PORT), "/ping"])
ts.SetLivenessProbe([str(TS_INFERENCE_PORT), "/ping"])
return frameworks+[ts]
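# Illustrative wiring sketch (a rough sketch; `MyModelGenerator` is assumed to be a
# model-generator component and `MyServing` a user subclass of the ModelServing template
# imported above -- both names are hypothetical):
#
#   generator = MyModelGenerator(...)
#   serving = CycleModelServing(model_generator_component=generator,
#                               model_serving_cls=MyServing)
#   # `serving` exposes the TorchServe inference port via extra_svc_port_internal() and
#   # runs TorchServe as the "ts" sidecar container returned above.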
|
artifacts.py
|
import hashlib
import json
import mimetypes
import os
from copy import deepcopy
from datetime import datetime
from multiprocessing import RLock, Event
from multiprocessing.pool import ThreadPool
from tempfile import mkdtemp, mkstemp
from threading import Thread
from time import time
from zipfile import ZipFile, ZIP_DEFLATED
import humanfriendly
import six
from PIL import Image
from pathlib2 import Path
from six.moves.urllib.parse import urlparse
from ..backend_api import Session
from ..backend_api.services import tasks
from ..backend_interface.metrics.events import UploadEvent
from ..debugging.log import LoggerRoot
from ..storage.helper import remote_driver_schemes
try:
import pandas as pd
except ImportError:
pd = None
try:
import numpy as np
except ImportError:
np = None
class Artifact(object):
"""
Read-Only Artifact object
"""
@property
def url(self):
"""
:return: url of uploaded artifact
"""
return self._url
@property
def name(self):
"""
:return: name of artifact
"""
return self._name
@property
def size(self):
"""
:return: size in bytes of artifact
"""
return self._size
@property
def type(self):
"""
        :return: type (str) of artifact
"""
return self._type
@property
def mode(self):
"""
        :return: mode (str) of artifact. either "input" or "output"
"""
return self._mode
@property
def hash(self):
"""
        :return: SHA2 hash (str) of artifact content.
"""
return self._hash
@property
def timestamp(self):
"""
:return: Timestamp (datetime) of uploaded artifact.
"""
return self._timestamp
@property
def metadata(self):
"""
:return: Key/Value dictionary attached to artifact.
"""
return self._metadata
@property
def preview(self):
"""
:return: string (str) representation of the artifact.
"""
return self._preview
def __init__(self, artifact_api_object):
"""
construct read-only object from api artifact object
:param tasks.Artifact artifact_api_object:
"""
self._name = artifact_api_object.key
self._size = artifact_api_object.content_size
self._type = artifact_api_object.type
self._mode = artifact_api_object.mode
self._url = artifact_api_object.uri
self._hash = artifact_api_object.hash
self._timestamp = datetime.fromtimestamp(artifact_api_object.timestamp)
self._metadata = dict(artifact_api_object.display_data) if artifact_api_object.display_data else {}
self._preview = artifact_api_object.type_data.preview if artifact_api_object.type_data else None
self._object = None
def get(self):
"""
Return an object constructed from the artifact file
Currently supported types: Numpy.array, pandas.DataFrame, PIL.Image, dict (json)
All other types will return a pathlib2.Path object pointing to a local copy of the artifacts file (or directory)
:return: One of the following objects Numpy.array, pandas.DataFrame, PIL.Image, dict (json), pathlib2.Path
"""
if self._object:
return self._object
local_file = self.get_local_copy()
if self.type == 'numpy' and np:
self._object = np.load(local_file)[self.name]
elif self.type in ('pandas', Artifacts._pd_artifact_type) and pd:
self._object = pd.read_csv(local_file)
elif self.type == 'image':
self._object = Image.open(local_file)
elif self.type == 'JSON':
with open(local_file, 'rt') as f:
self._object = json.load(f)
local_file = Path(local_file)
if self._object is None:
self._object = local_file
else:
from trains.storage.helper import StorageHelper
            # only if we are not using cache should we delete the file
if not hasattr(StorageHelper, 'get_cached_disabled'):
# delete the temporary file, we already used it
try:
local_file.unlink()
except Exception:
pass
return self._object
def get_local_copy(self, extract_archive=True):
"""
:param bool extract_archive: If True and artifact is of type 'archive' (compressed folder)
The returned path will be a temporary folder containing the archive content
:return: a local path to a downloaded copy of the artifact
"""
from trains.storage.helper import StorageHelper
local_path = StorageHelper.get_local_copy(self.url)
if local_path and extract_archive and self.type == 'archive':
try:
temp_folder = mkdtemp(prefix='artifact_', suffix='.archive_'+self.name)
ZipFile(local_path).extractall(path=temp_folder)
except Exception:
try:
Path(temp_folder).rmdir()
except Exception:
pass
return local_path
try:
Path(local_path).unlink()
except Exception:
pass
return temp_folder
return local_path
def __repr__(self):
return str({'name': self.name, 'size': self.size, 'type': self.type, 'mode': self.mode, 'url': self.url,
'hash': self.hash, 'timestamp': self.timestamp,
'metadata': self.metadata, 'preview': self.preview, })
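# Illustrative read-side sketch (a rough sketch, assuming `task` exposes its previously
# uploaded artifacts as a name -> Artifact mapping; 'training_data' is a hypothetical
# artifact name):
#
#   artifact = task.artifacts['training_data']
#   local_path = artifact.get_local_copy()   # download (and extract, for 'archive' artifacts)
#   obj = artifact.get()                     # numpy array / DataFrame / PIL.Image / dict, else a Path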
class Artifacts(object):
_flush_frequency_sec = 300.
# notice these two should match
_save_format = '.csv.gz'
_compression = 'gzip'
# hashing constants
_hash_block_size = 65536
_pd_artifact_type = 'data-audit-table'
class _ProxyDictWrite(dict):
""" Dictionary wrapper that updates an arguments instance on any item set in the dictionary """
def __init__(self, artifacts_manager, *args, **kwargs):
super(Artifacts._ProxyDictWrite, self).__init__(*args, **kwargs)
self._artifacts_manager = artifacts_manager
# list of artifacts we should not upload (by name & weak-reference)
self.artifact_metadata = {}
# list of hash columns to calculate uniqueness for the artifacts
self.artifact_hash_columns = {}
def __setitem__(self, key, value):
# check that value is of type pandas
if pd and isinstance(value, pd.DataFrame):
super(Artifacts._ProxyDictWrite, self).__setitem__(key, value)
if self._artifacts_manager:
self._artifacts_manager.flush()
else:
raise ValueError('Artifacts currently support pandas.DataFrame objects only')
def unregister_artifact(self, name):
self.artifact_metadata.pop(name, None)
self.pop(name, None)
def add_metadata(self, name, metadata):
self.artifact_metadata[name] = deepcopy(metadata)
def get_metadata(self, name):
return self.artifact_metadata.get(name)
def add_hash_columns(self, artifact_name, hash_columns):
self.artifact_hash_columns[artifact_name] = hash_columns
def get_hash_columns(self, artifact_name):
return self.artifact_hash_columns.get(artifact_name)
@property
def registered_artifacts(self):
return self._artifacts_container
@property
def summary(self):
return self._summary
def __init__(self, task):
self._task = task
        # notice the double link, this is important since the Artifact
# dictionary needs to signal the Artifacts base on changes
self._artifacts_container = self._ProxyDictWrite(self)
self._last_artifacts_upload = {}
self._unregister_request = set()
self._thread = None
self._flush_event = Event()
self._exit_flag = False
self._summary = ''
self._temp_folder = []
self._task_artifact_list = []
self._task_edit_lock = RLock()
self._storage_prefix = None
def register_artifact(self, name, artifact, metadata=None, uniqueness_columns=True):
"""
:param str name: name of the artifacts. Notice! it will override previous artifacts if name already exists.
:param pandas.DataFrame artifact: artifact object, supported artifacts object types: pandas.DataFrame
:param dict metadata: dictionary of key value to store with the artifact (visible in the UI)
:param list uniqueness_columns: list of columns for artifact uniqueness comparison criteria. The default value
is True, which equals to all the columns (same as artifact.columns).
"""
# currently we support pandas.DataFrame (which we will upload as csv.gz)
if name in self._artifacts_container:
LoggerRoot.get_base_logger().info('Register artifact, overwriting existing artifact \"{}\"'.format(name))
self._artifacts_container.add_hash_columns(name, list(artifact.columns if uniqueness_columns is True else uniqueness_columns))
self._artifacts_container[name] = artifact
if metadata:
self._artifacts_container.add_metadata(name, metadata)
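    # Illustrative sketch of register_artifact (a rough sketch; `artifacts_manager` is
    # assumed to be an Artifacts instance owned by a task, and the DataFrame below is
    # hypothetical):
    #
    #   df = pd.DataFrame({'id': [1, 2, 3], 'value': ['a', 'b', 'c']})
    #   artifacts_manager.register_artifact('samples', df,
    #                                       metadata={'source': 'demo'},
    #                                       uniqueness_columns=['id'])
    #   # the daemon thread started by flush()/_start() uploads it as csv.gz and uses
    #   # the 'id' column for the uniqueness statistics in the summary.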
def unregister_artifact(self, name):
# Remove artifact from the watch list
self._unregister_request.add(name)
self.flush()
def upload_artifact(self, name, artifact_object=None, metadata=None, delete_after_upload=False):
if not Session.check_min_api_version('2.3'):
LoggerRoot.get_base_logger().warning('Artifacts not supported by your TRAINS-server version, '
'please upgrade to the latest server version')
return False
if name in self._artifacts_container:
raise ValueError("Artifact by the name of {} is already registered, use register_artifact".format(name))
artifact_type_data = tasks.ArtifactTypeData()
override_filename_in_uri = None
override_filename_ext_in_uri = None
uri = None
if np and isinstance(artifact_object, np.ndarray):
artifact_type = 'numpy'
artifact_type_data.content_type = 'application/numpy'
artifact_type_data.preview = str(artifact_object.__repr__())
override_filename_ext_in_uri = '.npz'
override_filename_in_uri = name+override_filename_ext_in_uri
fd, local_filename = mkstemp(prefix=name+'.', suffix=override_filename_ext_in_uri)
os.close(fd)
np.savez_compressed(local_filename, **{name: artifact_object})
delete_after_upload = True
elif pd and isinstance(artifact_object, pd.DataFrame):
artifact_type = 'pandas'
artifact_type_data.content_type = 'text/csv'
artifact_type_data.preview = str(artifact_object.__repr__())
override_filename_ext_in_uri = self._save_format
override_filename_in_uri = name
fd, local_filename = mkstemp(prefix=name+'.', suffix=override_filename_ext_in_uri)
os.close(fd)
artifact_object.to_csv(local_filename, compression=self._compression)
delete_after_upload = True
elif isinstance(artifact_object, Image.Image):
artifact_type = 'image'
artifact_type_data.content_type = 'image/png'
desc = str(artifact_object.__repr__())
artifact_type_data.preview = desc[1:desc.find(' at ')]
override_filename_ext_in_uri = '.png'
override_filename_in_uri = name + override_filename_ext_in_uri
fd, local_filename = mkstemp(prefix=name+'.', suffix=override_filename_ext_in_uri)
os.close(fd)
artifact_object.save(local_filename)
delete_after_upload = True
elif isinstance(artifact_object, dict):
artifact_type = 'JSON'
artifact_type_data.content_type = 'application/json'
preview = json.dumps(artifact_object, sort_keys=True, indent=4)
override_filename_ext_in_uri = '.json'
override_filename_in_uri = name + override_filename_ext_in_uri
fd, local_filename = mkstemp(prefix=name+'.', suffix=override_filename_ext_in_uri)
os.write(fd, bytes(preview.encode()))
os.close(fd)
artifact_type_data.preview = preview
delete_after_upload = True
elif isinstance(artifact_object, six.string_types) and urlparse(artifact_object).scheme in remote_driver_schemes:
# we should not upload this, just register
local_filename = None
uri = artifact_object
artifact_type = 'custom'
artifact_type_data.content_type = mimetypes.guess_type(artifact_object)[0]
elif isinstance(artifact_object, six.string_types + (Path,)):
# check if single file
artifact_object = Path(artifact_object)
artifact_object.expanduser().absolute()
try:
create_zip_file = not artifact_object.is_file()
except Exception: # Hack for windows pathlib2 bug, is_file isn't valid.
create_zip_file = True
else: # We assume that this is not Windows os
if artifact_object.is_dir():
# change to wildcard
artifact_object /= '*'
if create_zip_file:
folder = Path('').joinpath(*artifact_object.parts[:-1])
if not folder.is_dir():
raise ValueError("Artifact file/folder '{}' could not be found".format(
artifact_object.as_posix()))
wildcard = artifact_object.parts[-1]
files = list(Path(folder).rglob(wildcard))
override_filename_ext_in_uri = '.zip'
override_filename_in_uri = folder.parts[-1] + override_filename_ext_in_uri
fd, zip_file = mkstemp(prefix=folder.parts[-1]+'.', suffix=override_filename_ext_in_uri)
try:
artifact_type_data.content_type = 'application/zip'
artifact_type_data.preview = 'Archive content {}:\n'.format(artifact_object.as_posix())
with ZipFile(zip_file, 'w', allowZip64=True, compression=ZIP_DEFLATED) as zf:
for filename in sorted(files):
if filename.is_file():
relative_file_name = filename.relative_to(folder).as_posix()
artifact_type_data.preview += '{} - {}\n'.format(
relative_file_name, humanfriendly.format_size(filename.stat().st_size))
zf.write(filename.as_posix(), arcname=relative_file_name)
except Exception as e:
# failed uploading folder:
LoggerRoot.get_base_logger().warning('Exception {}\nFailed zipping artifact folder {}'.format(
folder, e))
return None
finally:
os.close(fd)
artifact_object = zip_file
artifact_type = 'archive'
artifact_type_data.content_type = mimetypes.guess_type(artifact_object)[0]
local_filename = artifact_object
delete_after_upload = True
else:
if not artifact_object.is_file():
raise ValueError("Artifact file '{}' could not be found".format(artifact_object.as_posix()))
override_filename_in_uri = artifact_object.parts[-1]
artifact_object = artifact_object.as_posix()
artifact_type = 'custom'
artifact_type_data.content_type = mimetypes.guess_type(artifact_object)[0]
local_filename = artifact_object
else:
raise ValueError("Artifact type {} not supported".format(type(artifact_object)))
# remove from existing list, if exists
for artifact in self._task_artifact_list:
if artifact.key == name:
if artifact.type == self._pd_artifact_type:
raise ValueError("Artifact of name {} already registered, "
"use register_artifact instead".format(name))
self._task_artifact_list.remove(artifact)
break
if not local_filename:
file_size = None
file_hash = None
else:
# check that the file to upload exists
local_filename = Path(local_filename).absolute()
if not local_filename.exists() or not local_filename.is_file():
LoggerRoot.get_base_logger().warning('Artifact upload failed, cannot find file {}'.format(
local_filename.as_posix()))
return False
file_hash, _ = self.sha256sum(local_filename.as_posix())
file_size = local_filename.stat().st_size
uri = self._upload_local_file(local_filename, name,
delete_after_upload=delete_after_upload,
override_filename=override_filename_in_uri,
override_filename_ext=override_filename_ext_in_uri)
timestamp = int(time())
artifact = tasks.Artifact(key=name, type=artifact_type,
uri=uri,
content_size=file_size,
hash=file_hash,
timestamp=timestamp,
type_data=artifact_type_data,
display_data=[(str(k), str(v)) for k, v in metadata.items()] if metadata else None)
# update task artifacts
with self._task_edit_lock:
self._task_artifact_list.append(artifact)
self._task.set_artifacts(self._task_artifact_list)
return True
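    # Illustrative sketch of upload_artifact type handling (a rough sketch; the values are
    # hypothetical, and each call maps to one branch of the dispatch above):
    #
    #   artifacts_manager.upload_artifact('weights', np.zeros((3, 3)))        # -> .npz, type 'numpy'
    #   artifacts_manager.upload_artifact('table', pd.DataFrame({'a': [1]}))  # -> .csv.gz, type 'pandas'
    #   artifacts_manager.upload_artifact('config', {'lr': 0.01})             # -> .json, type 'JSON'
    #   artifacts_manager.upload_artifact('logs', '/tmp/run_output/')         # -> zipped folder, type 'archive'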
def flush(self):
# start the thread if it hasn't already:
self._start()
# flush the current state of all artifacts
self._flush_event.set()
def stop(self, wait=True):
# stop the daemon thread and quit
# wait until thread exists
self._exit_flag = True
self._flush_event.set()
if wait:
if self._thread:
self._thread.join()
# remove all temp folders
for f in self._temp_folder:
try:
Path(f).rmdir()
except Exception:
pass
def _start(self):
""" Start daemon thread if any artifacts are registered and thread is not up yet """
if not self._thread and self._artifacts_container:
# start the daemon thread
self._flush_event.clear()
self._thread = Thread(target=self._daemon)
self._thread.daemon = True
self._thread.start()
def _daemon(self):
while not self._exit_flag:
self._flush_event.wait(self._flush_frequency_sec)
self._flush_event.clear()
artifact_keys = list(self._artifacts_container.keys())
for name in artifact_keys:
try:
self._upload_data_audit_artifacts(name)
except Exception as e:
LoggerRoot.get_base_logger().warning(str(e))
# create summary
self._summary = self._get_statistics()
def _upload_data_audit_artifacts(self, name):
logger = self._task.get_logger()
pd_artifact = self._artifacts_container.get(name)
pd_metadata = self._artifacts_container.get_metadata(name)
# remove from artifacts watch list
if name in self._unregister_request:
try:
self._unregister_request.remove(name)
except KeyError:
pass
self._artifacts_container.unregister_artifact(name)
if pd_artifact is None:
return
override_filename_ext_in_uri = self._save_format
override_filename_in_uri = name
fd, local_csv = mkstemp(prefix=name + '.', suffix=override_filename_ext_in_uri)
os.close(fd)
local_csv = Path(local_csv)
pd_artifact.to_csv(local_csv.as_posix(), index=False, compression=self._compression)
current_sha2, file_sha2 = self.sha256sum(local_csv.as_posix(), skip_header=32)
if name in self._last_artifacts_upload:
previous_sha2 = self._last_artifacts_upload[name]
if previous_sha2 == current_sha2:
# nothing to do, we can skip the upload
try:
local_csv.unlink()
except Exception:
pass
return
self._last_artifacts_upload[name] = current_sha2
# If old trains-server, upload as debug image
if not Session.check_min_api_version('2.3'):
logger.report_image(title='artifacts', series=name, local_path=local_csv.as_posix(),
delete_after_upload=True, iteration=self._task.get_last_iteration(),
max_image_history=2)
return
# Find our artifact
artifact = None
for an_artifact in self._task_artifact_list:
if an_artifact.key == name:
artifact = an_artifact
break
file_size = local_csv.stat().st_size
# upload file
uri = self._upload_local_file(local_csv, name, delete_after_upload=True,
override_filename=override_filename_in_uri,
override_filename_ext=override_filename_ext_in_uri)
# update task artifacts
with self._task_edit_lock:
if not artifact:
artifact = tasks.Artifact(key=name, type=self._pd_artifact_type)
self._task_artifact_list.append(artifact)
artifact_type_data = tasks.ArtifactTypeData()
artifact_type_data.data_hash = current_sha2
artifact_type_data.content_type = "text/csv"
artifact_type_data.preview = str(pd_artifact.__repr__())+'\n\n'+self._get_statistics({name: pd_artifact})
artifact.type_data = artifact_type_data
artifact.uri = uri
artifact.content_size = file_size
artifact.hash = file_sha2
artifact.timestamp = int(time())
artifact.display_data = [(str(k), str(v)) for k, v in pd_metadata.items()] if pd_metadata else None
self._task.set_artifacts(self._task_artifact_list)
def _upload_local_file(self, local_file, name, delete_after_upload=False,
override_filename=None,
override_filename_ext=None):
"""
Upload local file and return uri of the uploaded file (uploading in the background)
"""
upload_uri = self._task.output_uri or self._task.get_logger().get_default_upload_destination()
if not isinstance(local_file, Path):
local_file = Path(local_file)
ev = UploadEvent(metric='artifacts', variant=name,
image_data=None, upload_uri=upload_uri,
local_image_path=local_file.as_posix(),
delete_after_upload=delete_after_upload,
override_filename=override_filename,
override_filename_ext=override_filename_ext,
override_storage_key_prefix=self._get_storage_uri_prefix())
_, uri = ev.get_target_full_upload_uri(upload_uri)
# send for upload
self._task.reporter._report(ev)
return uri
def _get_statistics(self, artifacts_dict=None):
summary = ''
artifacts_dict = artifacts_dict or self._artifacts_container
thread_pool = ThreadPool()
try:
# build hash row sets
artifacts_summary = []
for a_name, a_df in artifacts_dict.items():
hash_cols = self._artifacts_container.get_hash_columns(a_name)
if not pd or not isinstance(a_df, pd.DataFrame):
continue
if hash_cols is True:
hash_col_drop = []
else:
hash_cols = set(hash_cols)
missing_cols = hash_cols.difference(a_df.columns)
if missing_cols == hash_cols:
LoggerRoot.get_base_logger().warning(
'Uniqueness columns {} not found in artifact {}. '
'Skipping uniqueness check for artifact.'.format(list(missing_cols), a_name)
)
continue
elif missing_cols:
# missing_cols must be a subset of hash_cols
hash_cols.difference_update(missing_cols)
LoggerRoot.get_base_logger().warning(
'Uniqueness columns {} not found in artifact {}. Using {}.'.format(
list(missing_cols), a_name, list(hash_cols)
)
)
hash_col_drop = [col for col in a_df.columns if col not in hash_cols]
a_unique_hash = set()
def hash_row(r):
a_unique_hash.add(hash(bytes(r)))
a_shape = a_df.shape
# parallelize
a_hash_cols = a_df.drop(columns=hash_col_drop)
thread_pool.map(hash_row, a_hash_cols.values)
# add result
artifacts_summary.append((a_name, a_shape, a_unique_hash,))
# build intersection summary
for i, (name, shape, unique_hash) in enumerate(artifacts_summary):
summary += '[{name}]: shape={shape}, {unique} unique rows, {percentage:.1f}% uniqueness\n'.format(
name=name, shape=shape, unique=len(unique_hash), percentage=100*len(unique_hash)/float(shape[0]))
for name2, shape2, unique_hash2 in artifacts_summary[i+1:]:
intersection = len(unique_hash & unique_hash2)
summary += '\tIntersection with [{name2}] {intersection} rows: {percentage:.1f}%\n'.format(
name2=name2, intersection=intersection, percentage=100*intersection/float(len(unique_hash2)))
except Exception as e:
LoggerRoot.get_base_logger().warning(str(e))
finally:
thread_pool.close()
thread_pool.terminate()
return summary
def _get_temp_folder(self, force_new=False):
if force_new or not self._temp_folder:
new_temp = mkdtemp(prefix='artifacts_')
self._temp_folder.append(new_temp)
return new_temp
return self._temp_folder[0]
def _get_storage_uri_prefix(self):
if not self._storage_prefix:
self._storage_prefix = self._task._get_output_destination_suffix()
return self._storage_prefix
@staticmethod
def sha256sum(filename, skip_header=0):
# create sha2 of the file, notice we skip the header of the file (32 bytes)
# because sometimes that is the only change
h = hashlib.sha256()
file_hash = hashlib.sha256()
b = bytearray(Artifacts._hash_block_size)
mv = memoryview(b)
try:
with open(filename, 'rb', buffering=0) as f:
# skip header
if skip_header:
file_hash.update(f.read(skip_header))
for n in iter(lambda: f.readinto(mv), 0):
h.update(mv[:n])
if skip_header:
file_hash.update(mv[:n])
except Exception as e:
LoggerRoot.get_base_logger().warning(str(e))
return None, None
return h.hexdigest(), file_hash.hexdigest() if skip_header else None
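    # Illustrative sketch of sha256sum (a rough sketch; 'audit.csv.gz' is a hypothetical
    # local file such as the one written by _upload_data_audit_artifacts above):
    #
    #   content_hash, full_hash = Artifacts.sha256sum('audit.csv.gz', skip_header=32)
    #   # content_hash skips the first 32 bytes (the frequently changing header) and is
    #   # compared against the previous upload; full_hash covers the whole file and is
    #   # stored on the uploaded artifact. With skip_header=0 the second value is None.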
|
test_xmlrpc.py
|
import base64
import datetime
import decimal
import sys
import time
import unittest
from unittest import mock
import xmlrpc.client as xmlrpclib
import xmlrpc.server
import http.client
import http, http.server
import socket
import re
import io
import contextlib
from test import support
try:
import gzip
except ImportError:
gzip = None
try:
import threading
except ImportError:
threading = None
alist = [{'astring': 'foo@bar.baz.spam',
'afloat': 7283.43,
'anint': 2**20,
'ashortlong': 2,
'anotherlist': ['.zyx.41'],
'abase64': xmlrpclib.Binary(b"my dog has fleas"),
'b64bytes': b"my dog has fleas",
'b64bytearray': bytearray(b"my dog has fleas"),
'boolean': False,
'unicode': '\u4000\u6000\u8000',
'ukey\u4000': 'regular value',
'datetime1': xmlrpclib.DateTime('20050210T11:41:23'),
'datetime2': xmlrpclib.DateTime(
(2005, 2, 10, 11, 41, 23, 0, 1, -1)),
'datetime3': xmlrpclib.DateTime(
datetime.datetime(2005, 2, 10, 11, 41, 23)),
}]
class XMLRPCTestCase(unittest.TestCase):
def test_dump_load(self):
dump = xmlrpclib.dumps((alist,))
load = xmlrpclib.loads(dump)
self.assertEqual(alist, load[0][0])
def test_dump_bare_datetime(self):
# This checks that an unwrapped datetime.date object can be handled
# by the marshalling code. This can't be done via test_dump_load()
# since with use_builtin_types set to 1 the unmarshaller would create
# datetime objects for the 'datetime[123]' keys as well
dt = datetime.datetime(2005, 2, 10, 11, 41, 23)
self.assertEqual(dt, xmlrpclib.DateTime('20050210T11:41:23'))
s = xmlrpclib.dumps((dt,))
result, m = xmlrpclib.loads(s, use_builtin_types=True)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), datetime.datetime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_builtin_types=False)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), xmlrpclib.DateTime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_datetime=True)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), datetime.datetime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_datetime=False)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), xmlrpclib.DateTime)
self.assertIsNone(m)
def test_datetime_before_1900(self):
# same as before but with a date before 1900
dt = datetime.datetime(1, 2, 10, 11, 41, 23)
self.assertEqual(dt, xmlrpclib.DateTime('00010210T11:41:23'))
s = xmlrpclib.dumps((dt,))
result, m = xmlrpclib.loads(s, use_builtin_types=True)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), datetime.datetime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_builtin_types=False)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), xmlrpclib.DateTime)
self.assertIsNone(m)
def test_bug_1164912 (self):
d = xmlrpclib.DateTime()
((new_d,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((d,),
methodresponse=True))
self.assertIsInstance(new_d.value, str)
# Check that the output of dumps() is still an 8-bit string
s = xmlrpclib.dumps((new_d,), methodresponse=True)
self.assertIsInstance(s, str)
def test_newstyle_class(self):
class T(object):
pass
t = T()
t.x = 100
t.y = "Hello"
((t2,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((t,)))
self.assertEqual(t2, t.__dict__)
def test_dump_big_long(self):
self.assertRaises(OverflowError, xmlrpclib.dumps, (2**99,))
def test_dump_bad_dict(self):
self.assertRaises(TypeError, xmlrpclib.dumps, ({(1,2,3): 1},))
def test_dump_recursive_seq(self):
l = [1,2,3]
t = [3,4,5,l]
l.append(t)
self.assertRaises(TypeError, xmlrpclib.dumps, (l,))
def test_dump_recursive_dict(self):
d = {'1':1, '2':1}
t = {'3':3, 'd':d}
d['t'] = t
self.assertRaises(TypeError, xmlrpclib.dumps, (d,))
def test_dump_big_int(self):
if sys.maxsize > 2**31-1:
self.assertRaises(OverflowError, xmlrpclib.dumps,
(int(2**34),))
xmlrpclib.dumps((xmlrpclib.MAXINT, xmlrpclib.MININT))
self.assertRaises(OverflowError, xmlrpclib.dumps,
(xmlrpclib.MAXINT+1,))
self.assertRaises(OverflowError, xmlrpclib.dumps,
(xmlrpclib.MININT-1,))
def dummy_write(s):
pass
m = xmlrpclib.Marshaller()
m.dump_int(xmlrpclib.MAXINT, dummy_write)
m.dump_int(xmlrpclib.MININT, dummy_write)
self.assertRaises(OverflowError, m.dump_int,
xmlrpclib.MAXINT+1, dummy_write)
self.assertRaises(OverflowError, m.dump_int,
xmlrpclib.MININT-1, dummy_write)
def test_dump_double(self):
xmlrpclib.dumps((float(2 ** 34),))
xmlrpclib.dumps((float(xmlrpclib.MAXINT),
float(xmlrpclib.MININT)))
xmlrpclib.dumps((float(xmlrpclib.MAXINT + 42),
float(xmlrpclib.MININT - 42)))
def dummy_write(s):
pass
m = xmlrpclib.Marshaller()
m.dump_double(xmlrpclib.MAXINT, dummy_write)
m.dump_double(xmlrpclib.MININT, dummy_write)
m.dump_double(xmlrpclib.MAXINT + 42, dummy_write)
m.dump_double(xmlrpclib.MININT - 42, dummy_write)
def test_dump_none(self):
value = alist + [None]
arg1 = (alist + [None],)
strg = xmlrpclib.dumps(arg1, allow_none=True)
self.assertEqual(value,
xmlrpclib.loads(strg)[0][0])
self.assertRaises(TypeError, xmlrpclib.dumps, (arg1,))
def test_dump_encoding(self):
value = {'key\u20ac\xa4':
'value\u20ac\xa4'}
strg = xmlrpclib.dumps((value,), encoding='iso-8859-15')
strg = "<?xml version='1.0' encoding='iso-8859-15'?>" + strg
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
strg = strg.encode('iso-8859-15', 'xmlcharrefreplace')
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
strg = xmlrpclib.dumps((value,), encoding='iso-8859-15',
methodresponse=True)
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
strg = strg.encode('iso-8859-15', 'xmlcharrefreplace')
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
methodname = 'method\u20ac\xa4'
strg = xmlrpclib.dumps((value,), encoding='iso-8859-15',
methodname=methodname)
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
self.assertEqual(xmlrpclib.loads(strg)[1], methodname)
def test_dump_bytes(self):
sample = b"my dog has fleas"
self.assertEqual(sample, xmlrpclib.Binary(sample))
for type_ in bytes, bytearray, xmlrpclib.Binary:
value = type_(sample)
s = xmlrpclib.dumps((value,))
result, m = xmlrpclib.loads(s, use_builtin_types=True)
(newvalue,) = result
self.assertEqual(newvalue, sample)
self.assertIs(type(newvalue), bytes)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_builtin_types=False)
(newvalue,) = result
self.assertEqual(newvalue, sample)
self.assertIs(type(newvalue), xmlrpclib.Binary)
self.assertIsNone(m)
def test_loads_unsupported(self):
ResponseError = xmlrpclib.ResponseError
data = '<params><param><value><spam/></value></param></params>'
self.assertRaises(ResponseError, xmlrpclib.loads, data)
data = ('<params><param><value><array>'
'<value><spam/></value>'
'</array></value></param></params>')
self.assertRaises(ResponseError, xmlrpclib.loads, data)
data = ('<params><param><value><struct>'
'<member><name>a</name><value><spam/></value></member>'
'<member><name>b</name><value><spam/></value></member>'
'</struct></value></param></params>')
self.assertRaises(ResponseError, xmlrpclib.loads, data)
def check_loads(self, s, value, **kwargs):
dump = '<params><param><value>%s</value></param></params>' % s
result, m = xmlrpclib.loads(dump, **kwargs)
(newvalue,) = result
self.assertEqual(newvalue, value)
self.assertIs(type(newvalue), type(value))
self.assertIsNone(m)
def test_load_standard_types(self):
check = self.check_loads
check('string', 'string')
check('<string>string</string>', 'string')
check('<string>𝔘𝔫𝔦𝔠𝔬𝔡𝔢 string</string>', '𝔘𝔫𝔦𝔠𝔬𝔡𝔢 string')
check('<int>2056183947</int>', 2056183947)
check('<int>-2056183947</int>', -2056183947)
check('<i4>2056183947</i4>', 2056183947)
check('<double>46093.78125</double>', 46093.78125)
check('<boolean>0</boolean>', False)
check('<base64>AGJ5dGUgc3RyaW5n/w==</base64>',
xmlrpclib.Binary(b'\x00byte string\xff'))
check('<base64>AGJ5dGUgc3RyaW5n/w==</base64>',
b'\x00byte string\xff', use_builtin_types=True)
check('<dateTime.iso8601>20050210T11:41:23</dateTime.iso8601>',
xmlrpclib.DateTime('20050210T11:41:23'))
check('<dateTime.iso8601>20050210T11:41:23</dateTime.iso8601>',
datetime.datetime(2005, 2, 10, 11, 41, 23),
use_builtin_types=True)
check('<array><data>'
'<value><int>1</int></value><value><int>2</int></value>'
'</data></array>', [1, 2])
check('<struct>'
'<member><name>b</name><value><int>2</int></value></member>'
'<member><name>a</name><value><int>1</int></value></member>'
'</struct>', {'a': 1, 'b': 2})
def test_load_extension_types(self):
check = self.check_loads
check('<nil/>', None)
check('<ex:nil/>', None)
check('<i1>205</i1>', 205)
check('<i2>20561</i2>', 20561)
check('<i8>9876543210</i8>', 9876543210)
check('<biginteger>98765432100123456789</biginteger>',
98765432100123456789)
check('<float>93.78125</float>', 93.78125)
check('<bigdecimal>9876543210.0123456789</bigdecimal>',
decimal.Decimal('9876543210.0123456789'))
def test_get_host_info(self):
# see bug #3613, this raised a TypeError
transp = xmlrpc.client.Transport()
self.assertEqual(transp.get_host_info("user@host.tld"),
('host.tld',
[('Authorization', 'Basic dXNlcg==')], {}))
def test_ssl_presence(self):
try:
import ssl
except ImportError:
has_ssl = False
else:
has_ssl = True
try:
xmlrpc.client.ServerProxy('https://localhost:9999').bad_function()
except NotImplementedError:
self.assertFalse(has_ssl, "xmlrpc client's error with SSL support")
except OSError:
self.assertTrue(has_ssl)
@unittest.skipUnless(threading, "Threading required for this test.")
def test_keepalive_disconnect(self):
class RequestHandler(http.server.BaseHTTPRequestHandler):
protocol_version = "HTTP/1.1"
handled = False
def do_POST(self):
length = int(self.headers.get("Content-Length"))
self.rfile.read(length)
if self.handled:
self.close_connection = True
return
response = xmlrpclib.dumps((5,), methodresponse=True)
response = response.encode()
self.send_response(http.HTTPStatus.OK)
self.send_header("Content-Length", len(response))
self.end_headers()
self.wfile.write(response)
self.handled = True
self.close_connection = False
def run_server():
server.socket.settimeout(float(1)) # Don't hang if client fails
server.handle_request() # First request and attempt at second
server.handle_request() # Retried second request
server = http.server.HTTPServer((support.HOST, 0), RequestHandler)
self.addCleanup(server.server_close)
thread = threading.Thread(target=run_server)
thread.start()
self.addCleanup(thread.join)
url = "http://{}:{}/".format(*server.server_address)
with xmlrpclib.ServerProxy(url) as p:
self.assertEqual(p.method(), 5)
self.assertEqual(p.method(), 5)
class SimpleXMLRPCDispatcherTestCase(unittest.TestCase):
class DispatchExc(Exception):
"""Raised inside the dispatched functions when checking for
chained exceptions"""
def test_call_registered_func(self):
"""Calls explicitly registered function"""
# Makes sure any exception raised inside the function has no other
# exception chained to it
exp_params = 1, 2, 3
def dispatched_func(*params):
raise self.DispatchExc(params)
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_function(dispatched_func)
with self.assertRaises(self.DispatchExc) as exc_ctx:
dispatcher._dispatch('dispatched_func', exp_params)
self.assertEqual(exc_ctx.exception.args, (exp_params,))
self.assertIsNone(exc_ctx.exception.__cause__)
self.assertIsNone(exc_ctx.exception.__context__)
def test_call_instance_func(self):
"""Calls a registered instance attribute as a function"""
# Makes sure any exception raised inside the function has no other
# exception chained to it
exp_params = 1, 2, 3
class DispatchedClass:
def dispatched_func(self, *params):
raise SimpleXMLRPCDispatcherTestCase.DispatchExc(params)
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_instance(DispatchedClass())
with self.assertRaises(self.DispatchExc) as exc_ctx:
dispatcher._dispatch('dispatched_func', exp_params)
self.assertEqual(exc_ctx.exception.args, (exp_params,))
self.assertIsNone(exc_ctx.exception.__cause__)
self.assertIsNone(exc_ctx.exception.__context__)
def test_call_dispatch_func(self):
"""Calls the registered instance's `_dispatch` function"""
# Makes sure any exception raised inside the function has no other
# exception chained to it
exp_method = 'method'
exp_params = 1, 2, 3
class TestInstance:
def _dispatch(self, method, params):
raise SimpleXMLRPCDispatcherTestCase.DispatchExc(
method, params)
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_instance(TestInstance())
with self.assertRaises(self.DispatchExc) as exc_ctx:
dispatcher._dispatch(exp_method, exp_params)
self.assertEqual(exc_ctx.exception.args, (exp_method, exp_params))
self.assertIsNone(exc_ctx.exception.__cause__)
self.assertIsNone(exc_ctx.exception.__context__)
def test_registered_func_is_none(self):
"""Calls explicitly registered function which is None"""
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_function(None, name='method')
with self.assertRaisesRegex(Exception, 'method'):
dispatcher._dispatch('method', ('param',))
def test_instance_has_no_func(self):
"""Attempts to call nonexistent function on a registered instance"""
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_instance(object())
with self.assertRaisesRegex(Exception, 'method'):
dispatcher._dispatch('method', ('param',))
def test_cannot_locate_func(self):
"""Calls a function that the dispatcher cannot locate"""
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
with self.assertRaisesRegex(Exception, 'method'):
dispatcher._dispatch('method', ('param',))
class HelperTestCase(unittest.TestCase):
def test_escape(self):
self.assertEqual(xmlrpclib.escape("a&b"), "a&b")
self.assertEqual(xmlrpclib.escape("a<b"), "a<b")
self.assertEqual(xmlrpclib.escape("a>b"), "a>b")
class FaultTestCase(unittest.TestCase):
def test_repr(self):
f = xmlrpclib.Fault(42, 'Test Fault')
self.assertEqual(repr(f), "<Fault 42: 'Test Fault'>")
self.assertEqual(repr(f), str(f))
def test_dump_fault(self):
f = xmlrpclib.Fault(42, 'Test Fault')
s = xmlrpclib.dumps((f,))
(newf,), m = xmlrpclib.loads(s)
self.assertEqual(newf, {'faultCode': 42, 'faultString': 'Test Fault'})
self.assertEqual(m, None)
s = xmlrpclib.Marshaller().dumps(f)
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, s)
def test_dotted_attribute(self):
        # this will raise AttributeError because the code doesn't want us to
        # use private methods
self.assertRaises(AttributeError,
xmlrpc.server.resolve_dotted_attribute, str, '__add')
self.assertTrue(xmlrpc.server.resolve_dotted_attribute(str, 'title'))
class DateTimeTestCase(unittest.TestCase):
def test_default(self):
with mock.patch('time.localtime') as localtime_mock:
time_struct = time.struct_time(
[2013, 7, 15, 0, 24, 49, 0, 196, 0])
localtime_mock.return_value = time_struct
localtime = time.localtime()
t = xmlrpclib.DateTime()
self.assertEqual(str(t),
time.strftime("%Y%m%dT%H:%M:%S", localtime))
def test_time(self):
d = 1181399930.036952
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t),
time.strftime("%Y%m%dT%H:%M:%S", time.localtime(d)))
def test_time_tuple(self):
d = (2007,6,9,10,38,50,5,160,0)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070609T10:38:50')
def test_time_struct(self):
d = time.localtime(1181399930.036952)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", d))
def test_datetime_datetime(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070102T03:04:05')
def test_repr(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
val ="<DateTime '20070102T03:04:05' at %#x>" % id(t)
self.assertEqual(repr(t), val)
def test_decode(self):
d = ' 20070908T07:11:13 '
t1 = xmlrpclib.DateTime()
t1.decode(d)
tref = xmlrpclib.DateTime(datetime.datetime(2007,9,8,7,11,13))
self.assertEqual(t1, tref)
t2 = xmlrpclib._datetime(d)
self.assertEqual(t2, tref)
def test_comparison(self):
now = datetime.datetime.now()
dtime = xmlrpclib.DateTime(now.timetuple())
# datetime vs. DateTime
self.assertTrue(dtime == now)
self.assertTrue(now == dtime)
then = now + datetime.timedelta(seconds=4)
self.assertTrue(then >= dtime)
self.assertTrue(dtime < then)
# str vs. DateTime
dstr = now.strftime("%Y%m%dT%H:%M:%S")
self.assertTrue(dtime == dstr)
self.assertTrue(dstr == dtime)
dtime_then = xmlrpclib.DateTime(then.timetuple())
self.assertTrue(dtime_then >= dstr)
self.assertTrue(dstr < dtime_then)
# some other types
dbytes = dstr.encode('ascii')
dtuple = now.timetuple()
with self.assertRaises(TypeError):
dtime == 1970
with self.assertRaises(TypeError):
dtime != dbytes
with self.assertRaises(TypeError):
dtime == bytearray(dbytes)
with self.assertRaises(TypeError):
dtime != dtuple
with self.assertRaises(TypeError):
dtime < float(1970)
with self.assertRaises(TypeError):
dtime > dbytes
with self.assertRaises(TypeError):
dtime <= bytearray(dbytes)
with self.assertRaises(TypeError):
dtime >= dtuple
class BinaryTestCase(unittest.TestCase):
    # XXX What should str(Binary(b"\xff")) return? I'm choosing "\xff"
# for now (i.e. interpreting the binary data as Latin-1-encoded
# text). But this feels very unsatisfactory. Perhaps we should
# only define repr(), and return r"Binary(b'\xff')" instead?
def test_default(self):
t = xmlrpclib.Binary()
self.assertEqual(str(t), '')
def test_string(self):
d = b'\x01\x02\x03abc123\xff\xfe'
t = xmlrpclib.Binary(d)
self.assertEqual(str(t), str(d, "latin-1"))
def test_decode(self):
d = b'\x01\x02\x03abc123\xff\xfe'
de = base64.encodebytes(d)
t1 = xmlrpclib.Binary()
t1.decode(de)
self.assertEqual(str(t1), str(d, "latin-1"))
t2 = xmlrpclib._binary(de)
self.assertEqual(str(t2), str(d, "latin-1"))
ADDR = PORT = URL = None
# The evt is set twice. First when the server is ready to serve.
# Second when the server has been shut down. The user must clear
# the event after it has been set the first time to catch the second set.
# (A usage sketch follows the http_server function below.)
def http_server(evt, numrequests, requestHandler=None, encoding=None):
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
class Fixture:
@staticmethod
def getData():
return '42'
class MyXMLRPCServer(xmlrpc.server.SimpleXMLRPCServer):
def get_request(self):
            # Ensure the socket is always blocking. On Linux, socket
            # attributes are not inherited like they are on *BSD and Windows.
s, port = self.socket.accept()
s.setblocking(True)
return s, port
if not requestHandler:
requestHandler = xmlrpc.server.SimpleXMLRPCRequestHandler
serv = MyXMLRPCServer(("localhost", 0), requestHandler,
encoding=encoding,
logRequests=False, bind_and_activate=False)
try:
serv.server_bind()
global ADDR, PORT, URL
ADDR, PORT = serv.socket.getsockname()
#connect to IP address directly. This avoids socket.create_connection()
#trying to connect to "localhost" using all address families, which
#causes slowdown e.g. on vista which supports AF_INET6. The server listens
#on AF_INET only.
URL = "http://%s:%d"%(ADDR, PORT)
serv.server_activate()
serv.register_introspection_functions()
serv.register_multicall_functions()
serv.register_function(pow)
serv.register_function(lambda x: x, 'têšt')
@serv.register_function
def my_function():
'''This is my function'''
return True
@serv.register_function(name='add')
def _(x, y):
return x + y
testInstance = TestInstanceClass()
serv.register_instance(testInstance, allow_dotted_names=True)
evt.set()
# handle up to 'numrequests' requests
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
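# A minimal, hedged usage sketch (not part of the test suite) of the double-set
# protocol described in the comment above http_server: the same event signals
# "server ready" and then "server shut down", so it must be cleared in between.
def _http_server_usage_sketch():
    evt = threading.Event()
    threading.Thread(target=http_server, args=(evt, 1)).start()
    evt.wait()               # first set: the server is ready and URL is populated
    evt.clear()              # clear so the shutdown set can be observed
    xmlrpclib.ServerProxy(URL).system.listMethods()  # consume the single request
    evt.wait()               # second set: the server thread has finished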
def http_multi_server(evt, numrequests, requestHandler=None):
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
def my_function():
'''This is my function'''
return True
class MyXMLRPCServer(xmlrpc.server.MultiPathXMLRPCServer):
def get_request(self):
            # Ensure the socket is always blocking. On Linux, socket
            # attributes are not inherited like they are on *BSD and Windows.
s, port = self.socket.accept()
s.setblocking(True)
return s, port
if not requestHandler:
requestHandler = xmlrpc.server.SimpleXMLRPCRequestHandler
class MyRequestHandler(requestHandler):
rpc_paths = []
class BrokenDispatcher:
def _marshaled_dispatch(self, data, dispatch_method=None, path=None):
raise RuntimeError("broken dispatcher")
serv = MyXMLRPCServer(("localhost", 0), MyRequestHandler,
logRequests=False, bind_and_activate=False)
serv.socket.settimeout(3)
serv.server_bind()
try:
global ADDR, PORT, URL
ADDR, PORT = serv.socket.getsockname()
#connect to IP address directly. This avoids socket.create_connection()
#trying to connect to "localhost" using all address families, which
#causes slowdown e.g. on vista which supports AF_INET6. The server listens
#on AF_INET only.
URL = "http://%s:%d"%(ADDR, PORT)
serv.server_activate()
paths = ["/foo", "/foo/bar"]
for path in paths:
d = serv.add_dispatcher(path, xmlrpc.server.SimpleXMLRPCDispatcher())
d.register_introspection_functions()
d.register_multicall_functions()
serv.get_dispatcher(paths[0]).register_function(pow)
serv.get_dispatcher(paths[1]).register_function(lambda x,y: x+y, 'add')
serv.add_dispatcher("/is/broken", BrokenDispatcher())
evt.set()
# handle up to 'numrequests' requests
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
# This function prevents errors like:
# <ProtocolError for localhost:57527/RPC2: 500 Internal Server Error>
def is_unavailable_exception(e):
'''Returns True if the given ProtocolError is the product of a server-side
exception caused by the 'temporarily unavailable' response sometimes
given by operations on non-blocking sockets.'''
# sometimes we get a -1 error code and/or empty headers
try:
if e.errcode == -1 or e.headers is None:
return True
exc_mess = e.headers.get('X-exception')
except AttributeError:
# Ignore OSErrors here.
exc_mess = str(e)
if exc_mess and 'temporarily unavailable' in exc_mess.lower():
return True
def make_request_and_skipIf(condition, reason):
# If we skip the test, we have to make a request because
# the server created in setUp blocks expecting one to come in.
if not condition:
return lambda func: func
def decorator(func):
def make_request_and_skip(self):
try:
xmlrpclib.ServerProxy(URL).my_function()
except (xmlrpclib.ProtocolError, OSError) as e:
if not is_unavailable_exception(e):
raise
raise unittest.SkipTest(reason)
return make_request_and_skip
return decorator
@unittest.skipUnless(threading, 'Threading required for this test.')
class BaseServerTestCase(unittest.TestCase):
requestHandler = None
request_count = 1
threadFunc = staticmethod(http_server)
def setUp(self):
# enable traceback reporting
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, self.request_count, self.requestHandler)
threading.Thread(target=self.threadFunc, args=serv_args).start()
# wait for the server to be ready
self.evt.wait()
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait()
# disable traceback reporting
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = False
class SimpleServerTestCase(BaseServerTestCase):
def test_simple1(self):
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_nonascii(self):
start_string = 'P\N{LATIN SMALL LETTER Y WITH CIRCUMFLEX}t'
end_string = 'h\N{LATIN SMALL LETTER O WITH HORN}n'
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_client_encoding(self):
start_string = '\u20ac'
end_string = '\xa4'
try:
p = xmlrpclib.ServerProxy(URL, encoding='iso-8859-15')
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_nonascii_methodname(self):
try:
p = xmlrpclib.ServerProxy(URL, encoding='ascii')
self.assertEqual(p.têšt(42), 42)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
# [ch] The test 404 is causing lots of false alarms.
def XXXtest_404(self):
# send POST with http.client, it should return 404 header and
# 'Not Found' message.
conn = httplib.client.HTTPConnection(ADDR, PORT)
conn.request('POST', '/this-is-not-valid')
response = conn.getresponse()
conn.close()
self.assertEqual(response.status, 404)
self.assertEqual(response.reason, 'Not Found')
def test_introspection1(self):
expected_methods = set(['pow', 'div', 'my_function', 'add', 'têšt',
'system.listMethods', 'system.methodHelp',
'system.methodSignature', 'system.multicall',
'Fixture'])
try:
p = xmlrpclib.ServerProxy(URL)
meth = p.system.listMethods()
self.assertEqual(set(meth), expected_methods)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection2(self):
try:
# test _methodHelp()
p = xmlrpclib.ServerProxy(URL)
divhelp = p.system.methodHelp('div')
self.assertEqual(divhelp, 'This is the div function')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_introspection3(self):
try:
# test native doc
p = xmlrpclib.ServerProxy(URL)
myfunction = p.system.methodHelp('my_function')
self.assertEqual(myfunction, 'This is my function')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection4(self):
# the SimpleXMLRPCServer doesn't support signatures, but
# at least check that we can try making the call
try:
p = xmlrpclib.ServerProxy(URL)
divsig = p.system.methodSignature('div')
self.assertEqual(divsig, 'signatures not supported')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.add(2,3)
multicall.pow(6,8)
multicall.div(127,42)
add_result, pow_result, div_result = multicall()
self.assertEqual(add_result, 2+3)
self.assertEqual(pow_result, 6**8)
self.assertEqual(div_result, 127//42)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_non_existing_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.this_is_not_exists()
result = multicall()
            # result.results contains:
            # [{'faultCode': 1, 'faultString': '<class \'Exception\'>:'
            #   'method "this_is_not_exists" is not supported'}]
self.assertEqual(result.results[0]['faultCode'], 1)
self.assertEqual(result.results[0]['faultString'],
'<class \'Exception\'>:method "this_is_not_exists" '
'is not supported')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_dotted_attribute(self):
# Raises an AttributeError because private methods are not allowed.
self.assertRaises(AttributeError,
xmlrpc.server.resolve_dotted_attribute, str, '__add')
self.assertTrue(xmlrpc.server.resolve_dotted_attribute(str, 'title'))
# Get the test to run faster by sending a request with test_simple1.
# This avoids waiting for the socket timeout.
self.test_simple1()
def test_allow_dotted_names_true(self):
        # XXX also need an allow_dotted_names_false test (a hedged sketch
        # follows this method).
server = xmlrpclib.ServerProxy("http://%s:%d/RPC2" % (ADDR, PORT))
data = server.Fixture.getData()
self.assertEqual(data, '42')
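    # A hedged sketch (not from the original suite) of the missing
    # allow_dotted_names=False case noted above: with dotted names disallowed,
    # 'Fixture.getData' is looked up as a single attribute name, so the
    # dispatcher reports the method as unsupported. The leading underscore
    # keeps the test runner from collecting it.
    def _sketch_allow_dotted_names_false(self):
        class Root:
            class Fixture:
                @staticmethod
                def getData():
                    return '42'
        dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
        dispatcher.register_instance(Root())  # allow_dotted_names defaults to False
        with self.assertRaisesRegex(Exception, 'not supported'):
            dispatcher._dispatch('Fixture.getData', ())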
def test_unicode_host(self):
server = xmlrpclib.ServerProxy("http://%s:%d/RPC2" % (ADDR, PORT))
self.assertEqual(server.add("a", "\xe9"), "a\xe9")
def test_partial_post(self):
# Check that a partial POST doesn't make the server loop: issue #14001.
conn = http.client.HTTPConnection(ADDR, PORT)
conn.request('POST', '/RPC2 HTTP/1.0\r\nContent-Length: 100\r\n\r\nbye')
conn.close()
def test_context_manager(self):
with xmlrpclib.ServerProxy(URL) as server:
server.add(2, 3)
self.assertNotEqual(server('transport')._connection,
(None, None))
self.assertEqual(server('transport')._connection,
(None, None))
def test_context_manager_method_error(self):
try:
with xmlrpclib.ServerProxy(URL) as server:
server.add(2, "a")
except xmlrpclib.Fault:
pass
self.assertEqual(server('transport')._connection,
(None, None))
class SimpleServerEncodingTestCase(BaseServerTestCase):
@staticmethod
def threadFunc(evt, numrequests, requestHandler=None, encoding=None):
http_server(evt, numrequests, requestHandler, 'iso-8859-15')
def test_server_encoding(self):
start_string = '\u20ac'
end_string = '\xa4'
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
class MultiPathServerTestCase(BaseServerTestCase):
threadFunc = staticmethod(http_multi_server)
request_count = 2
def test_path1(self):
p = xmlrpclib.ServerProxy(URL+"/foo")
self.assertEqual(p.pow(6,8), 6**8)
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
def test_path2(self):
p = xmlrpclib.ServerProxy(URL+"/foo/bar")
self.assertEqual(p.add(6,8), 6+8)
self.assertRaises(xmlrpclib.Fault, p.pow, 6, 8)
def test_path3(self):
p = xmlrpclib.ServerProxy(URL+"/is/broken")
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class BaseKeepaliveServerTestCase(BaseServerTestCase):
#a request handler that supports keep-alive and logs requests into a
#class variable
class RequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
parentClass = xmlrpc.server.SimpleXMLRPCRequestHandler
protocol_version = 'HTTP/1.1'
myRequests = []
def handle(self):
self.myRequests.append([])
self.reqidx = len(self.myRequests)-1
return self.parentClass.handle(self)
def handle_one_request(self):
result = self.parentClass.handle_one_request(self)
self.myRequests[self.reqidx].append(self.raw_requestline)
return result
requestHandler = RequestHandler
def setUp(self):
#clear request log
self.RequestHandler.myRequests = []
return BaseServerTestCase.setUp(self)
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class KeepaliveServerTestCase1(BaseKeepaliveServerTestCase):
def test_two(self):
p = xmlrpclib.ServerProxy(URL)
#do three requests.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
#they should have all been handled by a single request handler
self.assertEqual(len(self.RequestHandler.myRequests), 1)
        #check that we did at least two (the append for the third may still be
        #pending due to thread scheduling)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
#test special attribute access on the serverproxy, through the __call__
#function.
class KeepaliveServerTestCase2(BaseKeepaliveServerTestCase):
#ask for two keepalive requests to be handled.
request_count=2
def test_close(self):
p = xmlrpclib.ServerProxy(URL)
#do some requests with close.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")() #this should trigger a new keep-alive request
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
        #they should all have been handled by two request handlers, each having
        #logged at least two complete requests
self.assertEqual(len(self.RequestHandler.myRequests), 2)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-2]), 2)
def test_transport(self):
p = xmlrpclib.ServerProxy(URL)
#do some requests with close.
self.assertEqual(p.pow(6,8), 6**8)
p("transport").close() #same as above, really.
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
self.assertEqual(len(self.RequestHandler.myRequests), 2)
#A test case that verifies that gzip encoding works in both directions
#(for a request and the response)
@unittest.skipIf(gzip is None, 'requires gzip')
class GzipServerTestCase(BaseServerTestCase):
    #a request handler that supports keep-alive and stores the content length
    #of the last request in a class variable
class RequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
parentClass = xmlrpc.server.SimpleXMLRPCRequestHandler
protocol_version = 'HTTP/1.1'
def do_POST(self):
#store content of last request in class
self.__class__.content_length = int(self.headers["content-length"])
return self.parentClass.do_POST(self)
requestHandler = RequestHandler
class Transport(xmlrpclib.Transport):
#custom transport, stores the response length for our perusal
fake_gzip = False
def parse_response(self, response):
self.response_length=int(response.getheader("content-length", 0))
return xmlrpclib.Transport.parse_response(self, response)
def send_content(self, connection, body):
if self.fake_gzip:
#add a lone gzip header to induce decode error remotely
connection.putheader("Content-Encoding", "gzip")
return xmlrpclib.Transport.send_content(self, connection, body)
def setUp(self):
BaseServerTestCase.setUp(self)
def test_gzip_request(self):
t = self.Transport()
t.encode_threshold = None
p = xmlrpclib.ServerProxy(URL, transport=t)
self.assertEqual(p.pow(6,8), 6**8)
a = self.RequestHandler.content_length
t.encode_threshold = 0 #turn on request encoding
self.assertEqual(p.pow(6,8), 6**8)
b = self.RequestHandler.content_length
self.assertTrue(a>b)
p("close")()
def test_bad_gzip_request(self):
t = self.Transport()
t.encode_threshold = None
t.fake_gzip = True
p = xmlrpclib.ServerProxy(URL, transport=t)
cm = self.assertRaisesRegex(xmlrpclib.ProtocolError,
re.compile(r"\b400\b"))
with cm:
p.pow(6, 8)
p("close")()
def test_gzip_response(self):
t = self.Transport()
p = xmlrpclib.ServerProxy(URL, transport=t)
old = self.requestHandler.encode_threshold
self.requestHandler.encode_threshold = None #no encoding
self.assertEqual(p.pow(6,8), 6**8)
a = t.response_length
self.requestHandler.encode_threshold = 0 #always encode
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
b = t.response_length
self.requestHandler.encode_threshold = old
self.assertTrue(a>b)
@unittest.skipIf(gzip is None, 'requires gzip')
class GzipUtilTestCase(unittest.TestCase):
def test_gzip_decode_limit(self):
max_gzip_decode = 20 * 1024 * 1024
data = b'\0' * max_gzip_decode
encoded = xmlrpclib.gzip_encode(data)
decoded = xmlrpclib.gzip_decode(encoded)
self.assertEqual(len(decoded), max_gzip_decode)
data = b'\0' * (max_gzip_decode + 1)
encoded = xmlrpclib.gzip_encode(data)
with self.assertRaisesRegex(ValueError,
"max gzipped payload length exceeded"):
xmlrpclib.gzip_decode(encoded)
xmlrpclib.gzip_decode(encoded, max_decode=-1)
#Test special attributes of the ServerProxy object
class ServerProxyTestCase(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
if threading:
self.url = URL
else:
            # Without threading, http_server() and http_multi_server() will not
            # be executed and URL is still equal to None. 'http://' is just
            # enough to choose the scheme (HTTP).
self.url = 'http://'
def test_close(self):
p = xmlrpclib.ServerProxy(self.url)
self.assertEqual(p('close')(), None)
def test_transport(self):
t = xmlrpclib.Transport()
p = xmlrpclib.ServerProxy(self.url, transport=t)
self.assertEqual(p('transport'), t)
# This is a contrived way to make a failure occur on the server side
# in order to test the _send_traceback_header flag on the server
class FailingMessageClass(http.client.HTTPMessage):
def get(self, key, failobj=None):
key = key.lower()
if key == 'content-length':
return 'I am broken'
return super().get(key, failobj)
@unittest.skipUnless(threading, 'Threading required for this test.')
class FailingServerTestCase(unittest.TestCase):
def setUp(self):
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, 1)
threading.Thread(target=http_server, args=serv_args).start()
# wait for the server to be ready
self.evt.wait()
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait()
# reset flag
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = False
# reset message class
default_class = http.client.HTTPMessage
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = default_class
def test_basic(self):
# check that flag is false by default
flagval = xmlrpc.server.SimpleXMLRPCServer._send_traceback_header
self.assertEqual(flagval, False)
# enable traceback reporting
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
# test a call that shouldn't fail just as a smoke test
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_fail_no_info(self):
# use the broken message class
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6,8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# The two server-side error headers shouldn't be sent back in this case
self.assertTrue(e.headers.get("X-exception") is None)
self.assertTrue(e.headers.get("X-traceback") is None)
else:
self.fail('ProtocolError not raised')
def test_fail_with_info(self):
# use the broken message class
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
# Check that errors in the server send back exception/traceback
# info when flag is set
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6,8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# We should get error info in the response
expected_err = "invalid literal for int() with base 10: 'I am broken'"
self.assertEqual(e.headers.get("X-exception"), expected_err)
self.assertTrue(e.headers.get("X-traceback") is not None)
else:
self.fail('ProtocolError not raised')
@contextlib.contextmanager
def captured_stdout(encoding='utf-8'):
"""A variation on support.captured_stdout() which gives a text stream
having a `buffer` attribute.
"""
orig_stdout = sys.stdout
sys.stdout = io.TextIOWrapper(io.BytesIO(), encoding=encoding)
try:
yield sys.stdout
finally:
sys.stdout = orig_stdout
class CGIHandlerTestCase(unittest.TestCase):
def setUp(self):
self.cgi = xmlrpc.server.CGIXMLRPCRequestHandler()
def tearDown(self):
self.cgi = None
def test_cgi_get(self):
with support.EnvironmentVarGuard() as env:
env['REQUEST_METHOD'] = 'GET'
# if the method is GET and no request_text is given, it runs handle_get
            # capture stdout output
with captured_stdout(encoding=self.cgi.encoding) as data_out:
self.cgi.handle_request()
# parse Status header
data_out.seek(0)
handle = data_out.read()
status = handle.split()[1]
message = ' '.join(handle.split()[2:4])
self.assertEqual(status, '400')
self.assertEqual(message, 'Bad Request')
def test_cgi_xmlrpc_response(self):
data = """<?xml version='1.0'?>
<methodCall>
<methodName>test_method</methodName>
<params>
<param>
<value><string>foo</string></value>
</param>
<param>
<value><string>bar</string></value>
</param>
</params>
</methodCall>
"""
with support.EnvironmentVarGuard() as env, \
captured_stdout(encoding=self.cgi.encoding) as data_out, \
support.captured_stdin() as data_in:
data_in.write(data)
data_in.seek(0)
env['CONTENT_LENGTH'] = str(len(data))
self.cgi.handle_request()
data_out.seek(0)
            # the server should respond with a Fault; if so, our goal is achieved ;)
handle = data_out.read()
            # start at the 44th char so as not to include the http header; we
            # only need the xml
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, handle[44:])
# Also test the content-length returned by handle_request
            # Using the same test method in order to avoid all the data-passing
            # boilerplate code.
# Test for bug: http://bugs.python.org/issue5040
content = handle[handle.find("<?xml"):]
self.assertEqual(
int(re.search(r'Content-Length: (\d+)', handle).group(1)),
len(content))
class UseBuiltinTypesTestCase(unittest.TestCase):
def test_use_builtin_types(self):
# SimpleXMLRPCDispatcher.__init__ accepts use_builtin_types, which
# makes all dispatch of binary data as bytes instances, and all
# dispatch of datetime argument as datetime.datetime instances.
self.log = []
expected_bytes = b"my dog has fleas"
expected_date = datetime.datetime(2008, 5, 26, 18, 25, 12)
marshaled = xmlrpclib.dumps((expected_bytes, expected_date), 'foobar')
def foobar(*args):
self.log.extend(args)
handler = xmlrpc.server.SimpleXMLRPCDispatcher(
allow_none=True, encoding=None, use_builtin_types=True)
handler.register_function(foobar)
handler._marshaled_dispatch(marshaled)
self.assertEqual(len(self.log), 2)
mybytes, mydate = self.log
self.assertEqual(self.log, [expected_bytes, expected_date])
self.assertIs(type(mydate), datetime.datetime)
self.assertIs(type(mybytes), bytes)
def test_cgihandler_has_use_builtin_types_flag(self):
handler = xmlrpc.server.CGIXMLRPCRequestHandler(use_builtin_types=True)
self.assertTrue(handler.use_builtin_types)
def test_xmlrpcserver_has_use_builtin_types_flag(self):
server = xmlrpc.server.SimpleXMLRPCServer(("localhost", 0),
use_builtin_types=True)
server.server_close()
self.assertTrue(server.use_builtin_types)
@support.reap_threads
def test_main():
support.run_unittest(XMLRPCTestCase, HelperTestCase, DateTimeTestCase,
BinaryTestCase, FaultTestCase, UseBuiltinTypesTestCase,
SimpleServerTestCase, SimpleServerEncodingTestCase,
KeepaliveServerTestCase1, KeepaliveServerTestCase2,
GzipServerTestCase, GzipUtilTestCase,
MultiPathServerTestCase, ServerProxyTestCase, FailingServerTestCase,
CGIHandlerTestCase, SimpleXMLRPCDispatcherTestCase)
if __name__ == "__main__":
test_main()
|
apkAnalyzer.py
|
#!/usr/bin/python3
import json
import sys
import os
import threading
import queue
# Module handling
try:
    from androguard.core.bytecodes.apk import APK
except ImportError:
    print("Error: >androguard< module not found.")
    sys.exit(1)
try:
    from prettytable import PrettyTable
except ImportError:
    print("Error: >prettytable< module not found.")
    sys.exit(1)
try:
    from colorama import Fore, Style
except ImportError:
    print("Error: >colorama< module not found.")
    sys.exit(1)
try:
    import spacy
except ImportError:
    print("Error: >spacy< module not found.")
    sys.exit(1)
# Colors
red = Fore.LIGHTRED_EX
cyan = Fore.LIGHTCYAN_EX
white = Style.RESET_ALL
green = Fore.LIGHTGREEN_EX
yellow = Fore.LIGHTYELLOW_EX
magenta = Fore.LIGHTMAGENTA_EX
# Legends
infoS = f"{cyan}[{red}*{cyan}]{white}"
errorS = f"{cyan}[{red}!{cyan}]{white}"
# necessary variables
danger = 0
normal = 0
# Gathering all strings from file
allStrings = open("temp.txt", "r").read().split('\n')
# Let's get all suspicious strings
susStrings = open("Systems/Android/suspicious.txt", "r").read().split('\n')
# Queue
global q
q = queue.Queue()
# Permission analyzer
def Analyzer(parsed):
global danger
global normal
statistics = PrettyTable()
# Getting blacklisted permissions
with open("Systems/Android/perms.json", "r") as f:
permissions = json.load(f)
apkPerms = parsed.get_permissions()
permArr = []
    # Extracting permission names from the blacklist
for p in range(len(permissions)):
permArr.append(permissions[p]["permission"])
# Parsing permissions
statistics.field_names = [f"{green}Permissions{white}", f"{green}State{white}"]
for pp in apkPerms:
if pp.split(".")[-1] in permArr:
statistics.add_row([f"{pp}", f"{red}Risky{white}"])
danger += 1
else:
statistics.add_row([f"{pp}", f"{yellow}Info{white}"])
normal += 1
    # If there are no permissions:
    if danger == 0 and normal == 0:
        print(f"{errorS} No permissions found.")
else:
print(statistics)
# Analyzing more deeply
def DeepScan(parsed):
# Getting features
featStat = PrettyTable()
featStat.field_names = [f"{green}Features{white}"]
features = parsed.get_features()
if features != []:
for ff in features:
featStat.add_row([ff])
print(featStat)
else:
pass
# Activities
activeStat = PrettyTable()
activeStat.field_names = [f"{green}Activities{white}"]
actos = parsed.get_activities()
if actos != []:
for aa in actos:
activeStat.add_row([aa])
print(activeStat)
else:
pass
# Services
servStat = PrettyTable()
servStat.field_names = [f"{green}Services{white}"]
servv = parsed.get_services()
if servv != []:
for ss in servv:
servStat.add_row([ss])
print(servStat)
else:
pass
# Receivers
recvStat = PrettyTable()
recvStat.field_names = [f"{green}Receivers{white}"]
receive = parsed.get_receivers()
if receive != []:
for rr in receive:
recvStat.add_row([rr])
print(recvStat)
else:
pass
# Providers
provStat = PrettyTable()
provStat.field_names = [f"{green}Providers{white}"]
provids = parsed.get_providers()
if provids != []:
for pp in provids:
provStat.add_row([pp])
print(provStat)
else:
pass
# Handling language package
def LangNotFound():
print(f"{errorS} Language package not found. Without this u wont be able to analyze strings.")
choose = str(input("=> Should I install it for you [Y/n]?: "))
if choose == 'Y' or choose == 'y':
try:
os.system("python3 -m spacy download en")
print(f"{infoS} Language package downloaded.")
sys.exit(0)
except:
print(f"{errorS} Program encountered an error.")
sys.exit(1)
else:
print(f"{errorS} Without language package this module is wont work.")
sys.exit(1)
# APK string analyzer with NLP
def Detailed():
# Our sample string to analyze
while not q.empty():
targetString = q.get()
try:
nlp = spacy.load("en")
sample = nlp(targetString)
except:
LangNotFound()
        # Let's analyze!!
for apkstr in allStrings:
# Parsing and calculating
testme = nlp(apkstr)
if testme.similarity(sample) >= 0.6:
print(f"{cyan}({magenta}{targetString}{cyan})->{white} {apkstr}")
# Execution
if __name__ == '__main__':
try:
# Getting and parsing target APK
targetAPK = str(sys.argv[1])
parsed = APK(targetAPK)
# Permissions side
Analyzer(parsed)
# Deep scanner
DeepScan(parsed)
# Strings side
print(f"{infoS} Analyzing extracted strings from that file. Please wait...\n")
#Thread Number
threadNumber = 0
for sus in susStrings:
q.put(sus)
threadNumber += 1
ts = []
for i in range(0,threadNumber):
try:
t = threading.Thread(target=Detailed)
ts.append(t)
t.start()
except Exception as e:
print(e)
for t in ts:
t.join()
    except Exception as err:
        print(f"{errorS} An error occurred: {err}")
|
system_integration_testing.py
|
"""
This module automatically tests how the system package integrates with its subsystems. The main idea is to simulate
keypresses and PIR events and feed them into the queues that the system uses to take actions.
"""
import logging
import time
from threading import Thread
from time import sleep
from pir_event import PIREvent, PirEventType
from system import System
class SystemIntegrationTesting:
def __init__(self):
self.total_testcase_count = 0
self.pass_count = 0
self.fail_count = 0
self.system_to_test = System()
self._logger = logging.getLogger('System Testing')
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(threadName)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
self._logger.addHandler(ch)
self._logger.setLevel(logging.DEBUG)
def test(self, expected, actual):
if expected == actual:
self.pass_count += 1
self._logger.debug("Pass - Expected=" + str(expected) + " Actual=" + str(actual))
else:
self.fail_count += 1
self._logger.debug("Fail - Expected=" + str(expected) + " Actual=" + str(actual))
self.total_testcase_count += 1
def print_results(self):
self._logger.debug("---------")
self._logger.debug("Results\tCount\tPercent")
self._logger.debug("Pass" + "\t" + str(self.pass_count) + "\t" + str(round(self.pass_count / self.total_testcase_count * 100, 2)))
self._logger.debug("Fail" + "\t" + str(self.fail_count) + "\t" + str(round(self.fail_count / self.total_testcase_count * 100, 2)))
self._logger.debug("Total" + "\t" + str(self.total_testcase_count))
self._logger.debug("---------")
def run(self):
Thread(target=self.system_to_test.run, args=(), name="Sensor_Thread").start()
sleep(10)
self._logger.debug("Starting up Integration testing")
# Test case #### for Armed.
self.system_to_test._process_keypress_event("123456")
# Wait 1 sec for arm delay
sleep(self.system_to_test._arm_time_delay + 1)
self.test(expected=True, actual=self.system_to_test.is_armed)
# Test case #### for triggering alarm
self.system_to_test.event_queue.put(PIREvent(time=time.time(), event_type=PirEventType.rising))
sleep(1)
self.test(expected=True, actual=self.system_to_test.alarm_active)
# Test case #### for disarming
self.system_to_test._process_keypress_event("123456")
sleep(1)
self.test(expected=False, actual=self.system_to_test.is_armed)
self.print_results()
self._logger.debug("Integration testing Complete!")
if __name__ == '__main__':
sys = SystemIntegrationTesting()
sys.run()
|
app.py
|
"""
PyGPSClient - Main tkinter application class.
Created on 12 Sep 2020
:author: semuadmin
:copyright: SEMU Consulting © 2020
:license: BSD 3-Clause
"""
from threading import Thread
from tkinter import Tk, Frame, N, S, E, W, PhotoImage, font
from .strings import (
TITLE,
MENUHIDESE,
MENUSHOWSE,
MENUHIDESB,
MENUSHOWSB,
MENUHIDECON,
MENUSHOWCON,
MENUHIDEMAP,
MENUSHOWMAP,
MENUHIDESATS,
MENUSHOWSATS,
INTROTXTNOPORTS,
)
from ._version import __version__
from .about_dialog import AboutDialog
from .banner_frame import BannerFrame
from .console_frame import ConsoleFrame
from .filehandler import FileHandler
from .globals import ICON_APP, DISCONNECTED
from .graphview_frame import GraphviewFrame
from .map_frame import MapviewFrame
from .menu_bar import MenuBar
from .serial_handler import SerialHandler
from .settings_frame import SettingsFrame
from .skyview_frame import SkyviewFrame
from .status_frame import StatusFrame
from .ubx_config_dialog import UBXConfigDialog
from .nmea_handler import NMEAHandler
from .ubx_handler import UBXHandler
VERSION = __version__
class App(Frame): # pylint: disable=too-many-ancestors
"""
Main PyGPSClient GUI Application Class
"""
def __init__(self, master, *args, **kwargs):
"""
Set up main application and add frames
:param tkinter.Tk master: reference to Tk root
:param args: optional args to pass to Frame parent class
:param kwargs: optional kwargs to pass to Frame parent class
"""
self.__master = master
Frame.__init__(self, self.__master, *args, **kwargs)
self.__master.protocol("WM_DELETE_WINDOW", self.on_exit)
self.__master.title(TITLE)
self.__master.iconphoto(True, PhotoImage(file=ICON_APP))
# Set initial widget visibility
self._show_settings = True
self._show_ubxconfig = False
self._show_status = True
self._show_console = True
self._show_map = True
self._show_sats = True
# Instantiate protocol handler classes
self.file_handler = FileHandler(self)
self.serial_handler = SerialHandler(self)
self.nmea_handler = NMEAHandler(self)
self.ubx_handler = UBXHandler(self)
self.dlg_ubxconfig = None
self._config_thread = None
# Load web map api key if there is one
self.api_key = self.file_handler.load_apikey()
self._body()
self._do_layout()
self._attach_events()
# Initialise widgets
self.frm_satview.init_sats()
self.frm_graphview.init_graph()
self.frm_banner.update_conn_status(DISCONNECTED)
def _body(self):
"""
Set up frame and widgets
"""
# these grid weights are what gives the grid its
# 'pack to window size' behaviour
self.__master.grid_columnconfigure(0, weight=1)
self.__master.grid_columnconfigure(1, weight=2)
self.__master.grid_columnconfigure(2, weight=2)
self.__master.grid_rowconfigure(0, weight=0)
self.__master.grid_rowconfigure(1, weight=2)
self.__master.grid_rowconfigure(2, weight=1)
self._set_default_fonts()
self.menu = MenuBar(self)
self.frm_status = StatusFrame(self, borderwidth=2, relief="groove")
self.frm_banner = BannerFrame(self, borderwidth=2, relief="groove")
self.frm_settings = SettingsFrame(self, borderwidth=2, relief="groove")
self.frm_console = ConsoleFrame(self, borderwidth=2, relief="groove")
self.frm_mapview = MapviewFrame(self, borderwidth=2, relief="groove")
self.frm_satview = SkyviewFrame(self, borderwidth=2, relief="groove")
self.frm_graphview = GraphviewFrame(self, borderwidth=2, relief="groove")
self.__master.config(menu=self.menu)
def _do_layout(self):
"""
Arrange widgets in main application frame
"""
self.frm_banner.grid(
column=0, row=0, columnspan=5, padx=2, pady=2, sticky=(N, S, E, W)
)
self._grid_console()
self._grid_sats()
self._grid_map()
self._grid_status()
self._grid_settings()
if self.frm_settings.serial_settings().status == 3: # NOPORTS
self.set_status(INTROTXTNOPORTS, "red")
def _attach_events(self):
"""
Bind events to main application
"""
self.__master.bind("<<ubx_read>>", self.serial_handler.on_read)
self.__master.bind("<<ubx_readfile>>", self.serial_handler.on_read)
self.__master.bind("<<ubx_eof>>", self.serial_handler.on_eof)
self.__master.bind_all("<Control-q>", self.on_exit)
def _set_default_fonts(self):
"""
Set default fonts for entire application
"""
# pylint: disable=attribute-defined-outside-init
self.font_vsm = font.Font(size=8)
self.font_sm = font.Font(size=10)
self.font_md = font.Font(size=12)
self.font_md2 = font.Font(size=14)
self.font_lg = font.Font(size=18)
def toggle_settings(self):
"""
Toggle Settings Frame on or off
"""
self._show_settings = not self._show_settings
self._grid_settings()
def _grid_settings(self):
"""
Set grid position of Settings Frame
"""
if self._show_settings:
self.frm_settings.grid(
column=4, row=1, rowspan=2, padx=2, pady=2, sticky=(N, W, E)
)
self.menu.view_menu.entryconfig(0, label=MENUHIDESE)
else:
self.frm_settings.grid_forget()
self.menu.view_menu.entryconfig(0, label=MENUSHOWSE)
def toggle_status(self):
"""
Toggle Status Bar on or off
"""
self._show_status = not self._show_status
self._grid_status()
def _grid_status(self):
"""
Position Status Bar in grid
"""
if self._show_status:
self.frm_status.grid(
column=0, row=3, columnspan=5, padx=2, pady=2, sticky=(W, E)
)
self.menu.view_menu.entryconfig(1, label=MENUHIDESB)
else:
self.frm_status.grid_forget()
self.menu.view_menu.entryconfig(1, label=MENUSHOWSB)
def toggle_console(self):
"""
Toggle Console frame on or off
"""
self._show_console = not self._show_console
self._grid_console()
self._grid_sats()
self._grid_map()
def _grid_console(self):
"""
Position Console Frame in grid
"""
if self._show_console:
self.frm_console.grid(
column=0, row=1, columnspan=4, padx=2, pady=2, sticky=(N, S, E, W)
)
self.menu.view_menu.entryconfig(2, label=MENUHIDECON)
else:
self.frm_console.grid_forget()
self.menu.view_menu.entryconfig(2, label=MENUSHOWCON)
def toggle_sats(self):
"""
Toggle Satview and Graphview frames on or off
"""
self._show_sats = not self._show_sats
self._grid_sats()
self._grid_map()
def _grid_sats(self):
"""
Position Satview and Graphview Frames in grid
"""
if self._show_sats:
self.frm_satview.grid(column=0, row=2, padx=2, pady=2, sticky=(N, S, E, W))
self.frm_graphview.grid(
column=1, row=2, padx=2, pady=2, sticky=(N, S, E, W)
)
self.menu.view_menu.entryconfig(4, label=MENUHIDESATS)
else:
self.frm_satview.grid_forget()
self.frm_graphview.grid_forget()
self.menu.view_menu.entryconfig(4, label=MENUSHOWSATS)
def toggle_map(self):
"""
Toggle Map Frame on or off
"""
self._show_map = not self._show_map
self._grid_map()
def _grid_map(self):
"""
Position Map Frame in grid
"""
if self._show_map:
self.frm_mapview.grid(column=2, row=2, padx=2, pady=2, sticky=(N, S, E, W))
self.menu.view_menu.entryconfig(3, label=MENUHIDEMAP)
else:
self.frm_mapview.grid_forget()
self.menu.view_menu.entryconfig(3, label=MENUSHOWMAP)
def set_connection(self, message, color="blue"):
"""
Sets connection description in status bar.
:param str message: message to be displayed in connection label
:param str color: rgb color string
"""
self.frm_status.set_connection(message, color)
def set_status(self, message, color="black"):
"""
Sets text of status bar
:param str message: message to be displayed in status label
:param str color: rgb color string
"""
self.frm_status.set_status(message, color)
def about(self):
"""
Open About dialog
"""
AboutDialog(self)
def ubxconfig(self):
"""
Start UBX Config dialog thread
"""
if self._config_thread is None:
self._config_thread = Thread(target=self._ubxconfig_thread, daemon=False)
self._config_thread.start()
def _ubxconfig_thread(self):
"""
THREADED PROCESS UBX Configuration Dialog
"""
self.dlg_ubxconfig = UBXConfigDialog(self)
def stop_config_thread(self):
"""
Stop UBX Configuration dialog thread.
"""
if self._config_thread is not None:
self._config_thread = None
self.dlg_ubxconfig = None
def get_master(self):
"""
Returns application master (Tk)
:return: reference to application master (Tk)
"""
return self.__master
def on_exit(self, *args, **kwargs): # pylint: disable=unused-argument
"""
Kill any running processes and quit application
"""
self.serial_handler.stop_read_thread()
self.serial_handler.stop_readfile_thread()
self.stop_config_thread()
self.serial_handler.disconnect()
self.__master.destroy()
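# A minimal, hedged launch sketch (an assumption; the package's real entry point
# may differ): create the Tk root, attach the App frame and start the event loop.
if __name__ == "__main__":
    ROOT = Tk()
    APP = App(ROOT)
    ROOT.mainloop()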
|
Logger.py
|
import threading
Log = print
f = open("./Log.txt", 'a')
cache = []
mutex = threading.Lock()
def writeFile():
    global cache
    # Swap the buffer out under the lock so two concurrent flushes cannot
    # write the same entries twice.
    mutex.acquire()
    tmp = cache
    cache = []
    mutex.release()
    f.writelines("\n".join(tmp))
    f.write("\n")
    f.flush()
def writeLog(str):
mutex.acquire()
cache.append(str)
mutex.release()
if len(cache) > 2:
thread = threading.Thread(target=writeFile)
thread.start()
Log = writeLog
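# Hedged addition (an assumption, not in the original module): flush whatever is
# still buffered in 'cache' at interpreter exit, so short runs that never reach
# the length threshold are not lost.
import atexit
atexit.register(writeFile)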
# test
'''
import time
def writeLog():
count = 0
while True:
Log("jkjflks %s %s" % (threading.currentThread().getName(), str( count)))
count = count + 1
time.sleep(0.001)
for i in range(10):
thread = threading.Thread(target=writeLog)
thread.setName( str(i))
thread.start()
'''
|
context.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import shutil
import signal
import sys
import threading
import warnings
from threading import RLock
from tempfile import NamedTemporaryFile
from py4j.protocol import Py4JError
from pyspark import accumulators
from pyspark.accumulators import Accumulator
from pyspark.broadcast import Broadcast
from pyspark.conf import SparkConf
from pyspark.files import SparkFiles
from pyspark.java_gateway import launch_gateway
from pyspark.serializers import PickleSerializer, BatchedSerializer, UTF8Deserializer, \
PairDeserializer, AutoBatchedSerializer, NoOpSerializer
from pyspark.storagelevel import StorageLevel
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.traceback_utils import CallSite, first_spark_call
from pyspark.status import StatusTracker
from pyspark.profiler import ProfilerCollector, BasicProfiler
if sys.version > '3':
xrange = range
__all__ = ['SparkContext']
# These are special default configs for PySpark, they will overwrite
# the default ones for Spark if they are not configured by user.
DEFAULT_CONFIGS = {
"spark.serializer.objectStreamReset": 100,
"spark.rdd.compress": True,
}
class SparkContext(object):
"""
Main entry point for Spark functionality. A SparkContext represents the
connection to a Spark cluster, and can be used to create L{RDD} and
broadcast variables on that cluster.
"""
_gateway = None
_jvm = None
_next_accum_id = 0
_active_spark_context = None
_lock = RLock()
_python_includes = None # zip and egg files that need to be added to PYTHONPATH
PACKAGE_EXTENSIONS = ('.zip', '.egg', '.jar')
def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None,
environment=None, batchSize=0, serializer=PickleSerializer(), conf=None,
gateway=None, jsc=None, profiler_cls=BasicProfiler):
"""
Create a new SparkContext. At least the master and app name should be set,
either through the named parameters here or through C{conf}.
:param master: Cluster URL to connect to
(e.g. mesos://host:port, spark://host:port, local[4]).
:param appName: A name for your job, to display on the cluster web UI.
:param sparkHome: Location where Spark is installed on cluster nodes.
:param pyFiles: Collection of .zip or .py files to send to the cluster
and add to PYTHONPATH. These can be paths on the local file
system or HDFS, HTTP, HTTPS, or FTP URLs.
:param environment: A dictionary of environment variables to set on
worker nodes.
:param batchSize: The number of Python objects represented as a single
Java object. Set 1 to disable batching, 0 to automatically choose
the batch size based on object sizes, or -1 to use an unlimited
batch size
:param serializer: The serializer for RDDs.
:param conf: A L{SparkConf} object setting Spark properties.
:param gateway: Use an existing gateway and JVM, otherwise a new JVM
will be instantiated.
:param jsc: The JavaSparkContext instance (optional).
:param profiler_cls: A class of custom Profiler used to do profiling
(default is pyspark.profiler.BasicProfiler).
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
"""
self._callsite = first_spark_call() or CallSite(None, None, None)
SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
try:
self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls)
except:
# If an error occurs, clean up in order to allow future SparkContext creation:
self.stop()
raise
def _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls):
self.environment = environment or {}
# java gateway must have been launched at this point.
if conf is not None and conf._jconf is not None:
            # conf has been initialized in JVM properly, so use conf directly. This represents the
            # scenario in which the JVM has been launched before SparkConf is created (e.g. a SparkContext
            # is created and then stopped, and we create a new SparkConf and a new SparkContext again).
self._conf = conf
else:
self._conf = SparkConf(_jvm=SparkContext._jvm)
if conf is not None:
for k, v in conf.getAll():
self._conf.set(k, v)
self._batchSize = batchSize # -1 represents an unlimited batch size
self._unbatched_serializer = serializer
if batchSize == 0:
self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
else:
self.serializer = BatchedSerializer(self._unbatched_serializer,
batchSize)
# Set any parameters passed directly to us on the conf
if master:
self._conf.setMaster(master)
if appName:
self._conf.setAppName(appName)
if sparkHome:
self._conf.setSparkHome(sparkHome)
if environment:
for key, value in environment.items():
self._conf.setExecutorEnv(key, value)
for key, value in DEFAULT_CONFIGS.items():
self._conf.setIfMissing(key, value)
# Check that we have at least the required parameters
if not self._conf.contains("spark.master"):
raise Exception("A master URL must be set in your configuration")
if not self._conf.contains("spark.app.name"):
raise Exception("An application name must be set in your configuration")
# Read back our properties from the conf in case we loaded some of them from
# the classpath or an external config file
self.master = self._conf.get("spark.master")
self.appName = self._conf.get("spark.app.name")
self.sparkHome = self._conf.get("spark.home", None)
for (k, v) in self._conf.getAll():
if k.startswith("spark.executorEnv."):
varName = k[len("spark.executorEnv."):]
self.environment[varName] = v
if sys.version >= '3.3' and 'PYTHONHASHSEED' not in os.environ:
# disable randomness of hash of string in worker, if this is not
# launched by spark-submit
self.environment["PYTHONHASHSEED"] = "0"
# Create the Java SparkContext through Py4J
self._jsc = jsc or self._initialize_context(self._conf._jconf)
# Reset the SparkConf to the one actually used by the SparkContext in JVM.
self._conf = SparkConf(_jconf=self._jsc.sc().conf())
# Create a single Accumulator in Java that we'll send all our updates through;
# they will be passed back to us through a TCP server
self._accumulatorServer = accumulators._start_update_server()
(host, port) = self._accumulatorServer.server_address
self._javaAccumulator = self._jvm.PythonAccumulatorV2(host, port)
self._jsc.sc().register(self._javaAccumulator)
self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')
self.pythonVer = "%d.%d" % sys.version_info[:2]
if sys.version_info < (2, 7):
warnings.warn("Support for Python 2.6 is deprecated as of Spark 2.0.0")
# Broadcast's __reduce__ method stores Broadcast instances here.
# This allows other code to determine which Broadcast instances have
# been pickled, so it can determine which Java broadcast objects to
# send.
self._pickled_broadcast_vars = set()
SparkFiles._sc = self
root_dir = SparkFiles.getRootDirectory()
sys.path.insert(1, root_dir)
# Deploy any code dependencies specified in the constructor
self._python_includes = list()
for path in (pyFiles or []):
self.addPyFile(path)
# Deploy code dependencies set by spark-submit; these will already have been added
# with SparkContext.addFile, so we just need to add them to the PYTHONPATH
for path in self._conf.get("spark.submit.pyFiles", "").split(","):
if path != "":
(dirname, filename) = os.path.split(path)
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))
# Create a temporary directory inside spark.local.dir:
local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
self._temp_dir = \
self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
.getAbsolutePath()
# profiling stats collected for each PythonRDD
if self._conf.get("spark.python.profile", "false") == "true":
dump_path = self._conf.get("spark.python.profile.dump", None)
self.profiler_collector = ProfilerCollector(profiler_cls, dump_path)
else:
self.profiler_collector = None
# create a signal handler which would be invoked on receiving SIGINT
def signal_handler(signal, frame):
self.cancelAllJobs()
raise KeyboardInterrupt()
# see http://stackoverflow.com/questions/23206787/
if isinstance(threading.current_thread(), threading._MainThread):
signal.signal(signal.SIGINT, signal_handler)
def _initialize_context(self, jconf):
"""
Initialize SparkContext in a function to allow subclass-specific initialization
"""
return self._jvm.JavaSparkContext(jconf)
@classmethod
def _ensure_initialized(cls, instance=None, gateway=None, conf=None):
"""
Checks whether a SparkContext is initialized or not.
Throws an error if a SparkContext is already running.
"""
with SparkContext._lock:
if not SparkContext._gateway:
SparkContext._gateway = gateway or launch_gateway(conf)
SparkContext._jvm = SparkContext._gateway.jvm
if instance:
if (SparkContext._active_spark_context and
SparkContext._active_spark_context != instance):
currentMaster = SparkContext._active_spark_context.master
currentAppName = SparkContext._active_spark_context.appName
callsite = SparkContext._active_spark_context._callsite
# Raise error if there is already a running Spark context
raise ValueError(
"Cannot run multiple SparkContexts at once; "
"existing SparkContext(app=%s, master=%s)"
" created by %s at %s:%s "
% (currentAppName, currentMaster,
callsite.function, callsite.file, callsite.linenum))
else:
SparkContext._active_spark_context = instance
def __getnewargs__(self):
# This method is called when attempting to pickle SparkContext, which is always an error:
raise Exception(
"It appears that you are attempting to reference SparkContext from a broadcast "
"variable, action, or transformation. SparkContext can only be used on the driver, "
"not in code that it run on workers. For more information, see SPARK-5063."
)
def __enter__(self):
"""
Enable 'with SparkContext(...) as sc: app(sc)' syntax.
"""
return self
def __exit__(self, type, value, trace):
"""
Enable 'with SparkContext(...) as sc: app' syntax.
Specifically stop the context on exit of the with block.
"""
self.stop()
@classmethod
def getOrCreate(cls, conf=None):
"""
Get or instantiate a SparkContext and register it as a singleton object.
:param conf: SparkConf (optional)
"""
with SparkContext._lock:
if SparkContext._active_spark_context is None:
SparkContext(conf=conf or SparkConf())
return SparkContext._active_spark_context
def setLogLevel(self, logLevel):
"""
Control our logLevel. This overrides any user-defined log settings.
Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
"""
self._jsc.setLogLevel(logLevel)
@classmethod
def setSystemProperty(cls, key, value):
"""
Set a Java system property, such as spark.executor.memory. This must
be invoked before instantiating SparkContext.
"""
SparkContext._ensure_initialized()
SparkContext._jvm.java.lang.System.setProperty(key, value)
@property
def version(self):
"""
The version of Spark on which this application is running.
"""
return self._jsc.version()
@property
@ignore_unicode_prefix
def applicationId(self):
"""
A unique identifier for the Spark application.
Its format depends on the scheduler implementation.
* in case of local spark app something like 'local-1433865536131'
* in case of YARN something like 'application_1433865536131_34483'
>>> sc.applicationId # doctest: +ELLIPSIS
u'local-...'
"""
return self._jsc.sc().applicationId()
@property
def uiWebUrl(self):
"""Return the URL of the SparkUI instance started by this SparkContext"""
return self._jsc.sc().uiWebUrl().get()
@property
def startTime(self):
"""Return the epoch time when the Spark Context was started."""
return self._jsc.startTime()
@property
def defaultParallelism(self):
"""
Default level of parallelism to use when not given by user (e.g. for
reduce tasks)
"""
return self._jsc.sc().defaultParallelism()
@property
def defaultMinPartitions(self):
"""
Default min number of partitions for Hadoop RDDs when not given by user
"""
return self._jsc.sc().defaultMinPartitions()
def stop(self):
"""
Shut down the SparkContext.
"""
if getattr(self, "_jsc", None):
try:
self._jsc.stop()
except Py4JError:
# Case: SPARK-18523
warnings.warn(
'Unable to cleanly shutdown Spark JVM process.'
' It is possible that the process has crashed,'
' been killed or may also be in a zombie state.',
RuntimeWarning
)
pass
finally:
self._jsc = None
if getattr(self, "_accumulatorServer", None):
self._accumulatorServer.shutdown()
self._accumulatorServer = None
with SparkContext._lock:
SparkContext._active_spark_context = None
def emptyRDD(self):
"""
Create an RDD that has no partitions or elements.
"""
return RDD(self._jsc.emptyRDD(), self, NoOpSerializer())
def range(self, start, end=None, step=1, numSlices=None):
"""
Create a new RDD of int containing elements from `start` to `end`
(exclusive), incremented by `step` for each element. Can be called the same
way as Python's built-in range() function. If called with a single argument,
the argument is interpreted as `end`, and `start` is set to 0.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numSlices: the number of partitions of the new RDD
:return: An RDD of int
>>> sc.range(5).collect()
[0, 1, 2, 3, 4]
>>> sc.range(2, 4).collect()
[2, 3]
>>> sc.range(1, 7, 2).collect()
[1, 3, 5]
"""
if end is None:
end = start
start = 0
return self.parallelize(xrange(start, end, step), numSlices)
def parallelize(self, c, numSlices=None):
"""
Distribute a local Python collection to form an RDD. Using xrange
is recommended if the input represents a range, for better performance.
>>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
[[0], [2], [3], [4], [6]]
>>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect()
[[], [0], [], [2], [4]]
"""
numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
if isinstance(c, xrange):
size = len(c)
if size == 0:
return self.parallelize([], numSlices)
step = c[1] - c[0] if size > 1 else 1
start0 = c[0]
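# Build each partition lazily as an xrange slice, so the full range never has to be materialized or serialized.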
def getStart(split):
return start0 + int((split * size / numSlices)) * step
def f(split, iterator):
return xrange(getStart(split), getStart(split + 1), step)
return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
# Calling the Java parallelize() method with an ArrayList is too slow,
# because it sends O(n) Py4J commands. As an alternative, serialized
# objects are written to a file and loaded through readRDDFromFile().
tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
try:
# Make sure we distribute data evenly if it's smaller than self.batchSize
if "__len__" not in dir(c):
c = list(c) # Make it a list so we can compute its length
batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
serializer.dump_stream(c, tempFile)
tempFile.close()
readRDDFromFile = self._jvm.PythonRDD.readRDDFromFile
jrdd = readRDDFromFile(self._jsc, tempFile.name, numSlices)
finally:
# readRDDFromFile eagerly reads the file, so we can delete it right after.
os.unlink(tempFile.name)
return RDD(jrdd, self, serializer)
def pickleFile(self, name, minPartitions=None):
"""
Load an RDD previously saved using L{RDD.saveAsPickleFile} method.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
>>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.objectFile(name, minPartitions), self)
@ignore_unicode_prefix
def textFile(self, name, minPartitions=None, use_unicode=True):
"""
Read a text file from HDFS, a local file system (available on all
nodes), or any Hadoop-supported file system URI, and return it as an
RDD of Strings.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
>>> path = os.path.join(tempdir, "sample-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello world!")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello world!']
"""
minPartitions = minPartitions or min(self.defaultParallelism, 2)
return RDD(self._jsc.textFile(name, minPartitions), self,
UTF8Deserializer(use_unicode))
@ignore_unicode_prefix
def wholeTextFiles(self, path, minPartitions=None, use_unicode=True):
"""
Read a directory of text files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system
URI. Each file is read as a single record and returned in a
key-value pair, where the key is the path of each file, the
value is the content of each file.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
For example, if you have the following files::
hdfs://a-hdfs-path/part-00000
hdfs://a-hdfs-path/part-00001
...
hdfs://a-hdfs-path/part-nnnnn
Do C{rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")},
then C{rdd} contains::
(a-hdfs-path/part-00000, its content)
(a-hdfs-path/part-00001, its content)
...
(a-hdfs-path/part-nnnnn, its content)
.. note:: Small files are preferred, as each file will be loaded
fully in memory.
>>> dirPath = os.path.join(tempdir, "files")
>>> os.mkdir(dirPath)
>>> with open(os.path.join(dirPath, "1.txt"), "w") as file1:
... _ = file1.write("1")
>>> with open(os.path.join(dirPath, "2.txt"), "w") as file2:
... _ = file2.write("2")
>>> textFiles = sc.wholeTextFiles(dirPath)
>>> sorted(textFiles.collect())
[(u'.../1.txt', u'1'), (u'.../2.txt', u'2')]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.wholeTextFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(use_unicode), UTF8Deserializer(use_unicode)))
def binaryFiles(self, path, minPartitions=None):
"""
.. note:: Experimental
Read a directory of binary files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system URI
as a byte array. Each file is read as a single record and returned
in a key-value pair, where the key is the path of each file, the
value is the content of each file.
.. note:: Small files are preferred; large files are also allowed, but
may cause bad performance.
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.binaryFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(), NoOpSerializer()))
def binaryRecords(self, path, recordLength):
"""
.. note:: Experimental
Load data from a flat binary file, assuming each record is a set of numbers
with the specified numerical format (see ByteBuffer), and the number of
bytes per record is constant.
:param path: Directory to the input data files
:param recordLength: The length at which to split the records
"""
return RDD(self._jsc.binaryRecords(path, recordLength), self, NoOpSerializer())
def _dictToJavaMap(self, d):
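# Convert a Python dict into a java.util.HashMap so it can be handed to the JVM as a Hadoop configuration.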
jm = self._jvm.java.util.HashMap()
if not d:
d = {}
for k, v in d.items():
jm[k] = v
return jm
def sequenceFile(self, path, keyClass=None, valueClass=None, keyConverter=None,
valueConverter=None, minSplits=None, batchSize=0):
"""
Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is as follows:
1. A Java RDD is created from the SequenceFile or other InputFormat, and the key
and value Writable classes
2. Serialization is attempted via Pyrolite pickling
3. If this fails, the fallback is to call 'toString' on each key and value
4. C{PickleSerializer} is used to deserialize pickled objects on the Python side
:param path: path to sequence file
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter:
:param valueConverter:
:param minSplits: minimum splits in dataset
(default min(2, sc.defaultParallelism))
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
minSplits = minSplits or min(self.defaultParallelism, 2)
jrdd = self._jvm.PythonRDD.sequenceFile(self._jsc, path, keyClass, valueClass,
keyConverter, valueConverter, minSplits, batchSize)
return RDD(jrdd, self)
def newAPIHadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def newAPIHadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java.
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def _checkpointFile(self, name, input_deserializer):
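# Load an RDD that was previously checkpointed, using the supplied deserializer.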
jrdd = self._jsc.checkpointFile(name)
return RDD(jrdd, self, input_deserializer)
@ignore_unicode_prefix
def union(self, rdds):
"""
Build the union of a list of RDDs.
This supports unions() of RDDs with different serialized formats,
although this forces them to be reserialized using the default
serializer:
>>> path = os.path.join(tempdir, "union-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello']
>>> parallelized = sc.parallelize(["World!"])
>>> sorted(sc.union([textFile, parallelized]).collect())
[u'Hello', 'World!']
"""
first_jrdd_deserializer = rdds[0]._jrdd_deserializer
if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
rdds = [x._reserialize() for x in rdds]
first = rdds[0]._jrdd
rest = [x._jrdd for x in rdds[1:]]
return RDD(self._jsc.union(first, rest), self, rdds[0]._jrdd_deserializer)
def broadcast(self, value):
"""
Broadcast a read-only variable to the cluster, returning a
L{Broadcast<pyspark.broadcast.Broadcast>}
object for reading it in distributed functions. The variable will
be sent to each cluster only once.
"""
return Broadcast(self, value, self._pickled_broadcast_vars)
def accumulator(self, value, accum_param=None):
"""
Create an L{Accumulator} with the given initial value, using a given
L{AccumulatorParam} helper object to define how to add values of the
data type if provided. Default AccumulatorParams are used for integers
and floating-point numbers if you do not provide one. For other types,
a custom AccumulatorParam can be used.
"""
if accum_param is None:
if isinstance(value, int):
accum_param = accumulators.INT_ACCUMULATOR_PARAM
elif isinstance(value, float):
accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM
elif isinstance(value, complex):
accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM
else:
raise TypeError("No default accumulator param for type %s" % type(value))
SparkContext._next_accum_id += 1
return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
def addFile(self, path, recursive=False):
"""
Add a file to be downloaded with this Spark job on every node.
The C{path} passed can be either a local file, a file in HDFS
(or other Hadoop-supported filesystems), or an HTTP, HTTPS or
FTP URI.
To access the file in Spark jobs, use
L{SparkFiles.get(fileName)<pyspark.files.SparkFiles.get>} with the
filename to find its download location.
A directory can be given if the recursive option is set to True.
Currently directories are only supported for Hadoop-supported filesystems.
>>> from pyspark import SparkFiles
>>> path = os.path.join(tempdir, "test.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("100")
>>> sc.addFile(path)
>>> def func(iterator):
... with open(SparkFiles.get("test.txt")) as testFile:
... fileVal = int(testFile.readline())
... return [x * fileVal for x in iterator]
>>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
[100, 200, 300, 400]
"""
self._jsc.sc().addFile(path, recursive)
def addPyFile(self, path):
"""
Add a .py or .zip dependency for all tasks to be executed on this
SparkContext in the future. The C{path} passed can be either a local
file, a file in HDFS (or other Hadoop-supported filesystems), or an
HTTP, HTTPS or FTP URI.
"""
self.addFile(path)
(dirname, filename) = os.path.split(path) # dirname may be directory or HDFS/S3 prefix
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
# for tests in local mode
sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))
if sys.version > '3':
import importlib
importlib.invalidate_caches()
def setCheckpointDir(self, dirName):
"""
Set the directory under which RDDs are going to be checkpointed. The
directory must be an HDFS path if running on a cluster.
"""
self._jsc.sc().setCheckpointDir(dirName)
def _getJavaStorageLevel(self, storageLevel):
"""
Returns a Java StorageLevel based on a pyspark.StorageLevel.
"""
if not isinstance(storageLevel, StorageLevel):
raise Exception("storageLevel must be of type pyspark.StorageLevel")
newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
return newStorageLevel(storageLevel.useDisk,
storageLevel.useMemory,
storageLevel.useOffHeap,
storageLevel.deserialized,
storageLevel.replication)
def setJobGroup(self, groupId, description, interruptOnCancel=False):
"""
Assigns a group ID to all the jobs started by this thread until the group ID is set to a
different value or cleared.
Often, a unit of execution in an application consists of multiple Spark actions or jobs.
Application programmers can use this method to group all those jobs together and give a
group description. Once set, the Spark web UI will associate such jobs with this group.
The application can use L{SparkContext.cancelJobGroup} to cancel all
running jobs in this group.
>>> import threading
>>> from time import sleep
>>> result = "Not Set"
>>> lock = threading.Lock()
>>> def map_func(x):
... sleep(100)
... raise Exception("Task should have been cancelled")
>>> def start_job(x):
... global result
... try:
... sc.setJobGroup("job_to_cancel", "some description")
... result = sc.parallelize(range(x)).map(map_func).collect()
... except Exception as e:
... result = "Cancelled"
... lock.release()
>>> def stop_job():
... sleep(5)
... sc.cancelJobGroup("job_to_cancel")
>>> supress = lock.acquire()
>>> supress = threading.Thread(target=start_job, args=(10,)).start()
>>> supress = threading.Thread(target=stop_job).start()
>>> supress = lock.acquire()
>>> print(result)
Cancelled
If interruptOnCancel is set to true for the job group, then job cancellation will result
in Thread.interrupt() being called on the job's executor threads. This is useful to help
ensure that the tasks are actually stopped in a timely manner, but is off by default due
to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
"""
self._jsc.setJobGroup(groupId, description, interruptOnCancel)
def setLocalProperty(self, key, value):
"""
Set a local property that affects jobs submitted from this thread, such as the
Spark fair scheduler pool.
"""
self._jsc.setLocalProperty(key, value)
def getLocalProperty(self, key):
"""
Get a local property set in this thread, or null if it is missing. See
L{setLocalProperty}
"""
return self._jsc.getLocalProperty(key)
def sparkUser(self):
"""
Get SPARK_USER for user who is running SparkContext.
"""
return self._jsc.sc().sparkUser()
def cancelJobGroup(self, groupId):
"""
Cancel active jobs for the specified group. See L{SparkContext.setJobGroup}
for more information.
"""
self._jsc.sc().cancelJobGroup(groupId)
def cancelAllJobs(self):
"""
Cancel all jobs that have been scheduled or are running.
"""
self._jsc.sc().cancelAllJobs()
def statusTracker(self):
"""
Return :class:`StatusTracker` object
"""
return StatusTracker(self._jsc.statusTracker())
def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
"""
Executes the given partitionFunc on the specified set of partitions,
returning the result as an array of elements.
If 'partitions' is not specified, this will run over all partitions.
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part])
[0, 1, 4, 9, 16, 25]
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
[0, 1, 16, 25]
"""
if partitions is None:
partitions = range(rdd._jrdd.partitions().size())
# Implementation note: This is implemented as a mapPartitions followed
# by runJob() in order to avoid having to pass a Python lambda into
# SparkContext#runJob.
mappedRDD = rdd.mapPartitions(partitionFunc)
port = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
return list(_load_from_socket(port, mappedRDD._jrdd_deserializer))
def show_profiles(self):
""" Print the profile stats to stdout """
self.profiler_collector.show_profiles()
def dump_profiles(self, path):
""" Dump the profile stats into directory `path`
"""
self.profiler_collector.dump_profiles(path)
def getConf(self):
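# Return a copy of this SparkContext's configuration as a new SparkConf.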
conf = SparkConf()
conf.setAll(self._conf.getAll())
return conf
def _test():
import atexit
import doctest
import tempfile
globs = globals().copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest')
globs['tempdir'] = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(globs['tempdir']))
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
controllerClass.py
|
# coding=utf-8
import os
import re
import time
import socket
import inspect
import threading
import subprocess
import logger
gsmThreadName = 'gsmReceptor'
gprsThreadName = 'gprsReceptor'
wifiThreadName = 'wifiReceptor'
emailThreadName = 'emailReceptor'
ethernetThreadName = 'ethernetReceptor'
bluetoothThreadName = 'bluetoothReceptor'
threadNameList = [gsmThreadName, gprsThreadName, wifiThreadName, ethernetThreadName, bluetoothThreadName, emailThreadName]
class Controller(threading.Thread):
availableGsm = False # Indicates whether GSM mode is available
availableGprs = False # Indicates whether GPRS mode is available
availableWifi = False # Indicates whether WIFI mode is available
availableEthernet = False # Indicates whether ETHERNET mode is available
availableBluetooth = False # Indicates whether BLUETOOTH mode is available
availableEmail = False # Indicates whether EMAIL mode is available
gsmInstance = None
gprsInstance = None
wifiInstance = None
ethernetInstance = None
bluetoothInstance = None
emailInstance = None
isActive = False
def __init__(self, _REFRESH_TIME):
threading.Thread.__init__(self, name = 'ControllerThread')
self.REFRESH_TIME = _REFRESH_TIME
def __del__(self):
self.gsmInstance.isActive = False
self.gprsInstance.isActive = False
self.wifiInstance.isActive = False
self.ethernetInstance.isActive = False
self.bluetoothInstance.isActive = False
self.emailInstance.isActive = False
# Wait for the receiver threads to finish
for receptorThread in threading.enumerate():
if receptorThread.getName() in threadNameList and receptorThread.isAlive():
receptorThread.join()
logger.write('INFO', '[CONTROLLER] Objeto destruido.')
def run(self):
self.isActive = True
while self.isActive:
self.availableGsm = self.verifyGsmConnection()
self.availableGprs = self.verifyGprsConnection()
self.availableWifi = self.verifyWifiConnection()
self.availableEthernet = self.verifyEthernetConnection()
self.availableBluetooth = self.verifyBluetoothConnection()
self.availableEmail = self.verifyEmailConnection()
time.sleep(self.REFRESH_TIME)
logger.write('WARNING', '[CONTROLLER] Función \'%s\' terminada.' % inspect.stack()[0][3])
def verifyGsmConnection(self):
# Build the regular expression
ttyUSBPattern = re.compile('ttyUSB[0-9]+')
lsDevProcess = subprocess.Popen(['ls', '/dev/'], stdout = subprocess.PIPE, stderr = subprocess.PIPE)
lsDevOutput, lsDevError = lsDevProcess.communicate()
ttyUSBDevices = ttyUSBPattern.findall(lsDevOutput)
# Connected USB devices were detected
for ttyUSBx in reversed(ttyUSBDevices):
# If the serial port was never set, the instance is not being used
if self.gsmInstance.serialPort is None:
# If no error occurs during configuration, set the modem to receive SMS and calls
if self.gsmInstance.connect('/dev/' + ttyUSBx):
gsmThread = threading.Thread(target = self.gsmInstance.receive, name = gsmThreadName)
logger.write('INFO', '[GSM] Listo para usarse (' + ttyUSBx + ').')
gsmThread.start()
return True
# If an error occurs during configuration, return 'False'
else:
return False
# If the modem is already active (working), return 'True'
elif self.gsmInstance.isActive:
return True
# We get here if the modem's 'connect' failed (and it is still plugged in)
else:
return False
# If there was a previous 'connect()' attempt, successful or not, we must clean up the port
if self.gsmInstance.serialPort is not None:
self.gsmInstance.successfulConnection = None
self.gsmInstance.serialPort = None
self.gsmInstance.isActive = False
self.gsmInstance.closePort()
return False
def verifyGprsConnection(self):
# Build the regular expression
pppPattern = re.compile('ppp[0-9]+')
for networkInterface in os.popen('ip link show').readlines():
# Use 'pppPattern.search(networkInterface)' to look for a match
matchedPattern = pppPattern.search(networkInterface)
# The current interface matches a 'ppp' pattern
if matchedPattern is not None and networkInterface.find("state UNKNOWN") > 0:
# This holds when no configuration attempt has been made yet
if self.gprsInstance.localInterface is None:
# Get the interface that matches the pattern
self.gprsInstance.localInterface = matchedPattern.group()
# Get the local IP address assigned statically or via DHCP
commandToExecute = 'ip addr show ' + self.gprsInstance.localInterface + ' | grep inet'
localIPAddress = os.popen(commandToExecute).readline().split()[1].split('/')[0]
# If no error occurs during configuration, start listening on that IP
if self.gprsInstance.connect(localIPAddress):
gprsThread = threading.Thread(target = self.gprsInstance.receive, name = gprsThreadName)
gprsInfo = self.gprsInstance.localInterface + ' - ' + self.gprsInstance.localIPAddress
logger.write('INFO', '[GRPS] Listo para usarse (' + gprsInfo + ').')
gprsThread.start()
return True
# If an error occurs during configuration, return 'False'
else:
return False
# The matched pattern equals the instance's interface
elif matchedPattern.group() == self.gprsInstance.localInterface:
# If no error occurred during configuration, return 'True'
if self.gprsInstance.successfulConnection:
return True
# Otherwise there was an error, so return 'False'
else:
return False
# The matched pattern is in use but it is not the instance's interface
else:
continue
# No match in the current iteration, so keep looking
else:
continue
# If we get here, there was an active connection and it was lost
if self.gprsInstance.localInterface is not None:
# Clear all fields of the NETWORK object
self.gprsInstance.successfulConnection = None
self.gprsInstance.localInterface = None
self.gprsInstance.localIPAddress = None
self.gprsInstance.isActive = False
return False
def verifyWifiConnection(self):
# Build the regular expression
wlanPattern = re.compile('wlan[0-9]+')
activeInterfacesList = open('/tmp/activeInterfaces', 'a+').read()
for networkInterface in os.popen('ip link show').readlines():
# Use 'wlanPattern.search(networkInterface)' to look for a match
matchedPattern = wlanPattern.search(networkInterface)
# The current interface matches a 'wlan' pattern
if matchedPattern is not None and networkInterface.find("state UP") > 0:
# The matched pattern is not in use and the instance is not active (it will have to be enabled)
if matchedPattern.group() not in activeInterfacesList and self.wifiInstance.localInterface is None:
# Get the interface that matches the pattern
self.wifiInstance.localInterface = matchedPattern.group()
# Write the interface to our file to mark it as busy
activeInterfacesFile = open('/tmp/activeInterfaces', 'a+')
activeInterfacesFile.write(self.wifiInstance.localInterface + '\n')
activeInterfacesFile.close()
# Get the local IP address assigned statically or via DHCP
commandToExecute = 'ip addr show ' + self.wifiInstance.localInterface + ' | grep inet'
localIPAddress = os.popen(commandToExecute).readline().split()[1].split('/')[0]
# If no error occurs during configuration, start listening on that IP
if self.wifiInstance.connect(localIPAddress):
wifiThread = threading.Thread(target = self.wifiInstance.receive, name = wifiThreadName)
wifiInfo = self.wifiInstance.localInterface + ' - ' + self.wifiInstance.localIPAddress
logger.write('INFO', '[WIFI] Listo para usarse (' + wifiInfo + ').')
wifiThread.start()
return True
# If an error occurs during configuration, return 'False'
else:
return False
# The matched pattern equals the instance's interface
elif matchedPattern.group() == self.wifiInstance.localInterface:
# If no error occurred during configuration, return 'True'
if self.wifiInstance.successfulConnection:
return True
# Otherwise there was an error, so return 'False'
else:
return False
# The matched pattern is in use but it is not the instance's interface
else:
continue
# No match in the current iteration, so keep looking
else:
continue
# If there was a previous 'connect()' attempt, successful or not, we must clean up the interface
if self.wifiInstance.localInterface is not None:
localInterface = self.wifiInstance.localInterface
# Clear all fields of the NETWORK object
self.wifiInstance.successfulConnection = None
self.wifiInstance.localInterface = None
self.wifiInstance.localIPAddress = None
self.wifiInstance.isActive = False
# Remove the used network interface from the file
dataToWrite = open('/tmp/activeInterfaces').read().replace(localInterface + '\n', '')
activeInterfacesFile = open('/tmp/activeInterfaces', 'w')
activeInterfacesFile.write(dataToWrite)
activeInterfacesFile.close()
return False
def verifyEthernetConnection(self):
# Build the regular expression
ethPattern = re.compile('eth[0-9]+')
activeInterfacesList = open('/tmp/activeInterfaces', 'a+').read()
for networkInterface in os.popen('ip link show').readlines():
# Use 'ethPattern.search(networkInterface)' to look for a match
matchedPattern = ethPattern.search(networkInterface)
# The current interface matches an 'eth' pattern
if matchedPattern is not None and networkInterface.find("state UP") > 0:
# The matched pattern is not in use and the instance is not active (it will have to be enabled)
if matchedPattern.group() not in activeInterfacesList and self.ethernetInstance.localInterface is None:
# Get the interface that matches the pattern
self.ethernetInstance.localInterface = matchedPattern.group()
# Write the interface to our file to mark it as busy
activeInterfacesFile = open('/tmp/activeInterfaces', 'a+')
activeInterfacesFile.write(self.ethernetInstance.localInterface + '\n')
activeInterfacesFile.close()
# Get the local IP address assigned statically or via DHCP
commandToExecute = 'ip addr show ' + self.ethernetInstance.localInterface + ' | grep inet'
localIPAddress = os.popen(commandToExecute).readline().split()[1].split('/')[0]
# If no error occurs during configuration, start listening on that IP
if self.ethernetInstance.connect(localIPAddress):
ethernetThread = threading.Thread(target = self.ethernetInstance.receive, name = ethernetThreadName)
ethernetInfo = self.ethernetInstance.localInterface + ' - ' + self.ethernetInstance.localIPAddress
logger.write('INFO', '[ETHERNET] Listo para usarse (' + ethernetInfo + ').')
ethernetThread.start()
return True
# If an error occurs during configuration, return 'False'
else:
return False
# The matched pattern equals the instance's interface
elif matchedPattern.group() == self.ethernetInstance.localInterface:
# If no error occurred during configuration, return 'True'
if self.ethernetInstance.successfulConnection:
return True
# Otherwise there was an error, so return 'False'
else:
return False
# The matched pattern is in use but it is not the instance's interface
else:
continue
# No match in the current iteration, so keep looking
else:
continue
# If there was a previous 'connect()' attempt, successful or not, we must clean up the interface
if self.ethernetInstance.localInterface is not None:
localInterface = self.ethernetInstance.localInterface
# Clear all fields of the NETWORK object
self.ethernetInstance.successfulConnection = None
self.ethernetInstance.localInterface = None
self.ethernetInstance.localIPAddress = None
self.ethernetInstance.isActive = False
# Remove the used network interface from the file
dataToWrite = open('/tmp/activeInterfaces').read().replace(localInterface + '\n', '')
activeInterfacesFile = open('/tmp/activeInterfaces', 'w')
activeInterfacesFile.write(dataToWrite)
activeInterfacesFile.close()
return False
def verifyBluetoothConnection(self):
activeInterfacesList = open('/tmp/activeInterfaces', 'a+').read()
# Example of bluetoothDevices: ['Devices:\n', '\thci0\t00:24:7E:64:7B:4A\n']
bluetoothDevices = os.popen('hcitool dev').readlines()
# Drop the first element ('Devices:\n')
bluetoothDevices.pop(0)
for btDevice in bluetoothDevices:
# Example of btDevice: \thci0\t00:24:7E:64:7B:4A\n
btInterface = btDevice.split('\t')[1]
btAddress = btDevice.split('\t')[2].replace('\n', '')
# The found interface is not in use and the instance is not active (it will have to be enabled)
if btInterface not in activeInterfacesList and self.bluetoothInstance.localInterface is None:
# Get the interface we found
self.bluetoothInstance.localInterface = btInterface
# Write the interface to our file to mark it as busy
activeInterfacesFile = open('/tmp/activeInterfaces', 'a+')
activeInterfacesFile.write(btInterface + '\n')
activeInterfacesFile.close()
# If no error occurs during configuration, start listening on that MAC address
if self.bluetoothInstance.connect(btAddress):
bluetoothThread = threading.Thread(target = self.bluetoothInstance.receive, name = bluetoothThreadName)
bluetoothInfo = self.bluetoothInstance.localInterface + ' - ' + self.bluetoothInstance.localMACAddress
logger.write('INFO', '[BLUETOOTH] Listo para usarse (' + bluetoothInfo + ').')
bluetoothThread.start()
return True
# If an error occurs during configuration, return 'False'
else:
return False
# The found interface equals the instance's interface
elif btInterface == self.bluetoothInstance.localInterface:
# If no error occurred during configuration, return 'True'
if self.bluetoothInstance.successfulConnection:
return True
# Otherwise there was an error, so return 'False'
else:
return False
# The found interface is in use but it is not the instance's interface
else:
continue
# If there was a previous 'connect()' attempt, successful or not, we must clean up the interface
if self.bluetoothInstance.localInterface is not None:
localInterface = self.bluetoothInstance.localInterface
# Clear all fields of the BLUETOOTH object
self.bluetoothInstance.successfulConnection = None
self.bluetoothInstance.localMACAddress = None
self.bluetoothInstance.localInterface = None
self.bluetoothInstance.isActive = False
# Remove the used network interface from the file
dataToWrite = open('/tmp/activeInterfaces').read().replace(localInterface + '\n', '')
activeInterfacesFile = open('/tmp/activeInterfaces', 'w')
activeInterfacesFile.write(dataToWrite)
activeInterfacesFile.close()
return False
def verifyEmailConnection(self):
TEST_REMOTE_SERVER = 'www.gmail.com'
try:
remoteHost = socket.gethostbyname(TEST_REMOTE_SERVER)
testSocket = socket.create_connection((remoteHost, 80), 2) # Determine whether it is reachable
# Check whether we have not yet tried to connect to the GMAIL servers (hence the 'None')
if self.emailInstance.successfulConnection is None:
# If no error occurs during configuration, start receiving EMAILs
if self.emailInstance.connect():
emailThread = threading.Thread(target = self.emailInstance.receive, name = emailThreadName)
emailThread.start()
logger.write('INFO', '[EMAIL] Listo para usarse (' + self.emailInstance.emailAccount + ').')
return True
# If an error occurs during configuration, return 'False'
else:
return False
# If EMAIL is already active (working), return 'True'
elif self.emailInstance.isActive:
return True
# We get here if 'connect' failed (misconfigured servers or ports)
else:
return False
# There is no Internet connection (TEST_REMOTE_SERVER is unreachable), so we will try again
except socket.error as DNSError:
if self.emailInstance.isActive:
self.emailInstance.successfulConnection = None
self.emailInstance.emailAccount = None
self.emailInstance.isActive = False
return False
|
deployovf.py
|
#!/usr/bin/env python
"""
Written by Leon Qin based on VMWare's sample code originally written by Tony Allen
Github: https://github.com/stormbeard
Blog: https://stormbeard.net/
This code has been released under the terms of the Apache 2 license
http://www.apache.org/licenses/LICENSE-2.0.html
Script to deploy VM via a single .ovf and a single .vmdk file.
"""
from os import system, path
from sys import exit
from threading import Thread
from time import sleep
from argparse import ArgumentParser
from getpass import getpass
from pyVim.connect import SmartConnectNoSSL, Disconnect
from pyVmomi import vim
def get_args():
"""
Get CLI arguments.
"""
parser = ArgumentParser(description='Arguments for talking to vCenter')
parser.add_argument('-s', '--host',
required=True,
action='store',
help='vSphere service to connect to.')
parser.add_argument('-o', '--port',
type=int,
default=443,
action='store',
help='Port to connect on.')
parser.add_argument('-u', '--user',
required=True,
action='store',
help='Username to use.')
parser.add_argument('-p', '--password',
required=False,
action='store',
help='Password to use.')
parser.add_argument('--datacenter_name',
required=False,
action='store',
default=None,
help='Name of the Datacenter you\
wish to use. If omitted, the first\
datacenter will be used.')
parser.add_argument('--datastore_name',
required=False,
action='store',
default=None,
help='Datastore you wish the VM to be deployed to. \
If left blank, VM will be put on the first \
datastore found.')
parser.add_argument('--cluster_name',
required=False,
action='store',
default=None,
help='Name of the cluster you wish the VM to\
end up on. If left blank the first cluster found\
will be used')
parser.add_argument('-v', '--vmdk_path',
required=True,
action='store',
default=None,
help='Path of the VMDK file to deploy.')
parser.add_argument('-f', '--ovf_path',
required=True,
action='store',
default=None,
help='Path of the OVF file to deploy.')
args = parser.parse_args()
if not args.password:
args.password = getpass(prompt='Enter password: ')
return args
def get_ovf_descriptor(ovf_path):
"""
Read in the OVF descriptor.
"""
if path.exists(ovf_path):
with open(ovf_path, 'r') as f:
try:
ovfd = f.read()
f.close()
return ovfd
except:
print ("Could not read file: {}".format(ovf_path))
exit(1)
def get_obj_in_list(obj_name, obj_list):
"""
Gets an object out of a list (obj_list) whose name matches obj_name.
"""
for o in obj_list:
if o.name == obj_name:
return o
print("Unable to find object by the name of {} in list:\n{}".format(obj_name, map(lambda o: o.name, obj_list)))
exit(1)
def get_objects(si, args):
"""
Return a dict containing the necessary objects for deployment.
"""
# Get datacenter object.
datacenter_list = si.content.rootFolder.childEntity
if args.datacenter_name:
datacenter_obj = get_obj_in_list(args.datacenter_name, datacenter_list)
else:
datacenter_obj = datacenter_list[0]
# Get datastore object.
datastore_list = datacenter_obj.datastoreFolder.childEntity
if args.datastore_name:
datastore_obj = get_obj_in_list(args.datastore_name, datastore_list)
elif len(datastore_list) > 0:
datastore_obj = datastore_list[0]
else:
print ("No datastores found in DC ({}).".format(datacenter_obj.name))
# Get cluster object.
cluster_list = datacenter_obj.hostFolder.childEntity
if args.cluster_name:
cluster_obj = get_obj_in_list(args.cluster_name, cluster_list)
elif len(cluster_list) > 0:
cluster_obj = cluster_list[0]
else:
print ("No clusters found in DC ({}).".format(datacenter_obj.name))
# Generate resource pool.
resource_pool_obj = cluster_obj.resourcePool
return {"datacenter": datacenter_obj,
"datastore": datastore_obj,
"resource pool": resource_pool_obj}
def keep_lease_alive(lease):
"""
Keeps the lease alive while POSTing the VMDK.
"""
while(True):
sleep(5)
try:
# Choosing arbitrary percentage to keep the lease alive.
lease.HttpNfcLeaseProgress(50)
if (lease.state == vim.HttpNfcLease.State.done):
return
# If the lease is released, we get an exception.
# Returning to kill the thread.
except:
return
def main():
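# Connect to vCenter, build an import spec from the OVF descriptor, then upload the VMDK through the HTTP NFC lease.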
args = get_args()
ovfd = get_ovf_descriptor(args.ovf_path)
try:
si = SmartConnectNoSSL(host=args.host,
user=args.user,
pwd=args.password,
port=args.port)
except:
print ("Unable to connect to {}".format(args.host))
exit(1)
objs = get_objects(si, args)
manager = si.content.ovfManager
spec_params = vim.OvfManager.CreateImportSpecParams()
import_spec = manager.CreateImportSpec(ovfd,
objs["resource pool"],
objs["datastore"],
spec_params)
lease = objs["resource pool"].ImportVApp(import_spec.importSpec,
objs["datacenter"].vmFolder)
while(True):
if (lease.state == vim.HttpNfcLease.State.ready):
# Assuming single VMDK.
url = lease.info.deviceUrl[0].url.replace('*', args.host)
# Spawn a background thread to keep the lease active while POSTing
# VMDK.
keepalive_thread = Thread(target=keep_lease_alive, args=(lease,))
keepalive_thread.start()
# POST the VMDK to the host via curl. Requests library would work
# too.
curl_cmd = (
"curl -Ss -X POST --insecure -T %s -H 'Content-Type: \
application/x-vnd.vmware-streamVmdk' %s" %
(args.vmdk_path, url))
system(curl_cmd)
lease.HttpNfcLeaseComplete()
keepalive_thread.join()
return 0
elif (lease.state == vim.HttpNfcLease.State.error):
print ("Lease error: " + lease.state.error)
exit(1)
Disconnect(si)
if __name__ == "__main__":
exit(main())
|
views.py
|
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from .models import Transaction, Block, Peer, Lock
from .api.views import getBlockchain, Blockchain
from auth.models import ActiveVoter
from .serializers import TransactionSerializer, BlockSerializer, PeerSerializer
from rest_framework.permissions import AllowAny
from rest_framework import generics
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.renderers import TemplateHTMLRenderer
from django.views.decorators.csrf import csrf_protect, csrf_exempt
import json, requests
from json import JSONEncoder, JSONDecoder
from django.urls import reverse
from .utils import JsonApi
import threading
from threading import Thread
@csrf_exempt
def requestLock():
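# Ask the centralized lock peer for the network-wide lock before a new transaction is created.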
centralizedLockPeer = "192.168.43.59:9000"
address = "http://" + centralizedLockPeer + "/network/api/requestLock/"
response = requests.post(address)
response = json.loads(response.content)
return HttpResponse(json.dumps(response))
@csrf_exempt
def freeLock():
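# Release the network-wide lock held on the centralized lock peer.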
centralizedLockPeer = "192.168.43.59:9000"
address = "http://" + centralizedLockPeer + "/network/api/freeLock/"
response = requests.post(address)
@csrf_exempt
def broadcastTransaction(serializedTransaction, peer):
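# POST the serialized transaction to a peer so that peer can mine a block for it.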
context = {
'transaction':json.dumps(serializedTransaction.data)
}
address = "http://" + peer.address + "/network/api/createBlock/"
response = requests.post(address, data=context)
response = json.loads(response.content)
return HttpResponse(json.dumps(response))
@csrf_exempt
def broadcastBlock(serializedBlock, peer, validatePackets):
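# Send the new block to a peer for verification and collect its verdict in validatePackets.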
context = {
'block':json.dumps(serializedBlock.data)
}
address = "http://" + peer.address + "/network/api/verifyBlock/"
response = requests.post(address, data=context)
response = json.loads(response.content)
validatePackets.append(response)
return HttpResponse(json.dumps(response))
@csrf_exempt
def blockAcception(serializedBlock, peer):
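# Ask a peer to accept (commit) the verified block into its chain.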
context = {
'block': json.dumps(serializedBlock.data)
}
address = "http://" + peer.address + "/network/api/blockAcception/"
requests.post(address, data=context)
return
@csrf_exempt
class ThreadWithReturnValue(Thread):
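# Thread subclass whose join() returns the value produced by the target callable.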
def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, *, daemon=None):
Thread.__init__(self, group, target, name, args, kwargs, daemon=daemon)
self._return = None
def run(self):
if self._target is not None:
self._return = self._target(*self._args, **self._kwargs)
def join(self):
Thread.join(self)
return self._return
#A main processing function...
@csrf_exempt
def castNewVote(request, candidateId):
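# Vote flow: acquire the network lock, create and broadcast the transaction, let peers verify the mined block, then commit it when successful verifications are at least as many as failures.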
if candidateId is not None:
voterId = request.session['voterId']
currentVoter = ActiveVoter.objects.filter(voter_id=voterId)[0]
if currentVoter is not None:
# Transaction Creation...
response = requestLock()
response = json.loads(response.content)
checkLock = response['success']
while(checkLock is not True):
response = requestLock()
response = json.loads(response.content)
checkLock = response['success']
if checkLock is True:
newTransaction = Transaction()
newTransaction.createNewTransaction(voterId, candidateId)
request.session['voteCasted'] = True
transaction = TransactionSerializer(newTransaction)
peerNodes = Peer.objects.filter()
#transaction Broadcasting...
threads = []
for peer in peerNodes:
print("sending to ", peer.address)
t = ThreadWithReturnValue(target=broadcastTransaction, args=(transaction, peer,))
threads.append(t)
response = ""
for thread in threads:
thread.start()
for thread in threads:
response = thread.join()
if response is not None:
break
response = json.loads(response.content)
responseBlock = json.loads(response['block'])
threads.clear()
if response is not None:
block = responseBlock
else:
return HttpResponse("Block Mining Failed !!!")
#Block Broadcasting and Verifying...
newBlock = Block()
prevBlock = Block.objects.filter(hash=block['prev_hash'])[0]
newBlock.createNewBlock(block['transaction_id'], prevBlock)
blockSerializer = BlockSerializer(newBlock)
validatePackets = []
for peer in peerNodes:
t = ThreadWithReturnValue(target=broadcastBlock, args=(blockSerializer, peer, validatePackets))
threads.append(t)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
#Counting Success packets...
successPackets = 0
failurePackets = 0
successHost = None
failureHost = None
for packet in validatePackets:
print(packet)
if packet['success'] is True:
successPackets += 1
successHost = packet['host']
else:
failurePackets += 1
failureHost = packet['host']
#Block Acception and Fault Tolerance...
threads.clear()
if(successPackets >= failurePackets):
for peer in peerNodes:
t = threading.Thread(target=blockAcception, args=(blockSerializer, peer))
threads.append(t)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for packet in validatePackets:
if packet['success'] is False:
address = "http://" + successHost + "/network/api/requestBlockchain/"
context = {
'peer':packet['host']
}
requests.post(address, data=context)
freeLock()
#return HttpResponse("Your vote has been successfully casted !!!")
return render(request, 'voteConfirmation.html')
else:
return HttpResponse("Your vote has not been casted ! Please try again !!!" + str(failurePackets))
else:
return HttpResponse("A problem occured while processing your vote ! Please try again !!!")
else:
return HttpResponse("Invalid request")
|
t.py
|
# Dancing Drone - Crazyflie dancing to the music beat
# Cnaan Aviv 2013-10-05
import time, sys
import usb
from threading import Thread
import logging
import cflib
from cflib.crazyflie import Crazyflie
from cfclient.utils.logconfigreader import LogConfig
from cfclient.utils.logconfigreader import LogVariable
#import alsaaudio, time, audioop
# Open the device in nonblocking capture mode. The last argument could
# just as well have been zero for blocking mode. Then we could have
# left out the sleep call in the bottom of the loop
#inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE,alsaaudio.PCM_NONBLOCK)
# Set attributes: Mono, 8000 Hz, 16 bit little endian samples
#inp.setchannels(1)
#inp.setrate(8000)
#inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)
# The period size controls the internal number of frames per period.
# The significance of this parameter is documented in the ALSA api.
# For our purposes, it is sufficient to know that reads from the device
# will return this many frames. Each frame being 2 bytes long.
# This means that the reads below will return either 320 bytes of data
# or 0 bytes of data. The latter is possible because we are in nonblocking
# mode.
#inp.setperiodsize(160)
logging.basicConfig(level=logging.INFO)
class Main:
def __init__(self):
#Thread(target=self.micmem).start()
print "x"
def a(self):
print "a"
Main()
|
utils_test.py
|
import asyncio
import collections
from contextlib import contextmanager
import copy
import functools
from glob import glob
import io
import itertools
import logging
import logging.config
import os
import queue
import re
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import textwrap
import threading
from time import sleep
import uuid
import warnings
import weakref
try:
import ssl
except ImportError:
ssl = None
import pytest
import dask
from toolz import merge, memoize, assoc
from tornado import gen, queues
from tornado.ioloop import IOLoop
from . import system
from .client import default_client, _global_clients, Client
from .compatibility import WINDOWS
from .comm import Comm
from .config import initialize_logging
from .core import connect, rpc, CommClosedError
from .deploy import SpecCluster
from .metrics import time
from .process import _cleanup_dangling
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (
ignoring,
log_errors,
mp_context,
get_ip,
get_ipv6,
DequeHandler,
reset_logger_locks,
sync,
iscoroutinefunction,
thread_state,
_offload_executor,
TimeoutError,
)
from .worker import Worker
from .nanny import Nanny
try:
import dask.array # register config
except ImportError:
pass
logger = logging.getLogger(__name__)
logging_levels = {
name: logger.level
for name, logger in logging.root.manager.loggerDict.items()
if isinstance(logger, logging.Logger)
}
_offload_executor.submit(lambda: None).result() # create thread during import
@pytest.fixture(scope="session")
def valid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("print('hello world!')")
return local_file
@pytest.fixture(scope="session")
def client_contract_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("distributed_script.py")
lines = (
"from distributed import Client",
"e = Client('127.0.0.1:8989')",
"print(e)",
)
local_file.write("\n".join(lines))
return local_file
@pytest.fixture(scope="session")
def invalid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("a+1")
return local_file
async def cleanup_global_workers():
for worker in Worker._instances:
await worker.close(report=False, executor_wait=False)
@pytest.fixture
def loop():
with check_instances():
with pristine_loop() as loop:
# Monkey-patch IOLoop.start to wait for loop stop
orig_start = loop.start
is_stopped = threading.Event()
is_stopped.set()
def start():
is_stopped.clear()
try:
orig_start()
finally:
is_stopped.set()
loop.start = start
yield loop
# Stop the loop in case it's still running
try:
sync(loop, cleanup_global_workers, callback_timeout=0.500)
loop.add_callback(loop.stop)
except RuntimeError as e:
if not re.match("IOLoop is clos(ed|ing)", str(e)):
raise
except TimeoutError:
pass
else:
is_stopped.wait()
@pytest.fixture
def loop_in_thread():
with pristine_loop() as loop:
thread = threading.Thread(target=loop.start, name="test IOLoop")
thread.daemon = True
thread.start()
loop_started = threading.Event()
loop.add_callback(loop_started.set)
loop_started.wait()
yield loop
loop.add_callback(loop.stop)
thread.join(timeout=5)
@pytest.fixture
def zmq_ctx():
import zmq
ctx = zmq.Context.instance()
yield ctx
ctx.destroy(linger=0)
@contextmanager
def pristine_loop():
IOLoop.clear_instance()
IOLoop.clear_current()
loop = IOLoop()
loop.make_current()
assert IOLoop.current() is loop
try:
yield loop
finally:
try:
loop.close(all_fds=True)
except (KeyError, ValueError):
pass
IOLoop.clear_instance()
IOLoop.clear_current()
@contextmanager
def mock_ipython():
from unittest import mock
from distributed._ipython_utils import remote_magic
ip = mock.Mock()
ip.user_ns = {}
ip.kernel = None
def get_ip():
return ip
with mock.patch("IPython.get_ipython", get_ip), mock.patch(
"distributed._ipython_utils.get_ipython", get_ip
):
yield ip
# cleanup remote_magic client cache
for kc in remote_magic._clients.values():
kc.stop_channels()
remote_magic._clients.clear()
original_config = copy.deepcopy(dask.config.config)
def reset_config():
dask.config.config.clear()
dask.config.config.update(copy.deepcopy(original_config))
def nodebug(func):
"""
A decorator to disable debug facilities during timing-sensitive tests.
Warning: this doesn't affect already created IOLoops.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
try:
return func(*args, **kwargs)
finally:
if old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = old_asyncio_debug
return wrapped
def nodebug_setup_module(module):
"""
A setup_module() that you can install in a test module to disable
debug facilities.
"""
module._old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if module._old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
def nodebug_teardown_module(module):
"""
A teardown_module() that you can install in a test module to reenable
debug facilities.
"""
if module._old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = module._old_asyncio_debug
def inc(x):
return x + 1
def dec(x):
return x - 1
def mul(x, y):
return x * y
def div(x, y):
return x / y
def deep(n):
if n > 0:
return deep(n - 1)
else:
return True
def throws(x):
raise RuntimeError("hello!")
def double(x):
return x * 2
def slowinc(x, delay=0.02):
sleep(delay)
return x + 1
def slowdec(x, delay=0.02):
sleep(delay)
return x - 1
def slowdouble(x, delay=0.02):
sleep(delay)
return 2 * x
def randominc(x, scale=1):
from random import random
sleep(random() * scale)
return x + 1
def slowadd(x, y, delay=0.02):
sleep(delay)
return x + y
def slowsum(seq, delay=0.02):
sleep(delay)
return sum(seq)
def slowidentity(*args, **kwargs):
delay = kwargs.get("delay", 0.02)
sleep(delay)
if len(args) == 1:
return args[0]
else:
return args
def run_for(duration, timer=time):
"""
Burn CPU for *duration* seconds.
"""
deadline = timer() + duration
while timer() <= deadline:
pass
# This dict grows at every varying() invocation
_varying_dict = collections.defaultdict(int)
_varying_key_gen = itertools.count()
class _ModuleSlot:
def __init__(self, modname, slotname):
self.modname = modname
self.slotname = slotname
def get(self):
return getattr(sys.modules[self.modname], self.slotname)
def varying(items):
"""
Return a function that returns a result (or raises an exception)
from *items* at each call.
"""
# cloudpickle would serialize the *values* of all globals
# used by *func* below, so we can't use `global <something>`.
# Instead look up the module by name to get the original namespace
# and not a copy.
slot = _ModuleSlot(__name__, "_varying_dict")
key = next(_varying_key_gen)
def func():
dct = slot.get()
i = dct[key]
if i == len(items):
raise IndexError
else:
x = items[i]
dct[key] = i + 1
if isinstance(x, Exception):
raise x
else:
return x
return func
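# Hedged usage sketch (added for illustration, not part of the original test
# helpers). varying() hands back a stateful callable that walks through *items*,
# raising any Exception instance it encounters and raising IndexError once the
# items are exhausted. It is wrapped in a function so nothing runs at import time.
def _varying_usage_example():
    f = varying([1, ValueError("boom"), 3])
    assert f() == 1  # first item is returned
    try:
        f()  # second item is an exception instance, so it is raised
    except ValueError:
        pass
    assert f() == 3  # third item is returned
    try:
        f()  # items exhausted
    except IndexError:
        pass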
def map_varying(itemslists):
"""
Like *varying*, but return the full specification for a map() call
on multiple items lists.
"""
def apply(func, *args, **kwargs):
return func(*args, **kwargs)
return apply, list(map(varying, itemslists))
async def geninc(x, delay=0.02):
await asyncio.sleep(delay)
return x + 1
def compile_snippet(code, dedent=True):
if dedent:
code = textwrap.dedent(code)
code = compile(code, "<dynamic>", "exec")
ns = globals()
exec(code, ns, ns)
if sys.version_info >= (3, 5):
compile_snippet(
"""
async def asyncinc(x, delay=0.02):
await asyncio.sleep(delay)
return x + 1
"""
)
assert asyncinc # noqa: F821
else:
asyncinc = None
_readone_queues = {}
async def readone(comm):
"""
Read one message at a time from a comm that reads lists of
messages.
"""
try:
q = _readone_queues[comm]
except KeyError:
q = _readone_queues[comm] = queues.Queue()
async def background_read():
while True:
try:
messages = await comm.read()
except CommClosedError:
break
for msg in messages:
q.put_nowait(msg)
q.put_nowait(None)
del _readone_queues[comm]
background_read()
msg = await q.get()
if msg is None:
raise CommClosedError
else:
return msg
def run_scheduler(q, nputs, port=0, **kwargs):
from distributed import Scheduler
# On Python 2.7 and Unix, fork() is used to spawn child processes,
# so avoid inheriting the parent's IO loop.
with pristine_loop() as loop:
async def _():
scheduler = await Scheduler(
validate=True, host="127.0.0.1", port=port, **kwargs
)
for i in range(nputs):
q.put(scheduler.address)
await scheduler.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
def run_worker(q, scheduler_q, **kwargs):
from distributed import Worker
reset_logger_locks()
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
async def _():
worker = await Worker(scheduler_addr, validate=True, **kwargs)
q.put(worker.address)
await worker.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
def run_nanny(q, scheduler_q, **kwargs):
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
async def _():
worker = await Nanny(scheduler_addr, validate=True, **kwargs)
q.put(worker.address)
await worker.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
@contextmanager
def check_active_rpc(loop, active_rpc_timeout=1):
active_before = set(rpc.active)
yield
# Some streams can take a bit of time to notice their peer
# has closed, and keep a coroutine (*) waiting for a CommClosedError
# before calling close_rpc() after a CommClosedError.
# This would happen especially if a non-localhost address is used,
# as Nanny does.
# (*) (example: gather_from_workers())
def fail():
pytest.fail(
"some RPCs left active by test: %s" % (set(rpc.active) - active_before)
)
async def wait():
await async_wait_for(
lambda: len(set(rpc.active) - active_before) == 0,
timeout=active_rpc_timeout,
fail_func=fail,
)
loop.run_sync(wait)
@pytest.fixture
def cluster_fixture(loop):
with cluster() as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def s(cluster_fixture):
scheduler, workers = cluster_fixture
return scheduler
@pytest.fixture
def a(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[0]
@pytest.fixture
def b(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[1]
@pytest.fixture
def client(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
@pytest.fixture
def client_secondary(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
@contextmanager
def tls_cluster_context(
worker_kwargs=None, scheduler_kwargs=None, security=None, **kwargs
):
security = security or tls_only_security()
worker_kwargs = assoc(worker_kwargs or {}, "security", security)
scheduler_kwargs = assoc(scheduler_kwargs or {}, "security", security)
with cluster(
worker_kwargs=worker_kwargs, scheduler_kwargs=scheduler_kwargs, **kwargs
) as (s, workers):
yield s, workers
@pytest.fixture
def tls_cluster(loop, security):
with tls_cluster_context(security=security) as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def tls_client(tls_cluster, loop, security):
s, workers = tls_cluster
with Client(s["address"], security=security, loop=loop) as client:
yield client
@pytest.fixture
def security():
return tls_only_security()
@contextmanager
def cluster(
nworkers=2,
nanny=False,
worker_kwargs={},
active_rpc_timeout=1,
disconnect_timeout=3,
scheduler_kwargs={},
):
ws = weakref.WeakSet()
enable_proctitle_on_children()
with clean(timeout=active_rpc_timeout, threads=False) as loop:
if nanny:
_run_worker = run_nanny
else:
_run_worker = run_worker
# The scheduler queue will receive the scheduler's address
scheduler_q = mp_context.Queue()
# Launch scheduler
scheduler = mp_context.Process(
name="Dask cluster test: Scheduler",
target=run_scheduler,
args=(scheduler_q, nworkers + 1),
kwargs=scheduler_kwargs,
)
ws.add(scheduler)
scheduler.daemon = True
scheduler.start()
# Launch workers
workers = []
for i in range(nworkers):
q = mp_context.Queue()
fn = "_test_worker-%s" % uuid.uuid4()
kwargs = merge(
{
"nthreads": 1,
"local_directory": fn,
"memory_limit": system.MEMORY_LIMIT,
},
worker_kwargs,
)
proc = mp_context.Process(
name="Dask cluster test: Worker",
target=_run_worker,
args=(q, scheduler_q),
kwargs=kwargs,
)
ws.add(proc)
workers.append({"proc": proc, "queue": q, "dir": fn})
for worker in workers:
worker["proc"].start()
try:
for worker in workers:
worker["address"] = worker["queue"].get(timeout=5)
except queue.Empty:
raise pytest.xfail.Exception("Worker failed to start in test")
saddr = scheduler_q.get()
start = time()
try:
try:
security = scheduler_kwargs["security"]
rpc_kwargs = {"connection_args": security.get_connection_args("client")}
except KeyError:
rpc_kwargs = {}
with rpc(saddr, **rpc_kwargs) as s:
while True:
nthreads = loop.run_sync(s.ncores)
if len(nthreads) == nworkers:
break
if time() - start > 5:
raise Exception("Timeout on cluster creation")
# avoid sending processes down to function
yield {"address": saddr}, [
{"address": w["address"], "proc": weakref.ref(w["proc"])}
for w in workers
]
finally:
logger.debug("Closing out test cluster")
loop.run_sync(
lambda: disconnect_all(
[w["address"] for w in workers],
timeout=disconnect_timeout,
rpc_kwargs=rpc_kwargs,
)
)
loop.run_sync(
lambda: disconnect(
saddr, timeout=disconnect_timeout, rpc_kwargs=rpc_kwargs
)
)
scheduler.terminate()
scheduler_q.close()
scheduler_q._reader.close()
scheduler_q._writer.close()
for w in workers:
w["proc"].terminate()
w["queue"].close()
w["queue"]._reader.close()
w["queue"]._writer.close()
scheduler.join(2)
del scheduler
for proc in [w["proc"] for w in workers]:
proc.join(timeout=2)
with ignoring(UnboundLocalError):
del worker, w, proc
del workers[:]
for fn in glob("_test_worker-*"):
with ignoring(OSError):
shutil.rmtree(fn)
try:
client = default_client()
except ValueError:
pass
else:
client.close()
start = time()
while any(proc.is_alive() for proc in ws):
text = str(list(ws))
sleep(0.2)
assert time() < start + 5, ("Workers still around after five seconds", text)
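# Hedged usage sketch (illustrative only, not part of the original module):
# spin up a small subprocess cluster and talk to it with a synchronous Client.
# Wrapped in a function so nothing runs at import time.
def _cluster_usage_example():
    with cluster(nworkers=2) as (scheduler, workers):
        with Client(scheduler["address"]) as c:
            assert c.submit(inc, 10).result() == 11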
async def disconnect(addr, timeout=3, rpc_kwargs=None):
rpc_kwargs = rpc_kwargs or {}
async def do_disconnect():
with ignoring(EnvironmentError, CommClosedError):
with rpc(addr, **rpc_kwargs) as w:
await w.terminate(close=True)
await asyncio.wait_for(do_disconnect(), timeout=timeout)
async def disconnect_all(addresses, timeout=3, rpc_kwargs=None):
await asyncio.gather(*[disconnect(addr, timeout, rpc_kwargs) for addr in addresses])
def gen_test(timeout=10):
""" Coroutine test
@gen_test(timeout=5)
def test_foo():
yield ... # use tornado coroutines
"""
def _(func):
def test_func():
with clean() as loop:
if iscoroutinefunction(func):
cor = func
else:
cor = gen.coroutine(func)
loop.run_sync(cor, timeout=timeout)
return test_func
return _
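# Hedged usage sketch (illustrative only): gen_test() also accepts native
# coroutine functions, not just tornado-style generator coroutines. The leading
# underscore keeps pytest from collecting this example as a real test.
@gen_test(timeout=5)
async def _gen_test_usage_example():
    await asyncio.sleep(0)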
from .scheduler import Scheduler
from .worker import Worker
async def start_cluster(
nthreads,
scheduler_addr,
loop,
security=None,
Worker=Worker,
scheduler_kwargs={},
worker_kwargs={},
):
s = await Scheduler(
loop=loop,
validate=True,
security=security,
port=0,
host=scheduler_addr,
**scheduler_kwargs
)
workers = [
Worker(
s.address,
nthreads=ncore[1],
name=i,
security=security,
loop=loop,
validate=True,
host=ncore[0],
**(merge(worker_kwargs, ncore[2]) if len(ncore) > 2 else worker_kwargs)
)
for i, ncore in enumerate(nthreads)
]
# for w in workers:
# w.rpc = workers[0].rpc
await asyncio.gather(*workers)
start = time()
while len(s.workers) < len(nthreads) or any(
comm.comm is None for comm in s.stream_comms.values()
):
await asyncio.sleep(0.01)
if time() - start > 5:
await asyncio.gather(*[w.close(timeout=1) for w in workers])
await s.close(fast=True)
raise Exception("Cluster creation timeout")
return s, workers
async def end_cluster(s, workers):
logger.debug("Closing out test cluster")
async def end_worker(w):
with ignoring(TimeoutError, CommClosedError, EnvironmentError):
await w.close(report=False)
await asyncio.gather(*[end_worker(w) for w in workers])
await s.close() # wait until scheduler stops completely
s.stop()
def gen_cluster(
nthreads=[("127.0.0.1", 1), ("127.0.0.1", 2)],
ncores=None,
scheduler="127.0.0.1",
timeout=10,
security=None,
Worker=Worker,
client=False,
scheduler_kwargs={},
worker_kwargs={},
client_kwargs={},
active_rpc_timeout=1,
config={},
clean_kwargs={},
):
from distributed import Client
""" Coroutine test with small cluster
@gen_cluster()
def test_foo(scheduler, worker1, worker2):
yield ... # use tornado coroutines
See also:
start
end
"""
if ncores is not None:
warnings.warn("ncores= has moved to nthreads=", stacklevel=2)
nthreads = ncores
worker_kwargs = merge(
{"memory_limit": system.MEMORY_LIMIT, "death_timeout": 10}, worker_kwargs
)
def _(func):
if not iscoroutinefunction(func):
func = gen.coroutine(func)
def test_func():
result = None
workers = []
with clean(timeout=active_rpc_timeout, **clean_kwargs) as loop:
async def coro():
with dask.config.set(config):
s = False
for i in range(5):
try:
s, ws = await start_cluster(
nthreads,
scheduler,
loop,
security=security,
Worker=Worker,
scheduler_kwargs=scheduler_kwargs,
worker_kwargs=worker_kwargs,
)
except Exception as e:
logger.error(
"Failed to start gen_cluster, retrying",
exc_info=True,
)
else:
workers[:] = ws
args = [s] + workers
break
if s is False:
raise Exception("Could not start cluster")
if client:
c = await Client(
s.address,
loop=loop,
security=security,
asynchronous=True,
**client_kwargs
)
args = [c] + args
try:
future = func(*args)
if timeout:
future = asyncio.wait_for(future, timeout)
result = await future
if s.validate:
s.validate_state()
finally:
if client and c.status not in ("closing", "closed"):
await c._close(fast=s.status == "closed")
await end_cluster(s, workers)
await asyncio.wait_for(cleanup_global_workers(), 1)
try:
c = await default_client()
except ValueError:
pass
else:
await c._close(fast=True)
for i in range(5):
if all(c.closed() for c in Comm._instances):
break
else:
await asyncio.sleep(0.05)
else:
L = [c for c in Comm._instances if not c.closed()]
Comm._instances.clear()
# raise ValueError("Unclosed Comms", L)
print("Unclosed Comms", L)
return result
result = loop.run_sync(
coro, timeout=timeout * 2 if timeout else timeout
)
for w in workers:
if getattr(w, "data", None):
try:
w.data.clear()
except EnvironmentError:
# zict backends can fail if their storage directory
# was already removed
pass
del w.data
return result
return test_func
return _
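# Hedged usage sketch (illustrative only): with client=True the decorated
# coroutine receives an asynchronous Client followed by the scheduler and the
# two default in-process workers. The leading underscore keeps pytest from
# collecting this example as a real test.
@gen_cluster(client=True)
async def _gen_cluster_usage_example(c, s, a, b):
    result = await c.submit(inc, 1)  # futures are awaitable on an async client
    assert result == 2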
def raises(func, exc=Exception):
try:
func()
return False
except exc:
return True
def terminate_process(proc):
if proc.poll() is None:
if sys.platform.startswith("win"):
proc.send_signal(signal.CTRL_BREAK_EVENT)
else:
proc.send_signal(signal.SIGINT)
try:
if sys.version_info[0] == 3:
proc.wait(10)
else:
start = time()
while proc.poll() is None and time() < start + 10:
sleep(0.02)
finally:
# Make sure we don't leave the process lingering around
with ignoring(OSError):
proc.kill()
@contextmanager
def popen(args, **kwargs):
kwargs["stdout"] = subprocess.PIPE
kwargs["stderr"] = subprocess.PIPE
if sys.platform.startswith("win"):
# Allow using CTRL_C_EVENT / CTRL_BREAK_EVENT
kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
dump_stdout = False
args = list(args)
if sys.platform.startswith("win"):
args[0] = os.path.join(sys.prefix, "Scripts", args[0])
else:
args[0] = os.path.join(
os.environ.get("DESTDIR", "") + sys.prefix, "bin", args[0]
)
proc = subprocess.Popen(args, **kwargs)
try:
yield proc
except Exception:
dump_stdout = True
raise
finally:
try:
terminate_process(proc)
finally:
# XXX Also dump stdout if return code != 0 ?
out, err = proc.communicate()
if dump_stdout:
print("\n\nPrint from stderr\n %s\n=================\n" % args[0][0])
print(err.decode())
print("\n\nPrint from stdout\n=================\n")
print(out.decode())
def wait_for_port(address, timeout=5):
assert isinstance(address, tuple)
deadline = time() + timeout
while True:
timeout = deadline - time()
if timeout < 0:
raise RuntimeError("Failed to connect to %s" % (address,))
try:
sock = socket.create_connection(address, timeout=timeout)
except EnvironmentError:
pass
else:
sock.close()
break
def wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
pytest.fail("condition not reached until %s seconds" % (timeout,))
async def async_wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
await asyncio.sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
pytest.fail("condition not reached until %s seconds" % (timeout,))
@memoize
def has_ipv6():
"""
Return whether IPv6 is locally functional. This doesn't guarantee IPv6
is properly configured outside of localhost.
"""
serv = cli = None
try:
serv = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
serv.bind(("::", 0))
serv.listen(5)
cli = socket.create_connection(serv.getsockname()[:2])
except EnvironmentError:
return False
else:
return True
finally:
if cli is not None:
cli.close()
if serv is not None:
serv.close()
if has_ipv6():
def requires_ipv6(test_func):
return test_func
else:
requires_ipv6 = pytest.mark.skip("ipv6 required")
async def assert_can_connect(addr, timeout=None, connection_args=None):
"""
Check that it is possible to connect to the distributed *addr*
within the given *timeout*.
"""
if timeout is None:
timeout = 0.5
comm = await connect(addr, timeout=timeout, connection_args=connection_args)
comm.abort()
async def assert_cannot_connect(
addr, timeout=None, connection_args=None, exception_class=EnvironmentError
):
"""
Check that it is impossible to connect to the distributed *addr*
within the given *timeout*.
"""
if timeout is None:
timeout = 0.5
with pytest.raises(exception_class):
comm = await connect(addr, timeout=timeout, connection_args=connection_args)
comm.abort()
async def assert_can_connect_from_everywhere_4_6(
port, timeout=None, connection_args=None, protocol="tcp"
):
"""
Check that the local *port* is reachable from all IPv4 and IPv6 addresses.
"""
args = (timeout, connection_args)
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), *args),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), *args),
]
if has_ipv6():
futures += [
assert_can_connect("%s://[::1]:%d" % (protocol, port), *args),
assert_can_connect("%s://[%s]:%d" % (protocol, get_ipv6(), port), *args),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_4(
port, timeout=None, connection_args=None, protocol="tcp"
):
"""
Check that the local *port* is reachable from all IPv4 addresses.
"""
args = (timeout, connection_args)
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), *args),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), *args),
]
if has_ipv6():
futures += [
assert_cannot_connect("%s://[::1]:%d" % (protocol, port), *args),
assert_cannot_connect("%s://[%s]:%d" % (protocol, get_ipv6(), port), *args),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_4(port, timeout=None, connection_args=None):
"""
Check that the local *port* is only reachable from local IPv4 addresses.
"""
args = (timeout, connection_args)
futures = [assert_can_connect("tcp://127.0.0.1:%d" % port, *args)]
if get_ip() != "127.0.0.1": # No outside IPv4 connectivity?
futures += [assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), *args)]
if has_ipv6():
futures += [
assert_cannot_connect("tcp://[::1]:%d" % port, *args),
assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), *args),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_6(
port, timeout=None, connection_args=None
):
"""
Check that the local *port* is reachable from all IPv6 addresses.
"""
assert has_ipv6()
args = (timeout, connection_args)
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, *args),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), *args),
assert_can_connect("tcp://[::1]:%d" % port, *args),
assert_can_connect("tcp://[%s]:%d" % (get_ipv6(), port), *args),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_6(port, timeout=None, connection_args=None):
"""
Check that the local *port* is only reachable from local IPv6 addresses.
"""
assert has_ipv6()
args = (timeout, connection_args)
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, *args),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), *args),
assert_can_connect("tcp://[::1]:%d" % port, *args),
]
if get_ipv6() != "::1": # No outside IPv6 connectivity?
futures += [assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), *args)]
await asyncio.gather(*futures)
@contextmanager
def captured_logger(logger, level=logging.INFO, propagate=None):
"""Capture output from the given Logger.
"""
if isinstance(logger, str):
logger = logging.getLogger(logger)
orig_level = logger.level
orig_handlers = logger.handlers[:]
if propagate is not None:
orig_propagate = logger.propagate
logger.propagate = propagate
sio = io.StringIO()
logger.handlers[:] = [logging.StreamHandler(sio)]
logger.setLevel(level)
try:
yield sio
finally:
logger.handlers[:] = orig_handlers
logger.setLevel(orig_level)
if propagate is not None:
logger.propagate = orig_propagate
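# Hedged usage sketch (illustrative only; the logger name is hypothetical).
# Wrapped in a function so nothing runs at import time.
def _captured_logger_example():
    with captured_logger("distributed.example", level=logging.WARNING) as sio:
        logging.getLogger("distributed.example").warning("something happened")
    assert "something happened" in sio.getvalue()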
@contextmanager
def captured_handler(handler):
"""Capture output from the given logging.StreamHandler.
"""
assert isinstance(handler, logging.StreamHandler)
orig_stream = handler.stream
handler.stream = io.StringIO()
try:
yield handler.stream
finally:
handler.stream = orig_stream
@contextmanager
def new_config(new_config):
"""
Temporarily change configuration dictionary.
"""
from .config import defaults
config = dask.config.config
orig_config = copy.deepcopy(config)
try:
config.clear()
config.update(copy.deepcopy(defaults))
dask.config.update(config, new_config)
initialize_logging(config)
yield
finally:
config.clear()
config.update(orig_config)
initialize_logging(config)
@contextmanager
def new_environment(changes):
saved_environ = os.environ.copy()
os.environ.update(changes)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
@contextmanager
def new_config_file(c):
"""
Temporarily change configuration file to match dictionary *c*.
"""
import yaml
old_file = os.environ.get("DASK_CONFIG")
fd, path = tempfile.mkstemp(prefix="dask-config")
try:
with os.fdopen(fd, "w") as f:
f.write(yaml.dump(c))
os.environ["DASK_CONFIG"] = path
try:
yield
finally:
if old_file:
os.environ["DASK_CONFIG"] = old_file
else:
del os.environ["DASK_CONFIG"]
finally:
os.remove(path)
certs_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "tests"))
def get_cert(filename):
"""
Get the path to one of the test TLS certificates.
"""
path = os.path.join(certs_dir, filename)
assert os.path.exists(path), path
return path
def tls_config():
"""
A functional TLS configuration with our test certs.
"""
ca_file = get_cert("tls-ca-cert.pem")
keycert = get_cert("tls-key-cert.pem")
return {
"distributed": {
"comm": {
"tls": {
"ca-file": ca_file,
"client": {"cert": keycert},
"scheduler": {"cert": keycert},
"worker": {"cert": keycert},
}
}
}
}
def tls_only_config():
"""
A functional TLS configuration with our test certs, disallowing
plain TCP communications.
"""
c = tls_config()
c["distributed"]["comm"]["require-encryption"] = True
return c
def tls_security():
"""
A Security object with proper TLS configuration.
"""
with new_config(tls_config()):
sec = Security()
return sec
def tls_only_security():
"""
A Security object with proper TLS configuration and disallowing plain
TCP communications.
"""
with new_config(tls_only_config()):
sec = Security()
assert sec.require_encryption
return sec
def get_server_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def get_client_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def bump_rlimit(limit, desired):
resource = pytest.importorskip("resource")
try:
soft, hard = resource.getrlimit(limit)
if soft < desired:
resource.setrlimit(limit, (desired, max(hard, desired)))
except Exception as e:
pytest.skip("rlimit too low (%s) and can't be increased: %s" % (soft, e))
def gen_tls_cluster(**kwargs):
kwargs.setdefault("nthreads", [("tls://127.0.0.1", 1), ("tls://127.0.0.1", 2)])
return gen_cluster(
scheduler="tls://127.0.0.1", security=tls_only_security(), **kwargs
)
@contextmanager
def save_sys_modules():
old_modules = sys.modules
old_path = sys.path
try:
yield
finally:
for i, elem in enumerate(sys.path):
if elem not in old_path:
del sys.path[i]
for elem in sys.modules.keys():
if elem not in old_modules:
del sys.modules[elem]
@contextmanager
def check_thread_leak():
active_threads_start = set(threading._active)
yield
start = time()
while True:
bad = [
t
for t, v in threading._active.items()
if t not in active_threads_start
and "Threaded" not in v.name
and "watch message" not in v.name
and "TCP-Executor" not in v.name
]
if not bad:
break
else:
sleep(0.01)
if time() > start + 5:
from distributed import profile
tid = bad[0]
thread = threading._active[tid]
call_stacks = profile.call_stack(sys._current_frames()[tid])
assert False, (thread, call_stacks)
@contextmanager
def check_process_leak(check=True):
for proc in mp_context.active_children():
proc.terminate()
yield
if check:
for i in range(100):
if not set(mp_context.active_children()):
break
else:
sleep(0.2)
else:
assert not mp_context.active_children()
_cleanup_dangling()
for proc in mp_context.active_children():
proc.terminate()
@contextmanager
def check_instances():
Client._instances.clear()
Worker._instances.clear()
Scheduler._instances.clear()
SpecCluster._instances.clear()
# assert all(n.status == "closed" for n in Nanny._instances), {
# n: n.status for n in Nanny._instances
# }
Nanny._instances.clear()
_global_clients.clear()
Comm._instances.clear()
yield
start = time()
while set(_global_clients):
sleep(0.1)
assert time() < start + 10
_global_clients.clear()
for w in Worker._instances:
with ignoring(RuntimeError): # closed IOLoop
w.loop.add_callback(w.close, report=False, executor_wait=False)
if w.status == "running":
w.loop.add_callback(w.close)
Worker._instances.clear()
for i in range(5):
if all(c.closed() for c in Comm._instances):
break
else:
sleep(0.1)
else:
L = [c for c in Comm._instances if not c.closed()]
Comm._instances.clear()
print("Unclosed Comms", L)
# raise ValueError("Unclosed Comms", L)
assert all(n.status == "closed" or n.status == "init" for n in Nanny._instances), {
n: n.status for n in Nanny._instances
}
# assert not list(SpecCluster._instances) # TODO
assert all(c.status == "closed" for c in SpecCluster._instances)
SpecCluster._instances.clear()
Nanny._instances.clear()
DequeHandler.clear_all_instances()
@contextmanager
def clean(threads=not WINDOWS, instances=True, timeout=1, processes=True):
@contextmanager
def null():
yield
with check_thread_leak() if threads else null():
with pristine_loop() as loop:
with check_process_leak(check=processes):
with check_instances() if instances else null():
with check_active_rpc(loop, timeout):
reset_config()
dask.config.set({"distributed.comm.timeouts.connect": "5s"})
# Restore default logging levels
# XXX use pytest hooks/fixtures instead?
for name, level in logging_levels.items():
logging.getLogger(name).setLevel(level)
yield loop
with ignoring(AttributeError):
del thread_state.on_event_loop_thread
@pytest.fixture
def cleanup():
with check_thread_leak():
with check_process_leak():
with check_instances():
reset_config()
dask.config.set({"distributed.comm.timeouts.connect": "5s"})
for name, level in logging_levels.items():
logging.getLogger(name).setLevel(level)
yield
|
Signal_Tester.py
|
# use for environment variables
import os
# use if needed to pass args to external modules
import sys
# used for math functions
import math
# used to create threads & dynamic loading of modules
import threading
import multiprocessing
import importlib
# used for directory handling
import glob
# Discord notifications need the requests module
import requests
# Needed for colorful console output. Install with: python3 -m pip install colorama (Mac/Linux) or pip install colorama (PC)
from colorama import init
init()
# needed for the binance API / websockets / Exception handling
from binance.client import Client
from binance.exceptions import BinanceAPIException
from binance.helpers import round_step_size
from requests.exceptions import ReadTimeout, ConnectionError
# used for dates
from datetime import date, datetime, timedelta
import time
# used to repeatedly execute the code
from itertools import count
# used to store trades and sell assets
import json
# copy files to log folder
import shutil
# Used to call OCO Script in utilities
import subprocess
# used to display holding coins in an ascii table
from prettytable import PrettyTable
# Load helper modules
from helpers.parameters import (
parse_args, load_config
)
DEFAULT_CONFIG_FILE = 'config.yml'
# Load arguments then parse settings
args = parse_args()
mymodule = {}
config_file = args.config if args.config else DEFAULT_CONFIG_FILE
parsed_config = load_config(config_file)
SIGNALLING_MODULES = parsed_config['trading_options']['SIGNALLING_MODULES']
def start_signal_thread(module):
try:
print(f'Starting {module}')
mymodule[module] = importlib.import_module(module)
t = multiprocessing.Process(target=mymodule[module].do_work, args=())
t.name = module
t.daemon = True
t.start()
time.sleep(2)
return t
except Exception as e:
if str(e) == "object of type 'NoneType' has no len()":
print(f'No external signal modules running')
else:
print(f'start_signal_thread(): Loading external signals exception: {e}')
def start_signal_threads():
signal_threads = []
try:
if len(SIGNALLING_MODULES) > 0:
for module in SIGNALLING_MODULES:
#print(f"Starting external signal: {module}")
# add process to a list. This is so the thread can be terminated at a later time
signal_threads.append(start_signal_thread(module))
else:
print(f'No modules to load {SIGNALLING_MODULES}')
except Exception as e:
if str(e) == "object of type 'NoneType' has no len()":
print(f'No external signal modules running')
else:
print(f'start_signal_threads(): Loading external signals exception: {e}')
def buy_external_signals():
external_list = {}
signals = {}
# check directory and load pairs from files into external_list
signals = glob.glob("signals/*.buy")
for filename in signals:
for line in open(filename):
symbol = line.strip()
external_list[symbol] = symbol
try:
os.remove(filename)
except:
if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file{txcolors.DEFAULT}')
return external_list
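# Hedged illustration (not part of the original script): each name listed in
# SIGNALLING_MODULES is imported and must expose a do_work() function, which
# start_signal_thread() runs in its own process. A module signals a buy by
# dropping a "<name>.buy" file into signals/ with one trading pair per line;
# buy_external_signals() above reads and then deletes those files. The module
# name and pair below are hypothetical.
#
#     # my_signal_module.py (hypothetical)
#     import os, time
#     def do_work():
#         while True:
#             os.makedirs('signals', exist_ok=True)
#             with open('signals/my_signal_module.buy', 'a+') as f:
#                 f.write('BTCUSDT\n')
#             time.sleep(60)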
def balance_report(last_price):
global trade_wins, trade_losses, session_profit_incfees_perc, session_profit_incfees_total,unrealised_session_profit_incfees_perc,unrealised_session_profit_incfees_total
unrealised_session_profit_incfees_perc = 0
unrealised_session_profit_incfees_total = 0
BUDGET = TRADE_SLOTS * TRADE_TOTAL
exposure_calcuated = 0
for coin in list(coins_bought):
LastPrice = float(last_price[coin]['price'])
sellFee = (LastPrice * (TRADING_FEE/100))
BuyPrice = float(coins_bought[coin]['bought_at'])
buyFee = (BuyPrice * (TRADING_FEE/100))
exposure_calcuated = exposure_calcuated + round(float(coins_bought[coin]['bought_at']) * float(coins_bought[coin]['volume']),0)
#PriceChangeIncFees_Total = float(((LastPrice+sellFee) - (BuyPrice+buyFee)) * coins_bought[coin]['volume'])
PriceChangeIncFees_Total = float(((LastPrice-sellFee) - (BuyPrice+buyFee)) * coins_bought[coin]['volume'])
# unrealised_session_profit_incfees_perc = float(unrealised_session_profit_incfees_perc + PriceChangeIncFees_Perc)
unrealised_session_profit_incfees_total = float(unrealised_session_profit_incfees_total + PriceChangeIncFees_Total)
unrealised_session_profit_incfees_perc = (unrealised_session_profit_incfees_total / BUDGET) * 100
DECIMALS = int(decimals())
# CURRENT_EXPOSURE = round((TRADE_TOTAL * len(coins_bought)), DECIMALS)
CURRENT_EXPOSURE = round(exposure_calcuated, 0)
INVESTMENT_TOTAL = round((TRADE_TOTAL * TRADE_SLOTS), DECIMALS)
# truncating some of the above values to the correct decimal places before printing
WIN_LOSS_PERCENT = 0
if (trade_wins > 0) and (trade_losses > 0):
WIN_LOSS_PERCENT = round((trade_wins / (trade_wins+trade_losses)) * 100, 2)
if (trade_wins > 0) and (trade_losses == 0):
WIN_LOSS_PERCENT = 100
market_profit = ((market_currprice - market_startprice)/ market_startprice) * 100
mode = "Live (REAL MONEY)"
discord_mode = "Live"
if TEST_MODE:
mode = "Test (no real money used)"
discord_mode = "Test"
font = f'{txcolors.ENDC}{txcolors.YELLOW}{txcolors.BOLD}{txcolors.UNDERLINE}'
extsigs = ""
try:
for module in SIGNALLING_MODULES:
if extsigs == "":
extsigs = module
else:
extsigs = extsigs + ', ' + module
except Exception as e:
pass
if extsigs == "":
extsigs = "No external signals running"
print(f'')
print(f'--------')
print(f"STARTED : {str(bot_started_datetime).split('.')[0]} | Running for: {str(datetime.now() - bot_started_datetime).split('.')[0]}")
print(f'CURRENT HOLDS : {len(coins_bought)}/{TRADE_SLOTS} ({float(CURRENT_EXPOSURE):g}/{float(INVESTMENT_TOTAL):g} {PAIR_WITH})')
if REINVEST_PROFITS:
print(f'ADJ TRADE TOTAL : {TRADE_TOTAL:.2f} (Current TRADE TOTAL adjusted to reinvest profits)')
print(f'BUYING MODE : {font if mode == "Live (REAL MONEY)" else txcolors.DEFAULT}{mode}{txcolors.DEFAULT}{txcolors.ENDC}')
print(f'Buying Paused : {bot_paused}')
print(f'')
print(f'SESSION PROFIT (Inc Fees)')
print(f'Realised : {txcolors.SELL_PROFIT if session_profit_incfees_perc > 0. else txcolors.SELL_LOSS}{session_profit_incfees_perc:.4f}% Est:${session_profit_incfees_total:.4f} {PAIR_WITH}{txcolors.DEFAULT}')
print(f'Unrealised : {txcolors.SELL_PROFIT if unrealised_session_profit_incfees_perc > 0. else txcolors.SELL_LOSS}{unrealised_session_profit_incfees_perc:.4f}% Est:${unrealised_session_profit_incfees_total:.4f} {PAIR_WITH}{txcolors.DEFAULT}')
print(f' Total : {txcolors.SELL_PROFIT if (session_profit_incfees_perc + unrealised_session_profit_incfees_perc) > 0. else txcolors.SELL_LOSS}{session_profit_incfees_perc + unrealised_session_profit_incfees_perc:.4f}% Est:${session_profit_incfees_total+unrealised_session_profit_incfees_total:.4f} {PAIR_WITH}{txcolors.DEFAULT}')
print(f'')
print(f'ALL TIME DATA :')
print(f"Market Profit : {txcolors.SELL_PROFIT if market_profit > 0. else txcolors.SELL_LOSS}{market_profit:.4f}% (BTCUSDT Since STARTED){txcolors.DEFAULT}")
print(f'Bot Profit : {txcolors.SELL_PROFIT if historic_profit_incfees_perc > 0. else txcolors.SELL_LOSS}{historic_profit_incfees_perc:.4f}% Est:${historic_profit_incfees_total:.4f} {PAIR_WITH}{txcolors.DEFAULT}')
print(f'Completed Trades: {trade_wins+trade_losses} (Wins:{trade_wins} Losses:{trade_losses})')
print(f'Win Ratio : {float(WIN_LOSS_PERCENT):g}%')
print(f'')
print(f'External Signals: {extsigs}')
print(f'--------')
print(f'')
#msg1 = str(bot_started_datetime) + " | " + str(datetime.now() - bot_started_datetime)
msg1 = str(datetime.now()).split('.')[0]
msg2 = " | " + str(len(coins_bought)) + "/" + str(TRADE_SLOTS) + " | PBOT: " + str(bot_paused) + " | MODE: " + str(discord_mode)
msg2 = msg2 + ' SPR%: ' + str(round(session_profit_incfees_perc,2)) + ' SPR$: ' + str(round(session_profit_incfees_total,4))
msg2 = msg2 + ' SPU%: ' + str(round(unrealised_session_profit_incfees_perc,2)) + ' SPU$: ' + str(round(unrealised_session_profit_incfees_total,4))
msg2 = msg2 + ' SPT%: ' + str(round(session_profit_incfees_perc + unrealised_session_profit_incfees_perc,2)) + ' SPT$: ' + str(round(session_profit_incfees_total+unrealised_session_profit_incfees_total,4))
msg2 = msg2 + ' ATP%: ' + str(round(historic_profit_incfees_perc,2)) + ' ATP$: ' + str(round(historic_profit_incfees_total,4))
msg2 = msg2 + ' CTT: ' + str(trade_wins+trade_losses) + ' CTW: ' + str(trade_wins) + ' CTL: ' + str(trade_losses) + ' CTWR%: ' + str(round(WIN_LOSS_PERCENT,2))
msg_discord_balance(msg1, msg2)
history_log(session_profit_incfees_perc, session_profit_incfees_total, unrealised_session_profit_incfees_perc, unrealised_session_profit_incfees_total, session_profit_incfees_perc + unrealised_session_profit_incfees_perc, session_profit_incfees_total+unrealised_session_profit_incfees_total, historic_profit_incfees_perc, historic_profit_incfees_total, trade_wins+trade_losses, trade_wins, trade_losses, WIN_LOSS_PERCENT)
return msg1 + msg2
def history_log(sess_profit_perc, sess_profit, sess_profit_perc_unreal, sess_profit_unreal, sess_profit_perc_total, sess_profit_total, alltime_profit_perc, alltime_profit, total_trades, won_trades, lost_trades, winloss_ratio):
global last_history_log_date
time_between_insertion = datetime.now() - last_history_log_date
# only log balance to log file once every 60 seconds
if time_between_insertion.seconds > 60:
last_history_log_date = datetime.now()
timestamp = datetime.now().strftime("%y-%m-%d %H:%M:%S")
if not os.path.exists(HISTORY_LOG_FILE):
with open(HISTORY_LOG_FILE,'a+') as f:
f.write('Datetime\tCoins Holding\tTrade Slots\tPausebot Active\tSession Profit %\tSession Profit $\tSession Profit Unrealised %\tSession Profit Unrealised $\tSession Profit Total %\tSession Profit Total $\tAll Time Profit %\tAll Time Profit $\tTotal Trades\tWon Trades\tLost Trades\tWin Loss Ratio\n')
with open(HISTORY_LOG_FILE,'a+') as f:
f.write(f'{timestamp}\t{len(coins_bought)}\t{TRADE_SLOTS}\t{str(bot_paused)}\t{str(round(sess_profit_perc,2))}\t{str(round(sess_profit,4))}\t{str(round(sess_profit_perc_unreal,2))}\t{str(round(sess_profit_unreal,4))}\t{str(round(sess_profit_perc_total,2))}\t{str(round(sess_profit_total,4))}\t{str(round(alltime_profit_perc,2))}\t{str(round(alltime_profit,4))}\t{str(total_trades)}\t{str(won_trades)}\t{str(lost_trades)}\t{str(winloss_ratio)}\n')
if __name__ == '__main__':
print('Starting signals.....')
signalthreads = start_signal_threads()
try:
while True:
volatile_coins = {}
externals = {}
# Check signals and log
externals = buy_external_signals()
for excoin in externals:
if excoin not in volatile_coins and excoin not in coins_bought and \
(len(coins_bought) + len(volatile_coins)) < TRADE_SLOTS:
volatile_coins[excoin] = 1
exnumber +=1
print(f"External signal received on {excoin}, purchasing ${TRADE_TOTAL} {PAIR_WITH} value of {excoin}!")
balance_report(last_price)
time.sleep(1)
except KeyboardInterrupt:
sys.exit(0)
|
DependencyNodeTest.py
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import threading
import collections
import IECore
import Gaffer
import GafferTest
class DependencyNodeTest( GafferTest.TestCase ) :
def testDirtyOnDisconnect( self ) :
n1 = GafferTest.AddNode( "n1" )
n2 = GafferTest.AddNode( "n2" )
n1["op1"].setValue( 2 )
n1["op2"].setValue( 3 )
dirtied = GafferTest.CapturingSlot( n2.plugDirtiedSignal() )
set = GafferTest.CapturingSlot( n2.plugSetSignal() )
n2["op1"].setInput( n1["sum"] )
self.assertEqual( len( set ), 0 )
self.assertEqual( len( dirtied ), 2 )
self.failUnless( dirtied[0][0].isSame( n2["op1"] ) )
self.failUnless( dirtied[1][0].isSame( n2["sum"] ) )
n2["op1"].setInput( None )
self.assertEqual( len( set ), 1 )
self.failUnless( set[0][0].isSame( n2["op1"] ) )
self.assertEqual( len( dirtied ), 4 )
self.failUnless( dirtied[2][0].isSame( n2["op1"] ) )
self.failUnless( dirtied[3][0].isSame( n2["sum"] ) )
def testDirtyPropagationForCompoundPlugs( self ) :
class CompoundOut( Gaffer.DependencyNode ) :
def __init__( self, name="CompoundOut" ) :
Gaffer.DependencyNode.__init__( self, name )
self["in"] = Gaffer.IntPlug()
self["out"] = Gaffer.CompoundPlug( direction = Gaffer.Plug.Direction.Out )
self["out"]["one"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
self["out"]["two"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
self["behaveBadly"] = Gaffer.BoolPlug( defaultValue = False )
def affects( self, input ) :
outputs = Gaffer.DependencyNode.affects( self, input )
if input.isSame( self["in"] ) :
if self["behaveBadly"].getValue() :
# we're not allowed to return a CompoundPlug in affects() - we're
# just doing it here to make sure we can see that the error is detected.
outputs.append( self["out"] )
else :
# to behave well we must list all leaf level children explicitly.
outputs.extend( self["out"].children() )
return outputs
class CompoundIn( Gaffer.DependencyNode ) :
def __init__( self, name="CompoundIn" ) :
Gaffer.DependencyNode.__init__( self, name )
self["in"] = Gaffer.CompoundPlug()
self["in"]["one"] = Gaffer.IntPlug()
self["in"]["two"] = Gaffer.IntPlug()
self["out"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
def affects( self, input ) :
# affects should never be called with a CompoundPlug - only
# leaf level plugs.
assert( not input.isSame( self["in"] ) )
outputs = Gaffer.DependencyNode.affects( self, input )
if self["in"].isAncestorOf( input ) :
outputs.append( self["out"] )
return outputs
src = CompoundOut()
dst = CompoundIn()
dst["in"].setInput( src["out"] )
srcDirtied = GafferTest.CapturingSlot( src.plugDirtiedSignal() )
dstDirtied = GafferTest.CapturingSlot( dst.plugDirtiedSignal() )
src["behaveBadly"].setValue( True )
self.assertEqual( len( srcDirtied ), 1 )
self.assertTrue( srcDirtied[0][0].isSame( src["behaveBadly"] ) )
with IECore.CapturingMessageHandler() as mh :
src["in"].setValue( 10 )
self.assertEqual( src["in"].getValue(), 10 )
self.assertEqual( len( mh.messages ), 1 )
self.assertEqual( mh.messages[0].level, IECore.Msg.Level.Error )
self.assertEqual( mh.messages[0].context, "CompoundOut::affects()" )
self.assertEqual( mh.messages[0].message, "Non-leaf plug out returned by affects()" )
src["behaveBadly"].setValue( False )
del srcDirtied[:]
src["in"].setValue( 20 )
srcDirtiedNames = set( [ x[0].fullName() for x in srcDirtied ] )
self.assertEqual( len( srcDirtiedNames ), 4 )
self.assertTrue( "CompoundOut.in" in srcDirtiedNames )
self.assertTrue( "CompoundOut.out.one" in srcDirtiedNames )
self.assertTrue( "CompoundOut.out.two" in srcDirtiedNames )
self.assertTrue( "CompoundOut.out" in srcDirtiedNames )
dstDirtiedNames = set( [ x[0].fullName() for x in dstDirtied ] )
self.assertEqual( len( dstDirtiedNames ), 4 )
self.assertTrue( "CompoundIn.in.one" in dstDirtiedNames )
self.assertTrue( "CompoundIn.in.two" in dstDirtiedNames )
self.assertTrue( "CompoundIn.in" in dstDirtiedNames )
self.assertTrue( "CompoundIn.out" in dstDirtiedNames )
def testAffectsRejectsCompoundPlugs( self ) :
n = GafferTest.CompoundPlugNode()
self.assertRaises( RuntimeError, n.affects, n["p"] )
def testAffectsWorksWithPlugs( self ) :
# check that we can propagate dirtiness for simple Plugs, and
# not just ValuePlugs.
class SimpleDependencyNode( Gaffer.DependencyNode ) :
def __init__( self, name="PassThrough" ) :
Gaffer.DependencyNode.__init__( self, name )
self.addChild( Gaffer.Plug( "in", Gaffer.Plug.Direction.In ) )
self.addChild( Gaffer.Plug( "out", Gaffer.Plug.Direction.Out ) )
def affects( self, input ) :
if input.isSame( self["in"] ) :
return [ self["out"] ]
return []
s1 = SimpleDependencyNode()
s2 = SimpleDependencyNode()
cs = GafferTest.CapturingSlot( s2.plugDirtiedSignal() )
s2["in"].setInput( s1["out"] )
self.assertEqual( len( cs ), 2 )
self.assertTrue( cs[0][0].isSame( s2["in"] ) )
self.assertTrue( cs[1][0].isSame( s2["out"] ) )
def testEnableBehaviour( self ) :
n = Gaffer.DependencyNode()
self.assertEqual( n.enabledPlug(), None )
m = GafferTest.MultiplyNode()
self.assertEqual( m.enabledPlug(), None )
self.assertEqual( m.correspondingInput( m["product"] ), None )
class EnableAbleNode( Gaffer.DependencyNode ) :
def __init__( self, name = "EnableAbleNode" ) :
Gaffer.DependencyNode.__init__( self, name )
self.addChild( Gaffer.BoolPlug( "enabled", Gaffer.Plug.Direction.In, True ) )
self.addChild( Gaffer.IntPlug( "aIn" ) )
self.addChild( Gaffer.IntPlug( "bIn" ) )
self.addChild( Gaffer.IntPlug( "aOut", Gaffer.Plug.Direction.Out ) )
self.addChild( Gaffer.IntPlug( "bOut", Gaffer.Plug.Direction.Out ) )
self.addChild( Gaffer.IntPlug( "cOut", Gaffer.Plug.Direction.Out ) )
def enabledPlug( self ) :
return self["enabled"]
def correspondingInput( self, output ) :
if output.isSame( self["aOut"] ) :
return self["aIn"]
elif output.isSame( self["bOut"] ) :
return self["bIn"]
return None
e = EnableAbleNode()
self.assertTrue( e.enabledPlug().isSame( e["enabled"] ) )
self.assertTrue( e.correspondingInput( e["aOut"] ).isSame( e["aIn"] ) )
self.assertTrue( e.correspondingInput( e["bOut"] ).isSame( e["bIn"] ) )
self.assertEqual( e.correspondingInput( e["enabled"] ), None )
self.assertEqual( e.correspondingInput( e["aIn"] ), None )
self.assertEqual( e.correspondingInput( e["bIn"] ), None )
self.assertEqual( e.correspondingInput( e["cOut"] ), None )
def testNoDirtiedSignalDuplicates( self ) :
a1 = GafferTest.AddNode()
a2 = GafferTest.AddNode()
a2["op1"].setInput( a1["sum"] )
a2["op2"].setInput( a1["sum"] )
cs = GafferTest.CapturingSlot( a2.plugDirtiedSignal() )
a1["op1"].setValue( 21 )
self.assertEqual( len( cs ), 3 )
self.assertTrue( cs[0][0].isSame( a2["op1"] ) )
self.assertTrue( cs[1][0].isSame( a2["op2"] ) )
self.assertTrue( cs[2][0].isSame( a2["sum"] ) )
def testSettingValueAlsoSignalsDirtiness( self ) :
a = GafferTest.AddNode()
cs = GafferTest.CapturingSlot( a.plugDirtiedSignal() )
a["op1"].setValue( 21 )
self.assertEqual( len( cs ), 2 )
self.assertTrue( cs[0][0].isSame( a["op1"] ) )
self.assertTrue( cs[1][0].isSame( a["sum"] ) )
def testDirtyPropagationThreading( self ) :
def f() :
n1 = GafferTest.AddNode()
n2 = GafferTest.AddNode()
n3 = GafferTest.AddNode()
n2["op1"].setInput( n1["sum"] )
n2["op2"].setInput( n1["sum"] )
n3["op1"].setInput( n2["sum"] )
for i in range( 1, 100 ) :
cs = GafferTest.CapturingSlot( n3.plugDirtiedSignal() )
n1["op1"].setValue( i )
self.assertEqual( len( cs ), 2 )
self.assertTrue( cs[0][0].isSame( n3["op1"] ) )
self.assertTrue( cs[1][0].isSame( n3["sum"] ) )
threads = []
for i in range( 0, 10 ) :
t = threading.Thread( target = f )
t.start()
threads.append( t )
for t in threads :
t.join()
def testParentDirtinessSignalledAfterAllChildren( self ) :
n = Gaffer.DependencyNode()
n["i"] = Gaffer.FloatPlug()
n["o"] = Gaffer.V3fPlug( direction = Gaffer.Plug.Direction.Out )
for c in n["o"].children() :
c.setInput( n["i"] )
cs = GafferTest.CapturingSlot( n.plugDirtiedSignal() )
n["i"].setValue( 10 )
self.assertEqual( len( cs ), 5 )
self.assertTrue( cs[0][0].isSame( n["i"] ) )
self.assertTrue( cs[1][0].isSame( n["o"]["x"] ) )
self.assertTrue( cs[2][0].isSame( n["o"]["y"] ) )
self.assertTrue( cs[3][0].isSame( n["o"]["z"] ) )
self.assertTrue( cs[4][0].isSame( n["o"] ) )
def testEfficiency( self ) :
# Node with compound plugs where every child
# of the input compound affects every child
# of the output compound. When a bunch of these
# nodes are connected in series, an explosion
# of plug interdependencies results.
class FanTest( Gaffer.DependencyNode ) :
def __init__( self, name = "FanTest" ) :
Gaffer.DependencyNode.__init__( self, name )
self["in"] = Gaffer.CompoundPlug()
self["out"] = Gaffer.CompoundPlug( direction = Gaffer.Plug.Direction.Out )
for i in range( 0, 10 ) :
self["in"].addChild( Gaffer.IntPlug( "i%d" % i ) )
self["out"].addChild( Gaffer.IntPlug( "o%d" % i, direction = Gaffer.Plug.Direction.Out ) )
def affects( self, input ) :
result = Gaffer.DependencyNode.affects( self, input )
if input.parent().isSame( self["in"] ) :
result.extend( self["out"].children() )
return result
# Connect nodes from top to bottom.
# This is a simpler case, because when
# the connection is made, no downstream
# connections exist which must be checked
# for cycles and flagged for dirtiness
# etc.
f1 = FanTest( "f1" )
f2 = FanTest( "f2" )
f3 = FanTest( "f3" )
f4 = FanTest( "f4" )
f5 = FanTest( "f5" )
f6 = FanTest( "f6" )
f7 = FanTest( "f7" )
f2["in"].setInput( f1["out"] )
f3["in"].setInput( f2["out"] )
f4["in"].setInput( f3["out"] )
f5["in"].setInput( f4["out"] )
f6["in"].setInput( f5["out"] )
f7["in"].setInput( f6["out"] )
f1["in"][0].setValue( 10 )
# Connect nodes from bottom to top.
# This case potentially has even worse
# performance because when each connection
# is made, a downstream network exists.
f1 = FanTest( "f1" )
f2 = FanTest( "f2" )
f3 = FanTest( "f3" )
f4 = FanTest( "f4" )
f5 = FanTest( "f5" )
f6 = FanTest( "f6" )
f7 = FanTest( "f7" )
f7["in"].setInput( f6["out"] )
f6["in"].setInput( f5["out"] )
f5["in"].setInput( f4["out"] )
f4["in"].setInput( f3["out"] )
f3["in"].setInput( f2["out"] )
f2["in"].setInput( f1["out"] )
f1["in"][0].setValue( 10 )
def testDirtyPropagationScoping( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferTest.AddNode()
cs = GafferTest.CapturingSlot( s["n"].plugDirtiedSignal() )
with Gaffer.UndoScope( s ) :
s["n"]["op1"].setValue( 20 )
s["n"]["op2"].setValue( 21 )
# Even though we made two changes, we only want
# dirtiness to have been signalled once, because
# we grouped them logically in an UndoScope.
self.assertEqual( len( cs ), 3 )
self.assertTrue( cs[0][0].isSame( s["n"]["op1"] ) )
self.assertTrue( cs[1][0].isSame( s["n"]["op2"] ) )
self.assertTrue( cs[2][0].isSame( s["n"]["sum"] ) )
# Likewise, when we undo.
del cs[:]
s.undo()
self.assertEqual( len( cs ), 3 )
self.assertTrue( cs[0][0].isSame( s["n"]["op2"] ) )
self.assertTrue( cs[1][0].isSame( s["n"]["op1"] ) )
self.assertTrue( cs[2][0].isSame( s["n"]["sum"] ) )
# And when we redo.
del cs[:]
s.redo()
self.assertEqual( len( cs ), 3 )
self.assertTrue( cs[0][0].isSame( s["n"]["op1"] ) )
self.assertTrue( cs[1][0].isSame( s["n"]["op2"] ) )
self.assertTrue( cs[2][0].isSame( s["n"]["sum"] ) )
def testDirtyPropagationScopingForCompoundPlugInputChange( self ) :
n1 = GafferTest.CompoundPlugNode()
n2 = GafferTest.CompoundPlugNode()
n3 = GafferTest.CompoundPlugNode()
# We never want to be signalled at a point
# when the child connections are not in
# a consistent state.
InputState = collections.namedtuple( "InputState", ( "p", "s", "f" ) )
inputStates = []
def plugDirtied( p ) :
# We can't use self.assert*() in here,
# because exceptions are caught in plugDirtiedSignal()
# handling. So we just record the state to check later
# in assertStatesValid().
inputStates.append(
InputState(
n3["p"].getInput(),
n3["p"]["s"].getInput(),
n3["p"]["f"].getInput()
)
)
def assertStatesValid() :
for state in inputStates :
if state.p is not None :
self.assertTrue( state.s is not None )
self.assertTrue( state.f is not None )
self.assertTrue( state.p.isSame( state.f.parent() ) )
else :
self.assertTrue( state.s is None )
self.assertTrue( state.f is None )
c = n3.plugDirtiedSignal().connect( plugDirtied )
n3["p"].setInput( n1["o"] )
assertStatesValid()
n3["p"].setInput( n2["o"] )
assertStatesValid()
n3["p"].setInput( None )
assertStatesValid()
def testDirtyOnPlugAdditionAndRemoval( self ) :
class DynamicAddNode( Gaffer.ComputeNode ) :
def __init__( self, name = "DynamicDependencies" ) :
Gaffer.ComputeNode.__init__( self, name )
self["out"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
def affects( self, input ) :
result = Gaffer.DependencyNode.affects( self, input )
if input in self.__inputs() :
result.append( self["out"] )
return result
def hash( self, output, context, h ) :
assert( output.isSame( self["out"] ) )
for plug in self.__inputs() :
plug.hash( h )
def compute( self, output, context ) :
result = 0
for plug in self.__inputs() :
result += plug.getValue()
output.setValue( result )
def __inputs( self ) :
return [ p for p in self.children( Gaffer.IntPlug ) if p.direction() == p.Direction.In ]
valuesWhenDirtied = []
def plugDirtied( plug ) :
if plug.isSame( n["out"] ) :
valuesWhenDirtied.append( plug.getValue() )
n = DynamicAddNode()
c = n.plugDirtiedSignal().connect( plugDirtied )
n["in"] = Gaffer.IntPlug( defaultValue = 1, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.assertEqual( valuesWhenDirtied, [ 1 ] )
del valuesWhenDirtied[:]
del n["in"]
self.assertEqual( valuesWhenDirtied, [ 0 ] )
def testThrowInAffects( self ) :
# Dirty propagation is a secondary process that
# is triggered by primary operations like adding
# plugs, setting values, and changing inputs.
# We don't want errors that occur during dirty
# propagation to prevent the original operation
# from succeeding, so that although dirtiness is
# not propagated fully, the graph itself is in
# an intact state.
node = GafferTest.BadNode()
with IECore.CapturingMessageHandler() as mh :
node["in3"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
# We want the addition of the child to have succeeded.
self.assertTrue( "in3" in node )
# And to have been informed of the bug in BadNode.
self.assertEqual( len( mh.messages ), 1 )
self.assertEqual( mh.messages[0].level, mh.Level.Error )
self.assertEqual( mh.messages[0].context, "BadNode::affects()" )
self.assertTrue( "BadNode is bad" in mh.messages[0].message )
with IECore.CapturingMessageHandler() as mh :
del node["in3"]
# We want the removal of the child to have succeeded.
self.assertTrue( "in3" not in node )
# And to have been informed of the bug in BadNode.
self.assertEqual( len( mh.messages ), 1 )
self.assertEqual( mh.messages[0].level, mh.Level.Error )
self.assertEqual( mh.messages[0].context, "BadNode::affects()" )
self.assertTrue( "BadNode is bad" in mh.messages[0].message )
# And after all that, we still want dirty propagation to work properly.
cs = GafferTest.CapturingSlot( node.plugDirtiedSignal() )
node["in1"].setValue( 10 )
self.assertTrue( node["out1"] in [ c[0] for c in cs ] )
def testDeleteNodesInInputChanged( self ) :
s = Gaffer.ScriptNode()
s["n1"] = GafferTest.MultiplyNode()
s["n2"] = GafferTest.MultiplyNode()
s["n3"] = GafferTest.MultiplyNode()
s["n4"] = Gaffer.Node()
s["n3"]["op1"].setInput( s["n2"]["product"] )
s["n4"]["user"]["d"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n4"]["user"]["d"].setInput( s["n3"]["product"] )
def inputChanged( plug ) :
del s["n3"]
del s["n4"]
c = s["n2"].plugInputChangedSignal().connect( inputChanged )
s["n2"]["op1"].setInput( s["n1"]["product"] )
if __name__ == "__main__":
unittest.main()
|
detect_service.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Tencent is pleased to support the open source community by making Metis available.
Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
https://opensource.org/licenses/BSD-3-Clause
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"""
import time
import os
import threading
from app.dao.time_series_detector import anomaly_op
from app.dao.time_series_detector import sample_op
from app.dao.time_series_detector import train_op
from time_series_detector.algorithm import xgboosting
from time_series_detector import detect
from app.common.errorcode import *
from app.common.common import *
from time_series_detector.common.tsd_errorcode import *
MODEL_PATH = os.path.join(os.path.dirname(__file__), './model/')
class DetectService(object):
def __init__(self):
self.sample_op_obj = sample_op.SampleOperation()
self.anomaly_op_obj = anomaly_op.AbnormalOperation()
self.detect_obj = detect.Detect()
def __generate_model(self, data, task_id):
"""
Start training a model.
:param data: Training dataset. This is a list of samples such as:
data -> samples_list = [{"flag": x, "data": [346, 353, 321, ...]},
{"flag": y, "data": [346, 353, 321, ...]},
{"flag": z, "data": [346, 353, 321, ...]},
......
]
:param task_id: The id of the training task.
"""
xgb_obj = xgboosting.XGBoosting()
# pylint: disable=unused-variable
# jizhi: call the xgboost algorithm model
# Parameters:
# task_id - timestamp
# data - sample dataset
# Return values:
# ret_code: success, or an error code
ret_code, ret_data = xgb_obj.xgb_train(data, task_id)
current_timestamp = int(time.time())
# jizhi: initialize the database connection
train_op_obj = train_op.TrainOperation()
if ret_code == 0:
train_status = "complete"
params = {
"task_id": task_id,
"end_time": current_timestamp,
"status": train_status,
"model_name": task_id + "_model"
}
else:
train_status = "failed"
params = {
"task_id": task_id,
"end_time": current_timestamp,
"status": train_status,
"model_name": ""
}
# jizhi: training has finished here; update the model info for this task in the train_task table
train_op_obj.update_model_info(params)
def process_train(self, data):
"""
Start a background thread to train a model.
:param data: Training dataset.
"""
sample_params = {
"trainOrTest": data["trainOrTest"],
"positiveOrNegative": data["positiveOrNegative"],
"source": data["source"],
"beginTime": data["beginTime"],
"endTime": data["endTime"]
}
# jizhi: fetch sample data from the database according to the samples selected on the page
# jizhi: call dao.time_series_detector.sample_op.SampleOperation.sample_query_all(),
# which pulls rows from the sample_dataset table of the metis database based on the
# training set and time range chosen in the frontend, and returns sample_list
samples = self.sample_op_obj.sample_query_all(sample_params)
# jizhi: the returned list 'samples' looks like:
# samples -> [{"flag": "0", "data": "660,719,649, ..."},
#             {"flag": "1", "data": "642,758,777, ..."},
#             {"flag": "0", "data": "698,644,691, ..."},
#             ......
#            ]
# jizhi: initialize app.dao.time_series_detector.train_op.TrainOperation(), i.e. set up the database connection
train_op_obj = train_op.TrainOperation()
samples_list = []
positive_count = 0
negative_count = 0
# jizhi: 'samples' is the returned list of sample data
for index in samples:
# jizhi: map() converts the comma-separated string into integer values for the dict entry
samples_list.append({"flag": index["flag"], "data": map(int, index["data"].split(','))})
# jizhi: count positive and negative samples
if index["flag"] == 1:
positive_count = positive_count + 1
else:
negative_count = negative_count + 1
# jizhi: round the current timestamp time.time() multiplied by 1000 (i.e. a millisecond timestamp)
task_id = str(int(round(time.time() * 1000)))
train_params = {
"begin_time": int(time.time()),
"end_time": int(time.time()),
"task_id": task_id,
"status": "running",
"source": data["source"],
"sample_num": len(samples_list),
"postive_sample_num": positive_count,
"negative_sample_num": negative_count
}
if positive_count == 0 or negative_count == 0:
return build_ret_data(LACK_SAMPLE, "")
# jizhi: insert a row into the train_task table of the metis database with status 'running'
train_op_obj.insert_train_info(train_params)
try:
# jizhi: data preparation is done here; call the algorithm to train the model
# jizhi: pass in samples_list (the prepared data) and task_id (the timestamp)
# jizhi: the algorithm is given all of the A, B and C data in a single call
t = threading.Thread(target=self.__generate_model, args=(samples_list, task_id, ))
t.setDaemon(False)
t.start()
except Exception:
train_status = "failed"
params = {
"task_id": task_id,
"end_time": int(time.time()),
"status": train_status,
"model_name": ""
}
# jizhi: model training failed; update the model info
train_op_obj.update_model_info(params)
return build_ret_data(OP_SUCCESS, "")
def __list_is_digit(self, data):
for index in data:
try:
float(index)
except ValueError:
return False
return True
def __check_param(self, data):
if ("viewName" not in data.keys()) or ("viewId" not in data.keys()) or ("attrId" not in data.keys()) or ("attrName" not in data.keys()) or ("time" not in data.keys()) or ("dataC" not in data.keys()) or ("dataB" not in data.keys()) or ("dataA" not in data.keys()):
return CHECK_PARAM_FAILED, "missing parameter"
return OP_SUCCESS, ""
def value_predict(self, data):
ret_code, ret_data = self.__check_param(data)
if ret_code != OP_SUCCESS:
return build_ret_data(ret_code, ret_data)
ret_code, ret_data = self.detect_obj.value_predict(data)
if ret_code == TSD_OP_SUCCESS and ret_data["ret"] == 0:
anomaly_params = {
"view_id": data["viewId"],
"view_name": data["viewName"],
"attr_id": data["attrId"],
"attr_name": data["attrName"],
"time": data["time"],
"data_c": data["dataC"],
"data_b": data["dataB"],
"data_a": data["dataA"]
}
self.anomaly_op_obj.insert_anomaly(anomaly_params)
return build_ret_data(ret_code, ret_data)
def rate_predict(self, data):
ret_code, ret_data = self.__check_param(data)
if ret_code != OP_SUCCESS:
return build_ret_data(ret_code, ret_data)
ret_code, ret_data = self.detect_obj.rate_predict(data)
if ret_code == TSD_OP_SUCCESS and ret_data["ret"] == 0:
anomaly_params = {
"view_id": data["viewId"],
"view_name": data["viewName"],
"attr_id": data["attrId"],
"attr_name": data["attrName"],
"time": data["time"],
"data_c": data["dataC"],
"data_b": data["dataB"],
"data_a": data["dataA"]
}
self.anomaly_op_obj.insert_anomaly(anomaly_params)
return build_ret_data(OP_SUCCESS, ret_data)
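# Hedged usage sketch (not part of the original module): the request payload
# below only illustrates the keys checked by __check_param(); real values come
# from the Metis web frontend and the surrounding Flask wiring, and a properly
# configured database is still required for DetectService to work end to end.
if __name__ == '__main__':
    _service = DetectService()
    _sample_request = {
        "viewId": "1", "viewName": "demo_view",
        "attrId": "1", "attrName": "demo_attr",
        "time": "2018-10-01 00:00:00",
        "dataA": "1,2,3", "dataB": "1,2,3", "dataC": "1,2,3"
    }
    print(_service.value_predict(_sample_request))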
|
KMControllers.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 10 16:50:24 2017
@author: takata@innovotion.co.jp
"""
import serial,struct
import threading
import time
import atexit
import signal
# from bluepy import btle
def float2bytes(float_value):
float_value=float(float_value)
return struct.pack("!f", float_value)
def bytes2float(byte_array):
return struct.unpack('!f',byte_array)[0]
def uint8_t2bytes(uint8_value):
uint8_value=int(uint8_value)
if uint8_value>256-1:
uint8_value=256-1
return struct.pack("B",uint8_value)
def uint16_t2bytes(uint16_value):
uint16_value=int(uint16_value)
if uint16_value>256**2-1:
uint16_value=256**2-1
val1=int(uint16_value/256)
val2=uint16_value-val1*256
return struct.pack("BB",val1,val2)
def bytes2uint16_t(ba):
return struct.unpack("BB",ba)[0]
def bytes2uint8_t(ba):
return struct.unpack("B",ba)[0]
def bytes2int16_t(ba):
return struct.unpack(">h",ba)[0]
def uint32_t2bytes(uint32_value):
uint32_value=int(uint32_value)
if uint32_value>256**4-1:
uint32_value=256**4-1
val1=int(uint32_value/256**3)
val2=int((uint32_value-val1*256**3)/256**2)
val3=int((uint32_value-val1*256**3-val2*256**2)/256)
val4=uint32_value-val1*256**3-val2*256**2-val3*256
return struct.pack("BBBB",val1,val2,val3,val4)
class Controller:
def __init__(self):
pass
def run_command(self,val,characteristics):
print(val,characteristics)
# Settings
def maxSpeed(self,max_speed,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Set the maximum speed of rotation to the 'max_speed' in rad/sec.
"""
command=b'\x02'
values=float2bytes(max_speed)
self.run_command(command+identifier+values+crc16,'motor_settings')
def minSpeed(self,min_speed,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Set the minimum speed of rotation to the 'min_speed' in rad/sec.
"""
command=b'\x03'
values=float2bytes(min_speed)
self.run_command(command+identifier+values+crc16,'motor_settings')
def curveType(self,curve_type,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Set the acceleration or deceleration curve to the 'curve_type'.
typedef enum curveType =
{
CURVE_TYPE_NONE = 0, // Turn off Motion control
CURVE_TYPE_TRAPEZOID = 1, // Turn on Motion control with trapezoidal curve
}
"""
command=b'\x05'
values=uint8_t2bytes(curve_type)
self.run_command(command+identifier+values+crc16,'motor_settings')
def acc(self,_acc,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Set the acceleration of rotation to the positive 'acc' in rad/sec^2.
"""
command=b'\x07'
values=float2bytes(_acc)
self.run_command(command+identifier+values+crc16,'motor_settings')
def dec(self,_dec,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Set the deceleration of rotation to the positive 'dec' in rad/sec^2.
"""
command=b'\x08'
values=float2bytes(_dec)
self.run_command(command+identifier+values+crc16,'motor_settings')
def maxTorque(self,max_torque,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Set the maximum torque to the positive 'max_torque' in N.m.
"""
command=b'\x0E'
values=float2bytes(max_torque)
self.run_command(command+identifier+values+crc16,'motor_settings')
def qCurrentP(self,q_current_p,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Set the q-axis current PID controller's Proportional gain to the positive 'q_current_p'.
"""
command=b'\x18'
values=float2bytes(q_current_p)
self.run_command(command+identifier+values+crc16,'motor_settings')
def qCurrentI(self,q_current_i,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Set the q-axis current PID controller's Integral gain to the positive 'q_current_i'.
"""
command=b'\x19'
values=float2bytes(q_current_i)
self.run_command(command+identifier+values+crc16,'motor_settings')
def qCurrentD(self,q_current_d,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Set the q-axis current PID controller's Differential gain to the positive 'q_current_d'.
"""
command=b'\x1A'
values=float2bytes(q_current_d)
self.run_command(command+identifier+values+crc16,'motor_settings')
def speedP(self,speed_p,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Set the speed PID controller's Proportional gain to the positive 'speed_p'.
"""
command=b'\x1B'
values=float2bytes(speed_p)
self.run_command(command+identifier+values+crc16,'motor_settings')
def speedI(self,speed_i,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Set the speed PID controller's Integral gain to the positive 'speed_i'.
"""
command=b'\x1C'
values=float2bytes(speed_i)
self.run_command(command+identifier+values+crc16,'motor_settings')
def speedD(self,speed_d,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Set the speed PID controller's Differential gain to the positive 'speed_d'.
"""
command=b'\x1D'
values=float2bytes(speed_d)
self.run_command(command+identifier+values+crc16,'motor_settings')
def positionP(self,position_p,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Set the position PID controller's Proportional gain to the positive 'position_p'.
"""
command=b'\x1E'
values=float2bytes(position_p)
self.run_command(command+identifier+values+crc16,'motor_settings')
def resetPID(self,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Reset all the PID parameters to the firmware default settings.
"""
command=b'\x22'
self.run_command(command+identifier+crc16,'motor_settings')
def ownColor(self,red,green,blue,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Set the own LED color.
"""
command=b'\x3A'
values=uint8_t2bytes(red)+uint8_t2bytes(green)+uint8_t2bytes(blue)
self.run_command(command+identifier+values+crc16,'motor_settings')
def readRegister(self,register,identifier=b'\x00\x00',crc16=b'\x00\x00'):
'''
Read a specified setting (register).
'''
command=b'\x40'
values=uint8_t2bytes(register)
self.run_command(command+identifier+values+crc16,'motor_settings')
def saveAllRegisters(self,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Save all settings (registers) in flash memory.
"""
command=b'\x41'
self.run_command(command+identifier+crc16,'motor_settings')
def resetRegister(self,register,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Reset a specified register's value to the firmware default setting.
"""
command=b'\x4E'
values=uint8_t2bytes(register)
self.run_command(command+identifier+values+crc16,'motor_settings')
def resetAllRegisters(self,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Reset all registers' values to the firmware default setting.
"""
command=b'\x4F'
self.run_command(command+identifier+crc16,'motor_settings')
# Motor Action
def disable(self,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Disable motor action.
"""
command=b'\x50'
self.run_command(command+identifier+crc16,'motor_control')
def enable(self,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Enable motor action.
"""
command=b'\x51'
self.run_command(command+identifier+crc16,'motor_control')
def speed(self,speed,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Set the speed of rotation to the positive 'speed' in rad/sec.
"""
command=b'\x58'
values=float2bytes(speed)
self.run_command(command+identifier+values+crc16,'motor_control')
def presetPosition(self,position,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Preset the current absolute position as the specified 'position' in rad. (Set it to zero when setting origin)
"""
command=b'\x5A'
values=float2bytes(position)
self.run_command(command+identifier+values+crc16,'motor_control')
def runForward(self,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Rotate the motor forward (counter clock-wise) at the speed set by 0x58: speed.
"""
command=b'\x60'
self.run_command(command+identifier+crc16,'motor_control')
def runReverse(self,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Rotate the motor reverse (clock-wise) at the speed set by 0x58: speed.
"""
command=b'\x61'
self.run_command(command+identifier+crc16,'motor_control')
# fix::[2019/01/29] added the run command
def run(self,velocity, identifier=b'\x00\x00', crc16=b'\x00\x00'):
"""
Rotate the motor at the specified signed 'velocity' in rad/sec.
"""
command = b'\x62'
values = float2bytes(velocity)
self.run_command(command + identifier +values+ crc16, 'motor_control')
def moveTo(self,position,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Move the motor to the specified absolute 'position' at the speed set by 0x58: speed.
"""
command=b'\x66'
values=float2bytes(position)
self.run_command(command+identifier+values+crc16,'motor_control')
def moveBy(self,distance,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Move motor by the specified relative 'distance' from the current position at the speed set by 0x58: speed.
"""
command=b'\x68'
values=float2bytes(distance)
self.run_command(command+identifier+values+crc16,'motor_control')
def free(self,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Stop the motor's excitation
"""
command=b'\x6C'
self.run_command(command+identifier+crc16,'motor_control')
def stop(self,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Decelerate the speed to zero and stop.
"""
command=b'\x6D'
self.run_command(command+identifier+crc16,'motor_control')
def holdTorque(self,torque,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Keep and output the specified torque.
"""
command=b'\x72'
values=float2bytes(torque)
self.run_command(command+identifier+values+crc16,'motor_control')
def doTaskSet(self,index,repeating,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Do taskset at the specified 'index' 'repeating' times.
"""
command=b'\x81'
values=uint16_t2bytes(index)+uint32_t2bytes(repeating)
self.run_command(command+identifier+values+crc16,'motor_control')
def preparePlaybackMotion(self,index,repeating,option,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Prepare to playback motion at the specified 'index' 'repeating' times.
"""
command=b'\x86'
values=uint16_t2bytes(index)+uint32_t2bytes(repeating)+uint8_t2bytes(option)
self.run_command(command+identifier+values+crc16,'motor_control')
def startPlaybackMotionV2(self, identifier=b'\x00\x00', crc16=b'\x00\x00'):  # info:: firmware ver 1.18 and later
"""
Start to playback motion in the condition of the last preparePlaybackMotion.
"""
command = b'\x85'
self.run_command(command + identifier + crc16, 'motor_control')
def startPlaybackMotion(self,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Start to playback motion in the condition of the last preparePlaybackMotion.
"""
command=b'\x87'
self.run_command(command+identifier+crc16,'motor_control')
def stopPlaybackMotion(self, identifier=b'\x00\x00', crc16=b'\x00\x00'):
"""
Stop to playback motion.
"""
command = b'\x88'
self.run_command(command + identifier + crc16, 'motor_control')
# Queue
def pause(self,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Pause the queue until 0x91: resume is executed.
"""
command=b'\x90'
self.run_command(command+identifier+crc16,'motor_control')
def resume(self,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Resume the queue.
"""
command=b'\x91'
self.run_command(command+identifier+crc16,'motor_control')
def wait(self,time,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Wait the queue or pause the queue for the specified 'time' in msec and resume it automatically.
"""
command=b'\x92'
values=uint32_t2bytes(time)
self.run_command(command+identifier+values+crc16,'motor_control')
def reset(self,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Reset the queue. Erase all tasks in the queue. This command works when 0x90: pause or 0x92: wait are executed.
"""
command=b'\x95'
self.run_command(command+identifier+crc16,'motor_control')
# Taskset
def startRecordingTaskset(self,index,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Start recording taskset at the specified 'index' in the flash memory.
In the case of KM-1, index value is from 0 to 49 (50 in total).
"""
command=b'\xA0'
values=uint16_t2bytes(index)
self.run_command(command+identifier+values+crc16,'motor_control')
def stopRecordingTaskset(self,index,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Stop recording taskset.
This command works while 0xA0: startRecordingTaskset is executed.
"""
command=b'\xA2'
values=uint16_t2bytes(index)
self.run_command(command+identifier+values+crc16,'motor_control')
def eraseTaskset(self,index,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Erase taskset at the specified index in the flash memory.
In the case of KM-1, index value is from 0 to 49 (50 in total).
"""
command=b'\xA3'
values=uint16_t2bytes(index)
self.run_command(command+identifier+values+crc16,'motor_control')
def eraseAllTaskset(self,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Erase all tasksets in the flash memory.
"""
command=b'\xA4'
self.run_command(command+identifier+crc16,'motor_control')
# Teaching
def prepareTeachingMotion(self,index,time,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Prepare teaching motion by specifying the 'index' in the flash memory and recording 'time' in milliseconds.
In the case of KM-1, index value is from 0 to 9 (10 in total). Recording time cannot exceed 65408 [msec].
"""
command=b'\xAA'
values=uint16_t2bytes(index)+uint32_t2bytes(time)
self.run_command(command+identifier+values+crc16,'motor_control')
def startTeachingMotionV2(self, identifier=b'\x00\x00', crc16=b'\x00\x00'):  # info:: firmware ver 1.18 and later
"""
Start teaching motion in the condition of the last prepareTeachingMotion.
This command works when the teaching index is specified by 0xAA: prepareTeachingMotion.
"""
command = b'\xA9'
self.run_command(command + identifier + crc16, 'motor_control')
def startTeachingMotion(self,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Start teaching motion in the condition of the last prepareTeachingMotion.
This command works when the teaching index is specified by 0xAA: prepareTeachingMotion.
"""
command=b'\xAB'
self.run_command(command+identifier+crc16,'motor_control')
def stopTeachingMotion(self,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Stop teaching motion.
"""
command=b'\xAC'
self.run_command(command+identifier+crc16,'motor_control')
def eraseMotion(self,index,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Erase motion at the specified index in the flash memory.
In the case of KM-1, index value is from 0 to 9 (10 in total).
"""
command=b'\xAD'
values=uint16_t2bytes(index)
self.run_command(command+identifier+values+crc16,'motor_control')
def eraseAllMotion(self,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Erase all motion in the flash memory.
"""
command=b'\xAE'
self.run_command(command+identifier+crc16,'motor_control')
# LED
def led(self,ledState,red,green,blue,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Set the LED state (off, solid, flash and dim) and color intensity (red, green and blue).
typedef enum ledState =
{
LED_STATE_OFF = 0, // LED off
LED_STATE_ON_SOLID = 1, // LED solid
LED_STATE_ON_FLASH = 2, // LED flash
LED_STATE_ON_DIM = 3 // LED dim
}
"""
command=b'\xE0'
values=uint8_t2bytes(ledState)+uint8_t2bytes(red)+uint8_t2bytes(green)+uint8_t2bytes(blue)
self.run_command(command+identifier+values+crc16,"motor_control")
# IMU
def enableIMU(self,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Enable the IMU and start notification of the measurement values.
This command is only available over BLE (not implemented for the wired interfaces).
When this command is executed, the IMU measurement data is notified via the BLE IMU Measurement characteristic.
"""
command=b'\xEA'
self.run_command(command+identifier+crc16,'motor_control')
def disableIMU(self,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Disable the IMU and stop notification of the measurement values.
"""
command=b'\xEB'
self.run_command(command+identifier+crc16,'motor_control')
# System
def reboot(self,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Reboot the system.
"""
command=b'\xF0'
self.run_command(command+identifier+crc16,'motor_control')
def enterDeviceFirmwareUpdate(self,identifier=b'\x00\x00',crc16=b'\x00\x00'):
"""
Enter the device firmware update mode.
Enter the device firmware update mode or bootloader mode. It goes with reboot.
"""
command=b'\xFD'
self.run_command(command+identifier+crc16,'motor_control')
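# Hedged sketch (not part of the original module): the base Controller's
# run_command() only prints the composed packet (command byte + identifier +
# value bytes + crc16), so it can be used to inspect what a concrete
# controller would transmit for a given call.
def _print_example_packets():
    c = Controller()
    c.speed(3.14)        # 'motor_control' packet for command 0x58
    c.maxTorque(0.5)     # 'motor_settings' packet for command 0x0E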
class USBController(Controller):
def __init__(self,port='/dev/ttyUSB0'):
# signal.signal(signal.SIGINT, self.all_done)
# signal.signal(signal.SIGTERM, self.all_done)
self.serial_bef=[]
self.port=port
self.serial=serial.Serial(port,115200,8,'N',1,None,False,True)
self.on_motor_measurement_cb=False
self.on_motor_connection_error_cb = False
self.t = threading.Thread(target=self._serial_schedule_worker)
self.t.daemon = True
self.t.start()
atexit.register(self.all_done)
def all_done(self):
try:
if self.t.is_alive():
self.t.join(0.01)
self.serial.close()
except:
return
def delete(self):
try:
if self.t.is_alive():
self.t.join(0.01)
self.serial.close()
except:
return
def run_command(self,val,characteristics=None):
try:
self.serial.write(val)
except serial.SerialException as e:
self.serial.close()
# There is no new data from serial port
if (callable(self.on_motor_connection_error_cb)):
self.on_motor_connection_error_cb(e)
return e
except TypeError as e:
# Disconnect of USB->UART occurred
self.serial.close()
if (callable(self.on_motor_connection_error_cb)):
self.on_motor_connection_error_cb(e)
return e
except IOError as e:
self.serial.close()
if (callable(self.on_motor_connection_error_cb)):
self.on_motor_connection_error_cb(e)
return e
#------------------------------#
# USB notify switching: select the motor-control interface(s) (new 1-6 MOTOR_SETTING)
# ------------------------------#
# uint8_t flags: each bit selects whether that interface is included (1 = included, 0 = excluded)
# bit7              bit6 bit5 bit4       bit3       bit2 bit1 bit0
# physical button    *    *   wired I2C  wired USB   *    *   wireless BLE
@property
def interface_type(self):
return {
"BLE":0b1,
"USB": 0b1000,
"I2C": 0b10000,
"HDDBTN": 0b10000000,
}
def interface(self,flg=0b1000,identifier=b'\x00\x00',crc16=b'\x00\x00'):
command = b'\x2E'
values = uint8_t2bytes(flg)
self.run_command(command + identifier+values + crc16, 'motor_settings')
def _serial_schedule_worker(self):
while True:
time.sleep(100/1000)#100ms
e_res= self._read_motor_measurement()
if e_res:  # an exception occurred; stop the thread
break
def _read_motor_measurement(self):
try:
rd = self.serial.read(self.serial.inWaiting())
except serial.SerialException as e:
self.serial.close()
# There is no new data from serial port
if (callable(self.on_motor_connection_error_cb)):
self.on_motor_connection_error_cb(e)
return e
except TypeError as e:
# Disconnect of USB->UART occurred
self.serial.close()
if (callable(self.on_motor_connection_error_cb)):
self.on_motor_connection_error_cb(e)
return e
except IOError as e:
self.serial.close()
if (callable(self.on_motor_connection_error_cb)):
self.on_motor_connection_error_cb(e)
return e
for bt in rd:
if type(bt) is str:
self.serial_bef.append(ord(bt))
elif type(bt) is int:
self.serial_bef.append(bt)
#print bt.encode('hex')
#------------------------------#
# preamble detection logic  # todo:: switch from a byte array to a byte-string representation
# ------------------------------#
sv_len = len(self.serial_bef)
is_pre = False  # whether a preamble has been detected
if (sv_len < 8):
return
slice_idx = sv_len  # index up to which the buffer has been consumed and will be discarded
bf_len=len(self.serial_bef)
for i in range(bf_len):
# preamble detection
if (i+3 < bf_len and self.serial_bef[i] ==0x00 and self.serial_bef[i+1] == 0x00 and self.serial_bef[i+2] ==0xAA and self.serial_bef[i+3] == 0xAA and not is_pre):
is_pre=True
slice_idx=i
for ie in range(i+4,sv_len,1):
# postamble detection
if (ie+3 < bf_len and self.serial_bef[ie + 2] == 0x0D and self.serial_bef[ie+3] ==0x0A):
crc = self.serial_bef[ie] << 8 | self.serial_bef[ie + 1] #CRC
payload = self.serial_bef[i + 4: ie]  # payload bytes
val=self._serialdataParse(payload)
slice_idx = ie + 4
i = ie + 3
is_pre = False
# fix::[20190124harada] restrict handled data to motor rotation measurements
if (val['type']==0xB4 and callable(self.on_motor_measurement_cb)):
self.on_motor_measurement_cb(val['payload'])
break
self.serial_bef=self.serial_bef[slice_idx:]
def _serialdataParse(self,uint8List):
v_len =len(uint8List)
if (v_len < 3 or uint8List[0] != v_len):
return {'type':None,'payload':None}
type=uint8List[1]
payload=uint8List[2:]
if type == 0xB4:  # motor rotation measurement received
# Build the byte strings directly from the uint8 list; joining struct.pack()
# results with ''.join() mixes str and bytes under Python 3 and raises TypeError.
pos_b = bytes(payload[0:4])
vel_b = bytes(payload[4:8])
tlq_b = bytes(payload[8:12])
self.position = bytes2float(pos_b)
self.velocity = bytes2float(vel_b)
self.torque = bytes2float(tlq_b)
return {'type':type,'payload':{'position':self.position,'velocity':self.velocity,'torque':self.torque}}  # fix::[20190124harada] added the data type
# return {'type':type,'payload':{self.position,self.velocity,self.torque}}  # fix::[20190124harada] added the data type
# if type == 0xB5:  # todo:: gyro data
# if type == 0xBE:  # todo:: error code data
# if type == 0x40:  # todo:: return value of a readRegister command
else:
return {'type':None,'payload':None}
class BLEController(Controller):
def __init__(self,addr):
self.address=addr
self.dev=btle.Peripheral(self.address,'random')
self.position=0.0
self.velocity=0.0
self.torque=0.0
self.accel_x=0.0
self.accel_y=0.0
self.accel_z=0.0
self.temp=0
self.gyro_x=0
self.gyro_y=0
self.gyro_z=0
for v in self.dev.getCharacteristics():
if v.uuid=='f1400001-8936-4d35-a0ed-dfcd795baa8c':
self.motor_control_handle=v.getHandle()
if v.uuid=='f1400003-8936-4d35-a0ed-dfcd795baa8c':
self.motor_led_handle=v.getHandle()
if v.uuid=='f1400004-8936-4d35-a0ed-dfcd795baa8c':
self.motor_measurement_handle=v.getHandle()
if v.uuid=='f1400005-8936-4d35-a0ed-dfcd795baa8c':
self.motor_imu_measurement_handle=v.getHandle()
if v.uuid=='f1400006-8936-4d35-a0ed-dfcd795baa8c':
self.motor_settings_handle=v.getHandle()
def run_command(self,val,characteristics=None):
if characteristics=='motor_control':
self.dev.writeCharacteristic(self.motor_control_handle,val)
elif characteristics=='motor_led':
self.dev.writeCharacteristic(self.motor_led_handle,val)
elif characteristics=='motor_settings':
self.dev.writeCharacteristic(self.motor_settings_handle,val)
else:
raise ValueError('Invalid Characteristics')
def connect(self):
"""
Establish the BLE connection.
"""
self.dev.connect(self.address,'random')
def disconnect(self):
"""
Close the BLE connection.
"""
self.dev.disconnect()
def read_motor_measurement(self):
"""
Get the position, velocity, and torque and store them to the properties 'position' in rad, 'velocity' in rad/sec, and 'torque' in N.m.
"""
ba=self.dev.readCharacteristic(self.motor_measurement_handle)
self.position=bytes2float(ba[0:4])
self.velocity=bytes2float(ba[4:8])
self.torque=bytes2float(ba[8:12])
return (self.position,self.velocity,self.torque)
def read_imu_mesurement(self):
"""
Get the x, y, z axis accelerations, the temperature, and the angular velocities around the x, y, z axes,
and store them in 'accel_x', 'accel_y', 'accel_z' in g (9.80665 m/s^2), 'temp' in degrees Celsius, and 'gyro_x', 'gyro_y', 'gyro_z' in rad/sec.
"""
self.enableIMU()
ba=self.dev.readCharacteristic(self.motor_imu_measurement_handle)
self.accel_x=bytes2int16_t(ba[0:2])* 2.0 / 32767
self.accel_y=bytes2int16_t(ba[2:4])* 2.0 / 32767
self.accel_z=bytes2int16_t(ba[4:6])* 2.0 / 32767
self.temp=bytes2int16_t(ba[6:8])/333.87 + 21.00
self.gyro_x=bytes2int16_t(ba[8:10])* 0.00013316211
self.gyro_y=bytes2int16_t(ba[10:12])* 0.00013316211
self.gyro_z=bytes2int16_t(ba[12:14])* 0.00013316211
return (self.accel_x,self.accel_y,self.accel_z,self.temp,self.gyro_x,self.gyro_y,self.gyro_z)
def __read_float_data(self,ba):
return bytes2float(ba[4:8])
def __read_uint8_data(self,ba):
return bytes2uint8_t(ba[4])
def __read_rgb_data(self,ba):
return (ba[4],ba[5],ba[6])
def __read_setting_value(self,comm):
float_value_comms=[0x02,0x03,0x07,0x08,0x0E,0x18,0x19,0x1A,0x1B,0x1C,0x1D,0x1E]
valid_comms=[0x05,0x3A]
valid_comms.extend(float_value_comms)
if not (comm in valid_comms):
return
self.readRegister(comm)
ba=self.dev.readCharacteristic(self.motor_settings_handle)
while len(ba)==6:
ba=self.dev.readCharacteristic(self.motor_settings_handle)
if comm in float_value_comms:
return self.__read_float_data(ba)
if comm==0x05:
return self.__read_uint8_data(ba)
if comm==0x3A:
return self.__read_rgb_data(ba)
def read_maxSpeed(self):
return self.__read_setting_value(0x02)
def read_minSpeed(self):
return self.__read_setting_value(0x03)
def read_curveType(self):
return self.__read_setting_value(0x05)
def read_acc(self):
return self.__read_setting_value(0x07)
def read_dec(self):
return self.__read_setting_value(0x08)
def read_maxTorque(self):
return self.__read_setting_value(0x0E)
def read_qCurrentP(self):
return self.__read_setting_value(0x18)
def read_qCurrentI(self):
return self.__read_setting_value(0x19)
def read_qCurrentD(self):
return self.__read_setting_value(0x1A)
def read_speedP(self):
return self.__read_setting_value(0x1B)
def read_speedI(self):
return self.__read_setting_value(0x1C)
def read_speedD(self):
return self.__read_setting_value(0x1D)
def read_positionP(self):
return self.__read_setting_value(0x1E)
def read_ownColor(self):
return self.__read_setting_value(0x3A)
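# Hedged usage sketch (not part of the original module): assumes a KM-1 motor
# is attached at /dev/ttyUSB0; for BLEController the commented-out bluepy
# import near the top of this file must be restored first.
if __name__ == '__main__':
    motor = USBController('/dev/ttyUSB0')
    motor.enable()
    motor.speed(3.14)    # rad/sec
    motor.runForward()
    time.sleep(2)
    motor.stop()
    motor.disable()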
|
settings.py
|
from flask import request, redirect, Response, session
import urllib.parse
import threading
import time
import json
class Settings():
endpoints = ["/api/settings"]
endpoint_name = "api_settings"
endpoint_methods = ["GET", "POST"]
def __init__(self, fhdhr):
self.fhdhr = fhdhr
self.restart_url = "/api/settings?method=restart_actual"
self.restart_sleep = 5
def __call__(self, *args):
return self.get(*args)
def get(self, *args):
method = request.args.get('method', default="get", type=str)
redirect_url = request.args.get('redirect', default=None, type=str)
if method == "get":
web_settings_dict = {}
for config_section in list(self.fhdhr.config.conf_default.keys()):
web_settings_dict[config_section] = {}
for config_item in list(self.fhdhr.config.conf_default[config_section].keys()):
web_settings_dict[config_section][config_item] = {
"value": self.fhdhr.config.dict[config_section][config_item],
}
if self.fhdhr.config.conf_default[config_section][config_item]["config_web_hidden"]:
web_settings_dict[config_section][config_item]["value"] = "***********"
return_json = json.dumps(web_settings_dict, indent=4)
return Response(status=200,
response=return_json,
mimetype='application/json')
elif method == "update":
config_section = request.form.get('config_section', None)
config_name = request.form.get('config_name', None)
config_value = request.form.get('config_value', None)
if not config_section or not config_name or not config_value:
if redirect_url:
return redirect("%s?retmessage=%s" % (redirect_url, urllib.parse.quote("%s Failed" % method)))
else:
return "%s Falied" % method
self.fhdhr.config.write(config_name, config_value, config_section)
elif method == "restart":
restart_thread = threading.Thread(target=self.restart_thread)
restart_thread.start()
return redirect("%s?retmessage=%s" % (redirect_url, urllib.parse.quote("Restarting in %s seconds" % self.restart_sleep)))
elif method == "restart_actual":
session["restart"] = True
if redirect_url:
return redirect("%s?retmessage=%s" % (redirect_url, urllib.parse.quote("%s Success" % method)))
else:
return "%s Success" % method
def restart_thread(self):
time.sleep(self.restart_sleep)
self.fhdhr.api.get(self.restart_url)
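# Summary of the HTTP interface handled above (paths and parameters are taken
# from the handler logic; host and port depend on the fhdhr deployment):
#   GET  /api/settings?method=get      -> JSON dump of the current settings
#   POST /api/settings?method=update   -> form fields: config_section, config_name, config_value
#   GET  /api/settings?method=restart  -> starts restart_thread(), which calls
#                                         /api/settings?method=restart_actual after restart_sleep seconds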
|
CoronaCrawler.py
|
import requests
import threading
import time
from bs4 import BeautifulSoup
class CronaCrawler:
def __init__(self, url: str):
self.__url = url
self.__page = page = requests.Session().get(url)
self.__soup = BeautifulSoup(page.content, 'html.parser')
self.__isUpdated = False
self.__t1 = threading.Thread(target=self.__country_data_update)
self.__t2 = threading.Thread(target=self.__area_data_update)
def __get_json_in_script(self, soup, id: str):
text = str(soup.find('script', id=id).string)
return text[text.find('['):text.rfind(']')+1]
def __save_as_json(self, jsonName: str, value: str):
with open(jsonName+'.json', 'w', encoding='utf8') as js:
js.write(value)
def __country_data_update(self):
country_info = self.__get_json_in_script(
self.__soup, 'getListByCountryTypeService2true')
self.__save_as_json('data/country_data', country_info)
def __area_data_update(self):
area_info = self.__get_json_in_script(self.__soup, 'getAreaStat')
self.__save_as_json('data/area_data', area_info)
def data_update(self):
print("Data updated")
# A Thread object can only be started once, so create fresh worker threads
# for every update instead of reusing the ones built in __init__.
self.__t1 = threading.Thread(target=self.__country_data_update)
self.__t2 = threading.Thread(target=self.__area_data_update)
self.__t1.start()
self.__t2.start()
self.__isUpdated = True
def data_update_by_time(self, secs: int):
print("爬虫开始,更新时间间隔为 "+str(secs)+" 秒")
while True:
self.data_update()
time.sleep(secs)
@property
def isUpdated(self):
return self.__isUpdated
@isUpdated.setter
def isUpdated(self, value: bool):
self.__isUpdated = value
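# Note: __save_as_json() writes to data/country_data.json and data/area_data.json,
# so a data/ directory must exist next to this script before an update runs.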
cronaCrawler = CronaCrawler("https://ncov.dxy.cn/ncovh5/view/pneumonia")
if __name__ == "__main__":
cronaCrawler.data_update_by_time(3*60)
|
core.py
|
'''
Created on May 29, 2015
@author: mzwier
'''
from pickle import UnpicklingError
# Every ten seconds the master requests a status report from workers.
# This also notifies workers that the master is still alive
DEFAULT_STATUS_POLL = 10
# If we haven't heard from the master or a worker (as appropriate) in these
# amounts of time, we assume a crash and shut down.
MASTER_CRASH_TIMEOUT = DEFAULT_STATUS_POLL * 6
WORKER_CRASH_TIMEOUT = DEFAULT_STATUS_POLL * 3
import logging
log = logging.getLogger(__name__)
#import gevent
import sys, uuid, socket, os,tempfile, errno, time, threading, contextlib, traceback, multiprocessing, json, re
from collections import OrderedDict
import signal
signames = {val:name for name, val in reversed(sorted(signal.__dict__.items()))
if name.startswith('SIG') and not name.startswith('SIG_')}
import zmq
import numpy
DEFAULT_LINGER = 1
def randport(address='127.0.0.1'):
'''Select a random unused TCP port number on the given address.'''
s = socket.socket()
s.bind((address,0))
try:
port = s.getsockname()[1]
finally:
s.close()
return port
class ZMQWMError(RuntimeError):
'''Base class for errors related to the ZeroMQ work manager itself'''
pass
class ZMQWorkerMissing(ZMQWMError):
'''Exception representing that a worker processing a task died or disappeared'''
pass
class ZMQWMEnvironmentError(ZMQWMError):
'''Class representing an error in the environment in which the ZeroMQ work manager is running.
This includes such things as master/worker ID mismatches.'''
class ZMQWMTimeout(ZMQWMEnvironmentError):
'''A timeout of a sort that indicates that a master or worker has failed or never started.'''
class Message:
SHUTDOWN = 'shutdown'
ACK = 'ok'
NAK = 'no'
IDENTIFY = 'identify' # Two-way identification (a reply must be an IDENTIFY message)
TASKS_AVAILABLE = 'tasks_available'
TASK_REQUEST = 'task_request'
MASTER_BEACON = 'master_alive'
RECONFIGURE_TIMEOUT = 'reconfigure_timeout'
TASK = 'task'
RESULT = 'result'
idempotent_announcement_messages = {SHUTDOWN, TASKS_AVAILABLE, MASTER_BEACON}
def __init__(self, message=None, payload=None, master_id=None, src_id=None):
if isinstance(message,Message):
self.message = message.message
self.payload = message.payload
self.master_id = message.master_id
self.src_id = message.src_id
else:
self.master_id = master_id
self.src_id = src_id
self.message = message
self.payload = payload
def __repr__(self):
return ('<{!s} master_id={master_id!s} src_id={src_id!s} message={message!r} payload={payload!r}>'
.format(self.__class__.__name__, **self.__dict__))
@classmethod
def coalesce_announcements(cls, messages):
d = OrderedDict()
for msg in messages:
if msg.message in cls.idempotent_announcement_messages:
key = msg.message
else:
key = (msg.message, msg.payload)
d[key] = msg
coalesced = list(d.values())
log.debug('coalesced {} announcements into {}'.format(len(messages), len(coalesced)))
return coalesced
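# Hedged sketch (not part of the original module): idempotent announcements
# (SHUTDOWN, TASKS_AVAILABLE, MASTER_BEACON) coalesce by message type alone,
# while other messages are deduplicated by (message, payload).
def _coalesce_example():
    msgs = [Message(Message.MASTER_BEACON),
            Message(Message.MASTER_BEACON),
            Message(Message.TASKS_AVAILABLE),
            Message(Message.RESULT, payload='r1'),
            Message(Message.RESULT, payload='r1')]
    return Message.coalesce_announcements(msgs)   # three messages survive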
TIMEOUT_MASTER_BEACON = 'master_beacon'
TIMEOUT_WORKER_CONTACT = 'worker_contact'
class Task:
def __init__(self, fn, args, kwargs, task_id = None):
self.task_id = task_id or uuid.uuid4()
self.fn = fn
self.args = args
self.kwargs = kwargs
def __repr__(self):
try:
return '<{} {task_id!s} {fn!r} {:d} args {:d} kwargs>'\
.format(self.__class__.__name__, len(self.args), len(self.kwargs), **self.__dict__)
except TypeError:
# no length
return '<{} {task_id!s} {fn!r}>'.format(self.__class__.__name__, **self.__dict__)
def __hash__(self):
return hash(self.task_id)
def execute(self):
'''Run this task, returning a Result object.'''
rsl = Result(task_id = self.task_id)
try:
rsl.result = self.fn(*self.args, **self.kwargs)
except BaseException as e:
rsl.exception = e
rsl.traceback = traceback.format_exc()
return rsl
class Result:
def __init__(self, task_id, result=None, exception=None, traceback=None):
self.task_id = task_id
self.result = result
self.exception = exception
self.traceback = traceback
def __repr__(self):
return '<{} {task_id!s} ({})>'\
.format(self.__class__.__name__, 'result' if self.exception is None else 'exception', **self.__dict__)
def __hash__(self):
return hash(self.task_id)
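# Hedged sketch (not part of the original module): Task.execute() runs the
# wrapped callable and returns a Result carrying either the return value or
# the captured exception and its formatted traceback.
def _task_execute_example():
    ok = Task(sum, ([1, 2, 3],), {}).execute()
    assert ok.result == 6 and ok.exception is None
    failed = Task(int, ('not a number',), {}).execute()
    assert failed.result is None and isinstance(failed.exception, ValueError)
    return ok, failed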
class PassiveTimer:
__slots__ = {'started', 'duration'}
def __init__(self, duration, started=None):
if started is None:
started = time.time()
self.started = started
self.duration = duration
@property
def expired(self, at=None):
at = at or time.time()
return (at - self.started) > self.duration
@property
def expires_in(self):
at = time.time()
return self.started + self.duration - at
def reset(self, at=None):
self.started = at or time.time()
start = reset
class PassiveMultiTimer:
def __init__(self):
self._identifiers = numpy.empty((0,), numpy.object_)
self._durations = numpy.empty((0,), float)
self._started = numpy.empty((0,), float)
self._indices = {} # indexes into durations/started, keyed by identifier
def add_timer(self, identifier, duration):
if identifier in self._identifiers:
raise KeyError('timer {!r} already present'.format(identifier))
new_idx = len(self._identifiers)
self._durations.resize((new_idx+1,))
self._started.resize((new_idx+1,))
self._identifiers.resize((new_idx+1,))
self._durations[new_idx] = duration
self._started[new_idx] = time.time()
self._identifiers[new_idx] = identifier
self._indices[identifier] = new_idx
def remove_timer(self, identifier):
idx = self._indices.pop(identifier)
self._durations = numpy.delete(self._durations, idx)
self._started = numpy.delete(self._started, idx)
self._identifiers = numpy.delete(self._identifiers, idx)
def change_duration(self, identifier, duration):
idx = self._indices[identifier]
self._durations[idx] = duration
def reset(self, identifier=None, at=None):
at = at or time.time()
if identifier is None:
# reset all timers
self._started.fill(at)
else:
self._started[self._indices[identifier]] = at
def expired(self, identifier, at = None):
at = at or time.time()
idx = self._indices[identifier]
return (at - self._started[idx]) > self._durations[idx]
def next_expiration(self):
at = time.time()
idx = (self._started + self._durations - at).argmin()
return self._identifiers[idx]
def next_expiration_in(self):
at = time.time()
idx = (self._started + self._durations - at).argmin()
next_at = self._started[idx] + self._durations[idx] - at
return next_at if next_at > 0 else 0
def which_expired(self, at=None):
at = at or time.time()
expired_indices = (at - self._started) > self._durations
return self._identifiers[expired_indices]
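# Hedged sketch (not part of the original module): PassiveMultiTimer tracks
# several named countdowns at once; expired() and which_expired() are polled
# rather than driven by callbacks.
def _multitimer_example():
    timers = PassiveMultiTimer()
    timers.add_timer(TIMEOUT_MASTER_BEACON, 1.0)
    timers.add_timer(TIMEOUT_WORKER_CONTACT, 2.0)
    time.sleep(1.1)
    assert timers.expired(TIMEOUT_MASTER_BEACON)
    assert not timers.expired(TIMEOUT_WORKER_CONTACT)
    timers.reset(TIMEOUT_MASTER_BEACON)
    return timers.which_expired()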
class ZMQCore:
# The overall communication topology (socket layout, etc)
# Cannot be updated without updating configuration files, command-line parameters,
# etc. (Changes break user scripts.)
PROTOCOL_MAJOR = 3
# The set of messages and replies in use.
# Cannot be updated without changing existing communications logic. (Changes break
# the ZMQ WM library.)
PROTOCOL_MINOR = 0
# Minor updates and additions to the protocol.
# Changes do not break the ZMQ WM library, but only add new
# functionality/code paths without changing existing code paths.
PROTOCOL_UPDATE = 0
PROTOCOL_VERSION = (PROTOCOL_MAJOR, PROTOCOL_MINOR, PROTOCOL_UPDATE)
# The default transport for "internal" (inter-thread/-process) communication
# IPC should work except on really odd systems with no local storage
internal_transport = 'ipc'
default_comm_mode = 'ipc'
default_master_heartbeat = 20.0
default_worker_heartbeat = 20.0
default_timeout_factor = 5.0
default_startup_timeout = 120.0
default_shutdown_timeout = 5.0
_ipc_endpoints_to_delete = []
@classmethod
def make_ipc_endpoint(cls):
(fd, socket_path) = tempfile.mkstemp()
os.close(fd)
endpoint = 'ipc://{}'.format(socket_path)
cls._ipc_endpoints_to_delete.append(endpoint)
return endpoint
@classmethod
def remove_ipc_endpoints(cls):
while cls._ipc_endpoints_to_delete:
endpoint = cls._ipc_endpoints_to_delete.pop()
assert endpoint.startswith('ipc://')
socket_path = endpoint[6:]
try:
os.unlink(socket_path)
except OSError as e:
if e.errno != errno.ENOENT:
log.debug('could not unlink IPC endpoint {!r}: {}'.format(socket_path, e))
else:
log.debug('unlinked IPC endpoint {!r}'.format(socket_path))
@classmethod
def make_tcp_endpoint(cls, address='127.0.0.1'):
return 'tcp://{}:{}'.format(address,randport(address))
@classmethod
def make_internal_endpoint(cls):
assert cls.internal_transport in {'ipc', 'tcp'}
if cls.internal_transport == 'ipc':
return cls.make_ipc_endpoint()
else: # cls.internal_transport == 'tcp'
return cls.make_tcp_endpoint()
def __init__(self):
# Unique identifier of this ZMQ node
self.node_id = uuid.uuid4()
# Identifier of the task distribution network (work manager)
self.network_id = None
# Beacons
# Workers expect to hear from the master at least every master_beacon_period
# Master expects to hear from the workers at least every worker_beacon_period
# If more than {master,worker}_beacon_period*timeout_factor elapses, the
# master/worker is considered missing.
self.worker_beacon_period = self.default_worker_heartbeat
self.master_beacon_period = self.default_master_heartbeat
self.timeout_factor = self.default_timeout_factor
# These should allow for some fuzz, and should ratchet up as more and
# more workers become available (maybe order 1 s for 100 workers?) This
# should also account appropriately for startup delay on difficult
# systems.
# Number of seconds to allow first contact between at least one worker
# and the master.
self.startup_timeout = self.default_startup_timeout
# A friendlier description for logging
self.node_description = '{!s} on {!s} at PID {:d}'.format(self.__class__.__name__,
socket.gethostname(),
os.getpid())
self.validation_fail_action = 'exit' # other options are 'raise' and 'warn'
self.log = logging.getLogger(__name__ + '.' + self.__class__.__name__ + '.' + str(self.node_id))
# ZeroMQ context
self.context = None
# External communication endpoints
self.rr_endpoint = None
self.ann_endpoint = None
self.inproc_endpoint = 'inproc://{!s}'.format(self.node_id)
# Sockets
self.rr_socket = None
self.ann_socket = None
# This is the main-thread end of this
self._inproc_socket = None
self.master_id = None
if os.environ.get('WWMGR_ZMQ_DEBUG_MESSAGES', 'n').upper() in {'Y', 'YES', '1', 'T', 'TRUE'}:
self._super_debug = True
else:
self._super_debug = None
def __repr__(self):
return '<{!s} {!s}>'.format(self.__class__.__name__, self.node_id)
def get_identification(self):
return {'node_id': self.node_id,
'master_id': self.master_id,
'class': self.__class__.__name__,
'description': self.node_description,
'hostname': socket.gethostname(),
'pid': os.getpid()}
def validate_message(self, message):
'''Validate incoming message. Raises an exception if the message is improperly formatted (TypeError)
or does not correspond to the appropriate master (ZMQWMEnvironmentError).'''
try:
super_validator = super(ZMQCore,self).validate_message
except AttributeError:
pass
else:
super_validator(message)
if not isinstance(message, Message):
raise TypeError('message is not an instance of core.Message')
if message.src_id is None:
raise ZMQWMEnvironmentError('message src_id is not set')
if self.master_id is not None and message.master_id is not None and message.master_id != self.master_id:
raise ZMQWMEnvironmentError('incoming message associated with another master (this={!s}, incoming={!s})'.format(self.master_id, message.master_id))
@contextlib.contextmanager
def message_validation(self, msg):
'''A context manager for message validation. The instance variable ``validation_fail_action``
controls the behavior of this context manager:
* 'raise': re-raise the exception that indicated failed validation. Useful for development.
* 'exit' (default): report the error and exit the program.
* 'warn': report the error and continue.'''
try:
yield
except Exception as e:
if self.validation_fail_action == 'raise':
self.log.exception('message validation failed for {!r}'.format(msg))
raise
elif self.validation_fail_action == 'exit':
self.log.error('message validation failed: {!s}'.format(e))
sys.exit(1)
elif self.validation_fail_action == 'warn':
self.log.warning('message validation failed: {!s}'.format(e))
def recv_message(self, socket, flags=0, validate=True, timeout=None):
'''Receive a message object from the given socket, using the given flags.
Message validation is performed if ``validate`` is true.
If ``timeout`` is given, then it is the number of milliseconds to wait
prior to raising a ZMQWMTimeout exception. ``timeout`` is ignored if
``flags`` includes ``zmq.NOBLOCK``.'''
if timeout is None or flags & zmq.NOBLOCK:
message = socket.recv_pyobj(flags)
else:
poller = zmq.Poller()
poller.register(socket, zmq.POLLIN)
try:
poll_results = dict(poller.poll(timeout=timeout))
if socket in poll_results:
message = socket.recv_pyobj(flags)
else:
raise ZMQWMTimeout('recv timed out')
finally:
poller.unregister(socket)
if self._super_debug:
self.log.debug('received {!r}'.format(message))
if validate:
with self.message_validation(message):
self.validate_message(message)
return message
def recv_all(self, socket, flags=0, validate=True):
'''Receive all messages currently available from the given socket.'''
messages = []
while True:
try:
messages.append(self.recv_message(socket, flags | zmq.NOBLOCK, validate))
except zmq.Again:
return messages
def recv_ack(self, socket, flags=0, validate=True, timeout=None):
msg = self.recv_message(socket, flags, validate, timeout)
if validate:
with self.message_validation(msg):
assert msg.message in (Message.ACK, Message.NAK)
return msg
def send_message(self, socket, message, payload=None, flags=0):
'''Send a message object. Subclasses may override this to
decorate the message with appropriate IDs, then delegate upward to actually send
the message. ``message`` may either be a pre-constructed ``Message`` object or
a message identifier, in which (latter) case ``payload`` will become the message payload.
``payload`` is ignored if ``message`` is a ``Message`` object.'''
message = Message(message, payload)
if message.master_id is None:
message.master_id = self.master_id
message.src_id=self.node_id
if self._super_debug:
self.log.debug('sending {!r}'.format(message))
socket.send_pyobj(message,flags)
def send_reply(self, socket, original_message, reply=Message.ACK, payload=None,flags=0):
'''Send a reply to ``original_message`` on ``socket``. The reply message
is a Message object or a message identifier. The reply master_id and worker_id are
set from ``original_message``, unless master_id is not set, in which case it is
set from self.master_id.'''
reply = Message(reply, payload)
reply.master_id = original_message.master_id or self.master_id
assert original_message.worker_id is not None # should have been caught by validation prior to this
reply.worker_id = original_message.worker_id
self.send_message(socket, reply)
def send_ack(self, socket, original_message):
'''Send an acknowledgement message, which is mostly just to respect REQ/REP
recv/send patterns.'''
self.send_message(socket, Message(Message.ACK,
master_id=original_message.master_id or self.master_id,
src_id=self.node_id))
def send_nak(self, socket, original_message):
'''Send a negative acknowledgement message.'''
self.send_message(socket, Message(Message.NAK,
master_id=original_message.master_id or self.master_id,
src_id=self.node_id))
def send_inproc_message(self, message, payload=None, flags=0):
inproc_socket = self.context.socket(zmq.PUB)
inproc_socket.connect(self.inproc_endpoint)
# annoying wait for sockets to settle
time.sleep(0.01)
self.send_message(inproc_socket, message, payload, flags)
# used to be a close with linger here, but it was cutting off messages
def signal_shutdown(self):
try:
self.send_inproc_message(Message.SHUTDOWN)
except AttributeError:
# this is expected if self.context has been set to None (i.e. it has already been destroyed)
pass
except Exception as e:
self.log.debug('ignoring exception {!r} in signal_shutdown()'.format(e))
def shutdown_handler(self, signal=None, frame=None):
if signal is None:
self.log.info('shutting down')
else:
self.log.info('shutting down on signal {!s}'.format(signames.get(signal,signal)))
self.signal_shutdown()
def install_signal_handlers(self, signals = None):
if not signals:
signals = {signal.SIGINT, signal.SIGQUIT, signal.SIGTERM}
for sig in signals:
signal.signal(sig, self.shutdown_handler)
def install_sigint_handler(self):
self.install_signal_handlers()
def startup(self):
self.context = zmq.Context()
self.comm_thread = threading.Thread(target=self.comm_loop)
self.comm_thread.start()
#self.install_signal_handlers()
def shutdown(self):
self.shutdown_handler()
def join(self):
while True:
self.comm_thread.join(0.1)
if not self.comm_thread.is_alive():
break
def shutdown_process(process, timeout=1.0):
process.join(timeout)
if process.is_alive():
log.debug('sending SIGINT to process {:d}'.format(process.pid))
os.kill(process.pid, signal.SIGINT)
process.join(timeout)
if process.is_alive():
log.warning('sending SIGKILL to worker process {:d}'.format(process.pid))
os.kill(process.pid, signal.SIGKILL)
process.join()
log.debug('process {:d} terminated with code {:d}'.format(process.pid, process.exitcode))
else:
log.debug('worker process {:d} terminated gracefully with code {:d}'.format(process.pid, process.exitcode))
assert not process.is_alive()
class IsNode:
def __init__(self, n_local_workers=None):
from work_managers.zeromq.worker import ZMQWorker
if n_local_workers is None:
n_local_workers = multiprocessing.cpu_count()
self.downstream_rr_endpoint = None
self.downstream_ann_endpoint = None
if n_local_workers:
self.local_ann_endpoint = self.make_internal_endpoint()
self.local_rr_endpoint = self.make_internal_endpoint()
self.local_workers = [ZMQWorker(self.local_rr_endpoint, self.local_ann_endpoint) for _n in range(n_local_workers)]
else:
self.local_ann_endpoint = None
self.local_rr_endpoint = None
self.local_workers = []
self.local_worker_processes = [multiprocessing.Process(target = worker.startup, args=(n,))
for (n, worker) in enumerate(self.local_workers)]
self.host_info_files = []
def write_host_info(self, filename=None):
filename = filename or 'zmq_host_info_{}.json'.format(self.node_id.hex)
hostname = socket.gethostname()
with open(filename, 'wt') as infofile:
info = {}
info['rr_endpoint'] = re.sub(r'\*', hostname, self.downstream_rr_endpoint or '')
info['ann_endpoint'] = re.sub(r'\*', hostname, self.downstream_ann_endpoint or '')
json.dump(info,infofile)
self.host_info_files.append(filename)
def startup(self):
for process in self.local_worker_processes:
process.start()
def shutdown(self):
try:
shutdown_timeout = self.shutdown_timeout
except AttributeError:
shutdown_timeout = 1.0
for process in self.local_worker_processes:
shutdown_process(process, shutdown_timeout)
for host_info_file in self.host_info_files:
try:
os.unlink(host_info_file)
except OSError:
pass
|
test_streams.py
|
"""Tests for streams.py."""
import gc
import os
import queue
import pickle
import socket
import sys
import threading
import unittest
from unittest import mock
from test import support
try:
import ssl
except ImportError:
ssl = None
import asyncio
from test.test_asyncio import utils as test_utils
def tearDownModule():
asyncio.set_event_loop_policy(None)
class StreamTests(test_utils.TestCase):
DATA = b'line1\nline2\nline3\n'
def setUp(self):
super().setUp()
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
def tearDown(self):
# just in case we have transport close callbacks
test_utils.run_briefly(self.loop)
self.loop.close()
gc.collect()
super().tearDown()
@mock.patch('asyncio.streams.events')
def test_ctor_global_loop(self, m_events):
stream = asyncio.StreamReader()
self.assertIs(stream._loop, m_events.get_event_loop.return_value)
def _basetest_open_connection(self, open_connection_fut):
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
reader, writer = self.loop.run_until_complete(open_connection_fut)
writer.write(b'GET / HTTP/1.0\r\n\r\n')
f = reader.readline()
data = self.loop.run_until_complete(f)
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
f = reader.read()
data = self.loop.run_until_complete(f)
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
writer.close()
self.assertEqual(messages, [])
def test_open_connection(self):
with test_utils.run_test_server() as httpd:
conn_fut = asyncio.open_connection(*httpd.address,
loop=self.loop)
self._basetest_open_connection(conn_fut)
@support.skip_unless_bind_unix_socket
def test_open_unix_connection(self):
with test_utils.run_test_unix_server() as httpd:
conn_fut = asyncio.open_unix_connection(httpd.address,
loop=self.loop)
self._basetest_open_connection(conn_fut)
def _basetest_open_connection_no_loop_ssl(self, open_connection_fut):
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
try:
reader, writer = self.loop.run_until_complete(open_connection_fut)
finally:
asyncio.set_event_loop(None)
writer.write(b'GET / HTTP/1.0\r\n\r\n')
f = reader.read()
data = self.loop.run_until_complete(f)
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
writer.close()
self.assertEqual(messages, [])
@unittest.skipIf(ssl is None, 'No ssl module')
def test_open_connection_no_loop_ssl(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
conn_fut = asyncio.open_connection(
*httpd.address,
ssl=test_utils.dummy_ssl_context(),
loop=self.loop)
self._basetest_open_connection_no_loop_ssl(conn_fut)
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_open_unix_connection_no_loop_ssl(self):
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
conn_fut = asyncio.open_unix_connection(
httpd.address,
ssl=test_utils.dummy_ssl_context(),
server_hostname='',
loop=self.loop)
self._basetest_open_connection_no_loop_ssl(conn_fut)
def _basetest_open_connection_error(self, open_connection_fut):
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
reader, writer = self.loop.run_until_complete(open_connection_fut)
writer._protocol.connection_lost(ZeroDivisionError())
f = reader.read()
with self.assertRaises(ZeroDivisionError):
self.loop.run_until_complete(f)
writer.close()
test_utils.run_briefly(self.loop)
self.assertEqual(messages, [])
def test_open_connection_error(self):
with test_utils.run_test_server() as httpd:
conn_fut = asyncio.open_connection(*httpd.address,
loop=self.loop)
self._basetest_open_connection_error(conn_fut)
@support.skip_unless_bind_unix_socket
def test_open_unix_connection_error(self):
with test_utils.run_test_unix_server() as httpd:
conn_fut = asyncio.open_unix_connection(httpd.address,
loop=self.loop)
self._basetest_open_connection_error(conn_fut)
def test_feed_empty_data(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'')
self.assertEqual(b'', stream._buffer)
def test_feed_nonempty_data(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(self.DATA)
self.assertEqual(self.DATA, stream._buffer)
def test_read_zero(self):
# Read zero bytes.
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(self.DATA)
data = self.loop.run_until_complete(stream.read(0))
self.assertEqual(b'', data)
self.assertEqual(self.DATA, stream._buffer)
def test_read(self):
# Read bytes.
stream = asyncio.StreamReader(loop=self.loop)
read_task = asyncio.Task(stream.read(30), loop=self.loop)
def cb():
stream.feed_data(self.DATA)
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(self.DATA, data)
self.assertEqual(b'', stream._buffer)
def test_read_line_breaks(self):
# Read bytes without line breaks.
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'line1')
stream.feed_data(b'line2')
data = self.loop.run_until_complete(stream.read(5))
self.assertEqual(b'line1', data)
self.assertEqual(b'line2', stream._buffer)
def test_read_eof(self):
# Read bytes, stop at eof.
stream = asyncio.StreamReader(loop=self.loop)
read_task = asyncio.Task(stream.read(1024), loop=self.loop)
def cb():
stream.feed_eof()
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(b'', data)
self.assertEqual(b'', stream._buffer)
def test_read_until_eof(self):
# Read all bytes until eof.
stream = asyncio.StreamReader(loop=self.loop)
read_task = asyncio.Task(stream.read(-1), loop=self.loop)
def cb():
stream.feed_data(b'chunk1\n')
stream.feed_data(b'chunk2')
stream.feed_eof()
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(b'chunk1\nchunk2', data)
self.assertEqual(b'', stream._buffer)
def test_read_exception(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.read(2))
self.assertEqual(b'li', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.read(2))
def test_invalid_limit(self):
with self.assertRaisesRegex(ValueError, 'imit'):
asyncio.StreamReader(limit=0, loop=self.loop)
with self.assertRaisesRegex(ValueError, 'imit'):
asyncio.StreamReader(limit=-1, loop=self.loop)
def test_read_limit(self):
stream = asyncio.StreamReader(limit=3, loop=self.loop)
stream.feed_data(b'chunk')
data = self.loop.run_until_complete(stream.read(5))
self.assertEqual(b'chunk', data)
self.assertEqual(b'', stream._buffer)
def test_readline(self):
# Read one line. 'readline' will need to wait for the data
# to come from 'cb'
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'chunk1 ')
read_task = asyncio.Task(stream.readline(), loop=self.loop)
def cb():
stream.feed_data(b'chunk2 ')
stream.feed_data(b'chunk3 ')
stream.feed_data(b'\n chunk4')
self.loop.call_soon(cb)
line = self.loop.run_until_complete(read_task)
self.assertEqual(b'chunk1 chunk2 chunk3 \n', line)
self.assertEqual(b' chunk4', stream._buffer)
def test_readline_limit_with_existing_data(self):
# Read one line. The data is in StreamReader's buffer
# before the event loop is run.
stream = asyncio.StreamReader(limit=3, loop=self.loop)
stream.feed_data(b'li')
stream.feed_data(b'ne1\nline2\n')
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
# The buffer should contain the remaining data after exception
self.assertEqual(b'line2\n', stream._buffer)
stream = asyncio.StreamReader(limit=3, loop=self.loop)
stream.feed_data(b'li')
stream.feed_data(b'ne1')
stream.feed_data(b'li')
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
# No b'\n' at the end. The 'limit' is set to 3. So before
# waiting for the new data in buffer, 'readline' will consume
# the entire buffer, and since the length of the consumed data
# is more than 3, it will raise a ValueError. The buffer is
# expected to be empty now.
self.assertEqual(b'', stream._buffer)
def test_at_eof(self):
stream = asyncio.StreamReader(loop=self.loop)
self.assertFalse(stream.at_eof())
stream.feed_data(b'some data\n')
self.assertFalse(stream.at_eof())
self.loop.run_until_complete(stream.readline())
self.assertFalse(stream.at_eof())
stream.feed_data(b'some data\n')
stream.feed_eof()
self.loop.run_until_complete(stream.readline())
self.assertTrue(stream.at_eof())
def test_readline_limit(self):
# Read one line. StreamReaders are fed with data after
# their 'readline' methods are called.
stream = asyncio.StreamReader(limit=7, loop=self.loop)
def cb():
stream.feed_data(b'chunk1')
stream.feed_data(b'chunk2')
stream.feed_data(b'chunk3\n')
stream.feed_eof()
self.loop.call_soon(cb)
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
# The buffer had just one line of data, and after raising
# a ValueError it should be empty.
self.assertEqual(b'', stream._buffer)
stream = asyncio.StreamReader(limit=7, loop=self.loop)
def cb():
stream.feed_data(b'chunk1')
stream.feed_data(b'chunk2\n')
stream.feed_data(b'chunk3\n')
stream.feed_eof()
self.loop.call_soon(cb)
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
self.assertEqual(b'chunk3\n', stream._buffer)
# check strictness of the limit
stream = asyncio.StreamReader(limit=7, loop=self.loop)
stream.feed_data(b'1234567\n')
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'1234567\n', line)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'12345678\n')
with self.assertRaises(ValueError) as cm:
self.loop.run_until_complete(stream.readline())
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'12345678')
with self.assertRaises(ValueError) as cm:
self.loop.run_until_complete(stream.readline())
self.assertEqual(b'', stream._buffer)
def test_readline_nolimit_nowait(self):
# All needed data for the first 'readline' call will be
# in the buffer.
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(self.DATA[:6])
stream.feed_data(self.DATA[6:])
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'line1\n', line)
self.assertEqual(b'line2\nline3\n', stream._buffer)
def test_readline_eof(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'some data')
stream.feed_eof()
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'some data', line)
def test_readline_empty_eof(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_eof()
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'', line)
def test_readline_read_byte_count(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(self.DATA)
self.loop.run_until_complete(stream.readline())
data = self.loop.run_until_complete(stream.read(7))
self.assertEqual(b'line2\nl', data)
self.assertEqual(b'ine3\n', stream._buffer)
def test_readline_exception(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'line\n', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
self.assertEqual(b'', stream._buffer)
def test_readuntil_separator(self):
stream = asyncio.StreamReader(loop=self.loop)
with self.assertRaisesRegex(ValueError, 'Separator should be'):
self.loop.run_until_complete(stream.readuntil(separator=b''))
def test_readuntil_multi_chunks(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'lineAAA')
data = self.loop.run_until_complete(stream.readuntil(separator=b'AAA'))
self.assertEqual(b'lineAAA', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'lineAAA')
data = self.loop.run_until_complete(stream.readuntil(b'AAA'))
self.assertEqual(b'lineAAA', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'lineAAAxxx')
data = self.loop.run_until_complete(stream.readuntil(b'AAA'))
self.assertEqual(b'lineAAA', data)
self.assertEqual(b'xxx', stream._buffer)
def test_readuntil_multi_chunks_1(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'QWEaa')
stream.feed_data(b'XYaa')
stream.feed_data(b'a')
data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
self.assertEqual(b'QWEaaXYaaa', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'QWEaa')
stream.feed_data(b'XYa')
stream.feed_data(b'aa')
data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
self.assertEqual(b'QWEaaXYaaa', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'aaa')
data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
self.assertEqual(b'aaa', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'Xaaa')
data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
self.assertEqual(b'Xaaa', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'XXX')
stream.feed_data(b'a')
stream.feed_data(b'a')
stream.feed_data(b'a')
data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
self.assertEqual(b'XXXaaa', data)
self.assertEqual(b'', stream._buffer)
def test_readuntil_eof(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'some dataAA')
stream.feed_eof()
with self.assertRaises(asyncio.IncompleteReadError) as cm:
self.loop.run_until_complete(stream.readuntil(b'AAA'))
self.assertEqual(cm.exception.partial, b'some dataAA')
self.assertIsNone(cm.exception.expected)
self.assertEqual(b'', stream._buffer)
def test_readuntil_limit_found_sep(self):
stream = asyncio.StreamReader(loop=self.loop, limit=3)
stream.feed_data(b'some dataAA')
with self.assertRaisesRegex(asyncio.LimitOverrunError,
'not found') as cm:
self.loop.run_until_complete(stream.readuntil(b'AAA'))
self.assertEqual(b'some dataAA', stream._buffer)
stream.feed_data(b'A')
with self.assertRaisesRegex(asyncio.LimitOverrunError,
'is found') as cm:
self.loop.run_until_complete(stream.readuntil(b'AAA'))
self.assertEqual(b'some dataAAA', stream._buffer)
def test_readexactly_zero_or_less(self):
# Read exact number of bytes (zero or less).
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(self.DATA)
data = self.loop.run_until_complete(stream.readexactly(0))
self.assertEqual(b'', data)
self.assertEqual(self.DATA, stream._buffer)
with self.assertRaisesRegex(ValueError, 'less than zero'):
self.loop.run_until_complete(stream.readexactly(-1))
self.assertEqual(self.DATA, stream._buffer)
def test_readexactly(self):
# Read exact number of bytes.
stream = asyncio.StreamReader(loop=self.loop)
n = 2 * len(self.DATA)
read_task = asyncio.Task(stream.readexactly(n), loop=self.loop)
def cb():
stream.feed_data(self.DATA)
stream.feed_data(self.DATA)
stream.feed_data(self.DATA)
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(self.DATA + self.DATA, data)
self.assertEqual(self.DATA, stream._buffer)
def test_readexactly_limit(self):
stream = asyncio.StreamReader(limit=3, loop=self.loop)
stream.feed_data(b'chunk')
data = self.loop.run_until_complete(stream.readexactly(5))
self.assertEqual(b'chunk', data)
self.assertEqual(b'', stream._buffer)
def test_readexactly_eof(self):
# Read exact number of bytes (eof).
stream = asyncio.StreamReader(loop=self.loop)
n = 2 * len(self.DATA)
read_task = asyncio.Task(stream.readexactly(n), loop=self.loop)
def cb():
stream.feed_data(self.DATA)
stream.feed_eof()
self.loop.call_soon(cb)
with self.assertRaises(asyncio.IncompleteReadError) as cm:
self.loop.run_until_complete(read_task)
self.assertEqual(cm.exception.partial, self.DATA)
self.assertEqual(cm.exception.expected, n)
self.assertEqual(str(cm.exception),
'18 bytes read on a total of 36 expected bytes')
self.assertEqual(b'', stream._buffer)
def test_readexactly_exception(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.readexactly(2))
self.assertEqual(b'li', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readexactly(2))
def test_exception(self):
stream = asyncio.StreamReader(loop=self.loop)
self.assertIsNone(stream.exception())
exc = ValueError()
stream.set_exception(exc)
self.assertIs(stream.exception(), exc)
def test_exception_waiter(self):
stream = asyncio.StreamReader(loop=self.loop)
@asyncio.coroutine
def set_err():
stream.set_exception(ValueError())
t1 = asyncio.Task(stream.readline(), loop=self.loop)
t2 = asyncio.Task(set_err(), loop=self.loop)
self.loop.run_until_complete(asyncio.wait([t1, t2]))
self.assertRaises(ValueError, t1.result)
def test_exception_cancel(self):
stream = asyncio.StreamReader(loop=self.loop)
t = asyncio.Task(stream.readline(), loop=self.loop)
test_utils.run_briefly(self.loop)
t.cancel()
test_utils.run_briefly(self.loop)
# The following line fails if set_exception() isn't careful.
stream.set_exception(RuntimeError('message'))
test_utils.run_briefly(self.loop)
self.assertIs(stream._waiter, None)
def test_start_server(self):
class MyServer:
def __init__(self, loop):
self.server = None
self.loop = loop
async def handle_client(self, client_reader, client_writer):
data = await client_reader.readline()
client_writer.write(data)
await client_writer.drain()
client_writer.close()
def start(self):
sock = socket.socket()
sock.bind(('127.0.0.1', 0))
self.server = self.loop.run_until_complete(
asyncio.start_server(self.handle_client,
sock=sock,
loop=self.loop))
return sock.getsockname()
def handle_client_callback(self, client_reader, client_writer):
self.loop.create_task(self.handle_client(client_reader,
client_writer))
def start_callback(self):
sock = socket.socket()
sock.bind(('127.0.0.1', 0))
addr = sock.getsockname()
sock.close()
self.server = self.loop.run_until_complete(
asyncio.start_server(self.handle_client_callback,
host=addr[0], port=addr[1],
loop=self.loop))
return addr
def stop(self):
if self.server is not None:
self.server.close()
self.loop.run_until_complete(self.server.wait_closed())
self.server = None
async def client(addr):
reader, writer = await asyncio.open_connection(
*addr, loop=self.loop)
# send a line
writer.write(b"hello world!\n")
# read it back
msgback = await reader.readline()
writer.close()
return msgback
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
# test the server variant with a coroutine as client handler
server = MyServer(self.loop)
addr = server.start()
msg = self.loop.run_until_complete(asyncio.Task(client(addr),
loop=self.loop))
server.stop()
self.assertEqual(msg, b"hello world!\n")
# test the server variant with a callback as client handler
server = MyServer(self.loop)
addr = server.start_callback()
msg = self.loop.run_until_complete(asyncio.Task(client(addr),
loop=self.loop))
server.stop()
self.assertEqual(msg, b"hello world!\n")
self.assertEqual(messages, [])
@support.skip_unless_bind_unix_socket
def test_start_unix_server(self):
class MyServer:
def __init__(self, loop, path):
self.server = None
self.loop = loop
self.path = path
async def handle_client(self, client_reader, client_writer):
data = await client_reader.readline()
client_writer.write(data)
await client_writer.drain()
client_writer.close()
def start(self):
self.server = self.loop.run_until_complete(
asyncio.start_unix_server(self.handle_client,
path=self.path,
loop=self.loop))
def handle_client_callback(self, client_reader, client_writer):
self.loop.create_task(self.handle_client(client_reader,
client_writer))
def start_callback(self):
start = asyncio.start_unix_server(self.handle_client_callback,
path=self.path,
loop=self.loop)
self.server = self.loop.run_until_complete(start)
def stop(self):
if self.server is not None:
self.server.close()
self.loop.run_until_complete(self.server.wait_closed())
self.server = None
async def client(path):
reader, writer = await asyncio.open_unix_connection(
path, loop=self.loop)
# send a line
writer.write(b"hello world!\n")
# read it back
msgback = await reader.readline()
writer.close()
return msgback
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
# test the server variant with a coroutine as client handler
with test_utils.unix_socket_path() as path:
server = MyServer(self.loop, path)
server.start()
msg = self.loop.run_until_complete(asyncio.Task(client(path),
loop=self.loop))
server.stop()
self.assertEqual(msg, b"hello world!\n")
# test the server variant with a callback as client handler
with test_utils.unix_socket_path() as path:
server = MyServer(self.loop, path)
server.start_callback()
msg = self.loop.run_until_complete(asyncio.Task(client(path),
loop=self.loop))
server.stop()
self.assertEqual(msg, b"hello world!\n")
self.assertEqual(messages, [])
@unittest.skipIf(sys.platform == 'win32', "Don't have pipes")
def test_read_all_from_pipe_reader(self):
# See asyncio issue 168. This test is derived from the example
# subprocess_attach_read_pipe.py, but we configure the
# StreamReader's limit so that twice it is less than the size
        # of the data written. Also we must explicitly attach a child
# watcher to the event loop.
code = """\
import os, sys
fd = int(sys.argv[1])
os.write(fd, b'data')
os.close(fd)
"""
rfd, wfd = os.pipe()
args = [sys.executable, '-c', code, str(wfd)]
pipe = open(rfd, 'rb', 0)
reader = asyncio.StreamReader(loop=self.loop, limit=1)
protocol = asyncio.StreamReaderProtocol(reader, loop=self.loop)
transport, _ = self.loop.run_until_complete(
self.loop.connect_read_pipe(lambda: protocol, pipe))
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
try:
asyncio.set_child_watcher(watcher)
create = asyncio.create_subprocess_exec(*args,
pass_fds={wfd},
loop=self.loop)
proc = self.loop.run_until_complete(create)
self.loop.run_until_complete(proc.wait())
finally:
asyncio.set_child_watcher(None)
os.close(wfd)
data = self.loop.run_until_complete(reader.read(-1))
self.assertEqual(data, b'data')
def test_streamreader_constructor(self):
self.addCleanup(asyncio.set_event_loop, None)
asyncio.set_event_loop(self.loop)
# asyncio issue #184: Ensure that StreamReaderProtocol constructor
# retrieves the current loop if the loop parameter is not set
reader = asyncio.StreamReader()
self.assertIs(reader._loop, self.loop)
def test_streamreaderprotocol_constructor(self):
self.addCleanup(asyncio.set_event_loop, None)
asyncio.set_event_loop(self.loop)
# asyncio issue #184: Ensure that StreamReaderProtocol constructor
# retrieves the current loop if the loop parameter is not set
reader = mock.Mock()
protocol = asyncio.StreamReaderProtocol(reader)
self.assertIs(protocol._loop, self.loop)
def test_drain_raises(self):
# See http://bugs.python.org/issue25441
# This test should not use asyncio for the mock server; the
# whole point of the test is to test for a bug in drain()
# where it never gives up the event loop but the socket is
# closed on the server side.
q = queue.Queue()
def server():
# Runs in a separate thread.
sock = socket.socket()
with sock:
sock.bind(('localhost', 0))
sock.listen(1)
addr = sock.getsockname()
q.put(addr)
clt, _ = sock.accept()
clt.close()
async def client(host, port):
reader, writer = await asyncio.open_connection(
host, port, loop=self.loop)
while True:
writer.write(b"foo\n")
await writer.drain()
# Start the server thread and wait for it to be listening.
thread = threading.Thread(target=server)
thread.setDaemon(True)
thread.start()
addr = q.get()
# Should not be stuck in an infinite loop.
with self.assertRaises((ConnectionResetError, ConnectionAbortedError,
BrokenPipeError)):
self.loop.run_until_complete(client(*addr))
# Clean up the thread. (Only on success; on failure, it may
# be stuck in accept().)
thread.join()
def test___repr__(self):
stream = asyncio.StreamReader(loop=self.loop)
self.assertEqual("<StreamReader>", repr(stream))
def test___repr__nondefault_limit(self):
stream = asyncio.StreamReader(loop=self.loop, limit=123)
self.assertEqual("<StreamReader limit=123>", repr(stream))
def test___repr__eof(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_eof()
self.assertEqual("<StreamReader eof>", repr(stream))
def test___repr__data(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'data')
self.assertEqual("<StreamReader 4 bytes>", repr(stream))
def test___repr__exception(self):
stream = asyncio.StreamReader(loop=self.loop)
exc = RuntimeError()
stream.set_exception(exc)
self.assertEqual("<StreamReader exception=RuntimeError()>",
repr(stream))
def test___repr__waiter(self):
stream = asyncio.StreamReader(loop=self.loop)
stream._waiter = asyncio.Future(loop=self.loop)
self.assertRegex(
repr(stream),
r"<StreamReader waiter=<Future pending[\S ]*>>")
stream._waiter.set_result(None)
self.loop.run_until_complete(stream._waiter)
stream._waiter = None
self.assertEqual("<StreamReader>", repr(stream))
def test___repr__transport(self):
stream = asyncio.StreamReader(loop=self.loop)
stream._transport = mock.Mock()
stream._transport.__repr__ = mock.Mock()
stream._transport.__repr__.return_value = "<Transport>"
self.assertEqual("<StreamReader transport=<Transport>>", repr(stream))
def test_IncompleteReadError_pickleable(self):
e = asyncio.IncompleteReadError(b'abc', 10)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(pickle_protocol=proto):
e2 = pickle.loads(pickle.dumps(e, protocol=proto))
self.assertEqual(str(e), str(e2))
self.assertEqual(e.partial, e2.partial)
self.assertEqual(e.expected, e2.expected)
def test_LimitOverrunError_pickleable(self):
e = asyncio.LimitOverrunError('message', 10)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(pickle_protocol=proto):
e2 = pickle.loads(pickle.dumps(e, protocol=proto))
self.assertEqual(str(e), str(e2))
self.assertEqual(e.consumed, e2.consumed)
def test_wait_closed_on_close(self):
with test_utils.run_test_server() as httpd:
rd, wr = self.loop.run_until_complete(
asyncio.open_connection(*httpd.address, loop=self.loop))
wr.write(b'GET / HTTP/1.0\r\n\r\n')
f = rd.readline()
data = self.loop.run_until_complete(f)
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
f = rd.read()
data = self.loop.run_until_complete(f)
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
self.assertFalse(wr.is_closing())
wr.close()
self.assertTrue(wr.is_closing())
self.loop.run_until_complete(wr.wait_closed())
def test_wait_closed_on_close_with_unread_data(self):
with test_utils.run_test_server() as httpd:
rd, wr = self.loop.run_until_complete(
asyncio.open_connection(*httpd.address, loop=self.loop))
wr.write(b'GET / HTTP/1.0\r\n\r\n')
f = rd.readline()
data = self.loop.run_until_complete(f)
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
wr.close()
self.loop.run_until_complete(wr.wait_closed())
def test_del_stream_before_sock_closing(self):
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
with test_utils.run_test_server() as httpd:
rd, wr = self.loop.run_until_complete(
asyncio.open_connection(*httpd.address, loop=self.loop))
sock = wr.get_extra_info('socket')
self.assertNotEqual(sock.fileno(), -1)
wr.write(b'GET / HTTP/1.0\r\n\r\n')
f = rd.readline()
data = self.loop.run_until_complete(f)
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
# drop refs to reader/writer
del rd
del wr
gc.collect()
            # give the event loop a chance to close the socket
test_utils.run_briefly(self.loop)
self.assertEqual(1, len(messages))
self.assertEqual(sock.fileno(), -1)
self.assertEqual(1, len(messages))
self.assertEqual('An open stream object is being garbage '
'collected; call "stream.close()" explicitly.',
messages[0]['message'])
def test_del_stream_before_connection_made(self):
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
with test_utils.run_test_server() as httpd:
rd = asyncio.StreamReader(loop=self.loop)
pr = asyncio.StreamReaderProtocol(rd, loop=self.loop)
del rd
gc.collect()
tr, _ = self.loop.run_until_complete(
self.loop.create_connection(
lambda: pr, *httpd.address))
sock = tr.get_extra_info('socket')
self.assertEqual(sock.fileno(), -1)
self.assertEqual(1, len(messages))
self.assertEqual('An open stream was garbage collected prior to '
'establishing network connection; '
'call "stream.close()" explicitly.',
messages[0]['message'])
def test_async_writer_api(self):
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
with test_utils.run_test_server() as httpd:
rd, wr = self.loop.run_until_complete(
asyncio.open_connection(*httpd.address,
loop=self.loop))
f = wr.awrite(b'GET / HTTP/1.0\r\n\r\n')
self.loop.run_until_complete(f)
f = rd.readline()
data = self.loop.run_until_complete(f)
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
f = rd.read()
data = self.loop.run_until_complete(f)
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
f = wr.aclose()
self.loop.run_until_complete(f)
self.assertEqual(messages, [])
if __name__ == '__main__':
unittest.main()
|
collatz.py
|
#######################################################
#                                                     #
#  Collatz conjecture algorithm:                      #
#   - Starting from a positive integer number         #
#   - If the number is even, divide it by two         #
#   - If the number is odd, triple it and add one     #
#   - If the number equals 1, the algorithm ends      #
#                                                     #
#######################################################
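
# A pure, in-memory version of the rule described in the box above, kept here
# as an illustrative reference helper; it is not called by the file-writing
# code below.
def collatz_sequence(n):
    """Return the Collatz sequence starting at n, e.g. 6 -> [6, 3, 10, 5, 16, 8, 4, 2, 1]."""
    sequence = [n]
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1  # even: halve, odd: 3n + 1
        sequence.append(n)
    return sequence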
import glob
import os
import datetime
import matplotlib.pyplot as plt
from threading import Thread as Th
#from multiprocessing import Process
PROGRAM_STATE = {1: True, 2: False}
THREAD_MAX_NUM = 5
THREADS_ARGS = {0: 0, 1: -1, 2: 1, 3: -2, 4: 2}
DIRNAME = "testfiles"
FILENAME = "_Test.txt"
MULTIPLOT = 0
def collatz_num(n, subdir_name):
with open(f"{DIRNAME}/{subdir_name}/{str(int(n)) + FILENAME}", "w") as fout:
fout.write(str(int(n)))
while n != 1:
fout.write(", ")
if n % 2 == 0:
n = int(n / 2)
else:
n = (n * 3) + 1
fout.write(str(int(n)))
def plotres(subdir_name):
tmp_values_arr = []
tmp_step_arr = []
f_txts = glob.glob(f"{DIRNAME}/{subdir_name}/*.txt")
for f_test in f_txts:
with open(f_test, 'r') as tfile:
for line in tfile:
for step, element in enumerate(line.split(', ')):
tmp_values_arr.append(int(element))
tmp_step_arr.append(step)
plt.plot(tmp_step_arr, tmp_values_arr)
tmp_values_arr = []
tmp_step_arr = []
plt.xlabel('step number')
plt.ylabel('value')
plt.grid(True)
plt.show()
def create_dir():
if not os.path.exists(DIRNAME):
os.mkdir(DIRNAME)
date = datetime.datetime.now()
subdir_name = str(date.year) + '_' + str(date.month) + '_' + str(date.day) + '_' + str(date.hour) + '_' +\
str(date.minute) + '_' + str(date.second)
if not os.path.exists(f'{DIRNAME}/{subdir_name}'):
os.mkdir(f'{DIRNAME}/{subdir_name}')
return subdir_name
def create_threads(value, subdir_name):
threads_pool = {}
if value > THREAD_MAX_NUM:
delta = value / THREAD_MAX_NUM
for thread_num in range(THREAD_MAX_NUM):
            deltanum = int(THREADS_ARGS[thread_num] * delta)  # keep the start value an integer so collatz_num terminates
threads_pool[thread_num] = Th(target=collatz_num, args=(value + deltanum, subdir_name,))
else:
thread_num = 0
while value > 0:
threads_pool[thread_num] = Th(target=collatz_num, args=(value, subdir_name,))
thread_num += 1
value -= 1
return threads_pool
if __name__ == "__main__":
to_continue = True
while to_continue:
value = 0
found = False
while not found:
try:
value = int(input("Insert any integer number greater than 1 :\n"))
if value <= 1:
print("Invalid Value\n")
else:
found = True
except ValueError:
print("Invalid Value\n")
subdir_name = create_dir()
threads_pool = create_threads(value, subdir_name)
for thread in threads_pool.keys():
threads_pool[thread].start()
for thread in threads_pool.keys():
threads_pool[thread].join()
if not MULTIPLOT:
plotres(subdir_name)
else:
plotres(subdir_name) #NOT ACTUALLY IMPLEMENTED
#p = Process(target=plotres, args=(subdir_name,))
#p.start()
while True:
try:
to_continue = PROGRAM_STATE[int(input("Do you want to try another value?: 1-Yes 2-No\n"))]
break
except KeyError:
print("Invalid Value\n")
except ValueError:
print("Invalid Value, not a number\n")
|
controller.py
|
import binascii
import datetime
import glob
import hashlib
import json
import logging
import subprocess
import time
from multiprocessing import Process, Lock, Manager
from os import path
import pygame
from flask import Flask, render_template, request
import settings
import util
from rfid import RFID
logger = logging.getLogger(__name__)
"""
Main controller, runs in an infinite loop.
Reads and acts on NFC codes, supplies web interface for tag management.
Web interface is at http://<raspi IP or host name>:5000
Autostart: 'crontab -e', then add line
@reboot cd <project directory> && python -u controller.py >> /home/pi/tmp/nfcmusik.log.txt 2>&1 &
"""
# control bytes for NFC payload
CONTROL_BYTES = dict(
MUSIC_FILE='\x11',
)
# global debug output flag
DEBUG = False
# shut down wlan0 interface N seconds after startup (or last server interaction)
WLAN_OFF_DELAY = 180
class RFIDHandler(object):
"""
RFID handler
"""
def __init__(self):
# flag to stop polling
self.do_stop = False
# mutex for RFID access
self.mutex = Lock()
# manager for interprocess data sharing (polling process writes uid/data)
self.manager = Manager()
# current tag uid
self.uid = self.manager.list(range(5))
# current tag data - 16 bytes
self.data = self.manager.list(range(16))
# music files dictionary
self.music_files_dict = self.manager.dict()
# startup time or last server interaction
self.startup = datetime.datetime.now()
# flag for inter-process communication: reset the startup time
self.reset_startup = self.manager.Value('c', 0)
self.reset_startup.value = 0
# have we shut off WiFi already?
self.is_wlan_off = False
# NFC memory page to use for reading/writing
self.page = 10
# polling cycle time (seconds)
self.sleep = 0.5
# music playing status
self.current_music = None
# last played music file
self.previous_music = None
# must have seen stop signal N times to stop music - avoid
# stopping if signal drops out briefly
self.stop_music_on_stop_count = 3
# to replay same music file, must have seen at least N periods
# of no token - avoid replaying if token is left on device
# but signal drops out briefly
self.replay_on_stop_count = 3
# stop signal counter
self.stop_count = 0
def poll_loop(self):
"""
Poll for presence of tag, read data, until stop() is called.
"""
# initialize music mixer
pygame.mixer.init()
# set default volume
util.set_volume(settings.DEFAULT_VOLUME)
while not self.do_stop:
with self.mutex:
# initialize tag state
self.uid[0] = None
self.data[0] = None
# always create a new RFID interface instance, to clear any errors from previous operations
rdr = RFID()
# check for presence of tag
err, _ = rdr.request()
if not err:
logger.debug("RFIDHandler poll_loop: Tag is present")
# tag is present, get UID
err, uid = rdr.anticoll()
if not err:
logger.debug(f"RFIDHandler poll_loop: Read UID: {uid}")
# read data
err, data = rdr.read(self.page)
if not err:
logger.debug(f"RFIDHandler poll_loop: Read tag data: {data}")
# all good, store data to shared mem
for i in range(5):
self.uid[i] = uid[i]
for i in range(16):
self.data[i] = data[i]
else:
logger.debug("RFIDHandler poll_loop: Error returned from read()")
else:
logger.debug("RFIDHandler poll_loop: Error returned from anticoll()")
# clean up
rdr.cleanup()
# act on data
self.action()
# wait a bit (this is in while loop, NOT in mutex env)
time.sleep(self.sleep)
def write(self, data):
"""
Write a 16-byte string of data to the tag
"""
if len(data) != 16:
logger.debug(f"Illegal data length, expected 16, got {len(data)}")
return False
with self.mutex:
rdr = RFID()
success = False
# check for presence of tag
err, _ = rdr.request()
if not err:
logger.debug("RFIDHandler write: Tag is present")
# tag is present, get UID
err, uid = rdr.anticoll()
if not err:
logger.debug("RFIDHandler write: Read UID: " + str(uid))
# write data: RFID lib writes 16 bytes at a time, but for NTAG213
# only the first four are actually written
err = False
for i in range(4):
page = self.page + i
page_data = [ord(c) for c in data[4 * i: 4 * i + 4]] + [0] * 12
# read data once (necessary for successful writing?)
err_read, _ = rdr.read(page)
if err:
logger.debug("Error signaled on reading page {:d} before writing".format(page))
# write data
err |= rdr.write(page, page_data)
if err:
                            logger.debug(f'Error signaled on writing page {page:d} with data {page_data}')
if not err:
logger.debug("RFIDHandler write: successfully wrote tag data")
success = True
else:
logger.debug("RFIDHandler write: Error returned from write()")
else:
logger.debug("RFIDHandler write: Error returned from anticoll()")
# clean up
rdr.cleanup()
return success
def get_data(self):
"""
Get current tag data as binary string
"""
with self.mutex:
data = list(self.data)
if data[0] is not None:
return "".join([chr(c) for c in data])
else:
return None
def get_uid(self):
"""
Get current tag UID
"""
with self.mutex:
uid = list(self.uid)
if uid[0] is not None:
return "".join([chr(c) for c in uid])
else:
return None
def set_music_files_dict(self, mfd):
"""
Set dictionary of file hashes and music files
"""
with self.mutex:
for k, v in mfd.items():
self.music_files_dict[k] = v
def reset_startup_timer(self):
"""
Set flag to reset the startup timer
"""
self.reset_startup.value = 1
def stop_polling(self):
"""
Stop polling loop
"""
self.do_stop = True
def action(self):
"""
Act on NFC data - call this from within a mutex lock
"""
# check if we should reset the startup time
if self.reset_startup.value > 0:
self.reset_startup.value = 0
self.startup = datetime.datetime.now()
# if enough time has elapsed, shut off the WiFi interface
delta = (datetime.datetime.now() - self.startup).total_seconds()
if delta > WLAN_OFF_DELAY and not self.is_wlan_off:
logger.info("Shutting down WiFi")
self.is_wlan_off = True
subprocess.call(['sudo', 'ifdown', 'wlan0'])
if int(delta) % 10 == 0 and not self.is_wlan_off:
logger.info(f'Shutting down WiFi in (seconds): {WLAN_OFF_DELAY - delta}')
# check if we have valid data
if self.data[0] is not None:
bin_data = "".join([chr(c) for c in self.data])
if bin_data[0] == CONTROL_BYTES['MUSIC_FILE']:
if bin_data in self.music_files_dict:
file_name = self.music_files_dict[bin_data]
file_path = path.join(settings.MUSIC_ROOT, file_name)
if file_name != self.current_music:
# only replay same music file if we saw at least N periods
# of no token
if path.exists(file_path) and (
file_name != self.previous_music or self.stop_count >= self.replay_on_stop_count):
logger.info(f'Playing music file: {file_path}')
# play music file
self.current_music = file_name
self.previous_music = file_name
pygame.mixer.music.load(file_path)
pygame.mixer.music.play()
else:
if not path.exists(file_path):
logger.debug(f'File not found: {file_path}')
# token seen - reset stop counter
self.stop_count = 0
else:
logger.debug('Got music file control byte, but unknown file hash')
else:
logger.debug('Unknown control byte')
else:
self.stop_count += 1
logger.debug(f"Resetting action status, stop count {self.stop_count}")
# only stop after token absence for at least N times
if self.stop_count >= self.stop_music_on_stop_count:
self.current_music = None
if pygame.mixer.music.get_busy():
pygame.mixer.music.stop()
#
# Global objects
#
app = Flask(__name__)
# global dictionary of music file hashes and names
music_files_dict = dict()
# global RFID handler instance
rfid_handler = RFIDHandler()
# RFID handling process
rfid_polling_process = Process(target=rfid_handler.poll_loop)
#
# End global objects
#
def music_file_hash(file_name):
"""
Get hash of music file name, replace first byte with a control byte for music playing.
"""
m = hashlib.md5()
m.update(file_name)
return CONTROL_BYTES['MUSIC_FILE'] + m.digest()[1:]
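
# Illustrative only (never called): shows how a 16-byte payload produced by
# music_file_hash() maps back to a file name, mirroring the music_files_dict
# built by the endpoint below. 'song.mp3' is a hypothetical file name.
def _example_payload_lookup(file_name='song.mp3'):
    payload = music_file_hash(file_name)  # control byte + last 15 bytes of the md5 digest
    assert len(payload) == 16             # fits a single 16-byte NFC data block
    return {payload: file_name}           # same key/value shape as music_files_dict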
@app.route("/json/musicfiles")
def music_files():
"""
Get a list of music files and file identifier hashes as JSON; also refresh
internal cache of music files and hashes.
"""
global music_files_dict
file_paths = sorted(glob.glob(path.join(settings.MUSIC_ROOT, '*')))
out = []
music_files_dict = dict()
for file_path in file_paths:
file_name = path.split(file_path)[1]
file_hash = music_file_hash(file_name)
out.append(dict(name=file_name,
hash=binascii.b2a_hex(file_hash)))
music_files_dict[file_hash] = file_name
# set music files dict in RFID handler
rfid_handler.set_music_files_dict(music_files_dict)
return json.dumps(out)
@app.route("/json/readnfc")
def read_nfc():
"""
Get current status of NFC tag
"""
global music_files_dict
# get current NFC uid and data
uid = rfid_handler.get_uid()
if uid is None:
hex_uid = "none"
else:
hex_uid = binascii.b2a_hex(uid)
data = rfid_handler.get_data()
if data is None:
hex_data = "none"
description = "No tag present"
else:
hex_data = binascii.b2a_hex(data)
description = 'Unknown control byte or tag empty'
if data[0] == CONTROL_BYTES['MUSIC_FILE']:
if data in music_files_dict:
description = 'Play music file ' + music_files_dict[data]
else:
description = 'Play a music file not currently present on the device'
# output container
out = dict(uid=hex_uid,
data=hex_data,
description=description)
return json.dumps(out)
@app.route("/actions/writenfc")
def write_nfc():
"""
Write data to NFC tag
Data is contained in get argument 'data'.
"""
hex_data = request.args.get('data')
if hex_data is None:
logger.error("No data argument given for writenfc endpoint")
return
# convert from hex to bytes
data = binascii.a2b_hex(hex_data)
if data[0] == CONTROL_BYTES['MUSIC_FILE']:
if data not in music_files_dict:
return json.dumps(dict(message="Unknown hash value!"))
# write tag
success = rfid_handler.write(data)
if success:
file_name = music_files_dict[data]
return json.dumps(dict(message="Successfully wrote NFC tag for file: " + file_name))
else:
return json.dumps(dict(message="Error writing NFC tag data " + hex_data))
else:
return json.dumps(dict(message='Unknown control byte: ' + binascii.b2a_hex(data[0])))
@app.route("/")
def home():
# reset wlan shutdown counter when loading page
rfid_handler.reset_startup_timer()
return render_template("home.html")
if __name__ == "__main__":
# start RFID polling
rfid_polling_process.start()
# initialize music files dict
music_files()
# run server
app.run(host=settings.SERVER_HOST_MASK,
port=settings.SERVER_PORT,
threaded=True)
|
random_shuffle_queue_test.py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.Queue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import re
import time
import tensorflow.python.platform
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class RandomShuffleQueueTest(tf.test.TestCase):
def setUp(self):
# Useful for debugging when a test times out.
super(RandomShuffleQueueTest, self).setUp()
tf.logging.error("Starting: %s", self._testMethodName)
def tearDown(self):
super(RandomShuffleQueueTest, self).tearDown()
tf.logging.error("Finished: %s", self._testMethodName)
def testEnqueue(self):
with self.test_session():
q = tf.RandomShuffleQueue(10, 5, tf.float32)
enqueue_op = q.enqueue((10.0,))
self.assertAllEqual(0, q.size().eval())
enqueue_op.run()
self.assertAllEqual(1, q.size().eval())
def testEnqueueWithShape(self):
with self.test_session():
q = tf.RandomShuffleQueue(
10, 5, tf.float32, shapes=tf.TensorShape([3, 2]))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
self.assertAllEqual(1, q.size().eval())
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
def testEnqueueManyWithShape(self):
with self.test_session():
q = tf.RandomShuffleQueue(
10, 5, [tf.int32, tf.int32],
shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertAllEqual(4, q.size().eval())
q2 = tf.RandomShuffleQueue(10, 5, tf.int32, shapes=tf.TensorShape([3]))
q2.enqueue(([1, 2, 3],))
q2.enqueue_many(([[1, 2, 3]],))
def testScalarShapes(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(
10, 0, [tf.int32, tf.int32],
shapes=[(), (1,)])
q.enqueue_many([[1, 2, 3, 4], [[5], [6], [7], [8]]]).run()
q.enqueue([9, [10]]).run()
dequeue_t = q.dequeue()
results = []
for _ in range(2):
a, b = sess.run(dequeue_t)
results.append((a, b))
a, b = sess.run(q.dequeue_many(3))
for i in range(3):
results.append((a[i], b[i]))
self.assertItemsEqual([(1, [5]), (2, [6]), (3, [7]), (4, [8]), (9, [10])],
results)
def testParallelEnqueue(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(10, 0, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
threads = [self.checkedThread(target=enqueue, args=(e,))
for e in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(10, 0, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.test_session():
q = tf.RandomShuffleQueue(10, 0, tf.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
vals = [dequeued_t.eval() for _ in xrange(len(elems))]
self.assertItemsEqual(elems, vals)
def testEnqueueAndBlockingDequeue(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(3, 0, tf.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
sess.run(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(sess.run(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, results)
def testMultiEnqueueAndDequeue(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(
10, 0, (tf.int32, tf.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
results = []
for _ in xrange(len(elems)):
x, y = sess.run(dequeued_t)
results.append((x, y))
self.assertItemsEqual(elems, results)
def testQueueSizeEmpty(self):
with self.test_session():
q = tf.RandomShuffleQueue(10, 5, tf.float32)
self.assertEqual(0, q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.test_session():
q = tf.RandomShuffleQueue(10, 0, tf.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual([1], size.eval())
dequeued_t.op.run()
self.assertEqual([0], size.eval())
def testEnqueueMany(self):
with self.test_session():
q = tf.RandomShuffleQueue(10, 0, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + elems, results)
def testEmptyEnqueueMany(self):
with self.test_session():
q = tf.RandomShuffleQueue(10, 5, tf.float32)
empty_t = tf.constant([], dtype=tf.float32,
shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual(0, size_t.eval())
enqueue_op.run()
self.assertEqual(0, size_t.eval())
def testEmptyDequeueMany(self):
with self.test_session():
q = tf.RandomShuffleQueue(10, 0, tf.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueManyWithNoShape(self):
with self.test_session():
q = tf.RandomShuffleQueue(10, 0, tf.float32)
enqueue_op = q.enqueue(
(tf.constant([10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_many(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"requires the components to have specified shapes"):
dequeued_t.eval()
enqueue_op.run()
# Unlike tf.Queue, RandomShuffleQueue does not make any
# attempt to support DequeueMany with unspecified shapes, even if
# a shape could be inferred from the elements enqueued.
with self.assertRaisesOpError(
"requires the components to have specified shapes"):
dequeued_t.eval()
def testMultiEnqueueMany(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(
10, 0, (tf.float32, tf.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
float_val, int_val = sess.run(dequeued_t)
results.append((float_val, [int_val[0], int_val[1]]))
expected = list(zip(float_elems, int_elems)) * 2
self.assertItemsEqual(expected, results)
def testDequeueMany(self):
with self.test_session():
q = tf.RandomShuffleQueue(10, 0, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(5)
enqueue_op.run()
results = dequeued_t.eval().tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testMultiDequeueMany(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(
10, 0, (tf.float32, tf.int32),
shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
[11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
results = []
float_val, int_val = sess.run(dequeued_t)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_t)
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_single_t)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
results.append((float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_single_t)
results.append((float_val, int_val.tolist()))
self.assertItemsEqual(zip(float_elems, int_elems), results)
def testHighDimension(self):
with self.test_session():
q = tf.RandomShuffleQueue(
10, 0, tf.int32, ((4, 4, 4, 4)))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertItemsEqual(dequeued_t.eval().tolist(), elems.tolist())
def testParallelEnqueueMany(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(1000, 0, tf.float32, shapes=())
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
sess.run(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(1000, 0, tf.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueMany(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(10, 0, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
sess.run(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.test_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = tf.RandomShuffleQueue(100, 0, tf.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = tf.RandomShuffleQueue(
total_count, 0, tf.int32, ((),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.test_session():
q = tf.RandomShuffleQueue(10, 2, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
results = [dequeued_t.eval() for _ in elems]
expected = [[elem] for elem in elems]
self.assertItemsEqual(expected, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
dequeued_t.eval()
def testBlockingDequeueFromClosedQueue(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(10, 2, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
results = []
def dequeue():
for _ in elems:
results.append(sess.run(dequeued_t))
self.assertItemsEqual(elems, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
# The dequeue thread blocked when it hit the min_size requirement.
self.assertEqual(len(results), 2)
close_op.run()
dequeue_thread.join()
# Once the queue is closed, the min_size requirement is lifted.
self.assertEqual(len(results), 4)
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(10, 0, tf.float32)
close_op = q.close()
dequeued_t = q.dequeue()
finished = [] # Needs to be a mutable type
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
finished.append(True)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(finished), 0)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(finished), 1)
def testBlockingDequeueManyFromClosedQueue(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(10, 0, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
progress = [] # Must be mutable
def dequeue():
self.assertItemsEqual(elems, sess.run(dequeued_t))
progress.append(1)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
progress.append(2)
self.assertEqual(len(progress), 0)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
for _ in range(100):
time.sleep(0.01)
if len(progress) == 1: break
self.assertEqual(len(progress), 1)
time.sleep(0.01)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(progress), 2)
def testBlockingDequeueManyFromClosedQueueWithElementsRemaining(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(10, 0, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue_many(q.size())
enqueue_op.run()
results = []
def dequeue():
results.extend(sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
# However, the last result was dequeued before the queue was closed,
# so nothing more is added to results.
results.extend(sess.run(cleanup_dequeue_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 3)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(results), 3)
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(10, 5, tf.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.test_session():
q = tf.RandomShuffleQueue(10, 4, tf.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.AbortedError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.test_session():
q = tf.RandomShuffleQueue(10, 5, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.AbortedError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(4, 0, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
for _ in elems:
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0], results)
# There wasn't room for 50.0 in the queue when the first element was
# dequeued.
self.assertNotEqual(50.0, results[0])
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(4, 0, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
for _ in elems:
time.sleep(0.01)
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0, 60.0], results)
# There wasn't room for 50.0 or 60.0 in the queue when the first
# element was dequeued.
self.assertNotEqual(50.0, results[0])
self.assertNotEqual(60.0, results[0])
# Similarly for 60.0 and the second element.
self.assertNotEqual(60.0, results[1])
def testBlockingEnqueueToClosedQueue(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(4, 0, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed since it will complete
# before the queue is closed.
sess.run(blocking_enqueue_op)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.AbortedError, "closed"):
sess.run(blocking_enqueue_op)
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
# The close_op should run after the first blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def blocking_close():
sess.run(close_op)
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
# Wait for the close op to block before unblocking the enqueue.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
# Dequeue to unblock the first blocking_enqueue_op, after which the
# close will complete.
results.append(dequeued_t.eval())
self.assertTrue(results[0] in elems)
thread2.join()
thread1.join()
def testBlockingEnqueueManyToClosedQueue(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(4, 0, tf.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
size_t = q.size()
enqueue_op.run()
self.assertEqual(size_t.eval(), 3)
def blocking_enqueue():
# This will block until the dequeue after the close.
sess.run(blocking_enqueue_op)
# At this point the close operation will become unblocked, so the
# next enqueue will fail.
with self.assertRaisesRegexp(tf.errors.AbortedError, "closed"):
sess.run(blocking_enqueue_op)
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
# First blocking_enqueue_op of blocking_enqueue has enqueued 1 of 2
# elements, and is blocked waiting for one more element to be dequeued.
self.assertEqual(size_t.eval(), 4)
def blocking_close():
sess.run(close_op)
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
# The close_op should run before the second blocking_enqueue_op
# has started.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
# Unblock the first blocking_enqueue_op in blocking_enqueue.
q.dequeue().eval()
thread2.join()
thread1.join()
def testSharedQueueSameSession(self):
with self.test_session():
q1 = tf.RandomShuffleQueue(
1, 0, tf.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
q2 = tf.RandomShuffleQueue(
1, 0, tf.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
self.assertEqual(q2.dequeue().eval(), 10.0)
self.assertEqual(q1_size_t.eval(), 0)
self.assertEqual(q2_size_t.eval(), 0)
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
self.assertEqual(q1.dequeue().eval(), 20.0)
self.assertEqual(q1_size_t.eval(), 0)
self.assertEqual(q2_size_t.eval(), 0)
def testIncompatibleSharedQueueErrors(self):
with self.test_session():
q_a_1 = tf.RandomShuffleQueue(
10, 5, tf.float32, shared_name="q_a")
q_a_2 = tf.RandomShuffleQueue(
15, 5, tf.float32, shared_name="q_a")
q_a_1.queue_ref.eval()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.eval()
q_b_1 = tf.RandomShuffleQueue(
10, 0, tf.float32, shared_name="q_b")
q_b_2 = tf.RandomShuffleQueue(
10, 5, tf.float32, shared_name="q_b")
q_b_1.queue_ref.eval()
with self.assertRaisesOpError("min_after_dequeue"):
q_b_2.queue_ref.eval()
q_c_1 = tf.RandomShuffleQueue(
10, 5, tf.float32, shared_name="q_c")
q_c_2 = tf.RandomShuffleQueue(
10, 5, tf.int32, shared_name="q_c")
q_c_1.queue_ref.eval()
with self.assertRaisesOpError("component types"):
q_c_2.queue_ref.eval()
q_d_1 = tf.RandomShuffleQueue(
10, 5, tf.float32, shared_name="q_d")
q_d_2 = tf.RandomShuffleQueue(
10, 5, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.eval()
q_e_1 = tf.RandomShuffleQueue(
10, 5, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = tf.RandomShuffleQueue(
10, 5, tf.float32, shared_name="q_e")
q_e_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.eval()
q_f_1 = tf.RandomShuffleQueue(
10, 5, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_f")
q_f_2 = tf.RandomShuffleQueue(
10, 5, tf.float32, shapes=[(1, 1, 2, 4)], shared_name="q_f")
q_f_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_f_2.queue_ref.eval()
q_g_1 = tf.RandomShuffleQueue(
10, 5, tf.float32, shared_name="q_g")
q_g_2 = tf.RandomShuffleQueue(
10, 5, (tf.float32, tf.int32), shared_name="q_g")
q_g_1.queue_ref.eval()
with self.assertRaisesOpError("component types"):
q_g_2.queue_ref.eval()
q_h_1 = tf.RandomShuffleQueue(
10, 5, tf.float32, seed=12, shared_name="q_h")
q_h_2 = tf.RandomShuffleQueue(
10, 5, tf.float32, seed=21, shared_name="q_h")
q_h_1.queue_ref.eval()
with self.assertRaisesOpError("random seeds"):
q_h_2.queue_ref.eval()
def testSelectQueue(self):
with self.test_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(
tf.RandomShuffleQueue(10, 0, tf.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = tf.RandomShuffleQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.test_session():
q1 = tf.RandomShuffleQueue(10, 0, tf.float32)
q2 = tf.RandomShuffleQueue(15, 0, tf.float32)
enq_q = tf.RandomShuffleQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("Index must be in the range"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("Dequeue operation was cancelled"):
sess.run(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("Dequeue operation was cancelled"):
sess.run(dequeue_many_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("Enqueue operation was cancelled"):
sess.run(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("Enqueue operation was cancelled"):
sess.run(enqueue_many_op)
def testResetOfBlockingOperation(self):
with self.test_session() as sess:
q_empty = tf.RandomShuffleQueue(
5, 0, tf.float32, ((),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
q_full = tf.RandomShuffleQueue(5, 0, tf.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(self._blockingDequeueMany, args=(sess,
dequeue_many_op)),
self.checkedThread(self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(self._blockingEnqueueMany, args=(sess,
enqueue_many_op))]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testDequeueManyInDifferentOrders(self):
with self.test_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = tf.RandomShuffleQueue(10, 5, tf.int32,
((),), seed=1729)
q2 = tf.RandomShuffleQueue(10, 5, tf.int32,
((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_many(5)
deq2 = q2.dequeue_many(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueInDifferentOrders(self):
with self.test_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = tf.RandomShuffleQueue(10, 5, tf.int32,
((),), seed=1729)
q2 = tf.RandomShuffleQueue(10, 5, tf.int32,
((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue()
deq2 = q2.dequeue()
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
for _ in range(5):
results[0].append(deq1.eval())
results[1].append(deq2.eval())
q1.close().run()
q2.close().run()
for _ in range(5):
results[2].append(deq1.eval())
results[3].append(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testBigEnqueueMany(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(
5, 0, tf.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
sess.run(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertItemsEqual(elem, results)
def testBigDequeueMany(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(2, 0, tf.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(sess.run(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
sess.run(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertItemsEqual(elem, results)
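# Hedged, illustrative sketch (not part of the original test suite): a minimal,
# single-threaded walk through the min_after_dequeue/close() behaviour that the
# blocking tests above exercise. It assumes the same TF1-style API already used
# in this file (tf.RandomShuffleQueue, tf.Session) and is never invoked here.
def _example_min_after_dequeue_and_close():
  q = tf.RandomShuffleQueue(10, 2, tf.float32, ((),))
  with tf.Session() as sess:
    sess.run(q.enqueue_many(([10.0, 20.0, 30.0, 40.0],)))
    first = sess.run(q.dequeue())   # leaves 3 elements, still >= min_after_dequeue
    second = sess.run(q.dequeue())  # leaves 2 elements, still >= min_after_dequeue
    # A third dequeue would block here, because it would leave only 1 element.
    sess.run(q.close())             # closing lifts the min_after_dequeue requirement
    remaining = [sess.run(q.dequeue()), sess.run(q.dequeue())]
    return [first, second] + remaining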
if __name__ == "__main__":
tf.test.main()
|
master.py
|
"""
This module contains all of the routines needed to set up a master server; this
involves preparing the three listeners and the workers needed by the master.
"""
import collections
import copy
import ctypes
import functools
import logging
import multiprocessing
import os
import re
import signal
import stat
import sys
import threading
import time
import salt.acl
import salt.auth
import salt.client
import salt.client.ssh.client
import salt.crypt
import salt.daemons.masterapi
import salt.defaults.exitcodes
import salt.engines
import salt.exceptions
import salt.ext.tornado.gen
import salt.key
import salt.log.setup
import salt.minion
import salt.payload
import salt.pillar
import salt.runner
import salt.serializers.msgpack
import salt.state
import salt.transport.server
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.crypt
import salt.utils.event
import salt.utils.files
import salt.utils.gitfs
import salt.utils.gzip_util
import salt.utils.jid
import salt.utils.job
import salt.utils.master
import salt.utils.minions
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.stringutils
import salt.utils.user
import salt.utils.verify
import salt.utils.zeromq
import salt.wheel
from salt.config import DEFAULT_INTERVAL
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.ext.tornado.stack_context import StackContext
from salt.transport import iter_transport_opts
from salt.utils.ctx import RequestContext
from salt.utils.debug import (
enable_sigusr1_handler,
enable_sigusr2_handler,
inspect_stack,
)
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
from salt.utils.zeromq import ZMQ_VERSION_INFO, zmq
try:
import resource
HAS_RESOURCE = True
except ImportError:
# resource is not available on windows
HAS_RESOURCE = False
log = logging.getLogger(__name__)
class SMaster:
"""
Create a simple salt-master; this will generate the top-level master
"""
secrets = (
{}
) # mapping of key -> {'secret': multiprocessing type, 'reload': FUNCTION}
def __init__(self, opts):
"""
Create a salt master server instance
:param dict opts: The salt options dictionary
"""
self.opts = opts
self.master_key = salt.crypt.MasterKeys(self.opts)
self.key = self.__prep_key()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
super().__setstate__(state)
self.master_key = state["master_key"]
self.key = state["key"]
SMaster.secrets = state["secrets"]
def __getstate__(self):
state = super().__getstate__()
state.update(
{
"key": self.key,
"master_key": self.master_key,
"secrets": SMaster.secrets,
}
)
return state
def __prep_key(self):
"""
A key needs to be placed in the filesystem with permissions 0400 so
clients are required to run as root.
"""
return salt.daemons.masterapi.access_keys(self.opts)
class Maintenance(salt.utils.process.SignalHandlingProcess):
"""
A generalized maintenance process which performs maintenance routines.
"""
def __init__(self, opts, **kwargs):
"""
Create a maintenance instance
:param dict opts: The salt options
"""
super().__init__(**kwargs)
self.opts = opts
# How often do we perform the maintenance tasks
self.loop_interval = int(self.opts["loop_interval"])
# Track key rotation intervals
self.rotate = int(time.time())
# A serializer for general maint operations
def _post_fork_init(self):
"""
Some things need to be init'd after the fork has completed
The easiest example is that one of these module types creates a thread
in the parent process, then once the fork happens you'll start getting
errors like "WARNING: Mixing fork() and threads detected; memory leaked."
"""
# Load Runners
ropts = dict(self.opts)
ropts["quiet"] = True
runner_client = salt.runner.RunnerClient(ropts)
# Load Returners
self.returners = salt.loader.returners(self.opts, {})
# Init Scheduler
self.schedule = salt.utils.schedule.Schedule(
self.opts, runner_client.functions_dict(), returners=self.returners
)
self.ckminions = salt.utils.minions.CkMinions(self.opts)
# Make Event bus for firing
self.event = salt.utils.event.get_master_event(
self.opts, self.opts["sock_dir"], listen=False
)
# Init any values needed by the git ext pillar
self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts)
if self.opts["maintenance_niceness"] and not salt.utils.platform.is_windows():
log.info(
"setting Maintenance niceness to %d", self.opts["maintenance_niceness"]
)
os.nice(self.opts["maintenance_niceness"])
self.presence_events = False
if self.opts.get("presence_events", False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != "tcp":
tcp_only = False
if not tcp_only:
# For a TCP only transport, the presence events will be
# handled in the transport code.
self.presence_events = True
def run(self):
"""
This is the general passive maintenance process controller for the Salt
master.
This is where any data that needs to be cleanly maintained from the
master is maintained.
"""
salt.utils.process.appendproctitle(self.__class__.__name__)
# init things that need to be done after the process is forked
self._post_fork_init()
# Make Start Times
last = int(time.time())
# update git_pillar on first loop
last_git_pillar_update = 0
git_pillar_update_interval = self.opts.get("git_pillar_update_interval", 0)
old_present = set()
while True:
now = int(time.time())
if (now - last) >= self.loop_interval:
salt.daemons.masterapi.clean_old_jobs(self.opts)
salt.daemons.masterapi.clean_expired_tokens(self.opts)
salt.daemons.masterapi.clean_pub_auth(self.opts)
if (now - last_git_pillar_update) >= git_pillar_update_interval:
last_git_pillar_update = now
self.handle_git_pillar()
self.handle_schedule()
self.handle_key_cache()
self.handle_presence(old_present)
self.handle_key_rotate(now)
salt.utils.verify.check_max_open_files(self.opts)
last = now
time.sleep(self.loop_interval)
def handle_key_cache(self):
"""
Evaluate accepted keys and create a msgpack file
which contains a list of the accepted minion key names
"""
if self.opts["key_cache"] == "sched":
keys = []
# TODO DRY from CKMinions
if self.opts["transport"] in ("zeromq", "tcp"):
acc = "minions"
else:
acc = "accepted"
for fn_ in os.listdir(os.path.join(self.opts["pki_dir"], acc)):
if not fn_.startswith(".") and os.path.isfile(
os.path.join(self.opts["pki_dir"], acc, fn_)
):
keys.append(fn_)
log.debug("Writing master key cache")
# Write a temporary file securely
with salt.utils.atomicfile.atomic_open(
os.path.join(self.opts["pki_dir"], acc, ".key_cache"), mode="wb"
) as cache_file:
salt.payload.dump(keys, cache_file)
def handle_key_rotate(self, now):
"""
Rotate the AES key when a rotation is due
"""
to_rotate = False
dfn = os.path.join(self.opts["cachedir"], ".dfn")
try:
stats = os.stat(dfn)
# Basic Windows permissions don't distinguish between
# user/group/all. Check for read-only state instead.
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
to_rotate = True
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
elif stats.st_mode == 0o100400:
to_rotate = True
else:
log.error("Found dropfile with incorrect permissions, ignoring...")
os.remove(dfn)
except os.error:
pass
if self.opts.get("publish_session"):
if now - self.rotate >= self.opts["publish_session"]:
to_rotate = True
if to_rotate:
log.info("Rotating master AES key")
for secret_key, secret_map in SMaster.secrets.items():
# Taking the lock should be unnecessary, since nothing else should be modifying the secret
with secret_map["secret"].get_lock():
secret_map["secret"].value = salt.utils.stringutils.to_bytes(
secret_map["reload"]()
)
self.event.fire_event(
{"rotate_{}_key".format(secret_key): True}, tag="key"
)
self.rotate = now
if self.opts.get("ping_on_rotate"):
# Ping all minions to get them to pick up the new key
log.debug("Pinging all connected minions due to key rotation")
salt.utils.master.ping_all_connected_minions(self.opts)
def handle_git_pillar(self):
"""
Update git pillar
"""
try:
for pillar in self.git_pillar:
pillar.fetch_remotes()
except Exception as exc: # pylint: disable=broad-except
log.error("Exception caught while updating git_pillar", exc_info=True)
def handle_schedule(self):
"""
Evaluate the scheduler
"""
try:
self.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if self.schedule.loop_interval < self.loop_interval:
self.loop_interval = self.schedule.loop_interval
except Exception as exc: # pylint: disable=broad-except
log.error("Exception %s occurred in scheduled job", exc)
self.schedule.cleanup_subprocesses()
def handle_presence(self, old_present):
"""
Fire presence events if enabled
"""
# On the first run it may need more time for the EventPublisher
# to come up and be ready. Set the timeout to account for this.
if self.presence_events and self.event.connect_pull(timeout=3):
present = self.ckminions.connected_ids()
new = present.difference(old_present)
lost = old_present.difference(present)
if new or lost:
# Fire new minions present event
data = {"new": list(new), "lost": list(lost)}
self.event.fire_event(data, tagify("change", "presence"))
data = {"present": list(present)}
self.event.fire_event(data, tagify("present", "presence"))
old_present.clear()
old_present.update(present)
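# Hedged illustration (not from the Salt source): the new/lost calculation in
# handle_presence() above is plain set arithmetic, e.g. with hypothetical ids:
#
#   old_present = {"web1", "web2"}
#   present = {"web2", "db1"}
#   present.difference(old_present)  # {"db1"}  -> reported as "new"
#   old_present.difference(present)  # {"web1"} -> reported as "lost"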
class FileserverUpdate(salt.utils.process.SignalHandlingProcess):
"""
A process from which to update any dynamic fileserver backends
"""
def __init__(self, opts, **kwargs):
super().__init__(**kwargs)
self.opts = opts
self.update_threads = {}
# Avoid circular import
import salt.fileserver
self.fileserver = salt.fileserver.Fileserver(self.opts)
self.fill_buckets()
def fill_buckets(self):
"""
Get the configured backends and the intervals for any backend which
supports them, and set up the update "buckets". There will be one
bucket for each thing being updated at a given interval.
"""
update_intervals = self.fileserver.update_intervals()
self.buckets = {}
for backend in self.fileserver.backends():
fstr = "{}.update".format(backend)
try:
update_func = self.fileserver.servers[fstr]
except KeyError:
log.debug("No update function for the %s filserver backend", backend)
continue
if backend in update_intervals:
# Variable intervals are supported for this backend
for id_, interval in update_intervals[backend].items():
if not interval:
# Don't allow an interval of 0
interval = DEFAULT_INTERVAL
log.debug(
"An update_interval of 0 is not supported, "
"falling back to %s",
interval,
)
i_ptr = self.buckets.setdefault(interval, OrderedDict())
# Backend doesn't technically need to be present in the
# key, all we *really* need is the function reference, but
# having it there makes it easier to provide meaningful
# debug logging in the update threads.
i_ptr.setdefault((backend, update_func), []).append(id_)
else:
# Variable intervals are not supported for this backend, so
# fall back to the global interval for that fileserver. Since
# this backend doesn't support variable updates, we have
# nothing to pass to the backend's update func, so we'll just
# set the value to None.
try:
interval_key = "{}_update_interval".format(backend)
interval = self.opts[interval_key]
except KeyError:
interval = DEFAULT_INTERVAL
log.warning(
"%s key missing from configuration. Falling back to "
"default interval of %d seconds",
interval_key,
interval,
)
self.buckets.setdefault(interval, OrderedDict())[
(backend, update_func)
] = None
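# Hedged illustration (not from the Salt source): a rough sketch of the shape
# fill_buckets() produces, using hypothetical backend names and intervals. Keys
# are update intervals; values map (backend_name, update_func) either to a list
# of per-remote ids or to None when the backend has no per-remote intervals:
#
#   self.buckets = {
#       60: OrderedDict({("gitfs", <gitfs.update>): ["remote_a"]}),
#       120: OrderedDict({("gitfs", <gitfs.update>): ["remote_b"]}),
#       3600: OrderedDict({("roots", <roots.update>): None}),
#   }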
@staticmethod
def _do_update(backends):
"""
Perform fileserver updates
"""
for backend, update_args in backends.items():
backend_name, update_func = backend
try:
if update_args:
log.debug(
"Updating %s fileserver cache for the following targets: %s",
backend_name,
update_args,
)
args = (update_args,)
else:
log.debug("Updating %s fileserver cache", backend_name)
args = ()
update_func(*args)
except Exception as exc: # pylint: disable=broad-except
log.exception(
"Uncaught exception while updating %s fileserver cache",
backend_name,
)
@classmethod
def update(cls, interval, backends, timeout=300):
"""
Threading target which handles all updates for a given wait interval
"""
start = time.time()
condition = threading.Condition()
while time.time() - start < timeout:
log.debug(
"Performing fileserver updates for items with an update interval of %d",
interval,
)
cls._do_update(backends)
log.debug(
"Completed fileserver updates for items with an update "
"interval of %d, waiting %d seconds",
interval,
interval,
)
with condition:
condition.wait(interval)
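# Hedged note (not from the Salt source): nothing ever notifies `condition`, so
# condition.wait(interval) acts as an interruptible sleep between update passes:
#
#   cond = threading.Condition()
#   with cond:
#       cond.wait(5)  # returns after ~5 seconds unless notified earlier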
def run(self):
"""
Start the update threads
"""
salt.utils.process.appendproctitle(self.__class__.__name__)
if (
self.opts["fileserver_update_niceness"]
and not salt.utils.platform.is_windows()
):
log.info(
"setting FileServerUpdate niceness to %d",
self.opts["fileserver_update_niceness"],
)
os.nice(self.opts["fileserver_update_niceness"])
# Clean out the fileserver backend cache
salt.daemons.masterapi.clean_fsbackend(self.opts)
for interval in self.buckets:
self.update_threads[interval] = threading.Thread(
target=self.update,
args=(interval, self.buckets[interval]),
)
self.update_threads[interval].start()
while self.update_threads:
for name, thread in list(self.update_threads.items()):
thread.join(1)
if not thread.is_alive():
self.update_threads.pop(name)
class Master(SMaster):
"""
The salt master server
"""
def __init__(self, opts):
"""
Create a salt master server instance
:param dict opts: The salt options dictionary
"""
if zmq and ZMQ_VERSION_INFO < (3, 2):
log.warning(
"You have a version of ZMQ less than ZMQ 3.2! There are "
"known connection keep-alive issues with ZMQ < 3.2 which "
"may result in loss of contact with minions. Please "
"upgrade your ZMQ!"
)
SMaster.__init__(self, opts)
def __set_max_open_files(self):
if not HAS_RESOURCE:
return
# Let's check what our max open files (ulimit -n) setting currently is
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
if mof_h == resource.RLIM_INFINITY:
# Unclear what to do with infinity... macOS reports RLIM_INFINITY as
# the hard limit, but raising to anything above the soft limit fails...
mof_h = mof_s
log.info(
"Current values for max open files soft/hard setting: %s/%s", mof_s, mof_h
)
# Let's grab, from the configuration file, the value to raise max open
# files to
mof_c = self.opts["max_open_files"]
if mof_c > mof_h:
# The configured value is higher than what's allowed
log.info(
"The value for the 'max_open_files' setting, %s, is higher "
"than the highest value the user running salt is allowed to "
"set (%s). Defaulting to %s.",
mof_c,
mof_h,
mof_h,
)
mof_c = mof_h
if mof_s < mof_c:
# There's room to raise the value. Raise it!
log.info("Raising max open files value to %s", mof_c)
resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h))
try:
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
log.info(
"New values for max open files soft/hard values: %s/%s",
mof_s,
mof_h,
)
except ValueError:
# https://github.com/saltstack/salt/issues/1991#issuecomment-13025595
# A user under macOS reported that our 100000 default value is
# still too high.
log.critical(
"Failed to raise max open files setting to %s. If this "
"value is too low, the salt-master will most likely fail "
"to run properly.",
mof_c,
)
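    # Hedged, standalone sketch (not part of Salt) of the same RLIMIT_NOFILE
    # pattern used in __set_max_open_files above. It is illustrative only and is
    # never called by the master; the 100000 default is a hypothetical value.
    @staticmethod
    def _example_raise_nofile_limit(wanted=100000):
        if not HAS_RESOURCE:
            return None
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        # Never ask for more than the hard limit allows.
        target = wanted if hard == resource.RLIM_INFINITY else min(wanted, hard)
        if soft < target:
            resource.setrlimit(resource.RLIMIT_NOFILE, (target, hard))
        return resource.getrlimit(resource.RLIMIT_NOFILE)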
def _pre_flight(self):
"""
Run pre flight checks. If anything in this method fails then the master
should not start up.
"""
errors = []
critical_errors = []
try:
os.chdir("/")
except OSError as err:
errors.append("Cannot change to root directory ({})".format(err))
if self.opts.get("fileserver_verify_config", True):
# Avoid circular import
import salt.fileserver
fileserver = salt.fileserver.Fileserver(self.opts)
if not fileserver.servers:
errors.append(
"Failed to load fileserver backends, the configured backends "
"are: {}".format(", ".join(self.opts["fileserver_backend"]))
)
else:
# Run init() for all backends which support the function, to
# double-check configuration
try:
fileserver.init()
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append("{}".format(exc))
if not self.opts["fileserver_backend"]:
errors.append("No fileserver backends are configured")
# Check to see if we need to create a pillar cache dir
if self.opts["pillar_cache"] and not os.path.isdir(
os.path.join(self.opts["cachedir"], "pillar_cache")
):
try:
with salt.utils.files.set_umask(0o077):
os.mkdir(os.path.join(self.opts["cachedir"], "pillar_cache"))
except OSError:
pass
if self.opts.get("git_pillar_verify_config", True):
try:
git_pillars = [
x
for x in self.opts.get("ext_pillar", [])
if "git" in x and not isinstance(x["git"], str)
]
except TypeError:
git_pillars = []
critical_errors.append(
"Invalid ext_pillar configuration. It is likely that the "
"external pillar type was not specified for one or more "
"external pillars."
)
if git_pillars:
try:
new_opts = copy.deepcopy(self.opts)
import salt.pillar.git_pillar
for repo in git_pillars:
new_opts["ext_pillar"] = [repo]
try:
git_pillar = salt.utils.gitfs.GitPillar(
new_opts,
repo["git"],
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
global_only=salt.pillar.git_pillar.GLOBAL_ONLY,
)
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append(exc.strerror)
finally:
del new_opts
if errors or critical_errors:
for error in errors:
log.error(error)
for error in critical_errors:
log.critical(error)
log.critical("Master failed pre flight checks, exiting\n")
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def start(self):
"""
Turn on the master server components
"""
self._pre_flight()
log.info("salt-master is starting as user '%s'", salt.utils.user.get_user())
enable_sigusr1_handler()
enable_sigusr2_handler()
self.__set_max_open_files()
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
# Setup the secrets here because the PubServerChannel may need
# them as well.
SMaster.secrets["aes"] = {
"secret": multiprocessing.Array(
ctypes.c_char,
salt.utils.stringutils.to_bytes(
salt.crypt.Crypticle.generate_key_string()
),
),
"reload": salt.crypt.Crypticle.generate_key_string,
}
log.info("Creating master process manager")
# Since the children have their own ProcessManager, we should allow more time for kill.
self.process_manager = salt.utils.process.ProcessManager(wait_for_kill=5)
pub_channels = []
log.info("Creating master publisher process")
log_queue = salt.log.setup.get_multiprocessing_logging_queue()
for _, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.pre_fork(self.process_manager, kwargs={"log_queue": log_queue})
pub_channels.append(chan)
log.info("Creating master event publisher process")
self.process_manager.add_process(
salt.utils.event.EventPublisher, args=(self.opts,)
)
if self.opts.get("reactor"):
if isinstance(self.opts["engines"], list):
rine = False
for item in self.opts["engines"]:
if "reactor" in item:
rine = True
break
if not rine:
self.opts["engines"].append({"reactor": {}})
else:
if "reactor" not in self.opts["engines"]:
log.info("Enabling the reactor engine")
self.opts["engines"]["reactor"] = {}
salt.engines.start_engines(self.opts, self.process_manager)
# must be after channels
log.info("Creating master maintenance process")
self.process_manager.add_process(Maintenance, args=(self.opts,))
if self.opts.get("event_return"):
log.info("Creating master event return process")
self.process_manager.add_process(
salt.utils.event.EventReturn, args=(self.opts,)
)
ext_procs = self.opts.get("ext_processes", [])
for proc in ext_procs:
log.info("Creating ext_processes process: %s", proc)
try:
mod = ".".join(proc.split(".")[:-1])
cls = proc.split(".")[-1]
# A level of -1 is not valid on Python 3; 0 performs an absolute import.
_tmp = __import__(mod, globals(), locals(), [cls], 0)
cls = _tmp.__getattribute__(cls)
self.process_manager.add_process(cls, args=(self.opts,))
except Exception: # pylint: disable=broad-except
log.error("Error creating ext_processes process: %s", proc)
# TODO: remove, or at least push into the transport stuff (pre-fork probably makes sense there)
if self.opts["con_cache"]:
log.info("Creating master concache process")
self.process_manager.add_process(
salt.utils.master.ConnectedCache, args=(self.opts,)
)
# workaround for issue #16315, race condition
log.debug("Sleeping for two seconds to let concache rest")
time.sleep(2)
log.info("Creating master request server process")
kwargs = {}
if salt.utils.platform.is_windows():
kwargs["log_queue"] = log_queue
kwargs[
"log_queue_level"
] = salt.log.setup.get_multiprocessing_logging_level()
kwargs["secrets"] = SMaster.secrets
self.process_manager.add_process(
ReqServer,
args=(self.opts, self.key, self.master_key),
kwargs=kwargs,
name="ReqServer",
)
self.process_manager.add_process(FileserverUpdate, args=(self.opts,))
# Fire up SSDP discovery publisher
if self.opts["discovery"]:
if salt.utils.ssdp.SSDPDiscoveryServer.is_available():
self.process_manager.add_process(
salt.utils.ssdp.SSDPDiscoveryServer(
port=self.opts["discovery"]["port"],
listen_ip=self.opts["interface"],
answer={
"mapping": self.opts["discovery"].get("mapping", {})
},
).run
)
else:
log.error("Unable to load SSDP: asynchronous IO is not available.")
if sys.version_info.major == 2:
log.error(
'You are using Python 2, please install "trollius" module'
" to enable SSDP discovery."
)
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)
self.process_manager.run()
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
time.sleep(1)
sys.exit(0)
class ReqServer(salt.utils.process.SignalHandlingProcess):
"""
Starts up the master request server, minions send results to this
interface.
"""
def __init__(self, opts, key, mkey, secrets=None, **kwargs):
"""
Create a request server
:param dict opts: The salt options dictionary
:param dict key: The user starting the server and the AES key
:param dict mkey: The user starting the server and the RSA key
:rtype: ReqServer
:returns: Request server
"""
super().__init__(**kwargs)
self.opts = opts
self.master_key = mkey
# Prepare the AES key
self.key = key
self.secrets = secrets
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self.destroy(signum)
super()._handle_signals(signum, sigframe)
def __bind(self):
"""
Binds the reply server
"""
if self.log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
if self.log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level)
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
if self.secrets is not None:
SMaster.secrets = self.secrets
dfn = os.path.join(self.opts["cachedir"], ".dfn")
if os.path.isfile(dfn):
try:
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
os.remove(dfn)
except os.error:
pass
# Wait for kill should be less than the parent ProcessManager's.
self.process_manager = salt.utils.process.ProcessManager(
name="ReqServer_ProcessManager", wait_for_kill=1
)
req_channels = []
tcp_only = True
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.ReqServerChannel.factory(opts)
chan.pre_fork(self.process_manager)
req_channels.append(chan)
if transport != "tcp":
tcp_only = False
kwargs = {}
if salt.utils.platform.is_windows():
kwargs["log_queue"] = self.log_queue
kwargs["log_queue_level"] = self.log_queue_level
if self.opts["req_server_niceness"] and not salt.utils.platform.is_windows():
log.info(
"setting ReqServer_ProcessManager niceness to %d",
self.opts["req_server_niceness"],
)
os.nice(self.opts["req_server_niceness"])
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
for ind in range(int(self.opts["worker_threads"])):
name = "MWorker-{}".format(ind)
self.process_manager.add_process(
MWorker,
args=(self.opts, self.master_key, self.key, req_channels, name),
kwargs=kwargs,
name=name,
)
self.process_manager.run()
def run(self):
"""
Start up the ReqServer
"""
self.__bind()
def destroy(self, signum=signal.SIGTERM):
if hasattr(self, "process_manager"):
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
self.process_manager.kill_children()
# pylint: disable=W1701
def __del__(self):
self.destroy()
# pylint: enable=W1701
class MWorker(salt.utils.process.SignalHandlingProcess):
"""
The worker multiprocess instance to manage the backend operations for the
salt master.
"""
def __init__(self, opts, mkey, key, req_channels, name, **kwargs):
"""
Create a salt master worker process
:param dict opts: The salt options
:param dict mkey: The user running the salt master and the AES key
:param dict key: The user running the salt master and the RSA key
:rtype: MWorker
:return: Master worker
"""
kwargs["name"] = name
self.name = name
super().__init__(**kwargs)
self.opts = opts
self.req_channels = req_channels
self.mkey = mkey
self.key = key
self.k_mtime = 0
self.stats = collections.defaultdict(lambda: {"mean": 0, "runs": 0})
self.stat_clock = time.time()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
super().__setstate__(state)
self.k_mtime = state["k_mtime"]
SMaster.secrets = state["secrets"]
def __getstate__(self):
state = super().__getstate__()
state.update({"k_mtime": self.k_mtime, "secrets": SMaster.secrets})
return state
def _handle_signals(self, signum, sigframe):
for channel in getattr(self, "req_channels", ()):
channel.close()
self.clear_funcs.destroy()
super()._handle_signals(signum, sigframe)
def __bind(self):
"""
Bind to the local port
"""
self.io_loop = salt.ext.tornado.ioloop.IOLoop()
self.io_loop.make_current()
for req_channel in self.req_channels:
req_channel.post_fork(
self._handle_payload, io_loop=self.io_loop
) # TODO: cleaner? Maybe lazily?
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
# Tornado knows what to do
pass
@salt.ext.tornado.gen.coroutine
def _handle_payload(self, payload):
"""
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
Example cleartext payload generated for 'salt myminion test.ping':
{'enc': 'clear',
'load': {'arg': [],
'cmd': 'publish',
'fun': 'test.ping',
'jid': '',
'key': 'alsdkjfa.,maljf-==adflkjadflkjalkjadfadflkajdflkj',
'kwargs': {'show_jid': False, 'show_timeout': False},
'ret': '',
'tgt': 'myminion',
'tgt_type': 'glob',
'user': 'root'}}
:param dict payload: The payload route to the appropriate handler
"""
key = payload["enc"]
load = payload["load"]
ret = {"aes": self._handle_aes, "clear": self._handle_clear}[key](load)
raise salt.ext.tornado.gen.Return(ret)
def _post_stats(self, start, cmd):
"""
Calculate the master stats and fire events with stat info
"""
end = time.time()
duration = end - start
self.stats[cmd]["mean"] = (
self.stats[cmd]["mean"] * (self.stats[cmd]["runs"] - 1) + duration
) / self.stats[cmd]["runs"]
if end - self.stat_clock > self.opts["master_stats_event_iter"]:
# Fire the event with the stats and wipe the tracker
self.aes_funcs.event.fire_event(
{
"time": end - self.stat_clock,
"worker": self.name,
"stats": self.stats,
},
tagify(self.name, "stats"),
)
self.stats = collections.defaultdict(lambda: {"mean": 0, "runs": 0})
self.stat_clock = end
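# Hedged worked example (not from the Salt source) of the incremental mean used
# in _post_stats() above, where `runs` already counts the current sample:
#
#   runs=1, old mean=0.00, duration=0.30 -> (0.00 * 0 + 0.30) / 1 = 0.30
#   runs=2, old mean=0.30, duration=0.10 -> (0.30 * 1 + 0.10) / 2 = 0.20
#   runs=3, old mean=0.20, duration=0.50 -> (0.20 * 2 + 0.50) / 3 = 0.30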
def _handle_clear(self, load):
"""
Process a cleartext command
:param dict load: Cleartext payload
:return: The result of passing the load to a function in ClearFuncs corresponding to
the command specified in the load's 'cmd' key.
"""
log.trace("Clear payload received with command %s", load["cmd"])
cmd = load["cmd"]
method = self.clear_funcs.get_method(cmd)
if not method:
return {}, {"fun": "send_clear"}
if self.opts["master_stats"]:
start = time.time()
self.stats[cmd]["runs"] += 1
ret = method(load), {"fun": "send_clear"}
if self.opts["master_stats"]:
self._post_stats(start, cmd)
return ret
def _handle_aes(self, data):
"""
Process a command sent via an AES key
:param dict data: The payload sent over the AES-encrypted channel
:return: The result of passing the load to a function in AESFuncs corresponding to
the command specified in the load's 'cmd' key.
"""
if "cmd" not in data:
log.error("Received malformed command %s", data)
return {}
cmd = data["cmd"]
log.trace("AES payload received with command %s", data["cmd"])
method = self.aes_funcs.get_method(cmd)
if not method:
return {}, {"fun": "send"}
if self.opts["master_stats"]:
start = time.time()
self.stats[cmd]["runs"] += 1
def run_func(data):
return self.aes_funcs.run_func(data["cmd"], data)
with StackContext(
functools.partial(RequestContext, {"data": data, "opts": self.opts})
):
ret = run_func(data)
if self.opts["master_stats"]:
self._post_stats(start, cmd)
return ret
def run(self):
"""
Start a Master Worker
"""
salt.utils.process.appendproctitle(self.name)
# if we inherit req_server level without our own, reset it
if not salt.utils.platform.is_windows():
enforce_mworker_niceness = True
if self.opts["req_server_niceness"]:
if salt.utils.user.get_user() == "root":
log.info(
"%s decrementing inherited ReqServer niceness to 0", self.name
)
log.info(os.nice(0))  # os.nice() needs an increment; 0 just reports the current niceness
os.nice(-1 * self.opts["req_server_niceness"])
else:
log.error(
"%s unable to decrement niceness for MWorker, not running as"
" root",
self.name,
)
enforce_mworker_niceness = False
# else set what we're explicitly asked for
if enforce_mworker_niceness and self.opts["mworker_niceness"]:
log.info(
"setting %s niceness to %i",
self.name,
self.opts["mworker_niceness"],
)
os.nice(self.opts["mworker_niceness"])
self.clear_funcs = ClearFuncs(
self.opts,
self.key,
)
self.aes_funcs = AESFuncs(self.opts)
salt.utils.crypt.reinit_crypto()
self.__bind()
class TransportMethods:
"""
Expose methods to the transport layer, methods with their names found in
the class attribute 'expose_methods' will be exposed to the transport layer
via 'get_method'.
"""
expose_methods = ()
def get_method(self, name):
"""
Get a method which should be exposed to the transport layer
"""
if name in self.expose_methods:
try:
return getattr(self, name)
except AttributeError:
log.error("Requested method not exposed: %s", name)
else:
log.error("Requested method not exposed: %s", name)
# TODO: rename? No longer tied to "AES", just "encrypted" or "private" requests
class AESFuncs(TransportMethods):
"""
Set up functions that are available when the load is encrypted with AES
"""
expose_methods = (
"verify_minion",
"_master_tops",
"_master_opts",
"_mine_get",
"_mine",
"_mine_delete",
"_mine_flush",
"_file_recv",
"_pillar",
"_minion_event",
"_handle_minion_event",
"_return",
"_syndic_return",
"minion_runner",
"pub_ret",
"minion_pub",
"minion_publish",
"revoke_auth",
"_serve_file",
"_file_find",
"_file_hash",
"_file_hash_and_stat",
"_file_list",
"_file_list_emptydirs",
"_dir_list",
"_symlink_list",
"_file_envs",
)
def __init__(self, opts):
"""
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
"""
self.opts = opts
self.event = salt.utils.event.get_master_event(
self.opts, self.opts["sock_dir"], listen=False
)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts["conf_file"])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts, states=False, rend=False, ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
"""
Set the local file objects from the file server interface
"""
# Avoid circular import
import salt.fileserver
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
self._file_hash = self.fs_.file_hash
self._file_hash_and_stat = self.fs_.file_hash_and_stat
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
"""
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
"""
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts["pki_dir"], "minions", id_)
try:
pub = salt.crypt.get_rsa_pub_key(pub_path)
except OSError:
log.warning(
"Salt minion claiming to be %s attempted to communicate with "
"master, but key could not be read and verification was denied.",
id_,
)
return False
except (ValueError, IndexError, TypeError) as err:
log.error('Unable to load public key "%s": %s', pub_path, err)
# Without a readable public key the token cannot be verified.
return False
try:
if salt.crypt.public_decrypt(pub, token) == b"salt":
return True
except ValueError as err:
log.error("Unable to decrypt token: %s", err)
log.error(
"Salt minion claiming to be %s has attempted to communicate with "
"the master and could not be verified",
id_,
)
return False
def verify_minion(self, id_, token):
"""
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
"""
return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
"""
Verify that the passed information authorized a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
"""
# Verify that the load is valid
if "peer" not in self.opts:
return False
if not isinstance(self.opts["peer"], dict):
return False
if any(
key not in clear_load for key in ("fun", "arg", "tgt", "ret", "tok", "id")
):
return False
# If the command will make a recursive publish don't run
if clear_load["fun"].startswith("publish."):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load["id"], clear_load["tok"]):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
"Minion id %s is not who it says it is and is attempting "
"to issue a peer command",
clear_load["id"],
)
return False
clear_load.pop("tok")
perms = []
for match in self.opts["peer"]:
if re.match(match, clear_load["id"]):
# This is the list of funcs/modules!
if isinstance(self.opts["peer"][match], list):
perms.extend(self.opts["peer"][match])
if "," in clear_load["fun"]:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load["fun"] = clear_load["fun"].split(",")
arg_ = []
for arg in clear_load["arg"]:
arg_.append(arg.split())
clear_load["arg"] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load["fun"],
clear_load["arg"],
clear_load["tgt"],
clear_load.get("tgt_type", "glob"),
publish_validate=True,
)
def __verify_load(self, load, verify_keys):
"""
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
:rtype: dict or bool
:return: The original load (except for the token) if the load can be
verified. False if the load is invalid.
"""
if any(key not in load for key in verify_keys):
return False
if "tok" not in load:
log.error(
"Received incomplete call from %s for '%s', missing '%s'",
load["id"],
inspect_stack()["co_name"],
"tok",
)
return False
if not self.__verify_minion(load["id"], load["tok"]):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning("Minion id %s is not who it says it is!", load["id"])
return False
if "tok" in load:
load.pop("tok")
return load
def _master_tops(self, load):
"""
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
"""
load = self.__verify_load(load, ("id", "tok"))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
def _master_opts(self, load):
"""
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
"""
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts["file_roots"] = file_roots
mopts["top_file_merging_strategy"] = self.opts["top_file_merging_strategy"]
mopts["env_order"] = self.opts["env_order"]
mopts["default_top"] = self.opts["default_top"]
if load.get("env_only"):
return mopts
mopts["renderer"] = self.opts["renderer"]
mopts["failhard"] = self.opts["failhard"]
mopts["state_top"] = self.opts["state_top"]
mopts["state_top_saltenv"] = self.opts["state_top_saltenv"]
mopts["nodegroups"] = self.opts["nodegroups"]
mopts["state_auto_order"] = self.opts["state_auto_order"]
mopts["state_events"] = self.opts["state_events"]
mopts["state_aggregate"] = self.opts["state_aggregate"]
mopts["jinja_env"] = self.opts["jinja_env"]
mopts["jinja_sls_env"] = self.opts["jinja_sls_env"]
mopts["jinja_lstrip_blocks"] = self.opts["jinja_lstrip_blocks"]
mopts["jinja_trim_blocks"] = self.opts["jinja_trim_blocks"]
return mopts
def _mine_get(self, load):
"""
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
"""
load = self.__verify_load(load, ("id", "tgt", "fun", "tok"))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
"""
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
"""
load = self.__verify_load(load, ("id", "data", "tok"))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
"""
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
"""
load = self.__verify_load(load, ("id", "fun", "tok"))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
"""
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
"""
load = self.__verify_load(load, ("id", "tok"))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
"""
Allows minions to send files to the master; files are sent to the
master file cache
"""
if any(key not in load for key in ("id", "path", "loc")):
return False
if not isinstance(load["path"], list):
return False
if not self.opts["file_recv"]:
return False
if not salt.utils.verify.valid_id(self.opts, load["id"]):
return False
file_recv_max_size = 1024 * 1024 * self.opts["file_recv_max_size"]
if "loc" in load and load["loc"] < 0:
log.error("Invalid file pointer: load[loc] < 0")
return False
if len(load["data"]) + load.get("loc", 0) > file_recv_max_size:
log.error(
"file_recv_max_size limit of %d MB exceeded! %s will be "
"truncated. To successfully push this file, adjust "
"file_recv_max_size to an integer (in MB) large enough to "
"accommodate it.",
file_recv_max_size,
load["path"],
)
return False
if "tok" not in load:
log.error(
"Received incomplete call from %s for '%s', missing '%s'",
load["id"],
inspect_stack()["co_name"],
"tok",
)
return False
if not self.__verify_minion(load["id"], load["tok"]):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning("Minion id %s is not who it says it is!", load["id"])
return {}
load.pop("tok")
# Join path
sep_path = os.sep.join(load["path"])
# Path normalization should have been done by the sending
# minion but we can't guarantee it. Re-do it here.
normpath = os.path.normpath(sep_path)
# Ensure that this safety check is done after the path
# has been normalized.
if os.path.isabs(normpath) or "../" in load["path"]:
# Can overwrite master files!!
return False
cpath = os.path.join(
self.opts["cachedir"], "minions", load["id"], "files", normpath
)
# One last safety check here
if not os.path.normpath(cpath).startswith(self.opts["cachedir"]):
log.warning(
"Attempt to write received file outside of master cache "
"directory! Requested path: %s. Access denied.",
cpath,
)
return False
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load["loc"] != 0:
mode = "ab"
else:
mode = "wb"
with salt.utils.files.fopen(cpath, mode) as fp_:
if load["loc"]:
fp_.seek(load["loc"])
fp_.write(salt.utils.stringutils.to_bytes(load["data"]))
return True
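# Hedged illustration (not from the Salt source): the final containment check in
# _file_recv() above, shown in isolation with a hypothetical cachedir:
#
#   cachedir = "/var/cache/salt/master"
#   cpath = os.path.join(cachedir, "minions", "web1", "files",
#                        "../" * 7 + "etc/passwd")
#   os.path.normpath(cpath)                       # resolves to "/etc/passwd"
#   os.path.normpath(cpath).startswith(cachedir)  # False -> write is refused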
def _pillar(self, load):
"""
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
"""
if any(key not in load for key in ("id", "grains")):
return False
if not salt.utils.verify.valid_id(self.opts, load["id"]):
return False
load["grains"]["id"] = load["id"]
pillar = salt.pillar.get_pillar(
self.opts,
load["grains"],
load["id"],
load.get("saltenv", load.get("env")),
ext=load.get("ext"),
pillar_override=load.get("pillar_override", {}),
pillarenv=load.get("pillarenv"),
extra_minion_data=load.get("extra_minion_data"),
clean_cache=load.get("clean_cache"),
)
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get("minion_data_cache", False):
self.masterapi.cache.store(
"minions/{}".format(load["id"]),
"data",
{"grains": load["grains"], "pillar": data},
)
if self.opts.get("minion_data_cache_events") is True:
self.event.fire_event(
{"Minion data cache refresh": load["id"]},
tagify(load["id"], "refresh", "minion"),
)
return data
def _minion_event(self, load):
"""
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
"""
load = self.__verify_load(load, ("id", "tok"))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
"""
Act on specific events from minions
"""
id_ = load["id"]
if load.get("tag", "") == "_salt_error":
log.error(
"Received minion error from [%s]: %s", id_, load["data"]["message"]
)
for event in load.get("events", []):
event_data = event.get("data", {})
if "minions" in event_data:
jid = event_data.get("jid")
if not jid:
continue
minions = event_data["minions"]
try:
salt.utils.job.store_minions(
self.opts, jid, minions, mminion=self.mminion, syndic_id=id_
)
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
"Could not add minion(s) %s for job %s: %s", minions, jid, exc
)
def _return(self, load):
"""
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
"""
if self.opts["require_minion_sign_messages"] and "sig" not in load:
log.critical(
"_return: Master is requiring minions to sign their "
"messages, but there is no signature in this payload from "
"%s.",
load["id"],
)
return False
if "sig" in load:
log.trace("Verifying signed event publish from minion")
sig = load.pop("sig")
this_minion_pubkey = os.path.join(
self.opts["pki_dir"], "minions/{}".format(load["id"])
)
serialized_load = salt.serializers.msgpack.serialize(load)
if not salt.crypt.verify_signature(
this_minion_pubkey, serialized_load, sig
):
log.info("Failed to verify event signature from minion %s.", load["id"])
if self.opts["drop_messages_signature_fail"]:
log.critical(
"drop_messages_signature_fail is enabled, dropping "
"message from %s",
load["id"],
)
return False
else:
log.info(
"But 'drop_message_signature_fail' is disabled, so message is"
" still accepted."
)
load["sig"] = sig
try:
salt.utils.job.store_job(
self.opts, load, event=self.event, mminion=self.mminion
)
except salt.exceptions.SaltCacheError:
log.error("Could not store job information for load: %s", load)
def _syndic_return(self, load):
"""
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
"""
loads = load.get("load")
if not isinstance(loads, list):
loads = [load] # support old syndics not aggregating returns
for load in loads:
# Verify the load
if any(key not in load for key in ("return", "jid", "id")):
continue
# if we have a load, save it
if load.get("load"):
fstr = "{}.save_load".format(self.opts["master_job_cache"])
self.mminion.returners[fstr](load["jid"], load["load"])
# Register the syndic
syndic_cache_path = os.path.join(
self.opts["cachedir"], "syndics", load["id"]
)
if not os.path.exists(syndic_cache_path):
path_name = os.path.split(syndic_cache_path)[0]
if not os.path.exists(path_name):
os.makedirs(path_name)
with salt.utils.files.fopen(syndic_cache_path, "w") as wfh:
wfh.write("")
# Format individual return loads
for key, item in load["return"].items():
ret = {"jid": load["jid"], "id": key}
ret.update(item)
if "master_id" in load:
ret["master_id"] = load["master_id"]
if "fun" in load:
ret["fun"] = load["fun"]
if "arg" in load:
ret["fun_args"] = load["arg"]
if "out" in load:
ret["out"] = load["out"]
if "sig" in load:
ret["sig"] = load["sig"]
self._return(ret)
def minion_runner(self, clear_load):
"""
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
"""
load = self.__verify_load(clear_load, ("fun", "arg", "id", "tok"))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
"""
        Request the return data from a specific jid, only allowed
        if the requesting minion also initiated the execution.
:param dict load: The minion payload
:rtype: dict
:return: Return data corresponding to a given JID
"""
load = self.__verify_load(load, ("jid", "id", "tok"))
if load is False:
return {}
# Check that this minion can access this data
auth_cache = os.path.join(self.opts["cachedir"], "publish_auth")
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, str(load["jid"]))
with salt.utils.files.fopen(jid_fn, "r") as fp_:
if not load["id"] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load["jid"])
def minion_pub(self, clear_load):
"""
        Publish a command initiated from a minion; this method enforces minion
        restrictions so that the minion publication will only work if it is
        enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
        This configuration will enable all minions to execute all commands.
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
        :param dict clear_load: The minion payload
"""
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
"""
        Publish a command initiated from a minion; this method enforces minion
        restrictions so that the minion publication will only work if it is
        enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
        .. code-block:: bash
            peer:
                foo.example.com:
                    - test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
"""
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
"""
Allow a minion to request revocation of its own key
:param dict load: The minion payload
        :rtype: dict or bool
        :return: The original load if key revocation is not allowed or the load
            failed verification (no key operation is performed); otherwise True
            if the key was revoked, False if not
"""
load = self.__verify_load(load, ("id", "tok"))
if not self.opts.get("allow_minion_key_revoke", False):
log.warning(
"Minion %s requested key revoke, but allow_minion_key_revoke "
"is set to False",
load["id"],
)
return load
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
"""
Wrapper for running functions executed with AES encryption
:param function func: The function to run
:return: The result of the master function that was called
"""
# Don't honor private functions
if func.startswith("__"):
# TODO: return some error? Seems odd to return {}
return {}, {"fun": "send"}
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
"Master function call %s took %s seconds", func, time.time() - start
)
except Exception: # pylint: disable=broad-except
ret = ""
log.error("Error in function %s:\n", func, exc_info=True)
else:
log.error(
"Received function %s which is unavailable on the master, "
"returning False",
func,
)
return False, {"fun": "send"}
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == "_return":
return ret, {"fun": "send"}
if func == "_pillar" and "id" in load:
if load.get("ver") != "2" and self.opts["pillar_version"] == 1:
# Authorized to return old pillar proto
return ret, {"fun": "send"}
return ret, {"fun": "send_private", "key": "pillar", "tgt": load["id"]}
# Encrypt the return
return ret, {"fun": "send"}
def destroy(self):
self.masterapi.destroy()
if self.local is not None:
self.local.destroy()
self.local = None
class ClearFuncs(TransportMethods):
"""
    Set up functions that are safe to execute when commands are sent to the
    master without encryption and authentication
"""
# These methods will be exposed to the transport layer by
# MWorker._handle_clear
expose_methods = (
"ping",
"publish",
"get_token",
"mk_token",
"wheel",
"runner",
)
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key):
self.opts = opts
self.key = key
# Create the event manager
self.event = salt.utils.event.get_master_event(
self.opts, self.opts["sock_dir"], listen=False
)
# Make a client
self.local = salt.client.get_local_client(self.opts["conf_file"])
        # Make a minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
self.opts, states=False, rend=False, ignore_config_errors=True
)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
# Make a masterapi object
self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
def runner(self, clear_load):
"""
Send a master control function back to the runner system
"""
# All runner ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get("error")
if error:
# Authentication error occurred: do not continue.
return {"error": error}
# Authorize
username = auth_check.get("username")
if auth_type != "user":
runner_check = self.ckminions.runner_check(
auth_check.get("auth_list", []),
clear_load["fun"],
clear_load.get("kwarg", {}),
)
if not runner_check:
return {
"error": {
"name": err_name,
"message": (
'Authentication failure of type "{}" occurred for '
"user {}.".format(auth_type, username)
),
}
}
elif isinstance(runner_check, dict) and "error" in runner_check:
# A dictionary with an error name/message was handled by ckminions.runner_check
return runner_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if "user" in clear_load:
username = clear_load["user"]
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get("user", "root")
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
fun = clear_load.pop("fun")
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.asynchronous(
fun, clear_load.get("kwarg", {}), username, local=True
)
except Exception as exc: # pylint: disable=broad-except
log.error("Exception occurred while introspecting %s: %s", fun, exc)
return {
"error": {
"name": exc.__class__.__name__,
"args": exc.args,
"message": str(exc),
}
}
def wheel(self, clear_load):
"""
Send a master control function back to the wheel system
"""
# All wheel ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get("error")
if error:
# Authentication error occurred: do not continue.
return {"error": error}
# Authorize
username = auth_check.get("username")
if auth_type != "user":
wheel_check = self.ckminions.wheel_check(
auth_check.get("auth_list", []),
clear_load["fun"],
clear_load.get("kwarg", {}),
)
if not wheel_check:
return {
"error": {
"name": err_name,
"message": (
'Authentication failure of type "{}" occurred for '
"user {}.".format(auth_type, username)
),
}
}
elif isinstance(wheel_check, dict) and "error" in wheel_check:
# A dictionary with an error name/message was handled by ckminions.wheel_check
return wheel_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if "user" in clear_load:
username = clear_load["user"]
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get("user", "root")
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
jid = salt.utils.jid.gen_jid(self.opts)
fun = clear_load.pop("fun")
tag = tagify(jid, prefix="wheel")
data = {
"fun": "wheel.{}".format(fun),
"jid": jid,
"tag": tag,
"user": username,
}
self.event.fire_event(data, tagify([jid, "new"], "wheel"))
ret = self.wheel_.call_func(fun, full_return=True, **clear_load)
data["return"] = ret["return"]
data["success"] = ret["success"]
self.event.fire_event(data, tagify([jid, "ret"], "wheel"))
return {"tag": tag, "data": data}
except Exception as exc: # pylint: disable=broad-except
log.error("Exception occurred while introspecting %s: %s", fun, exc)
data["return"] = "Exception occurred in wheel {}: {}: {}".format(
fun,
exc.__class__.__name__,
exc,
)
data["success"] = False
self.event.fire_event(data, tagify([jid, "ret"], "wheel"))
return {"tag": tag, "data": data}
def mk_token(self, clear_load):
"""
Create and return an authentication token, the clear load needs to
contain the eauth key and the needed authentication creds.
"""
token = self.loadauth.mk_token(clear_load)
if not token:
log.warning('Authentication failure of type "eauth" occurred.')
return ""
return token
def get_token(self, clear_load):
"""
Return the name associated with a token or False if the token is invalid
"""
if "token" not in clear_load:
return False
return self.loadauth.get_tok(clear_load["token"])
def publish(self, clear_load):
"""
        This method sends out publications to the minions; it can only be used
by the LocalClient.
"""
extra = clear_load.get("kwargs", {})
publisher_acl = salt.acl.PublisherACL(self.opts["publisher_acl_blacklist"])
if publisher_acl.user_is_blacklisted(
clear_load["user"]
) or publisher_acl.cmd_is_blacklisted(clear_load["fun"]):
log.error(
"%s does not have permissions to run %s. Please contact "
"your local administrator if you believe this is in "
"error.\n",
clear_load["user"],
clear_load["fun"],
)
return {
"error": {
"name": "AuthorizationError",
"message": "Authorization error occurred.",
}
}
# Retrieve the minions list
delimiter = clear_load.get("kwargs", {}).get("delimiter", DEFAULT_TARGET_DELIM)
_res = self.ckminions.check_minions(
clear_load["tgt"], clear_load.get("tgt_type", "glob"), delimiter
)
minions = _res.get("minions", list())
missing = _res.get("missing", list())
ssh_minions = _res.get("ssh_minions", False)
# Check for external auth calls and authenticate
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra)
if auth_type == "user":
auth_check = self.loadauth.check_authentication(
clear_load, auth_type, key=key
)
else:
auth_check = self.loadauth.check_authentication(extra, auth_type)
# Setup authorization list variable and error information
auth_list = auth_check.get("auth_list", [])
err_msg = 'Authentication failure of type "{}" occurred.'.format(auth_type)
if auth_check.get("error"):
# Authentication error occurred: do not continue.
log.warning(err_msg)
return {
"error": {
"name": "AuthenticationError",
"message": "Authentication error occurred.",
}
}
# All Token, Eauth, and non-root users must pass the authorization check
if auth_type != "user" or (auth_type == "user" and auth_list):
# Authorize the request
authorized = self.ckminions.auth_check(
auth_list,
clear_load["fun"],
clear_load["arg"],
clear_load["tgt"],
clear_load.get("tgt_type", "glob"),
minions=minions,
# always accept find_job
whitelist=["saltutil.find_job"],
)
if not authorized:
# Authorization error occurred. Do not continue.
if (
auth_type == "eauth"
and not auth_list
and "username" in extra
and "eauth" in extra
):
log.debug(
'Auth configuration for eauth "%s" and user "%s" is empty',
extra["eauth"],
extra["username"],
)
log.warning(err_msg)
return {
"error": {
"name": "AuthorizationError",
"message": "Authorization error occurred.",
}
}
# Perform some specific auth_type tasks after the authorization check
if auth_type == "token":
username = auth_check.get("username")
clear_load["user"] = username
log.debug('Minion tokenized user = "%s"', username)
elif auth_type == "eauth":
# The username we are attempting to auth with
clear_load["user"] = self.loadauth.load_name(extra)
# If we order masters (via a syndic), don't short circuit if no minions
# are found
if not self.opts.get("order_masters"):
# Check for no minions
if not minions:
return {
"enc": "clear",
"load": {
"jid": None,
"minions": minions,
"error": (
"Master could not resolve minions for target {}".format(
clear_load["tgt"]
)
),
},
}
jid = self._prep_jid(clear_load, extra)
if jid is None:
return {"enc": "clear", "load": {"error": "Master failed to assign jid"}}
payload = self._prep_pub(minions, jid, clear_load, extra, missing)
# Send it!
self._send_ssh_pub(payload, ssh_minions=ssh_minions)
self._send_pub(payload)
return {
"enc": "clear",
"load": {"jid": clear_load["jid"], "minions": minions, "missing": missing},
}
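    # Hedged illustration only: a minimal shape for ``clear_load`` as implied
    # by the key accesses in publish() above (tgt, fun, arg, user, plus
    # optional tgt_type/kwargs/jid). This is a reading of the code, not an
    # official payload specification; the values are made up.
    #
    #   example_clear_load = {
    #       "tgt": "web*",            # target expression, matched with tgt_type
    #       "tgt_type": "glob",       # defaults to "glob" when omitted
    #       "fun": "test.ping",       # function the minions should run
    #       "arg": [],                # positional arguments for the function
    #       "user": "root",           # checked against the publisher ACL blacklist
    #       "jid": "",                # empty/None lets the master allocate a jid
    #       "kwargs": {},             # extra data, e.g. eauth/token credentials
    #   }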
def _prep_auth_info(self, clear_load):
sensitive_load_keys = []
key = None
if "token" in clear_load:
auth_type = "token"
err_name = "TokenAuthenticationError"
sensitive_load_keys = ["token"]
elif "eauth" in clear_load:
auth_type = "eauth"
err_name = "EauthAuthenticationError"
sensitive_load_keys = ["username", "password"]
else:
auth_type = "user"
err_name = "UserAuthenticationError"
key = self.key
return auth_type, err_name, key, sensitive_load_keys
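    # Sketch of the dispatch above (illustrative, made-up payloads):
    #
    #   _prep_auth_info({"token": "abc..."})    -> ("token", "TokenAuthenticationError", None, ["token"])
    #   _prep_auth_info({"eauth": "pam"})       -> ("eauth", "EauthAuthenticationError", None, ["username", "password"])
    #   _prep_auth_info({})                     -> ("user", "UserAuthenticationError", self.key, [])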
def _prep_jid(self, clear_load, extra):
"""
Return a jid for this publication
"""
# the jid in clear_load can be None, '', or something else. this is an
# attempt to clean up the value before passing to plugins
passed_jid = clear_load["jid"] if clear_load.get("jid") else None
nocache = extra.get("nocache", False)
# Retrieve the jid
fstr = "{}.prep_jid".format(self.opts["master_job_cache"])
try:
# Retrieve the jid
jid = self.mminion.returners[fstr](nocache=nocache, passed_jid=passed_jid)
except (KeyError, TypeError):
# The returner is not present
msg = (
"Failed to allocate a jid. The requested returner '{}' "
"could not be loaded.".format(fstr.split(".")[0])
)
log.error(msg)
return {"error": msg}
return jid
def _send_pub(self, load):
"""
Take a load and send it across the network to connected minions
"""
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.publish(load)
@property
def ssh_client(self):
if not hasattr(self, "_ssh_client"):
self._ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts)
return self._ssh_client
def _send_ssh_pub(self, load, ssh_minions=False):
"""
Take a load and send it across the network to ssh minions
"""
if self.opts["enable_ssh_minions"] is True and ssh_minions is True:
log.debug("Send payload to ssh minions")
threading.Thread(target=self.ssh_client.cmd, kwargs=load).start()
def _prep_pub(self, minions, jid, clear_load, extra, missing):
"""
Take a given load and perform the necessary steps
to prepare a publication.
TODO: This is really only bound by temporal cohesion
and thus should be refactored even further.
"""
clear_load["jid"] = jid
delimiter = clear_load.get("kwargs", {}).get("delimiter", DEFAULT_TARGET_DELIM)
# TODO Error reporting over the master event bus
self.event.fire_event({"minions": minions}, clear_load["jid"])
new_job_load = {
"jid": clear_load["jid"],
"tgt_type": clear_load["tgt_type"],
"tgt": clear_load["tgt"],
"user": clear_load["user"],
"fun": clear_load["fun"],
"arg": clear_load["arg"],
"minions": minions,
"missing": missing,
}
# Announce the job on the event bus
self.event.fire_event(new_job_load, tagify([clear_load["jid"], "new"], "job"))
if self.opts["ext_job_cache"]:
fstr = "{}.save_load".format(self.opts["ext_job_cache"])
save_load_func = True
# Get the returner's save_load arg_spec.
try:
arg_spec = salt.utils.args.get_function_argspec(
self.mminion.returners[fstr]
)
# Check if 'minions' is included in returner's save_load arg_spec.
# This may be missing in custom returners, which we should warn about.
if "minions" not in arg_spec.args:
log.critical(
"The specified returner used for the external job cache "
"'%s' does not have a 'minions' kwarg in the returner's "
"save_load function.",
self.opts["ext_job_cache"],
)
except (AttributeError, KeyError):
save_load_func = False
log.critical(
"The specified returner used for the external job cache "
'"%s" does not have a save_load function!',
self.opts["ext_job_cache"],
)
if save_load_func:
try:
self.mminion.returners[fstr](
clear_load["jid"], clear_load, minions=minions
)
except Exception: # pylint: disable=broad-except
log.critical(
"The specified returner threw a stack trace:\n", exc_info=True
)
# always write out to the master job caches
try:
fstr = "{}.save_load".format(self.opts["master_job_cache"])
self.mminion.returners[fstr](clear_load["jid"], clear_load, minions)
except KeyError:
log.critical(
"The specified returner used for the master job cache "
'"%s" does not have a save_load function!',
self.opts["master_job_cache"],
)
except Exception: # pylint: disable=broad-except
log.critical("The specified returner threw a stack trace:\n", exc_info=True)
# Set up the payload
payload = {"enc": "aes"}
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
# publish commands.
#
# In short, check with Thomas Hatch before you even think about
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
load = {
"fun": clear_load["fun"],
"arg": clear_load["arg"],
"tgt": clear_load["tgt"],
"jid": clear_load["jid"],
"ret": clear_load["ret"],
}
        # if you specified a master id, let's put that in the load
if "master_id" in self.opts:
load["master_id"] = self.opts["master_id"]
# if someone passed us one, use that
if "master_id" in extra:
load["master_id"] = extra["master_id"]
# Only add the delimiter to the pub data if it is non-default
if delimiter != DEFAULT_TARGET_DELIM:
load["delimiter"] = delimiter
if "id" in extra:
load["id"] = extra["id"]
if "tgt_type" in clear_load:
load["tgt_type"] = clear_load["tgt_type"]
if "to" in clear_load:
load["to"] = clear_load["to"]
if "kwargs" in clear_load:
if "ret_config" in clear_load["kwargs"]:
load["ret_config"] = clear_load["kwargs"].get("ret_config")
if "metadata" in clear_load["kwargs"]:
load["metadata"] = clear_load["kwargs"].get("metadata")
if "module_executors" in clear_load["kwargs"]:
load["module_executors"] = clear_load["kwargs"].get("module_executors")
if "executor_opts" in clear_load["kwargs"]:
load["executor_opts"] = clear_load["kwargs"].get("executor_opts")
if "ret_kwargs" in clear_load["kwargs"]:
load["ret_kwargs"] = clear_load["kwargs"].get("ret_kwargs")
if "user" in clear_load:
log.info(
"User %s Published command %s with jid %s",
clear_load["user"],
clear_load["fun"],
clear_load["jid"],
)
load["user"] = clear_load["user"]
else:
log.info(
"Published command %s with jid %s", clear_load["fun"], clear_load["jid"]
)
log.debug("Published command details %s", load)
return load
def ping(self, clear_load):
"""
Send the load back to the sender.
"""
return clear_load
def destroy(self):
if self.masterapi is not None:
self.masterapi.destroy()
self.masterapi = None
if self.local is not None:
self.local.destroy()
self.local = None
|
tcpros_pubsub.py
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
"""Internal use: Topic-specific extensions for TCPROS support"""
import socket
import threading
import time
try:
from xmlrpc.client import ServerProxy # Python 3.x
except ImportError:
from xmlrpclib import ServerProxy # Python 2.x
from rospy.core import logwarn, logerr, logdebug, rospyerr
import rospy.exceptions
import rospy.names
import rospy.impl.registration
import rospy.impl.transport
from rospy.impl.tcpros_base import TCPROSTransport, TCPROSTransportProtocol, \
get_tcpros_server_address, start_tcpros_server,\
DEFAULT_BUFF_SIZE, TCPROS
class TCPROSSub(TCPROSTransportProtocol):
"""
Subscription transport implementation for receiving topic data via
peer-to-peer TCP/IP sockets
"""
def __init__(self, resolved_name, recv_data_class, queue_size=None, \
buff_size=DEFAULT_BUFF_SIZE, tcp_nodelay=False):
"""
ctor.
@param resolved_name: resolved subscription name
@type resolved_name: str
@param recv_data_class: class to instantiate to receive
messages
@type recv_data_class: L{rospy.Message}
@param queue_size: maximum number of messages to
deserialize from newly read data off socket
@type queue_size: int
@param buff_size: recv buffer size
@type buff_size: int
@param tcp_nodelay: If True, request TCP_NODELAY from publisher
@type tcp_nodelay: bool
"""
super(TCPROSSub, self).__init__(resolved_name, recv_data_class, queue_size, buff_size)
self.direction = rospy.impl.transport.INBOUND
self.tcp_nodelay = tcp_nodelay
def get_header_fields(self):
"""
@return: dictionary of subscriber fields
@rtype: dict
"""
return {'topic': self.resolved_name,
'message_definition': self.recv_data_class._full_text,
'tcp_nodelay': '1' if self.tcp_nodelay else '0',
'md5sum': self.recv_data_class._md5sum,
'type': self.recv_data_class._type,
'callerid': rospy.names.get_caller_id()}
# Separate method for easier testing
def _configure_pub_socket(sock, is_tcp_nodelay):
"""
Configure socket options on a new publisher socket.
@param sock: socket.socket
@type sock: socket.socket
@param is_tcp_nodelay: if True, TCP_NODELAY will be set on outgoing socket if available
    @type is_tcp_nodelay: bool
"""
# #956: low latency, TCP_NODELAY support
if is_tcp_nodelay:
if hasattr(socket, 'TCP_NODELAY'):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
else:
logwarn("WARNING: cannot enable TCP_NODELAY as its not supported on this platform")
#TODO:POLLING: TCPROSPub currently doesn't actually do anything -- not until polling is implemented
class TCPROSPub(TCPROSTransportProtocol):
"""
Publisher transport implementation for publishing topic data via
peer-to-peer TCP/IP sockets.
"""
def __init__(self, resolved_name, pub_data_class, is_latch=False, headers=None):
"""
ctor.
@param resolved_name: resolved topic name
@type resolved_name: str
        @param pub_data_class: class to instantiate to receive messages
@type pub_data_class: L{rospy.Message} class
@param is_latch: If True, Publisher is latching
@type is_latch: bool
"""
# very small buffer size for publishers as the messages they receive are very small
super(TCPROSPub, self).__init__(resolved_name, None, queue_size=None, buff_size=128)
self.pub_data_class = pub_data_class
self.direction = rospy.impl.transport.OUTBOUND
self.is_latch = is_latch
self.headers = headers if headers else {}
def get_header_fields(self):
base = {'topic': self.resolved_name,
'type': self.pub_data_class._type,
'latching': '1' if self.is_latch else '0',
'message_definition': self.pub_data_class._full_text,
'md5sum': self.pub_data_class._md5sum,
'callerid': rospy.names.get_caller_id() }
# this implementation allows the user to override builtin
# fields. this could potentially enable some interesting
# features... or it could be really bad.
if self.headers:
base.update(self.headers)
return base
def robust_connect_subscriber(conn, dest_addr, dest_port, pub_uri, receive_cb, resolved_topic_name):
"""
    Keeps trying to create a connection for the subscriber, then passes off to receive_loop once connected.
"""
# kwc: this logic is not very elegant. I am waiting to rewrite
# the I/O loop with async i/o to clean this up.
# timeout is really generous. for now just choosing one that is large but not infinite
interval = 0.5
while conn.socket is None and not conn.done and not rospy.is_shutdown():
try:
conn.connect(dest_addr, dest_port, pub_uri, timeout=60.)
except rospy.exceptions.TransportInitError as e:
rospyerr("unable to create subscriber transport: %s. Will try again in %ss", e, interval)
interval = interval * 2
time.sleep(interval)
# check to see if publisher state has changed
conn.done = not check_if_still_publisher(resolved_topic_name, pub_uri)
if not conn.done:
conn.receive_loop(receive_cb)
def check_if_still_publisher(resolved_topic_name, pub_uri):
try:
s = ServerProxy(pub_uri)
code, msg, val = s.getPublications(rospy.names.get_name())
if code == 1:
return len([t for t in val if t[0] == resolved_topic_name]) > 0
else:
return False
except:
return False
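# Illustrative sketch (hypothetical helper, not rospy API) of the backoff
# pattern used by robust_connect_subscriber() above: the wait doubles after
# every failed attempt, and a liveness check decides whether to keep trying.
#
#   import time
#
#   def retry_with_backoff(connect, still_wanted, initial_interval=0.5):
#       interval = initial_interval
#       while not connect():
#           interval *= 2          # 1 s, 2 s, 4 s, ... between attempts
#           time.sleep(interval)
#           if not still_wanted():
#               return False       # publisher went away; give up
#       return True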
class TCPROSHandler(rospy.impl.transport.ProtocolHandler):
"""
ROS Protocol handler for TCPROS. Accepts both TCPROS topic
connections as well as ROS service connections over TCP. TCP server
socket is run once start_server() is called -- this is implicitly
called during init_publisher().
"""
def __init__(self):
"""ctor"""
self.tcp_nodelay_map = {} # { topic : tcp_nodelay}
def set_tcp_nodelay(self, resolved_name, tcp_nodelay):
"""
@param resolved_name: resolved topic name
@type resolved_name: str
@param tcp_nodelay: If True, sets TCP_NODELAY on publisher's
socket (disables Nagle algorithm). This results in lower
latency publishing at the cost of efficiency.
@type tcp_nodelay: bool
"""
self.tcp_nodelay_map[resolved_name] = tcp_nodelay
def shutdown(self):
"""
stops the TCP/IP server responsible for receiving inbound connections
"""
pass
def create_transport(self, resolved_name, pub_uri, protocol_params):
"""
Connect to topic resolved_name on Publisher pub_uri using TCPROS.
@param resolved_name str: resolved topic name
@type resolved_name: str
@param pub_uri: XML-RPC URI of publisher
@type pub_uri: str
@param protocol_params: protocol parameters to use for connecting
@type protocol_params: [XmlRpcLegal]
@return: code, message, debug
@rtype: (int, str, int)
"""
#Validate protocol params = [TCPROS, address, port]
if type(protocol_params) != list or len(protocol_params) != 3:
return 0, "ERROR: invalid TCPROS parameters", 0
if protocol_params[0] != TCPROS:
return 0, "INTERNAL ERROR: protocol id is not TCPROS: %s"%id, 0
id, dest_addr, dest_port = protocol_params
sub = rospy.impl.registration.get_topic_manager().get_subscriber_impl(resolved_name)
#Create connection
protocol = TCPROSSub(resolved_name, sub.data_class, \
queue_size=sub.queue_size, buff_size=sub.buff_size,
tcp_nodelay=sub.tcp_nodelay)
conn = TCPROSTransport(protocol, resolved_name)
conn.set_endpoint_id(pub_uri);
t = threading.Thread(name=resolved_name, target=robust_connect_subscriber, args=(conn, dest_addr, dest_port, pub_uri, sub.receive_callback,resolved_name))
# don't enable this just yet, need to work on this logic
#rospy.core._add_shutdown_thread(t)
t.start()
# Attach connection to _SubscriberImpl
if sub.add_connection(conn): #pass tcp connection to handler
return 1, "Connected topic[%s]. Transport impl[%s]"%(resolved_name, conn.__class__.__name__), dest_port
else:
conn.close()
return 0, "ERROR: Race condition failure: duplicate topic subscriber [%s] was created"%(resolved_name), 0
def supports(self, protocol):
"""
@param protocol: name of protocol
@type protocol: str
@return: True if protocol is supported
@rtype: bool
"""
return protocol == TCPROS
def get_supported(self):
"""
Get supported protocols
"""
return [[TCPROS]]
def init_publisher(self, resolved_name, protocol):
"""
Initialize this node to receive an inbound TCP connection,
i.e. startup a TCP server if one is not already running.
@param resolved_name: topic name
        @type resolved_name: str
@param protocol: negotiated protocol
parameters. protocol[0] must be the string 'TCPROS'
@type protocol: [str, value*]
@return: (code, msg, [TCPROS, addr, port])
@rtype: (int, str, list)
"""
if protocol[0] != TCPROS:
return 0, "Internal error: protocol does not match TCPROS: %s"%protocol, []
start_tcpros_server()
addr, port = get_tcpros_server_address()
return 1, "ready on %s:%s"%(addr, port), [TCPROS, addr, port]
def topic_connection_handler(self, sock, client_addr, header):
"""
Process incoming topic connection. Reads in topic name from
handshake and creates the appropriate L{TCPROSPub} handler for the
connection.
@param sock: socket connection
@type sock: socket.socket
@param client_addr: client address
@type client_addr: (str, int)
@param header: key/value pairs from handshake header
@type header: dict
@return: error string or None
@rtype: str
"""
if rospy.core.is_shutdown_requested():
return "Node is shutting down"
for required in ['topic', 'md5sum', 'callerid']:
if not required in header:
return "Missing required '%s' field"%required
else:
resolved_topic_name = header['topic']
md5sum = header['md5sum']
tm = rospy.impl.registration.get_topic_manager()
topic = tm.get_publisher_impl(resolved_topic_name)
if not topic:
return "[%s] is not a publisher of [%s]. Topics are %s"%(rospy.names.get_caller_id(), resolved_topic_name, tm.get_publications())
elif not topic.data_class or topic.closed:
return "Internal error processing topic [%s]"%(resolved_topic_name)
elif md5sum != rospy.names.TOPIC_ANYTYPE and md5sum != topic.data_class._md5sum:
data_class = topic.data_class
actual_type = data_class._type
# check to see if subscriber sent 'type' header. If they did, check that
# types are same first as this provides a better debugging message
if 'type' in header:
requested_type = header['type']
if requested_type != actual_type:
return "topic types do not match: [%s] vs. [%s]"%(requested_type, actual_type)
else:
# defaults to actual type
requested_type = actual_type
return "Client [%s] wants topic [%s] to have datatype/md5sum [%s/%s], but our version has [%s/%s] Dropping connection."%(header['callerid'], resolved_topic_name, requested_type, md5sum, actual_type, data_class._md5sum)
else:
#TODO:POLLING if polling header is present, have to spin up receive loop as well
# #1334: tcp_nodelay support from subscriber option
if 'tcp_nodelay' in header:
tcp_nodelay = True if header['tcp_nodelay'].strip() == '1' else False
else:
tcp_nodelay = self.tcp_nodelay_map.get(resolved_topic_name, False)
_configure_pub_socket(sock, tcp_nodelay)
protocol = TCPROSPub(resolved_topic_name, topic.data_class, is_latch=topic.is_latch, headers=topic.headers)
transport = TCPROSTransport(protocol, resolved_topic_name)
transport.set_socket(sock, header['callerid'])
transport.remote_endpoint = client_addr
transport.write_header()
topic.add_connection(transport)
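    # Hedged illustration of the handshake header consumed above; the keys are
    # the ones this handler reads ('topic', 'md5sum', 'callerid', optional
    # 'type' and 'tcp_nodelay'); the values below are made up:
    #
    #   example_header = {
    #       'topic': '/chatter',
    #       'md5sum': '0123456789abcdef0123456789abcdef',  # placeholder md5 of the message type
    #       'callerid': '/listener',
    #       'type': 'std_msgs/String',
    #       'tcp_nodelay': '1',
    #   }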
class QueuedConnection(object):
"""
    It wraps a Transport instance and behaves like one, but it queues the
    data written to it and relays it asynchronously to the wrapped instance.
"""
def __init__(self, connection, queue_size):
"""
ctor.
@param connection: the wrapped transport instance
@type connection: Transport
@param queue_size: the maximum size of the queue, zero means infinite
@type queue_size: int
"""
super(QueuedConnection, self).__init__()
self._connection = connection
self._queue_size = queue_size
self._lock = threading.Lock()
self._cond_data_available = threading.Condition(self._lock)
self._cond_queue_swapped = threading.Condition(self._lock)
self._queue = []
self._waiting = False
self._error = None
self._thread = threading.Thread(target=self._run)
self._thread.start()
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
return getattr(self._connection, name)
def write_data(self, data):
with self._lock:
# if there was previously an error within the dispatch thread raise it
if self._error:
error = self._error
self._error = None
raise error
# pop oldest data if queue limit is reached
if self._queue_size > 0 and len(self._queue) == self._queue_size:
del self._queue[0]
self._queue.append(data)
self._cond_data_available.notify()
            # ensure that the thread has actually swapped the queues and is
            # processing them if it was waiting to be notified, to keep the
            # behavior as close as possible to blocking
if self._waiting:
self._cond_queue_swapped.wait()
return True
def _run(self):
while not self._connection.done:
queue = []
with self._lock:
# wait for available data
while not self._queue and not self._connection.done:
self._waiting = True
self._cond_data_available.wait(1.0)
self._waiting = False
if self._queue:
self._cond_queue_swapped.notify()
# take all data from queue for processing outside of the lock
if self._queue:
queue = self._queue
self._queue = []
# relay all data
for data in queue:
try:
self._connection.write_data(data)
except Exception as e:
with self._lock:
self._error = e
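# Hedged usage sketch (not executed; 'some_transport' and 'serialized_msg' are
# hypothetical): a QueuedConnection is meant to stand in for the transport it
# wraps, so publisher code can keep calling write_data() while delivery
# happens on the internal worker thread.
#
#   queued = QueuedConnection(some_transport, queue_size=10)
#   queued.write_data(serialized_msg)   # returns quickly; data is relayed
#                                       # asynchronously by the worker thread
#   queued.done                         # attribute access falls through to the
#                                       # wrapped transport via __getattr__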
|
ThreadIndexFiles.py
|
# ====================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ====================================================================
# This sample illustrates how to use a thread with PyLucene
INDEX_DIR = "ThreadIndexFiles.index"
import sys, os, threading, lucene
from datetime import datetime
from IndexFiles import IndexFiles
from org.apache.lucene.analysis.standard import StandardAnalyzer
from org.apache.lucene.util import Version
if __name__ == '__main__':
if len(sys.argv) < 2:
print IndexFiles.__doc__
sys.exit(1)
env=lucene.initVM(vmargs=['-Djava.awt.headless=true'])
print 'lucene', lucene.VERSION
def fn():
base_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
env.attachCurrentThread()
start = datetime.now()
IndexFiles(sys.argv[1], os.path.join(base_dir, INDEX_DIR),
StandardAnalyzer(Version.LUCENE_CURRENT))
end = datetime.now()
print end - start
threading.Thread(target=fn).start()
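# Hedged note: the call to env.attachCurrentThread() inside fn() is the key
# detail of this sample -- any thread other than the one that called
# lucene.initVM() must attach itself to the JVM before touching Lucene
# classes. A minimal skeleton of the pattern (illustrative only):
#
#   env = lucene.initVM()
#
#   def worker():
#       env.attachCurrentThread()   # required in every new thread
#       ...                         # use Lucene classes here
#
#   threading.Thread(target=worker).start()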
|
fusekistore.py
|
from os.path import os
import threading
from urllib2 import HTTPError
import urllib2
from SPARQLWrapper import SPARQLWrapper, POST, JSON, SELECT, INSERT
from rdflib.graph import ConjunctiveGraph
import triplestoreadapter
from virtualisation.misc.jsonobject import JSONObject
from virtualisation.misc.log import Log as L
from SPARQLWrapper.SPARQLExceptions import EndPointInternalError,\
EndPointNotFound
__author__ = 'sefki'
__author__ = "thiggena"
class SPARQL_Exception(Exception):
pass
class StoreOffline_Exception(Exception):
pass
class FusekiStore(triplestoreadapter.TripleStoreAdapter):
SPARQL_ENDPOINT = None
GRAPH_BASE_URI = None
def __init__(self, triplestoreconfiguration):
if not FusekiStore.SPARQL_ENDPOINT:
FusekiStore.SPARQL_ENDPOINT = "http://%s:%d/" % (triplestoreconfiguration.host, triplestoreconfiguration.port)
if "path" in triplestoreconfiguration:
FusekiStore.SPARQL_ENDPOINT += triplestoreconfiguration.path
if not FusekiStore.GRAPH_BASE_URI:
FusekiStore.GRAPH_BASE_URI = triplestoreconfiguration.base_uri
self.dictIDs = {}
# prepares the SPARQL wrapper object
def getSparqlObject(self, graphName=None, query=None):
sparql = SPARQLWrapper(FusekiStore.SPARQL_ENDPOINT)
sparql.addDefaultGraph(self.getGraphURI(graphName))
sparql.setQuery(query)
sparql.setMethod(POST)
sparql.queryType = SELECT
sparql.setReturnFormat(JSON)
sparql.setTimeout(0.1)
return sparql
    # builds the full graph URI and percent-encodes characters that are not allowed in a URI
def getGraphURI(self, graphName, prependBase=True):
if prependBase:
graphURI = FusekiStore.GRAPH_BASE_URI + graphName
else:
graphURI = graphName
return urllib2.quote(graphURI, "://#")
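    # Illustrative only: with the safe-character set "://#", urllib2.quote keeps
    # the URI delimiters intact and percent-encodes the rest, e.g.
    #
    #   urllib2.quote("http://example.org/graphs/road traffic#", "://#")
    #   # -> 'http://example.org/graphs/road%20traffic#'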
    # returns False if the graph doesn't exist or is empty(!)
    # there is no way to distinguish an existing but empty graph from a missing one
def graphExists(self, graphName):
queryString = "ASK { GRAPH <" + self.getGraphURI(graphName) + "> { ?s ?p ?o . }}"
sparql = self.getSparqlObject(graphName, queryString)
# print queryString
try:
ret = sparql.query()
retList = ret.convert()
# print retList
return retList["boolean"]
except HTTPError as e:
L.e("Sparql Endpoint HTTPError in graphExists:", str(e.code), e.reason)
except Exception as e:
L.e("Error in graphExists:", e.message)
# creates an empty graph for the new dataset
def createGraph(self, graphName):
queryString = "CREATE GRAPH <" + self.getGraphURI(graphName) + ">"
sparql = self.getSparqlObject(graphName, queryString)
try:
ret = sparql.query().convert()
return True
except HTTPError as e:
L.e("Sparql Endpoint HTTPError in createGraph:", str(e.code), e.reason)
except Exception as e:
L.e("Error in createGraph:", e.message)
return False
def saveTriple(self, graphName, subject, predicate, object):
sparql = self.getSparqlObject(graphName)
        # INSERT doesn't work with the default graph set via addDefaultGraph; the target graph has to be named explicitly: "INSERT DATA INTO <graph> ..."
queryString = "INSERT DATA INTO <" + self.getGraphURI(
graphName) + "> { <" + subject + "> <" + predicate + "> <" + object + "> }"
sparql.setQuery(queryString)
try:
sparql.query()
except HTTPError as e:
L.e("Sparql Endpoint HTTPError in saveTriple:", str(e.code), e.reason)
except Exception as e:
L.e("Error in saveTriple:", e.message)
def saveGraph(self, graph, graphName):
serialisation = graph.serialize(destination=None, format='nt', encoding=None)
queryString = "".join(["INSERT DATA { GRAPH <", self.getGraphURI(graphName), "> {", serialisation, "}}"])
sparql = self.getSparqlObject(graphName, queryString)
sparql.queryType = INSERT
try:
sparql.query()
except HTTPError as e:
L.e("Sparql Endpoint HTTPError in saveGraph:", str(e.code), e.reason)
except Exception as e:
L.e("Error in saveGraph:", e.message)
def saveGraphAsync(self, graph=None, graphName=None):
threading.Thread(target=self.saveGraph, args=(graph, graphName)).start()
def saveMultipleGraphs(self, serialisedGraph, graphName=None):
queryString = "".join(["INSERT DATA { GRAPH <", self.getGraphURI(graphName), "> {", serialisedGraph, "}}"])
sparql = self.getSparqlObject(graphName, queryString)
sparql.queryType = INSERT
try:
ret = sparql.query()
except EndPointInternalError as e: #transaction deadlock case
raise SPARQL_Exception()
except EndPointNotFound as e: #temporarily 404 error
raise SPARQL_Exception()
except Exception as e:
L.e("Error in saveMultipleGraphs:", e.message)
raise StoreOffline_Exception()
def getObservationGraph(self, graphName, sensor, start = None, end = None, asGraph=True):
dateFilter = ""
if start and end:
dateFilter = "FILTER ( (xsd:dateTime(?resultTimeValue) >= xsd:dateTime(\"" + start + "\")) && (xsd:dateTime(?resultTimeValue) <= xsd:dateTime(\"" + end + "\")) ) "
elif start:
dateFilter = "FILTER ( (xsd:dateTime(?resultTimeValue) >= xsd:dateTime(\"" + start + "\")) ) "
queryString = """prefix : <http://stefan.com/>
prefix sao: <http://purl.oclc.org/NET/UNIS/sao/sao#>
prefix ssn: <http://purl.oclc.org/NET/ssnx/ssn#>
prefix tl: <http://purl.org/NET/c4dm/timeline.owl#>
CONSTRUCT {?s ?p ?o}
where {
{
?observation (!:)* ?s .
?s ?p ?o .
}
{
?observation a sao:Point.
?observation ssn:observationResultTime ?resultTime .
?resultTime tl:at ?resultTimeValue .
?observation ssn:observedBy <""" + sensor + """> .""" + dateFilter + """}}"""
sparql = self.getSparqlObject(graphName, queryString)
sparql.setReturnFormat("n3")
try:
ret = sparql.query().convert()
if not asGraph:
return ret
else:
g = ConjunctiveGraph()
return g.parse(data=ret, format="n3")
except Exception as e:
L.e("Error in getObservationGraph:", e.message)
return None
def deleteGraph(self, graphName):
queryString = "DEFINE sql:log-enable 3 DROP SILENT GRAPH <" + self.getGraphURI(graphName) + ">"
queryString = "DROP GRAPH <" + self.getGraphURI(graphName) + ">"
L.d("deleteGraph using query:", queryString)
sparql = self.getSparqlObject(graphName, queryString)
sparql.queryType = INSERT
sparql.setTimeout(300)
try:
ret = sparql.query()
return True
except Exception as e:
L.e("Error in deleteGraph:", e.message)
return False
#sensorNames have to be in the same graph!
def getLastQoIData_List(self, graphName, sensorNames, start=None, end=None):
length = len(sensorNames)
i = 1
sensorFilter = "FILTER("
for sensor in sensorNames:
if i < length:
sensorFilter = "".join([sensorFilter, "?sensor = <", sensor, "> || "])
else:
sensorFilter = "".join([sensorFilter, "?sensor = <", sensor, "> )"])
i += 1
dateFilter = ""
limit = ""
if start and end:
dateFilter = "FILTER ( (xsd:dateTime(?resultTimeValue) >= xsd:dateTime(\"" + start + "\")) && (xsd:dateTime(?resultTimeValue) <= xsd:dateTime(\"" + end + "\")) ) "
elif start:
dateFilter = "FILTER ( (xsd:dateTime(?resultTimeValue) >= xsd:dateTime(\"" + start + "\")) ) "
else:
limit = "LIMIT " + str(length)
queryString = """prefix ssn: <http://purl.oclc.org/NET/ssnx/ssn#> prefix tl: <http://purl.org/NET/c4dm/timeline.owl#>
prefix sao: <http://purl.oclc.org/NET/UNIS/sao/sao#>
prefix ces: <http://www.insight-centre.org/ces#> prefix so: <http://www.daml.org/services/owl-s/1.2/Service.owl#>
prefix qoi: <http://purl.oclc.org/NET/UASO/qoi#>
prefix prov: <http://www.w3.org/ns/prov#>
SELECT
distinct(?sensor) ?absoluteAge ?ratedAge ?unitAge ?absoluteCompleteness ?ratedCompleteness ?unitCompleteness
?absoluteCorrectness ?ratedCorrectness ?unitCorrectness ?absoluteFrequency ?ratedFrequency ?unitFrequency
?absoluteLatency ?ratedLatency ?unitLatency ?resultTimeValue
WHERE {
?quality1 qoi:hasAbsoluteQuality ?absoluteAge .
?quality1 qoi:hasRatedQuality ?ratedAge .
?quality1 qoi:hasUnitOfMeasurement ?unitAge .
?quality2 qoi:hasAbsoluteQuality ?absoluteCompleteness .
?quality2 qoi:hasRatedQuality ?ratedCompleteness .
?quality2 qoi:hasUnitOfMeasurement ?unitCompleteness .
?quality3 qoi:hasAbsoluteQuality ?absoluteCorrectness .
?quality3 qoi:hasRatedQuality ?ratedCorrectness .
?quality3 qoi:hasUnitOfMeasurement ?unitCorrectness .
?quality4 qoi:hasAbsoluteQuality ?absoluteFrequency .
?quality4 qoi:hasRatedQuality ?ratedFrequency .
?quality4 qoi:hasUnitOfMeasurement ?unitFrequency .
?quality5 qoi:hasAbsoluteQuality ?absoluteLatency .
?quality5 qoi:hasRatedQuality ?ratedLatency .
?quality5 qoi:hasUnitOfMeasurement ?unitLatency .
?quality1 a qoi:Age .
?quality2 a qoi:Completeness .
?quality3 a qoi:Correctness .
?quality4 a qoi:Frequency .
?quality5 a qoi:Latency .
?observation qoi:hasQuality ?quality1 .
?observation qoi:hasQuality ?quality2 .
?observation qoi:hasQuality ?quality3 .
?observation qoi:hasQuality ?quality4 .
?observation qoi:hasQuality ?quality5 .
?observation ssn:observationResultTime ?time .
?observation ssn:observedBy ?sensor .
?time tl:at ?resultTimeValue . """ + sensorFilter + " " + dateFilter + """
} ORDER BY DESC(?resultTimeValue) """ + limit
sparql = self.getSparqlObject(graphName, queryString)
try:
ret = sparql.query().convert()
return ret
except Exception as e:
L.e("Error in getQoIData:", e.message)
def getStreamMinMaxDate(self, graphName, sensorName):
queryString = """prefix ssn: <http://purl.oclc.org/NET/ssnx/ssn#> prefix tl: <http://purl.org/NET/c4dm/timeline.owl#>
prefix sao: <http://purl.oclc.org/NET/UNIS/sao/sao#> prefix ces: <http://www.insight-centre.org/ces#>
prefix so: <http://www.daml.org/services/owl-s/1.2/Service.owl#> prefix qoi: <http://purl.oclc.org/NET/UASO/qoi#>
prefix prov: <http://www.w3.org/ns/prov#>
SELECT MAX(str(?timeValue)) as ?maxDateTime MIN(str(?timeValue)) as ?minDateTime
WHERE { ?observation ssn:observationResultTime ?time . ?observation ssn:observedBy <""" + sensorName + """> . ?time tl:at ?timeValue . }"""
sparql = self.getSparqlObject(graphName, queryString)
try:
ret = sparql.query().convert()
return ret
except Exception as e:
L.e("Error in getStreamMinMaxDate:", e.message)
if __name__ == '__main__':
from virtualisation.triplestore.triplestorefactory import TripleStoreFactory
config = JSONObject(file(os.path.join(os.path.dirname(__file__), "..", "config.json"), "rb"))
tripleStore = TripleStoreFactory.getTripleStore(config.triplestore.driver, config.triplestore)
# x = JSONObject()
#
print tripleStore.getObservationGraph2("aarhus_road_parking#", "http://ict-citypulse.eu/SensorID-9ae902fb-232b-5ea8-b7be-34d60d563112", "2015-10-09T17:27:38", "2015-10-10T17:29:28", asGraph=False)
# print x.dumps()
# sensorNames = ["http://ict-citypulse.eu/SensorID-4a838c4b-30d0-5fb4-b3b5-16d6c5c4ff9f", "http://ict-citypulse.eu/SensorID-d281e004-dfac-56c4-b237-d3b854c63558", \
# "http://ict-citypulse.eu/SensorID-4a838c4b-30d0-5fb4-b3b5-16d6c5c4ff9f", "http://ict-citypulse.eu/SensorID-4ebf0933-b115-5e44-98e7-2432325395a1", \
# "http://ict-citypulse.eu/SensorID-2586bf63-d256-59a0-bc29-9d04eb92dacb", "http://ict-citypulse.eu/SensorID-fb6c280f-1daa-56ea-a984-bfc2ae79d835", \
# "http://ict-citypulse.eu/SensorID-51f0f28c-0909-5a83-a310-b6bd686bf57b", "http://ict-citypulse.eu/SensorID-d281e004-dfac-56c4-b237-d3b854c63558"]
# sensorNames = ["http://ict-citypulse.eu/SensorID-4a838c4b-30d0-5fb4-b3b5-16d6c5c4ff9f"]
#
#
# import datetime
# for _i in range(0,2):
# tstart = datetime.datetime.now()
# for sensor in sensorNames:
# tripleStore.getLastQoIData("aarhus_road_parking#", sensor)
# print "Testing time for getLastQoIData:", (datetime.datetime.now() - tstart).total_seconds()
# tstart = datetime.datetime.now()
# print tripleStore.getLastQoIData_List("aarhus_road_parking#", sensorNames, "2016-10-13T13:40:01", "2015-10-13T13:42:01")
# print "Testing time for getLastQoIData2:", (datetime.datetime.now() - tstart).total_seconds()
# print tripleStore.getQoIData("aarhus_road_parking#", "http://ict-citypulse.eu/SensorID-0816d088-3af8-540e-b89b-d99ac63fa886", "2015-10-08T10:35:01", "2015-10-08T10:40:01")
# tripleStore.createGraph("test")
# tripleStore.deleteGraph("test")
# tripleStore.createGraph("test")
# tripleStore.deleteGraph("test")
# tripleStore.createGraph("test2")
# tripleStore.deleteGraph("test2")
# tripleStore.createGraph("test")
# import datetime
# tstart = datetime.datetime.now()
# tripleStore.deleteGraph("aarhus_road_traffic#")
# print "Testing timeout for delete TIME:", (datetime.datetime.now() - tstart).total_seconds()
# print tripleStore.getLastQoIData("aarhus_road_parking#", "http://ict-citypulse.eu/SensorID-4a838c4b-30d0-5fb4-b3b5-16d6c5c4ff9f")
# print tripleStore.getQoIData("aarhus_road_parking#", "http://ict-citypulse.eu/SensorID-4a838c4b-30d0-5fb4-b3b5-16d6c5c4ff9f", start="2015-09-01T10:42:50", end="2015-09-01T11:42:55")
#
# print tripleStore.getStreamMinMaxDate("aarhus_road_parking#", "http://ict-citypulse.eu/SensorID-4a838c4b-30d0-5fb4-b3b5-16d6c5c4ff9f")
|
update.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from re import sub
import sublime_plugin
from ..api import deviot
from ..libraries.messages import Messages
from ..libraries.thread_progress import ThreadProgress
class DeviotCheckPioUpdatesCommand(sublime_plugin.WindowCommand):
def run(self):
Update().check_update_async()
class DeviotUpdatePioCommand(sublime_plugin.WindowCommand):
def run(self):
Update().update_async()
class DeviotDevPioCommand(sublime_plugin.WindowCommand):
def run(self):
Update().developer_async()
class Update:
"""Update PlatFormIO
Class to upgrade platformIO (update_pio) or install the
developer branch (developer_pio) to avoid block the sublime
text UI both function are run in a separate thread (async)
update_asyc, developer_async
"""
def __init__(self):
super(Update, self).__init__()
self.cwd = None
self.dprint = None
self.env_paths = deviot.get_sysetting('env_paths', False)
def show_feedback(self):
messages = Messages()
messages.initial_text("_deviot_{0}", deviot.version())
messages.create_panel()
self.dprint = messages.print
def update_pio(self):
"""Update PlatformIO
        Update PlatformIO to the latest version (blocks the thread)
"""
self.show_feedback()
self.dprint('searching_pio_updates')
cmd = deviot.pio_command(['upgrade'])
out = deviot.run_command(cmd)
self.dprint(out[1])
def update_async(self):
"""New Thread Execution
Starts a new thread to run the update_pio method
"""
from threading import Thread
thread = Thread(target=self.update_pio)
thread.start()
ThreadProgress(thread, 'processing', '')
def developer_async(self):
"""New Thread Execution
Starts a new thread to run the developer_pio method
"""
from threading import Thread
thread = Thread(target=self.developer_pio)
thread.start()
ThreadProgress(thread, 'processing', '')
def developer_pio(self):
"""Developer
        Uninstall the current version of PlatformIO and install
        a version based on the preference of the user; it can be
        the stable or the developer version
"""
self.show_feedback()
self.dprint('uninstall_old_pio')
cmd = ['pip', 'uninstall', '--yes', 'platformio']
out = deviot.run_command(cmd)
developer = deviot.get_sysetting('pio_developer', False)
if(not developer):
self.dprint('installing_dev_pio')
option = 'https://github.com/platformio/' \
'platformio/archive/develop.zip'
else:
self.dprint('installing_stable_pio')
option = 'platformio'
cmd = deviot.prepare_command(['pip', 'install', '-U', option])
out = deviot.run_command(cmd)
if(out[0] == 0):
self.dprint('button_ok')
deviot.save_sysetting('pio_developer', not developer)
else:
self.dprint('setup_error')
def check_update_async(self):
"""New Thread Execution
Starts a new thread to run the check_update method
"""
from threading import Thread
thread = Thread(target=self.check_update)
thread.start()
ThreadProgress(thread, 'processing', '')
def check_update(self):
"""Check update
        Checks for PlatformIO updates every 5 days.
        PyPI is queried to find out the latest
        released version of PlatformIO
"""
installed = deviot.get_sysetting('installed', False)
if(not installed):
return
from datetime import datetime, timedelta
date_now = datetime.now()
last_check = deviot.get_sysetting('last_check_update', False)
try:
last_check = datetime.strptime(last_check, '%Y-%m-%d %H:%M:%S.%f')
if(date_now < last_check):
return
except TypeError:
pass
if(not last_check or date_now > last_check):
last_check = date_now + timedelta(5, 0) # 5 days
deviot.save_sysetting('last_check_update', str(last_check))
cmd = deviot.pio_command(['--version'])
out = deviot.run_command(cmd, env_paths=self.env_paths)
pio_version = int(sub(r'\D', '', out[1]))
last_pio_version = self.online_pio_version()
if(pio_version < last_pio_version):
from sublime import ok_cancel_dialog
from ..libraries.I18n import I18n
translate = I18n().translate
update = ok_cancel_dialog(translate('new_pio_update{0}{1}',
last_pio_version,
pio_version),
translate('update_button'))
if(update):
self.show_feedback()
self.update_pio()
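    # Hedged sketch of the bookkeeping used by check_update() above: the
    # 'last_check_update' setting stores str(datetime), which round-trips
    # through strptime with the '%Y-%m-%d %H:%M:%S.%f' format, and the stored
    # value is "now + 5 days", i.e. the earliest time of the next check.
    #
    #   from datetime import datetime, timedelta
    #   stamp = str(datetime.now() + timedelta(5, 0))
    #   next_check = datetime.strptime(stamp, '%Y-%m-%d %H:%M:%S.%f')
    #   datetime.now() < next_check    # True until 5 days have passed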
def online_pio_version(self):
from urllib.request import Request
from urllib.request import urlopen
from urllib.error import HTTPError
from json import loads
try:
url = 'https://pypi.python.org/pypi/platformio/json'
req = Request(url, headers=deviot.header())
response = urlopen(req)
pypi_list = loads(response.read().decode())
last_pio_version = pypi_list['info']['version']
except (KeyError, HTTPError) as e:
return 0
return int(sub(r'\D', '', last_pio_version))
|
tab.py
|
from __future__ import absolute_import
from __future__ import unicode_literals
from webfriend.rpc import (
Base,
Browser,
Console,
DOM,
Emulation,
Input,
Network,
Overlay,
Page,
Reply,
Runtime,
Target,
)
import json
import time
from webfriend import exceptions
from webfriend.utils import patch_json # noqa
import websocket
import logging
from Queue import Queue, Empty, Full
from threading import Thread
ANY_KEY = 'ANY'
class Tab(object):
default_width = 0
default_height = 0
def __init__(
self,
browser,
description,
domains=None,
width=None,
height=None,
frame_id=None,
callbacks=True,
autoresize=True
):
if not isinstance(description, dict):
raise AttributeError("Tab descriptor must be a dict")
if 'webSocketDebuggerUrl' not in description:
raise AttributeError("Cannot operate on tab without a webSocketDebuggerUrl")
if width is None:
width = self.default_width
if height is None:
height = self.default_height
self.browser = browser
self.frame_id = frame_id
self.description = description
self.message_id = 0
self.socket = websocket.create_connection(self.wsurl)
self.waiters = {}
self.triggerqueue = Queue()
self.last_event_m = {}
self.last_event_t = {}
self._network_requests = {}
self.g_recv_ctl = Queue(1)
self.g_recv = Thread(target=self.receive_messages, args=(self.g_recv_ctl,))
self.replies = {}
self.initial_w = width
self.initial_h = height
self.msg_enable = False
self.netreq_tracking = True
self._trigger_worker = None
# setup and enable all the RPC domains we support
self.page = Page(self)
self.dom = DOM(self)
self.console = Console(self)
self.emulation = Emulation(self)
self.input = Input(self)
self.network = Network(self)
self.runtime = Runtime(self)
self.window = Browser(self)
self.overlay = Overlay(self)
self.target = Target(self)
# start the receive thread
self.g_recv.start()
for domain in self.rpc_domains:
domain.initialize()
# setup internal callbacks
if callbacks:
self.setup_callbacks()
# perform initial calls
if autoresize:
if self.initial_w or self.initial_h:
self.emulation.set_device_metrics_override(
width=self.initial_w,
height=self.initial_h,
)
@property
def url(self):
return self.description.get('url')
@property
def wsurl(self):
return self.description.get('webSocketDebuggerUrl')
@property
def rpc_domains(self):
instances = []
for k in dir(self):
if k == 'rpc_domains':
continue
attr = getattr(self, k)
if isinstance(attr, Base):
instances.append(attr)
return instances
def as_dict(self):
return {
'id': self.frame_id,
'url': self.url,
'webSocketDebuggerUrl': self.wsurl,
'target': (self.frame_id == self.browser.default_tab),
}
def enable_events(self):
for domain in self.rpc_domains:
domain.enable()
def enable_console_messages(self):
self.msg_enable = True
def disable_console_messages(self):
self.msg_enable = False
def enable_network_request_tracking(self):
self.netreq_tracking = True
def disable_network_request_tracking(self):
self.netreq_tracking = False
def stop(self):
if self.g_recv.is_alive():
logging.debug('Sending stop to receive thread')
self.g_recv_ctl.put(StopIteration)
self.socket.close()
while self.g_recv.is_alive():
logging.debug('Waiting for receive thread...')
time.sleep(1)
def send(self, data, expect_reply=True, reply_timeout=None, context=None):
if not isinstance(data, dict):
raise AttributeError("Data must be a dict")
if not reply_timeout:
reply_timeout = 10000
# increment and include message ID
self.message_id += 1
data['id'] = self.message_id
body = json.dumps(data)
try:
request_handle = {
'id': data['id'],
'reply': Queue(1),
}
self.replies[data['id']] = request_handle
# send the request to the Remote Debugger
logging.debug(' >> [{:04d}] {} {}'.format(
data['id'],
data['method'],
' '.join([
'{}={}'.format(k, v) for k, v in data.get('params', {}).items()
])
))
# send the request
self.socket.send(body)
# block until the receive loop says so
if expect_reply:
try:
reply, events = request_handle['reply'].get(timeout=(reply_timeout / 1e3))
except Empty:
raise exceptions.TimeoutError("Timed out waiting for reply to command '{}', id={}".format(
data['method'],
data['id']
))
# if there was an exception, raise it now
if isinstance(reply, Exception):
raise reply
# make sure the IDs match
if reply['id'] == data['id']:
return Reply(reply, request=data, events=events)
else:
raise exceptions.ProtocolError("Reply Message ID does not match Request Message ID")
else:
return None
finally:
del self.replies[data['id']]
def dispatch_event(self, message):
if message is StopIteration:
logging.info('Sending stop to trigger thread')
self.triggerqueue.put((None, None, StopIteration))
else:
domain, method = message.get('method').split('.', 1)
payload = message.get('params', {})
try:
proxy = self.get_domain_instance(domain)
except ValueError:
logging.exception('Unhandled Event Type')
return
self.triggerqueue.put((proxy, method, payload))
def trigger_worker(self):
while True:
proxy, method, payload = self.triggerqueue.get()
if payload is StopIteration:
logging.debug('Stopping trigger thread')
return
event = proxy.trigger(method, payload)
event_name = str(event)
if event:
logging.debug(' >> [ .. ] EVENT: {}'.format(
event
))
# record the current time as the last time we saw an event of this type
now = time.time()
self.last_event_m[str(event)] = now
self.last_event_t = now
# attempt to send this event to whoever is waiting for it
if event_name in self.waiters:
try:
self.waiters[event_name].put(event)
except Full:
pass
if ANY_KEY in self.waiters:
try:
self.waiters[ANY_KEY].put(event)
except Full:
pass
def dispatch_reply(self, request_id, message, events):
if request_id in self.replies:
self.replies[request_id]['reply'].put((message, events))
else:
logging.warning('Received message without a sender (id={})'.format(request_id))
def receive_messages(self, controlq):
self._trigger_worker = Thread(target=self.trigger_worker)
self._trigger_worker.start()
while True:
try:
try:
if controlq.get_nowait() is StopIteration:
    raise StopIteration  # a bare `raise` has no active exception to re-raise here
except Empty:
pass
message = self.receive()
if message is None:
continue
# print(json.dumps(message, indent=4))
if isinstance(message, Exception):
self.dispatch_reply(message.id, message, [])
elif 'id' in message:
self.dispatch_reply(message['id'], message, [])
else:
self.dispatch_event(message)
except (KeyboardInterrupt, StopIteration, websocket.WebSocketException) as e:
logging.debug('Fatal receive message: {}'.format(e))
break
self.dispatch_event(StopIteration)
logging.info('Stopping receive thread')
def receive(self, timeout=10):
message = self.socket.recv()
if message is not None:
body = json.loads(message)
exc = None
if 'error' in body:
if isinstance(body['error'], dict):
error = body['error']
message = error.get('message', 'Unknown Error')
if 'data' in error:
message += ' - {}'.format(error['data'])
exc = exceptions.ProtocolError(
'Protocol Error {}: {}'.format(error.get('code', -1), message)
)
else:
exc = exceptions.ProtocolError('Malformed Error Response')
if exc is not None:
exc.id = body.get('id')
return exc
return body
else:
return None
def wait_for_caller_response(self, event_name, timeout=30000):
"""
Yield events named **event_name** to the caller, blocking until such an event
is received or until **timeout** elapses (whichever comes first).
#### Arguments
- **event_name** (`str`):
The name of the event to wait for.
- **timeout** (`int`):
The timeout, in milliseconds, before raising a `webfriend.exceptions.TimeoutError`.
#### Returns
`webfriend.rpc.event.Event`
#### Raises
`webfriend.exceptions.TimeoutError`
"""
# get or create the async result for this event
if event_name not in self.waiters:
result = Queue(1)
self.waiters[event_name] = result
else:
result = self.waiters[event_name]
try:
event = result.get(timeout=(timeout / 1e3))
accepted = yield event
# generator received a response from the caller
if accepted is not None:
return
except Empty:
raise exceptions.TimeoutError("Timed out waiting for events")
finally:
del self.waiters[event_name]
def wait_for(self, event_name, **kwargs):
"""
Block until a specific event is received, or until **timeout** elapses (whichever comes first).
#### Arguments
- **event_name** (`str`):
The name of the event to wait for.
- **timeout** (`int`):
The timeout, in milliseconds, before raising a `webfriend.exceptions.TimeoutError`.
#### Returns
`webfriend.rpc.event.Event`
#### Raises
`webfriend.exceptions.TimeoutError`
"""
wfc = self.wait_for_caller_response(event_name, **kwargs)
started_at = time.time()
for event in wfc:
try:
wfc.send(True)
except StopIteration:
pass
return {
'sequence': [event],
'duration': (time.time() - started_at),
}
def wait_for_idle(self, idle, events=[], timeout=30000, poll_interval=250):
"""
Blocks for a specified amount of time _after_ an event has been received, or until
**timeout** elapses (whichever comes first).
This is useful for waiting for events to occur after performing an action, then giving some
amount of time for those events to "settle" (e.g.: allowing the page time to react to those
events without knowing ahead of time what, if any, listeners will be responding.) A common
use case for this would be to wait a few seconds _after_ a resize has occurred for anything
that just loaded to finish doing so.
#### Arguments
- **idle** (`int`):
The amount of time, in milliseconds, that the event stream should be idle before
returning.
- **events** (`list`, optional):
If not empty, the **idle** time will be interpreted as the amount of time since _any
of these specific events_ have occurred. The default is to wait for the browser to be
idle with respect to _any_ events.
- **timeout** (`int`):
The maximum amount of time to wait before raising a
`webfriend.exceptions.TimeoutError`.
- **poll_interval** (`int`):
How often to check the event timings to see if the idle time has elapsed.
#### Returns
An `int` representing the number of milliseconds we waited for.
#### Raises
`webfriend.exceptions.TimeoutError`
"""
started_at = time.time()
idle = (idle / 1e3)
# clear out the old timings we're interested in
if len(events):
for name in events:
if name in self.last_event_m:
del self.last_event_m[name]
while time.time() < (started_at + (timeout / 1e3)):
# if we don't have an event filter, then we just want to wait idle seconds
# after ANY event has been received
if not len(events):
# if the time since ANY event has met/exceeded our idle time
if time.time() >= (self.last_event_t + idle):
return (time.time() - started_at) * 1e3
else:
for name in events:
if name in self.last_event_m:
# if the time since this event has met/exceeded our idle time
if time.time() >= (self.last_event_m[name] + idle):
# now that we've gotten the event, remove it so subsequent calls wait
# for the next one to occur
del self.last_event_m[name]
return (time.time() - started_at) * 1e3
time.sleep(poll_interval / 1e3)
raise exceptions.TimeoutError("Timed out waiting for events to stop coming in")
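# --- Hedged usage sketch for wait_for_idle() (the event name is an assumption
# and depends on which RPC domains are enabled and what they actually emit):
#
#   # return once no 'Network.responseReceived' events have arrived for 2s,
#   # or raise webfriend.exceptions.TimeoutError after 30s
#   waited_ms = tab.wait_for_idle(2000, events=['Network.responseReceived'],
#                                 timeout=30000)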
def evaluate(self, *args, **kwargs):
return self.runtime.evaluate(*args, **kwargs)
def rpc(self, method, expect_reply=True, reply_timeout=None, context=None, **params):
payload = {
'method': method,
}
if len(params):
payload['params'] = params
return self.send(
payload,
expect_reply=expect_reply,
reply_timeout=reply_timeout,
context=context
)
def get_domain_instance(self, domain):
for attr in dir(self):
instance = getattr(self, attr)
if isinstance(instance, Base):
if instance.domain == domain:
return instance
raise ValueError("No such instance for domain '{}'".format(domain))
def on(self, event_pattern, callback):
domain, _ = event_pattern.split('.', 1)
instance = self.get_domain_instance(domain)
return instance.on(event_pattern, callback)
def remove_handler(self, callback_id):
domain, _ = callback_id.split('.', 1)
instance = self.get_domain_instance(domain)
return instance.remove_handler(callback_id)
def reset_network_request_cache(self):
self._network_requests = {}
def get_network_request(self, request_id):
return self._network_requests.get(request_id)
def setup_callbacks(self):
def on_net_pre_request(e):
self._network_requests[e.get('requestId')] = {
'id': e.get('requestId'),
'before': e,
}
def on_net_response_received(e):
nid = e.get('requestId')
if isinstance(self._network_requests.get(nid), dict):
self._network_requests[nid]['success'] = True
self._network_requests[nid]['completed'] = True
self._network_requests[nid]['response'] = e
def on_net_load_failed(e):
nid = e.get('requestId')
if isinstance(self._network_requests.get(nid), dict):
self._network_requests[nid]['success'] = False
self._network_requests[nid]['response'] = e
def on_message(e):
message = e.get('message', {})
level = message.get('level', 'log')
body = message.get('text', '').strip()
if isinstance(body, str):
body = body.encode('UTF-8')
if len(body):
lvlident = '--'
if level == 'info':
lvlident = 'II'
elif level == 'warning':
lvlident = 'WW'
elif level == 'error':
lvlident = 'EE'
elif level == 'debug':
lvlident = 'DD'
logging.info('[{}] {}'.format(lvlident, body))
if self.netreq_tracking:
self.network.on('requestWillBeSent', on_net_pre_request)
self.network.on('responseReceived', on_net_response_received)
self.network.on('loadingFailed', on_net_load_failed)
logging.debug('Network request tracking is enabled')
else:
logging.debug('Network request tracking is disabled')
if self.msg_enable:
self.console.on('messageAdded', on_message)
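# --- Hedged illustration (not part of webfriend): the request/reply correlation
# pattern used by Tab.send() and Tab.dispatch_reply() above, reduced to a
# standalone sketch. A Queue(1) is registered per message id; the receive thread
# drops the reply into that queue and the sender blocks on it with a timeout.
def _example_reply_correlation():
    replies = {}

    def fake_receive_thread(msg_id):
        # stands in for receive_messages() routing a reply back by its id
        replies[msg_id]['reply'].put(({'id': msg_id, 'result': {}}, []))

    msg_id = 1
    replies[msg_id] = {'id': msg_id, 'reply': Queue(1)}
    Thread(target=fake_receive_thread, args=(msg_id,)).start()
    try:
        reply, events = replies[msg_id]['reply'].get(timeout=5)
        return reply
    finally:
        del replies[msg_id]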
|
release.py
|
#!/usr/bin/python
import re
import sys
import os
import os.path
import subprocess
import shutil
import tempfile
from datetime import *
from multiprocessing import Process
from utils import *
try:
from xml.etree.ElementTree import ElementTree
except:
prettyprint('''
Welcome to the Infinispan Release Script.
This release script requires that you use at least Python 2.5.0. It appears
that you do not have the ElementTree XML APIs available, which are available
by default in Python 2.5.0.
''', Levels.FATAL)
sys.exit(1)
modules = []
uploader = None
git = None
def help_and_exit():
prettyprint('''
Welcome to the Infinispan Release Script.
%s Usage:%s
$ bin/release.py <version> <branch to tag from> <--mvn-only>
%s E.g.,%s
$ bin/release.py 6.1.1.Beta1 %s<-- this will tag off master.%s
$ bin/release.py 6.1.1.Beta1 6.1.x %s<-- this will use the appropriate branch.%s
$ bin/release.py 6.1.1.Beta1 6.1.x --mvn-only %s<-- this will only tag and release to maven (no distribution).%s
''' % (Colors.yellow(), Colors.end_color(), Colors.yellow(), Colors.end_color(), Colors.green(), Colors.end_color(), Colors.green(), Colors.end_color(), Colors.green(), Colors.end_color()), Levels.INFO)
sys.exit(0)
def validate_version(version):
version_pattern = get_version_pattern()
if version_pattern.match(version):
return version.strip()
else:
prettyprint("Invalid version '"+version+"'!\n", Levels.FATAL)
help_and_exit()
def tag_release(version, branch):
if git.remote_branch_exists():
git.switch_to_branch()
git.create_tag_branch()
else:
prettyprint("Branch %s cannot be found on upstream repository. Aborting!" % branch, Levels.FATAL)
sys.exit(100)
def get_project_version_tag(tree):
return tree.find("./{%s}version" % (maven_pom_xml_namespace))
def get_parent_version_tag(tree):
return tree.find("./{%s}parent/{%s}version" % (maven_pom_xml_namespace, maven_pom_xml_namespace))
def get_properties_version_tag(tree):
return tree.find("./{%s}properties/{%s}project-version" % (maven_pom_xml_namespace, maven_pom_xml_namespace))
def write_pom(tree, pom_file):
tree.write("tmp.xml", 'UTF-8')
in_f = open("tmp.xml")
out_f = open(pom_file, "w")
try:
for l in in_f:
newstr = l.replace("ns0:", "").replace(":ns0", "").replace("ns1", "xsi")
out_f.write(newstr)
finally:
in_f.close()
out_f.close()
os.remove("tmp.xml")
if settings['verbose']:
prettyprint(" ... updated %s" % pom_file, Levels.INFO)
def patch(pom_file, version):
'''Updates the version in a POM file. We need to locate //project/parent/version, //project/version and
//project/properties/project-version and replace the contents of these with the new version'''
if settings['verbose']:
prettyprint("Patching %s" % pom_file, Levels.DEBUG)
tree = ElementTree()
tree.parse(pom_file)
need_to_write = False
tags = []
tags.append(get_parent_version_tag(tree))
tags.append(get_project_version_tag(tree))
tags.append(get_properties_version_tag(tree))
for tag in tags:
if tag != None and "-SNAPSHOT" in tag.text:
if settings['verbose']:
prettyprint("%s is %s. Setting to %s" % (str(tag), tag.text, version), Levels.DEBUG)
tag.text=version
need_to_write = True
if need_to_write:
# write to file again!
write_pom(tree, pom_file)
return True
else:
if settings['verbose']:
prettyprint("File doesn't need updating; nothing replaced!", Levels.DEBUG)
return False
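# --- Hedged sketch (not part of the release flow): how patch() locates the three
# version elements in a Maven POM. `maven_pom_xml_namespace` comes from utils
# (imported above via `from utils import *`); the literal namespace below is the
# standard Maven POM namespace and is only assumed to match that value.
def _example_find_version_tags(pom_file):
    ns = 'http://maven.apache.org/POM/4.0.0'  # assumed value of maven_pom_xml_namespace
    tree = ElementTree()
    tree.parse(pom_file)
    return [
        tree.find("./{%s}version" % ns),                    # <project><version>
        tree.find("./{%s}parent/{%s}version" % (ns, ns)),   # <project><parent><version>
        tree.find("./{%s}properties/{%s}project-version" % (ns, ns)),
    ]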
def get_poms_to_patch(working_dir):
poms_to_patch = [working_dir + "/pom.xml"]
return poms_to_patch
def update_versions(base_dir, version):
os.chdir(base_dir)
poms_to_patch = get_poms_to_patch(".")
modified_files = []
for pom in poms_to_patch:
if patch(pom, version):
modified_files.append(pom)
pieces = re.compile('[\.\-]').split(version)
snapshot = pieces[3]=='SNAPSHOT'
final = pieces[3]=='Final'
# Now make sure this goes back into the repository.
git.commit(modified_files, "'Release Script: update versions for %s'" % version)
# And return the next version
if final:
return pieces[0] + '.' + pieces[1] + '.' + str(int(pieces[2])+ 1) + '-SNAPSHOT'
else:
return None
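# --- Worked example of the version bump above: "6.1.1.Final" splits on [.-] into
# ['6', '1', '1', 'Final'], so the next development version returned is
# '6.1.2-SNAPSHOT'; for any non-Final qualifier (e.g. '6.1.1.Beta1') None is
# returned and the branch keeps its current version.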
def get_module_name(pom_file):
tree = ElementTree()
tree.parse(pom_file)
return tree.findtext("./{%s}artifactId" % maven_pom_xml_namespace)
def do_task(target, args, async_processes):
if settings['multi_threaded']:
async_processes.append(Process(target = target, args = args))
else:
target(*args)
### This is the starting place for this script.
def release():
global settings
global uploader
global git
assert_python_minimum_version(2, 5)
require_settings_file()
# We start by determining whether the version passed in is a valid one
if len(sys.argv) < 2:
help_and_exit()
base_dir = os.getcwd()
version = validate_version(sys.argv[1])
branch = "master"
mvn_only = False
if len(sys.argv) > 2:
if sys.argv[2].startswith("--mvn-only"):
mvn_only = True
else:
branch = sys.argv[2]
if len(sys.argv) > 3:
if sys.argv[3].startswith("--mvn-only"):
mvn_only = True
else:
prettyprint("Unknown argument %s" % sys.argv[3], Levels.WARNING)
help_and_exit()
prettyprint("Releasing Infinispan Redis CacheStore version %s from branch '%s'" % (version, branch), Levels.INFO)
sure = input_with_default("Are you sure you want to continue?", "N")
if not sure.upper().startswith("Y"):
prettyprint("... User Abort!", Levels.WARNING)
sys.exit(1)
prettyprint("OK, releasing! Please stand by ...", Levels.INFO)
## Set up network interactive tools
if settings['dry_run']:
# Use stubs
prettyprint("*** This is a DRY RUN. No changes will be committed. Used to test this release script only. ***", Levels.DEBUG)
prettyprint("Your settings are %s" % settings, Levels.DEBUG)
uploader = DryRunUploader()
else:
uploader = Uploader()
git = Git(branch, version)
if not git.is_upstream_clone():
proceed = input_with_default('This is not a clone of an %supstream%s Infinispan Redis CacheStore repository! Are you sure you want to proceed?' % (Colors.UNDERLINE, Colors.END), 'N')
if not proceed.upper().startswith('Y'):
prettyprint("... User Abort!", Levels.WARNING)
sys.exit(1)
## Make sure we don't include un-needed content in the release
prettyprint("Step 1: Cleaning up working directory (un-tracked and modified files)", Levels.INFO)
git.clean_release_directory()
prettyprint("Step 1: Complete", Levels.INFO)
## Release order:
# Step 1: Tag in Git
prettyprint("Step 2: Tagging %s in git as %s" % (branch, version), Levels.INFO)
tag_release(version, branch)
prettyprint("Step 2: Complete", Levels.INFO)
# Step 2: Update version in tagged files
prettyprint("Step 3: Updating version number in source files", Levels.INFO)
version_next = update_versions(base_dir, version)
prettyprint("Step 3: Complete", Levels.INFO)
# Step 3: Build and test in Maven2
prettyprint("Step 4: Build and test in Maven", Levels.INFO)
maven_build_distribution(version)
prettyprint("Step 4: Complete", Levels.INFO)
## Tag the release
git.tag_for_release()
step_no=5
# Switch back to the branch being released
git.switch_to_branch()
# Update to next version
if version_next is not None:
prettyprint("Step %s: Updating version number for next release" % step_no, Levels.INFO)
update_versions(base_dir, version_next)
prettyprint("Step %s: Complete" % step_no, Levels.INFO)
if not settings['dry_run']:
git.push_tag_to_origin()
if version_next is not None:
git.push_branch_to_origin()
git.cleanup()
else:
prettyprint("In dry-run mode. Not pushing tag to remote origin and not removing temp release branch %s." % git.working_branch, Levels.DEBUG)
prettyprint("\n\n\nDone!", Levels.INFO)
if __name__ == "__main__":
release()
|
sample_spreadsheetServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from sample_spreadsheet.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'sample_spreadsheet'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from sample_spreadsheet.sample_spreadsheetImpl import sample_spreadsheet # noqa @IgnorePep8
impl_sample_spreadsheet = sample_spreadsheet(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'sample_spreadsheet'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_sample_spreadsheet.run_sample_spreadsheet,
name='sample_spreadsheet.run_sample_spreadsheet',
types=[dict])
self.method_authentication['sample_spreadsheet.run_sample_spreadsheet'] = 'required' # noqa
self.rpc_service.add(impl_sample_spreadsheet.status,
name='sample_spreadsheet.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'sample_spreadsheet ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system-assigned port
in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
also allows the port number to be returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
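# --- Hedged usage sketch (not generated KBase code): calling this service over
# HTTP once it is listening, e.g. after start_server(port=9999). The URL and the
# use of the unauthenticated 'status' method are assumptions for illustration.
def _example_client_status_call(url='http://localhost:9999'):
    payload = {
        'version': '1.1',
        'id': str(_random.random())[2:],
        'method': 'sample_spreadsheet.status',
        'params': [],
    }
    resp = _requests.post(url, data=json.dumps(payload), timeout=60)
    resp.raise_for_status()
    return resp.json()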
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
example_binance_jex.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: example_binance_jex.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api
# Documentation: https://lucit-systems-and-development.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: LUCIT Systems and Development
#
# Copyright (c) 2019-2022, LUCIT Systems and Development (https://www.lucit.tech) and Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.manager import BinanceWebSocketApiManager
import logging
import time
import threading
import os
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
while True:
if binance_websocket_api_manager.is_manager_stopping():
exit(0)
oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
if oldest_stream_data_from_stream_buffer is False:
    time.sleep(0.01)
else:
    print(oldest_stream_data_from_stream_buffer)
logging.getLogger("unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager")
logging.basicConfig(level=logging.DEBUG,
filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
# create instance of BinanceWebSocketApiManager for Binance JEX (exchange "jex.com")
binance_websocket_api_manager = BinanceWebSocketApiManager(exchange="jex.com")
# set api key and secret for userData stream
userdata_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!userData"], api_key="aaa", api_secret="bb")
omt_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!optionMiniTicker"])
smt_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!spotMiniTicker"])
st_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!spotTicker"])
spot_markets = {'eosbtc', 'ltcbtc', 'ethbtc', 'dashbtc'}
spot_channels = {'spotTrade', 'spotMiniTicker', 'spotDepth20', 'spotDepthUpdate', 'spotTicker'}
binance_websocket_api_manager.create_stream(["spotTrade"], spot_markets)
binance_websocket_api_manager.create_stream(["spotDepth10"], spot_markets)
binance_websocket_api_manager.create_stream(["spotDepth20"], spot_markets)
binance_websocket_api_manager.create_stream(spot_channels, spot_markets)
# start a worker process to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()
# show an overview
while True:
binance_websocket_api_manager.print_summary()
#binance_websocket_api_manager.print_stream_info(userdata_stream_id)
time.sleep(1)
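# --- Hedged note (the method name is taken from the unicorn-binance-websocket-api
# documentation and should be treated as an assumption): to end this example
# cleanly, wrap the loop above and stop the manager on Ctrl+C, e.g.:
#
#   try:
#       while True:
#           binance_websocket_api_manager.print_summary()
#           time.sleep(1)
#   except KeyboardInterrupt:
#       binance_websocket_api_manager.stop_manager_with_all_streams()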
|
threading.py
|
"""A threading based handler.
The :class:`SequentialThreadingHandler` is intended for regular Python
environments that use threads.
.. warning::
Do not use :class:`SequentialThreadingHandler` with applications
using asynchronous event loops (like gevent). Use the
:class:`~kazoo.handlers.gevent.SequentialGeventHandler` instead.
"""
from __future__ import absolute_import
from collections import defaultdict
import errno
from itertools import chain
import logging
import select
import socket
import threading
import time
import six
import kazoo.python2atexit as python2atexit
from kazoo.handlers import utils
try:
import Queue
except ImportError: # pragma: nocover
import queue as Queue
# sentinel objects
_STOP = object()
log = logging.getLogger(__name__)
_HAS_EPOLL = hasattr(select, "epoll")
def _to_fileno(obj):
if isinstance(obj, six.integer_types):
fd = int(obj)
elif hasattr(obj, "fileno"):
fd = obj.fileno()
if not isinstance(fd, six.integer_types):
raise TypeError("fileno() returned a non-integer")
fd = int(fd)
else:
raise TypeError("argument must be an int, or have a fileno() method.")
if fd < 0:
raise ValueError(
"file descriptor cannot be a negative integer (%d)" % (fd,)
)
return fd
class KazooTimeoutError(Exception):
pass
class AsyncResult(utils.AsyncResult):
"""A one-time event that stores a value or an exception"""
def __init__(self, handler):
super(AsyncResult, self).__init__(handler,
threading.Condition,
KazooTimeoutError)
class SequentialThreadingHandler(object):
"""Threading handler for sequentially executing callbacks.
This handler executes callbacks in a sequential manner. A queue is
created for each of the callback events, so that each type of event
has its callback type run sequentially. These are split into two
queues, one for watch events and one for async result completion
callbacks.
Each queue type has a thread worker that pulls the callback event
off the queue and runs it in the order the client sees it.
This split helps ensure that watch callbacks won't block session
re-establishment should the connection be lost during a Zookeeper
client call.
Watch and completion callbacks should avoid blocking behavior as
the next callback of that type won't be run until it completes. If
you need to block, spawn a new thread and return immediately so
callbacks can proceed.
.. note::
Completion callbacks can block to wait on Zookeeper calls, but
no other completion callbacks will execute until the callback
returns.
"""
name = "sequential_threading_handler"
timeout_exception = KazooTimeoutError
sleep_func = staticmethod(time.sleep)
queue_impl = Queue.Queue
queue_empty = Queue.Empty
def __init__(self):
"""Create a :class:`SequentialThreadingHandler` instance"""
self.callback_queue = self.queue_impl()
self.completion_queue = self.queue_impl()
self._running = False
self._state_change = threading.Lock()
self._workers = []
@property
def running(self):
return self._running
def _create_thread_worker(self, queue):
def _thread_worker(): # pragma: nocover
while True:
try:
func = queue.get()
try:
if func is _STOP:
break
func()
except Exception:
log.exception("Exception in worker queue thread")
finally:
queue.task_done()
del func # release before possible idle
except self.queue_empty:
continue
t = self.spawn(_thread_worker)
return t
def start(self):
"""Start the worker threads."""
with self._state_change:
if self._running:
return
# Spawn our worker threads, we have
# - A callback worker for watch events to be called
# - A completion worker for completion events to be called
for queue in (self.completion_queue, self.callback_queue):
w = self._create_thread_worker(queue)
self._workers.append(w)
self._running = True
python2atexit.register(self.stop)
def stop(self):
"""Stop the worker threads and empty all queues."""
with self._state_change:
if not self._running:
return
self._running = False
for queue in (self.completion_queue, self.callback_queue):
queue.put(_STOP)
self._workers.reverse()
while self._workers:
worker = self._workers.pop()
worker.join()
# Clear the queues
self.callback_queue = self.queue_impl()
self.completion_queue = self.queue_impl()
python2atexit.unregister(self.stop)
def select(self, *args, **kwargs):
# if we have epoll, and select is not expected to work
# use an epoll-based "select". Otherwise don't touch
# anything to minimize changes
if _HAS_EPOLL:
# if the highest fd we've seen is > 1023
if max(map(_to_fileno, chain.from_iterable(args[:3]))) > 1023:
return self._epoll_select(*args, **kwargs)
return self._select(*args, **kwargs)
def _select(self, *args, **kwargs):
timeout = kwargs.pop('timeout', None)
# either the time to give up, or None
end = (time.time() + timeout) if timeout else None
while end is None or time.time() < end:
if end is not None:
# make a list, since tuples aren't mutable
args = list(args)
# set the timeout to the remaining time
args[3] = end - time.time()
try:
return select.select(*args, **kwargs)
except select.error as ex:
# if the system call was interrupted, we'll retry until timeout
# in Python 3, system call interruptions are a native exception
# in Python 2, they are not
errnum = ex.errno if isinstance(ex, OSError) else ex[0]
if errnum == errno.EINTR:
continue
raise
# if we hit our timeout, lets return as a timeout
return ([], [], [])
def _epoll_select(self, rlist, wlist, xlist, timeout=None):
"""epoll-based drop-in replacement for select to overcome select
limitation on a maximum filehandle value
"""
if timeout is None:
timeout = -1
eventmasks = defaultdict(int)
rfd2obj = defaultdict(list)
wfd2obj = defaultdict(list)
xfd2obj = defaultdict(list)
read_evmask = select.EPOLLIN | select.EPOLLPRI # Just in case
def store_evmasks(obj_list, evmask, fd2obj):
for obj in obj_list:
fileno = _to_fileno(obj)
eventmasks[fileno] |= evmask
fd2obj[fileno].append(obj)
store_evmasks(rlist, read_evmask, rfd2obj)
store_evmasks(wlist, select.EPOLLOUT, wfd2obj)
store_evmasks(xlist, select.EPOLLERR, xfd2obj)
poller = select.epoll()
for fileno in eventmasks:
poller.register(fileno, eventmasks[fileno])
try:
events = poller.poll(timeout)
revents = []
wevents = []
xevents = []
for fileno, event in events:
if event & read_evmask:
revents += rfd2obj.get(fileno, [])
if event & select.EPOLLOUT:
wevents += wfd2obj.get(fileno, [])
if event & select.EPOLLERR:
xevents += xfd2obj.get(fileno, [])
finally:
poller.close()
return revents, wevents, xevents
def socket(self):
return utils.create_tcp_socket(socket)
def create_connection(self, *args, **kwargs):
return utils.create_tcp_connection(socket, *args, **kwargs)
def create_socket_pair(self):
return utils.create_socket_pair(socket)
def event_object(self):
"""Create an appropriate Event object"""
return threading.Event()
def lock_object(self):
"""Create a lock object"""
return threading.Lock()
def rlock_object(self):
"""Create an appropriate RLock object"""
return threading.RLock()
def async_result(self):
"""Create a :class:`AsyncResult` instance"""
return AsyncResult(self)
def spawn(self, func, *args, **kwargs):
t = threading.Thread(target=func, args=args, kwargs=kwargs)
t.daemon = True
t.start()
return t
def dispatch_callback(self, callback):
"""Dispatch to the callback object
The callback is put on separate queues to run depending on the
type as documented for the :class:`SequentialThreadingHandler`.
"""
self.callback_queue.put(lambda: callback.func(*callback.args))
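# --- Hedged usage sketch (not part of kazoo): driving the handler directly.
# AsyncResult.set()/get() behave like their gevent counterparts via
# kazoo.handlers.utils.AsyncResult; treat the exact semantics as an assumption.
def _example_handler_usage():
    handler = SequentialThreadingHandler()
    handler.start()
    try:
        result = handler.async_result()
        # run work on a daemon thread and hand the value back through the result
        handler.spawn(lambda: result.set(6 * 7))
        return result.get(timeout=5)  # -> 42
    finally:
        handler.stop()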
|
mydealz.py
|
#!/usr/bin/python
# coding=utf-8
'''
The MIT License (MIT)
Copyright (c) 2015 Roy Freytag
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import datetime
import json
import os
import re
import requests
import sys
import telebot
import threading
import time
import traceback
from bs4 import BeautifulSoup as bs
from contextlib import suppress
from colorama import init, Fore, Back, Style
from emoji import emojize
#from pyshorteners import Shortener
from threading import Thread
# Emoji definitions
wave = emojize(":wave:", use_aliases=True)
hot = emojize(":fire:", use_aliases=True)
free = emojize(":free:", use_aliases=True)
wish = emojize(":star:", use_aliases=True)
# Basic stuff
os.chdir(os.path.dirname(os.path.realpath(__file__)))
init(autoreset=True) # Colorama
#shortener = Shortener("Isgd")
header = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36 OPR/55.0.2994.61"}
# Get settings from file
def get_settings():
global debug_mode; global short_url; global telegram
global sleep_time; global tg_token; global tg_token_priority
global tg_cid; global tg_cid2
debug_mode = 0
short_url = 0
telegram = 0
settings = {}
exec(open("./settings.txt").read(), None, settings)
if settings["debug_mode"]:
debug_mode = 1
if settings["short_url"]:
short_url = 1
if settings["telegram"]:
telegram = 1
sleep_time = settings["sleep_time"]
tg_token = settings["tg_token"]
tg_token_priority = settings["tg_token_priority"]
tg_cid = settings["tg_cid"]
tg_cid2 = settings["tg_cid2"]
get_settings()
# Debug mode
def debug(text):
if debug_mode:
print(Fore.YELLOW + "DEBUG: " + text)
return 0
# Get already found deals from file
def get_found():
global found_deals; global found_deals2
found_deals = [line.rstrip("\n") for line in open ("./found_{}.txt".format(tg_cid))]
found_deals2 = [line.rstrip("\n") for line in open ("./found_{}.txt".format(tg_cid2))]
# Get wanted articles from file
def get_wanted():
global wanted_articles; global wanted_articles2
wanted_articles = [line.rstrip("\n") for line in open ("./wanted_{}.txt".format(tg_cid))]
print(Fore.CYAN + "User 1: Suche nach Deals fuer: " + str(wanted_articles).replace("[", "").replace("]", ""))
wanted_articles2 = [line.rstrip("\n") for line in open ("./wanted_{}.txt".format(tg_cid2))]
print(Fore.CYAN + "User 2: Suche nach Deals fuer: " + str(wanted_articles2).replace("[", "").replace("]", ""))
# Link processing
def process_link(link):
#try:
#proc_link = shortener.short(link)
#except:
# print("Shortener-Service nicht erreichbar. Verwende vollen Link.")
#proc_link = link
return link
# Telegram bot
bot = telebot.TeleBot(tg_token)
bot_priority = telebot.TeleBot(tg_token_priority)
@bot.message_handler(commands=["hello"])
def hello(msg):
cid = msg.chat.id
bot.send_message(cid, "Hi! " + wave + " Ich bin noch da, keine Sorge.")
@bot.message_handler(commands=["add"])
def add_item(msg):
cid = msg.chat.id
with open("./wanted_{}.txt".format(cid), "a") as f:
f.write(msg.text.replace("/add ", "") + "\n")
bot.send_message(cid, "Schlagwort wurde der Liste hinzugefügt.")
@bot.message_handler(commands=["remove"])
def remove_item(msg):
cid = msg.chat.id
with open("./wanted_{}.txt".format(cid), "r") as list:
lines = list.readlines()
with open("./wanted_{}.txt".format(cid), "w") as remove:
for line in lines:
if line.lower() != (msg.text.replace("/remove ", "") + "\n").lower():
remove.write(line)
bot.send_message(cid, "Schlagwort wurde von der Liste entfernt.")
@bot.message_handler(commands=["reset"])
def reset_found(msg):
cid = msg.chat.id
open("./found_{}.txt".format(cid), "w").close()
bot.send_message(cid, "Liste der gefundenen Deals wurde geleert.")
get_found()
@bot.message_handler(commands=["list"])
def list_items(msg):
cid = msg.chat.id
with open("./wanted_{}.txt".format(cid), "r") as list:
lines = list.readlines()
bot.send_message(cid, "Suche nach Deals für: " + str(lines).replace("[", "").replace("]", "")) # fix \n
def telegram_bot():
while True:
try:
bot.polling(none_stop=True)
except:
debug(traceback.format_exc())
time.sleep(5)
# Scraping routine
def scrape(url, type):
try:
#debug("Scraping " + type + " deals")
site = requests.get(url, headers=header, timeout=20)
soup = bs(site.content, "lxml")
debug("Request completed")
listings = soup.find_all("article", {"id":re.compile("thread_.*")})
if listings is None:
print("Keine Listings gefunden. Seite geändert?")
for thread in listings:
info = thread.find("a", class_="cept-tt thread-link linkPlain thread-title--list")
dealid = thread.attrs["id"]
if dealid in found_deals:
debug("Deal already found " + dealid)
continue
title = info.string.strip()
link = info.get("href")
if short_url:
proc_link = process_link(link)
else:
proc_link = link
# print("[" + type + "] %s: %s" % (re.sub(r"[^\x00-\x7F]+"," ", title), proc_link))
print("[" + "] %s: %s" % (re.sub(r"[^\x00-\x7F]+"," ", title), proc_link))
if telegram:
emoji = free
if type == hot:
emoji = hot
bot.send_message(tg_cid, emoji + " %s: %s" % (title, proc_link), disable_web_page_preview=True)
time.sleep(5)
bot.send_message(tg_cid2, emoji + " %s: %s" % (title, proc_link), disable_web_page_preview=True)
with open("./found_{}.txt".format(tg_cid), "a") as found:
found.write(dealid + "\n")
get_found()
time.sleep(4)
#debug("Scraping " + type + " deals complete")
except:
debug(traceback.format_exc())
time.sleep(60)
# User wanted scraping routine
def scrape_wanted(tg_cid, found_deals, articles, wanted_articles):
for wanted_item in wanted_articles:
deals = articles.find_all("a", string=re.compile("(?i).*("+wanted_item+").*"), class_="cept-tt thread-link linkPlain thread-title--list")
for thread in deals:
dealid = articles.attrs["id"]
if dealid in found_deals:
debug("Deal already found " + dealid)
continue
title = thread.string.strip()
link = thread.get("href")
if short_url:
proc_link = process_link(link)
else:
proc_link = link
print("[WANT] %s: %s" % (re.sub(r"[^\x00-\x7F]+"," ", title), proc_link))
if telegram:
bot_priority.send_message(tg_cid, wish + " %s: %s" % (title, proc_link), disable_web_page_preview=True)
with open("./found_{}.txt".format(tg_cid), "a") as found:
found.write(dealid + "\n")
get_found()
time.sleep(4)
# Hottest deals scraping routine
def scrape_hottest():
try:
debug("Fetching json for hottest deals")
json_url = requests.get("https://www.mydealz.de/widget/hottest?selectedRange=day&threadTypeTranslated=&merchant_name=&merchant_id=&eventId=&groupName=&context=listing", headers=header, timeout=20)
json_data = json_url.json()
debug("Request completed")
for thread in json_data["data"]["threads"]:
title = thread["title"].strip()
link = thread["url"]
if short_url:
proc_link = process_link(link)
else:
proc_link = link
dealid = "hot_" + str(thread["id"])
if dealid in found_deals:
debug("Deal already found " + dealid)
continue
print("[" + "] %s: %s" % (re.sub(r"[^\x00-\x7F]+"," ", title), proc_link))
if telegram:
bot_priority.send_message(tg_cid, hot + " %s: %s" % (title, proc_link), disable_web_page_preview=True)
bot_priority.send_message(tg_cid2, hot + " %s: %s" % (title, proc_link), disable_web_page_preview=True)
time.sleep(5)
with open("./found_{}.txt".format(tg_cid), "a") as found:
found.write(dealid + "\n")
get_found()
time.sleep(4)
debug("Processing hottest deals complete")
except:
debug(traceback.format_exc())
time.sleep(60)
# MyDealz scraper
def mydealz_scraper():
while True:
# Wanted scraper
try:
debug("Scraping for wanted items")
site = requests.get("https://www.mydealz.de/new?page=1", headers=header, timeout=20)
soup = bs(site.content, "lxml")
debug("Request completed")
listings = soup.find_all("article", {"id":re.compile("thread_.*")})
            if not listings:
                print("Keine Listings gefunden. Seite geändert?")
for articles in listings:
scrape_wanted(tg_cid, found_deals, articles, wanted_articles)
scrape_wanted(tg_cid2, found_deals2, articles, wanted_articles2)
debug("Scraping for wanted items complete")
except:
debug(traceback.format_exc())
time.sleep(60)
# Hottest today scraper
#scrape_hottest()
# Hot deals scraper
#scrape("https://www.mydealz.de/hot?page=1", hot)
# Freebie scraper
#scrape("https://www.mydealz.de/gruppe/freebies-new?page=1", free)
debug("Now sleeping until next cycle")
time.sleep(sleep_time)
if __name__=="__main__":
# Check for required files
with suppress(Exception):
open("./wanted_{}.txt".format(tg_cid), "x")
with suppress(Exception):
open("./found_{}.txt".format(tg_cid), "x")
with suppress(Exception):
open("./wanted_{}.txt".format(tg_cid2), "x")
with suppress(Exception):
open("./found_{}.txt".format(tg_cid2), "x")
# Initial fetch
get_wanted()
get_found()
Thread(target = telegram_bot).start()
Thread(target = mydealz_scraper).start()
|
TextSharding.py
|
# coding=utf-8
# Copyright 2021 Intel Corporation. All rights reserved.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from itertools import islice
import multiprocessing
import statistics
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class Sharding:
def __init__(
self, input_files, output_name_prefix, n_training_shards, n_test_shards, fraction_test_set, segment
):
assert len(input_files) > 0, "The input file list must contain at least one file."
assert n_training_shards > 0, "There must be at least one output shard."
assert n_test_shards > 0, "There must be at least one output shard."
self.n_training_shards = n_training_shards
self.n_test_shards = n_test_shards
self.fraction_test_set = fraction_test_set
self.segment = segment
self.input_files = input_files
self.output_name_prefix = output_name_prefix
self.output_training_identifier = "training"
self.output_test_identifier = "test"
self.output_file_extension = ".txt"
self.articles = {} # key: integer identifier, value: list of articles
self.sentences = {} # key: integer identifier, value: list of sentences
self.output_training_files = {} # key: filename, value: list of articles to go into file
self.output_test_files = {} # key: filename, value: list of articles to go into file
self.init_output_files()
# Remember, the input files contain one article per line (the whitespace check is to skip extraneous blank lines)
def load_articles(self):
logger.info("Start: Loading Articles")
global_article_count = 0
for input_file in self.input_files:
logger.info(f"input file: {input_file}")
with open(input_file, mode="r", newline="\n") as f:
for i, line in enumerate(f):
if line.strip():
self.articles[global_article_count] = line.rstrip()
global_article_count += 1
logger.info(f"End: Loading Articles: There are {len(self.articles)} articles.")
def segment_articles_into_sentences(self, segmenter):
        if len(self.articles) == 0:
            self.load_articles()
        assert (
            len(self.articles) != 0
        ), "Please check that input files are present and contain data."
if self.segment:
logger.info("Start: Sentence Segmentation")
# TODO: WIP: multiprocessing (create independent ranges and spawn processes)
use_multiprocessing = "serial"
def chunks(data, size=len(self.articles)):
it = iter(data)
for i in range(0, len(data), size):
yield {k: data[k] for k in islice(it, size)}
if use_multiprocessing == "manager":
manager = multiprocessing.Manager()
return_dict = manager.dict()
jobs = []
n_processes = 7 # in addition to the main process, total = n_proc+1
def work(articles, return_dict):
sentences = {}
for i, article in enumerate(articles):
sentences[i] = segmenter.segment_string(articles[article])
if i % 5000 == 0:
logger.info(f"Segmenting article {i}")
return_dict.update(sentences)
for item in chunks(self.articles, len(self.articles)):
p = multiprocessing.Process(target=work, args=(item, return_dict))
# Busy wait
while len(jobs) >= n_processes:
pass
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
elif use_multiprocessing == "queue":
work_queue = multiprocessing.Queue()
jobs = []
for item in chunks(self.articles, len(self.articles)):
pass
else: # serial option
for i, article in enumerate(self.articles):
self.sentences[i] = segmenter.segment_string(self.articles[article])
if i % 5000 == 0:
logger.info(f"Segmenting article {i}")
else:
logger.info('Not segmenting')
for i, article in enumerate(self.articles):
self.sentences[i] = [self.articles[article]]
logger.info("End: Sentence Segmentation")
def init_output_files(self):
logger.info("Start: Init Output Files")
        assert (
            len(self.output_training_files) == 0
        ), "Internal storage self.output_training_files already contains data. This function is intended to be used by the constructor only."
        assert (
            len(self.output_test_files) == 0
        ), "Internal storage self.output_test_files already contains data. This function is intended to be used by the constructor only."
for i in range(self.n_training_shards):
name = (
self.output_name_prefix
+ self.output_training_identifier
+ str(i)
+ self.output_file_extension
)
self.output_training_files[name] = []
for i in range(self.n_test_shards):
name = (
self.output_name_prefix
+ self.output_test_identifier
+ str(i)
+ self.output_file_extension
)
self.output_test_files[name] = []
logger.info("End: Init Output Files")
def get_sentences_per_shard(self, shard):
result = 0
for article_id in shard:
result += len(self.sentences[article_id])
return result
def distribute_articles_over_shards(self):
logger.info("Start: Distribute Articles Over Shards")
assert (
len(self.articles) >= self.n_training_shards + self.n_test_shards
), "There are fewer articles than shards. Please add more data or reduce the number of shards requested."
# Create dictionary with - key: sentence count per article, value: article id number
sentence_counts = defaultdict(lambda: [])
max_sentences = 0
total_sentences = 0
for article_id in self.sentences:
current_length = len(self.sentences[article_id])
sentence_counts[current_length].append(article_id)
max_sentences = max(max_sentences, current_length)
total_sentences += current_length
n_sentences_assigned_to_training = int((1 - self.fraction_test_set) * total_sentences)
nominal_sentences_per_training_shard = (
n_sentences_assigned_to_training // self.n_training_shards
)
nominal_sentences_per_test_shard = (
total_sentences - n_sentences_assigned_to_training
) // self.n_test_shards
consumed_article_set = set({})
unused_article_set = set(self.articles.keys())
# Make first pass and add one article worth of lines per file
for file in self.output_training_files:
current_article_id = sentence_counts[max_sentences][-1]
sentence_counts[max_sentences].pop(-1)
self.output_training_files[file].append(current_article_id)
consumed_article_set.add(current_article_id)
unused_article_set.remove(current_article_id)
# Maintain the max sentence count
while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0:
max_sentences -= 1
if len(self.sentences[current_article_id]) > nominal_sentences_per_training_shard:
nominal_sentences_per_training_shard = len(self.sentences[current_article_id])
logger.info(
"Warning: A single article contains more than the nominal number of sentences per training shard."
)
for file in self.output_test_files:
current_article_id = sentence_counts[max_sentences][-1]
sentence_counts[max_sentences].pop(-1)
self.output_test_files[file].append(current_article_id)
consumed_article_set.add(current_article_id)
unused_article_set.remove(current_article_id)
# Maintain the max sentence count
while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0:
max_sentences -= 1
if len(self.sentences[current_article_id]) > nominal_sentences_per_test_shard:
nominal_sentences_per_test_shard = len(self.sentences[current_article_id])
logger.info(
"Warning: A single article contains more than the nominal number of sentences per test shard."
)
training_counts = []
test_counts = []
for shard in self.output_training_files:
training_counts.append(self.get_sentences_per_shard(self.output_training_files[shard]))
for shard in self.output_test_files:
test_counts.append(self.get_sentences_per_shard(self.output_test_files[shard]))
training_median = statistics.median(training_counts)
test_median = statistics.median(test_counts)
# Make subsequent passes over files to find articles to add without going over limit
history_remaining = []
n_history_remaining = 4
while len(consumed_article_set) < len(self.articles):
for fidx, file in enumerate(self.output_training_files):
nominal_next_article_size = min(
nominal_sentences_per_training_shard - training_counts[fidx], max_sentences
)
# Maintain the max sentence count
while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0:
max_sentences -= 1
while (
len(sentence_counts[nominal_next_article_size]) == 0
and nominal_next_article_size > 0
):
nominal_next_article_size -= 1
if (
nominal_next_article_size not in sentence_counts
                    or nominal_next_article_size == 0
or training_counts[fidx] > training_median
):
continue # skip adding to this file, will come back later if no file can accept unused articles
current_article_id = sentence_counts[nominal_next_article_size][-1]
sentence_counts[nominal_next_article_size].pop(-1)
self.output_training_files[file].append(current_article_id)
consumed_article_set.add(current_article_id)
unused_article_set.remove(current_article_id)
for fidx, file in enumerate(self.output_test_files):
nominal_next_article_size = min(
nominal_sentences_per_test_shard - test_counts[fidx], max_sentences
)
# Maintain the max sentence count
while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0:
max_sentences -= 1
while (
len(sentence_counts[nominal_next_article_size]) == 0
and nominal_next_article_size > 0
):
nominal_next_article_size -= 1
if (
nominal_next_article_size not in sentence_counts
                    or nominal_next_article_size == 0
or test_counts[fidx] > test_median
):
continue # skip adding to this file, will come back later if no file can accept unused articles
current_article_id = sentence_counts[nominal_next_article_size][-1]
sentence_counts[nominal_next_article_size].pop(-1)
self.output_test_files[file].append(current_article_id)
consumed_article_set.add(current_article_id)
unused_article_set.remove(current_article_id)
# If unable to place articles a few times, bump up nominal sizes by fraction until articles get placed
if len(history_remaining) == n_history_remaining:
history_remaining.pop(0)
history_remaining.append(len(unused_article_set))
history_same = True
for i in range(1, len(history_remaining)):
history_same = history_same and (history_remaining[i - 1] == history_remaining[i])
if history_same:
nominal_sentences_per_training_shard += 1
# nominal_sentences_per_test_shard += 1
training_counts = []
test_counts = []
for shard in self.output_training_files:
training_counts.append(
self.get_sentences_per_shard(self.output_training_files[shard])
)
for shard in self.output_test_files:
test_counts.append(self.get_sentences_per_shard(self.output_test_files[shard]))
training_median = statistics.median(training_counts)
test_median = statistics.median(test_counts)
logger.info(f"Distributing data over shards: {len(unused_article_set)} articles remaining.")
if len(unused_article_set) != 0:
logger.info("Warning: Some articles did not make it into output files.")
for shard in self.output_training_files:
logger.info(
f"Training shard: {self.get_sentences_per_shard(self.output_training_files[shard])}"
)
for shard in self.output_test_files:
logger.info(f"Test shard: {self.get_sentences_per_shard(self.output_test_files[shard])}")
logger.info("End: Distribute Articles Over Shards")
def write_shards_to_disk(self):
logger.info("Start: Write Shards to Disk")
for shard in self.output_training_files:
self.write_single_shard(shard, self.output_training_files[shard])
for shard in self.output_test_files:
self.write_single_shard(shard, self.output_test_files[shard])
logger.info("End: Write Shards to Disk")
def write_single_shard(self, shard_name, shard):
with open(shard_name, mode="w", newline="\n") as f:
for article_id in shard:
for line in self.sentences[article_id]:
f.write(line + "\n")
f.write("\n") # Line break between articles
try:
    import nltk
    nltk.download("punkt")
except ImportError:
    logger.info("nltk is required for sharding. please install before running.")
class NLTKSegmenter:
def segment_string(self, article):
return nltk.tokenize.sent_tokenize(article)
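# A minimal usage sketch (illustrative only; the input path, output prefix and
# shard counts below are hypothetical). It shows the intended call order of the
# Sharding class defined above, not a prescribed pipeline.
def example_sharding_run():
    segmenter = NLTKSegmenter()
    sharding = Sharding(
        input_files=["./corpus/part0.txt"],  # one article per line (hypothetical path)
        output_name_prefix="./shards/corpus_",
        n_training_shards=256,
        n_test_shards=1,
        fraction_test_set=0.1,
        segment=True,
    )
    sharding.load_articles()
    sharding.segment_articles_into_sentences(segmenter)
    sharding.distribute_articles_over_shards()
    sharding.write_shards_to_disk()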
|
sqsqueue.py
|
import sys
import boto3
import multiprocessing
# init logger
from logging import getLogger, StreamHandler, DEBUG
logger = getLogger(__name__)
handler = StreamHandler()
handler.setLevel(DEBUG)
logger.setLevel(DEBUG)
logger.addHandler(handler)
logger.propagate = False
sqs = boto3.resource("sqs")
def get_queue(name="chinachu-encode"):
queue = None
try:
        queue = sqs.get_queue_by_name(QueueName=name)
        logger.debug("got queue: {}".format(queue))
except Exception as e:
logger.exception("Error: {}".format(e))
sys.exit(-1)
return queue
def loop_queue(queue, func=None):
if func is None:
def f(message): return (True, None)
func = f
while 1:
msg_list = queue.receive_messages(MaxNumberOfMessages=1, WaitTimeSeconds=20)
if not msg_list:
logger.debug("got blank msg_list: {}".format(msg_list))
continue
for message in msg_list:
p = multiprocessing.Process(target=func, args=(message,))
p.start()
p.join()
def main():
def f(message):
logger.debug("msg: {}".format(message))
message.delete()
return (True, None)
loop_queue(get_queue("chinachu-encode"), f)
if __name__ == "__main__":
main()
|
NodeUDP.py
|
from Node import *
from socket import *
from ReachabilityTables import *
class BColors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
GG = '\033[96m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class NodeUDP(Node):
def __init__(self, ip, port):
super().__init__("intAS", ip, int(port))
self.reachability_table = ReachabilityTables()
# Start server thread.
self.threadServer = threading.Thread(target = self.server_udp)
# Continue the server, even after our main thread dies.
self.threadServer.daemon = True
self.threadServer.start()
# Run our menu.
self.listen()
def server_udp(self):
self.server_socket = socket(AF_INET, SOCK_DGRAM)
self.server_socket.bind((self.ip, self.port))
print ("El servidor esta listo para ser usado : ", self.ip, self.port)
while True:
message, client_addr = self.server_socket.recvfrom(1024)
if int.from_bytes(message, byteorder="big") != 0:
self.log_writer.write_log("UDP node received a message.", 1)
elements_quantity = int.from_bytes(message[:2], byteorder="big")
for n in range(0,elements_quantity):
ip_bytes = message[2+(n*8):6+(n*8)]
mask = message[6+(n*8)]
cost_bytes = message[7+(n*8):10+(n*8)]
ip = list(ip_bytes)
ip_str = ""
for byte in range(0,len(ip)):
if(byte < len(ip)-1):
ip_str += str(ip[byte])+"."
else:
ip_str += str(ip[byte])
mask_str = str(mask)
cost = int.from_bytes(cost_bytes,byteorder="big")
self.reachability_table.save_address(ip_str, client_addr[0],
mask_str, cost, int(client_addr[1]))
else:
# Remove from our reachability table.
self.reachability_table.remove_address(client_addr[0], int(client_addr[1]))
print("Message Recieved")
err = bytes([2])
self.server_socket.sendto(err, client_addr)
# Send messages to another node.
def send_message(self):
self.log_writer.write_log("UDP node is sending a message.", 2)
# Variables that we will use to keep the user's input.
port = ""
mask = ""
ip_destination = ""
# Variable to check each input of the user.
valid_input = False
while not valid_input:
ip_destination = input("Digite la ip de destino a la que desea enviar: ")
valid_input = self.validate_ip(ip_destination)
valid_input = False
while not valid_input:
mask = input("Digite la máscara de destino a la que desea enviar: ")
valid_input = self.validate_mask(mask)
valid_input = False
while not valid_input:
port = input("Digite el puerto de destino a la que desea enviar: ")
valid_input = self.validate_port(port)
n = input("Digite la cantidad de mensajes que va enviar a ese destino: ")
num = 1
valid_input = False
while not valid_input:
try:
num = int(n)
valid_input = True
except ValueError:
print(BColors.FAIL + "Error: " + BColors.ENDC + "Entrada no númerica")
port_destination = int(port)
mask_destination = int(mask)
elements_quantity = num.to_bytes(2, byteorder="big")
byte_array = bytearray(elements_quantity)
for i in range(0, num):
ip_message = ""
mask_message = ""
cost_message = ""
valid_input = False
while not valid_input:
ip_message = input("Digite la ip de destino a la que desea enviar: ")
valid_input = self.validate_ip(ip_message)
valid_input = False
while not valid_input:
mask_message = input("Digite la máscara de destino a la que desea enviar: ")
valid_input = self.validate_mask(mask_message)
valid_input = False
while not valid_input:
cost_message = input("Digite un costo: ")
valid_input = self.validate_cost(cost_message)
byte_array.extend(bytearray(bytes(map(int, ip_message.split(".")))))
byte_array.extend((int(mask_message)).to_bytes(1, byteorder="big"))
byte_array.extend(int(cost_message).to_bytes(3, byteorder="big"))
try:
self.client_socket = socket(AF_INET, SOCK_DGRAM)
self.client_socket.connect((str(ip_destination), port_destination))
self.client_socket.send(byte_array)
modified_sentence = self.client_socket.recv(1024)
print ("From Server:" , modified_sentence)
self.client_socket.close()
except BrokenPipeError:
print("Se perdió la conexión con el servidor")
def terminate_node(self):
print("Eliminado el nodo.")
def listen(self):
# Print our menu.
print(BColors.WARNING + "Bienvenido!, Node: " + self.ip, ":", str(self.port) + BColors.ENDC)
print(BColors.OKGREEN + "Instrucciones: " + BColors.ENDC)
print(BColors.BOLD + "-1-" + BColors.ENDC, "Enviar un mensaje a otro nodo")
print(BColors.BOLD + "-2-" + BColors.ENDC, "Terminar a este nodo")
print(BColors.BOLD + "-3-" + BColors.ENDC, "Imprimir la tabla de alcanzabilidad")
print(BColors.BOLD + "-4-" + BColors.ENDC, "Salir")
user_input = input("Qué desea hacer?\n")
if user_input == "1":
self.send_message()
self.listen()
elif user_input == "2":
print ("Eliminando nodo")
self.terminate_node()
elif user_input == "3":
self.reachability_table.print_table()
self.listen()
elif user_input == "4":
print("Terminando ejecucción.")
else:
print("Por favor, escoja alguna de las opciones.")
self.listen()
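# A minimal sketch (illustrative only; function name and sample values are
# hypothetical) of the wire format that send_message() builds and server_udp()
# parses: a 2-byte big-endian entry count followed by 8 bytes per entry
# (4-byte IP, 1-byte mask, 3-byte cost).
def example_pack_reachability_entry(ip="10.0.0.0", mask=24, cost=5):
    payload = bytearray((1).to_bytes(2, byteorder="big"))  # entry count
    payload.extend(bytes(map(int, ip.split("."))))         # 4-byte IP
    payload.extend(mask.to_bytes(1, byteorder="big"))      # 1-byte mask
    payload.extend(cost.to_bytes(3, byteorder="big"))      # 3-byte cost
    return payload  # len(payload) == 2 + 8, matching the offsets read in server_udp()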
|
widget.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
The widget is called from web2py
----------------------------------
"""
import datetime
import sys
import cStringIO
import time
import thread
import threading
import os
import socket
import signal
import math
import logging
import newcron
import getpass
import gluon.main as main
from gluon.fileutils import read_file, write_file, create_welcome_w2p
from gluon.settings import global_settings
from gluon.shell import run, test
from gluon.utils import is_valid_ip_address, is_loopback_ip_address, getipaddrinfo
ProgramName = 'web2py Web Framework'
ProgramAuthor = 'Created by Massimo Di Pierro, Copyright 2007-' + str(
datetime.datetime.now().year)
ProgramVersion = read_file('VERSION').strip()
ProgramInfo = '''%s
%s
%s''' % (ProgramName, ProgramAuthor, ProgramVersion)
if not sys.version[:3] in ['2.5', '2.6', '2.7']:
msg = 'Warning: web2py requires Python 2.5, 2.6 or 2.7 but you are running:\n%s'
msg = msg % sys.version
sys.stderr.write(msg)
logger = logging.getLogger("web2py")
def run_system_tests(options):
"""
Runs unittests for gluon.tests
"""
import subprocess
major_version = sys.version_info[0]
minor_version = sys.version_info[1]
if major_version == 2:
if minor_version in (5, 6):
sys.stderr.write("Python 2.5 or 2.6\n")
ret = subprocess.call(['unit2', '-v', 'gluon.tests'])
elif minor_version in (7,):
call_args = [sys.executable, '-m', 'unittest', '-v', 'gluon.tests']
if options.with_coverage:
try:
import coverage
coverage_config = os.environ.get(
"COVERAGE_PROCESS_START",
os.path.join('gluon', 'tests', 'coverage.ini'))
call_args = ['coverage', 'run', '--rcfile=%s' %
coverage_config,
'-m', 'unittest', '-v', 'gluon.tests']
except:
sys.stderr.write('Coverage was not installed, skipping\n')
sys.stderr.write("Python 2.7\n")
ret = subprocess.call(call_args)
else:
sys.stderr.write("unknown python 2.x version\n")
ret = 256
else:
sys.stderr.write("Only Python 2.x supported.\n")
ret = 256
sys.exit(ret and 1)
class IO(object):
""" """
def __init__(self):
""" """
self.buffer = cStringIO.StringIO()
def write(self, data):
""" """
sys.__stdout__.write(data)
if hasattr(self, 'callback'):
self.callback(data)
else:
self.buffer.write(data)
def get_url(host, path='/', proto='http', port=80):
if ':' in host:
host = '[%s]' % host
else:
host = host.replace('0.0.0.0', '127.0.0.1')
if path.startswith('/'):
path = path[1:]
if proto.endswith(':'):
proto = proto[:-1]
if not port or port == 80:
port = ''
else:
port = ':%s' % port
return '%s://%s%s/%s' % (proto, host, port, path)
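# Illustrative results of get_url() (examples added for clarity):
#   get_url('0.0.0.0', port=8000)             -> 'http://127.0.0.1:8000/'
#   get_url('::1', proto='https', port=8443)  -> 'https://[::1]:8443/'
#   get_url('example.com')                    -> 'http://example.com/'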
def start_browser(url, startup=False):
if startup:
print 'please visit:'
print '\t', url
print 'starting browser...'
try:
import webbrowser
webbrowser.open(url)
except:
print 'warning: unable to detect your browser'
class web2pyDialog(object):
""" Main window dialog """
def __init__(self, root, options):
""" web2pyDialog constructor """
import Tkinter
import tkMessageBox
bg_color = 'white'
root.withdraw()
self.root = Tkinter.Toplevel(root, bg=bg_color)
self.root.resizable(0, 0)
self.root.title(ProgramName)
self.options = options
self.scheduler_processes = {}
self.menu = Tkinter.Menu(self.root)
servermenu = Tkinter.Menu(self.menu, tearoff=0)
httplog = os.path.join(self.options.folder, 'httpserver.log')
iconphoto = os.path.join('extras', 'icons', 'web2py.gif')
if os.path.exists(iconphoto):
img = Tkinter.PhotoImage(file=iconphoto)
self.root.tk.call('wm', 'iconphoto', self.root._w, img)
# Building the Menu
item = lambda: start_browser(httplog)
servermenu.add_command(label='View httpserver.log',
command=item)
servermenu.add_command(label='Quit (pid:%i)' % os.getpid(),
command=self.quit)
self.menu.add_cascade(label='Server', menu=servermenu)
self.pagesmenu = Tkinter.Menu(self.menu, tearoff=0)
self.menu.add_cascade(label='Pages', menu=self.pagesmenu)
#scheduler menu
self.schedmenu = Tkinter.Menu(self.menu, tearoff=0)
self.menu.add_cascade(label='Scheduler', menu=self.schedmenu)
#start and register schedulers from options
self.update_schedulers(start=True)
helpmenu = Tkinter.Menu(self.menu, tearoff=0)
# Home Page
item = lambda: start_browser('http://www.web2py.com/')
helpmenu.add_command(label='Home Page',
command=item)
# About
item = lambda: tkMessageBox.showinfo('About web2py', ProgramInfo)
helpmenu.add_command(label='About',
command=item)
self.menu.add_cascade(label='Info', menu=helpmenu)
self.root.config(menu=self.menu)
if options.taskbar:
self.root.protocol('WM_DELETE_WINDOW',
lambda: self.quit(True))
else:
self.root.protocol('WM_DELETE_WINDOW', self.quit)
sticky = Tkinter.NW
# Prepare the logo area
self.logoarea = Tkinter.Canvas(self.root,
background=bg_color,
width=300,
height=300)
self.logoarea.grid(row=0, column=0, columnspan=4, sticky=sticky)
self.logoarea.after(1000, self.update_canvas)
logo = os.path.join('extras', 'icons', 'splashlogo.gif')
if os.path.exists(logo):
img = Tkinter.PhotoImage(file=logo)
pnl = Tkinter.Label(self.logoarea, image=img, background=bg_color, bd=0)
pnl.pack(side='top', fill='both', expand='yes')
# Prevent garbage collection of img
pnl.image = img
# Prepare the banner area
self.bannerarea = Tkinter.Canvas(self.root,
bg=bg_color,
width=300,
height=300)
self.bannerarea.grid(row=1, column=1, columnspan=2, sticky=sticky)
Tkinter.Label(self.bannerarea, anchor=Tkinter.N,
text=str(ProgramVersion + "\n" + ProgramAuthor),
font=('Helvetica', 11), justify=Tkinter.CENTER,
foreground='#195866', background=bg_color,
height=3).pack( side='top',
fill='both',
expand='yes')
self.bannerarea.after(1000, self.update_canvas)
# IP
Tkinter.Label(self.root,
text='Server IP:', bg=bg_color,
justify=Tkinter.RIGHT).grid(row=4,
column=1,
sticky=sticky)
self.ips = {}
self.selected_ip = Tkinter.StringVar()
row = 4
ips = [('127.0.0.1', 'Local (IPv4)')] + \
([('::1', 'Local (IPv6)')] if socket.has_ipv6 else []) + \
[(ip, 'Public') for ip in options.ips] + \
[('0.0.0.0', 'Public')]
for ip, legend in ips:
self.ips[ip] = Tkinter.Radiobutton(
self.root, bg=bg_color, highlightthickness=0,
selectcolor='light grey', width=30,
anchor=Tkinter.W, text='%s (%s)' % (legend, ip),
justify=Tkinter.LEFT,
variable=self.selected_ip, value=ip)
self.ips[ip].grid(row=row, column=2, sticky=sticky)
if row == 4:
self.ips[ip].select()
row += 1
shift = row
# Port
Tkinter.Label(self.root,
text='Server Port:', bg=bg_color,
justify=Tkinter.RIGHT).grid(row=shift,
column=1, pady=10,
sticky=sticky)
self.port_number = Tkinter.Entry(self.root)
self.port_number.insert(Tkinter.END, self.options.port)
self.port_number.grid(row=shift, column=2, sticky=sticky, pady=10)
# Password
Tkinter.Label(self.root,
text='Choose Password:', bg=bg_color,
justify=Tkinter.RIGHT).grid(row=shift + 1,
column=1,
sticky=sticky)
self.password = Tkinter.Entry(self.root, show='*')
self.password.bind('<Return>', lambda e: self.start())
self.password.focus_force()
self.password.grid(row=shift + 1, column=2, sticky=sticky)
# Prepare the canvas
self.canvas = Tkinter.Canvas(self.root,
width=400,
height=100,
bg='black')
self.canvas.grid(row=shift + 2, column=1, columnspan=2, pady=5,
sticky=sticky)
self.canvas.after(1000, self.update_canvas)
# Prepare the frame
frame = Tkinter.Frame(self.root)
frame.grid(row=shift + 3, column=1, columnspan=2, pady=5,
sticky=sticky)
# Start button
self.button_start = Tkinter.Button(frame,
text='start server',
command=self.start)
self.button_start.grid(row=0, column=0, sticky=sticky)
# Stop button
self.button_stop = Tkinter.Button(frame,
text='stop server',
command=self.stop)
self.button_stop.grid(row=0, column=1, sticky=sticky)
self.button_stop.configure(state='disabled')
if options.taskbar:
import gluon.contrib.taskbar_widget
self.tb = gluon.contrib.taskbar_widget.TaskBarIcon()
self.checkTaskBar()
if options.password != '<ask>':
self.password.insert(0, options.password)
self.start()
self.root.withdraw()
else:
self.tb = None
def update_schedulers(self, start=False):
apps = []
available_apps = [arq for arq in os.listdir('applications/')]
available_apps = [arq for arq in available_apps
if os.path.exists(
'applications/%s/models/scheduler.py' % arq)]
if start:
# the widget takes care of starting the scheduler
if self.options.scheduler and self.options.with_scheduler:
apps = [app.strip() for app
in self.options.scheduler.split(',')
if app in available_apps]
for app in apps:
self.try_start_scheduler(app)
# reset the menu
self.schedmenu.delete(0, len(available_apps))
for arq in available_apps:
if arq not in self.scheduler_processes:
item = lambda u = arq: self.try_start_scheduler(u)
self.schedmenu.add_command(label="start %s" % arq,
command=item)
if arq in self.scheduler_processes:
item = lambda u = arq: self.try_stop_scheduler(u)
self.schedmenu.add_command(label="stop %s" % arq,
command=item)
def start_schedulers(self, app):
try:
from multiprocessing import Process
except:
sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n')
return
code = "from gluon import current;current._scheduler.loop()"
print 'starting scheduler from widget for "%s"...' % app
args = (app, True, True, None, False, code)
logging.getLogger().setLevel(self.options.debuglevel)
p = Process(target=run, args=args)
self.scheduler_processes[app] = p
self.update_schedulers()
print "Currently running %s scheduler processes" % (
len(self.scheduler_processes))
p.start()
print "Processes started"
def try_stop_scheduler(self, app):
if app in self.scheduler_processes:
p = self.scheduler_processes[app]
del self.scheduler_processes[app]
p.terminate()
p.join()
self.update_schedulers()
def try_start_scheduler(self, app):
if app not in self.scheduler_processes:
t = threading.Thread(target=self.start_schedulers, args=(app,))
t.start()
def checkTaskBar(self):
""" Checks taskbar status """
if self.tb.status:
if self.tb.status[0] == self.tb.EnumStatus.QUIT:
self.quit()
elif self.tb.status[0] == self.tb.EnumStatus.TOGGLE:
if self.root.state() == 'withdrawn':
self.root.deiconify()
else:
self.root.withdraw()
elif self.tb.status[0] == self.tb.EnumStatus.STOP:
self.stop()
elif self.tb.status[0] == self.tb.EnumStatus.START:
self.start()
elif self.tb.status[0] == self.tb.EnumStatus.RESTART:
self.stop()
self.start()
del self.tb.status[0]
self.root.after(1000, self.checkTaskBar)
def update(self, text):
""" Updates app text """
try:
self.text.configure(state='normal')
self.text.insert('end', text)
self.text.configure(state='disabled')
except:
pass # ## this should only happen in case app is destroyed
def connect_pages(self):
""" Connects pages """
# reset the menu
available_apps = [arq for arq in os.listdir('applications/')
if os.path.exists(
'applications/%s/__init__.py' % arq)]
self.pagesmenu.delete(0, len(available_apps))
for arq in available_apps:
url = self.url + arq
self.pagesmenu.add_command(
label=url, command=lambda u=url: start_browser(u))
def quit(self, justHide=False):
""" Finishes the program execution """
if justHide:
self.root.withdraw()
else:
try:
scheds = self.scheduler_processes.keys()
for t in scheds:
self.try_stop_scheduler(t)
except:
pass
try:
newcron.stopcron()
except:
pass
try:
self.server.stop()
except:
pass
try:
self.tb.Destroy()
except:
pass
self.root.destroy()
sys.exit(0)
def error(self, message):
""" Shows error message """
import tkMessageBox
tkMessageBox.showerror('web2py start server', message)
def start(self):
""" Starts web2py server """
password = self.password.get()
if not password:
self.error('no password, no web admin interface')
ip = self.selected_ip.get()
if not is_valid_ip_address(ip):
return self.error('invalid host ip address')
try:
port = int(self.port_number.get())
except:
return self.error('invalid port number')
# Check for non default value for ssl inputs
if (len(self.options.ssl_certificate) > 0 or
len(self.options.ssl_private_key) > 0):
proto = 'https'
else:
proto = 'http'
self.url = get_url(ip, proto=proto, port=port)
self.connect_pages()
self.button_start.configure(state='disabled')
try:
options = self.options
req_queue_size = options.request_queue_size
self.server = main.HttpServer(
ip,
port,
password,
pid_filename=options.pid_filename,
log_filename=options.log_filename,
profiler_dir=options.profiler_dir,
ssl_certificate=options.ssl_certificate,
ssl_private_key=options.ssl_private_key,
ssl_ca_certificate=options.ssl_ca_certificate,
min_threads=options.minthreads,
max_threads=options.maxthreads,
server_name=options.server_name,
request_queue_size=req_queue_size,
timeout=options.timeout,
shutdown_timeout=options.shutdown_timeout,
path=options.folder,
interfaces=options.interfaces)
thread.start_new_thread(self.server.start, ())
except Exception, e:
self.button_start.configure(state='normal')
return self.error(str(e))
if not self.server_ready():
self.button_start.configure(state='normal')
return
self.button_stop.configure(state='normal')
if not options.taskbar:
thread.start_new_thread(
start_browser, (get_url(ip, proto=proto, port=port), True))
self.password.configure(state='readonly')
[ip.configure(state='disabled') for ip in self.ips.values()]
self.port_number.configure(state='readonly')
if self.tb:
self.tb.SetServerRunning()
def server_ready(self):
for listener in self.server.server.listeners:
if listener.ready:
return True
return False
def stop(self):
""" Stops web2py server """
self.button_start.configure(state='normal')
self.button_stop.configure(state='disabled')
self.password.configure(state='normal')
[ip.configure(state='normal') for ip in self.ips.values()]
self.port_number.configure(state='normal')
self.server.stop()
if self.tb:
self.tb.SetServerStopped()
def update_canvas(self):
""" Updates canvas """
try:
t1 = os.path.getsize('httpserver.log')
except:
self.canvas.after(1000, self.update_canvas)
return
try:
fp = open('httpserver.log', 'r')
fp.seek(self.t0)
data = fp.read(t1 - self.t0)
fp.close()
value = self.p0[1:] + [10 + 90.0 / math.sqrt(1 + data.count('\n'))]
self.p0 = value
for i in xrange(len(self.p0) - 1):
c = self.canvas.coords(self.q0[i])
self.canvas.coords(self.q0[i],
(c[0],
self.p0[i],
c[2],
self.p0[i + 1]))
self.t0 = t1
except BaseException:
self.t0 = time.time()
self.t0 = t1
self.p0 = [100] * 400
self.q0 = [self.canvas.create_line(i, 100, i + 1, 100,
fill='green') for i in xrange(len(self.p0) - 1)]
self.canvas.after(1000, self.update_canvas)
def console():
""" Defines the behavior of the console web2py execution """
import optparse
import textwrap
usage = "python web2py.py"
description = """\
web2py Web Framework startup script.
ATTENTION: unless a password is specified (-a 'passwd') web2py will
attempt to run a GUI. In this case command line options are ignored."""
description = textwrap.dedent(description)
parser = optparse.OptionParser(
usage, None, optparse.Option, ProgramVersion)
parser.description = description
msg = ('IP address of the server (e.g., 127.0.0.1 or ::1); '
'Note: This value is ignored when using the \'interfaces\' option.')
parser.add_option('-i',
'--ip',
default='127.0.0.1',
dest='ip',
help=msg)
parser.add_option('-p',
'--port',
default='8000',
dest='port',
type='int',
help='port of server (8000)')
parser.add_option('-G',
'--GAE',
default=None,
dest='gae',
help="'-G configure' will create app.yaml and gaehandler.py")
msg = ('password to be used for administration '
'(use -a "<recycle>" to reuse the last password))')
parser.add_option('-a',
'--password',
default='<ask>',
dest='password',
help=msg)
parser.add_option('-c',
'--ssl_certificate',
default='',
dest='ssl_certificate',
help='file that contains ssl certificate')
parser.add_option('-k',
'--ssl_private_key',
default='',
dest='ssl_private_key',
help='file that contains ssl private key')
msg = ('Use this file containing the CA certificate to validate X509 '
'certificates from clients')
parser.add_option('--ca-cert',
action='store',
dest='ssl_ca_certificate',
default=None,
help=msg)
parser.add_option('-d',
'--pid_filename',
default='httpserver.pid',
dest='pid_filename',
help='file to store the pid of the server')
parser.add_option('-l',
'--log_filename',
default='httpserver.log',
dest='log_filename',
help='file to log connections')
parser.add_option('-n',
'--numthreads',
default=None,
type='int',
dest='numthreads',
help='number of threads (deprecated)')
parser.add_option('--minthreads',
default=None,
type='int',
dest='minthreads',
help='minimum number of server threads')
parser.add_option('--maxthreads',
default=None,
type='int',
dest='maxthreads',
help='maximum number of server threads')
parser.add_option('-s',
'--server_name',
default=socket.gethostname(),
dest='server_name',
help='server name for the web server')
msg = 'max number of queued requests when server unavailable'
parser.add_option('-q',
'--request_queue_size',
default='5',
type='int',
dest='request_queue_size',
help=msg)
parser.add_option('-o',
'--timeout',
default='10',
type='int',
dest='timeout',
help='timeout for individual request (10 seconds)')
parser.add_option('-z',
'--shutdown_timeout',
default='5',
type='int',
dest='shutdown_timeout',
help='timeout on shutdown of server (5 seconds)')
parser.add_option('--socket-timeout',
default=5,
type='int',
dest='socket_timeout',
help='timeout for socket (5 second)')
parser.add_option('-f',
'--folder',
default=os.getcwd(),
dest='folder',
help='folder from which to run web2py')
parser.add_option('-v',
'--verbose',
action='store_true',
dest='verbose',
default=False,
help='increase --test verbosity')
parser.add_option('-Q',
'--quiet',
action='store_true',
dest='quiet',
default=False,
help='disable all output')
parser.add_option('-e',
'--errors_to_console',
action='store_true',
dest='print_errors',
default=False,
help='log all errors to console')
msg = ('set debug output level (0-100, 0 means all, 100 means none; '
'default is 30)')
parser.add_option('-D',
'--debug',
dest='debuglevel',
default=30,
type='int',
help=msg)
msg = ('run web2py in interactive shell or IPython (if installed) with '
'specified appname (if app does not exist it will be created). '
'APPNAME like a/c/f (c,f optional)')
parser.add_option('-S',
'--shell',
dest='shell',
metavar='APPNAME',
help=msg)
msg = ('run web2py in interactive shell or bpython (if installed) with '
'specified appname (if app does not exist it will be created).\n'
'Use combined with --shell')
parser.add_option('-B',
'--bpython',
action='store_true',
default=False,
dest='bpython',
help=msg)
msg = 'only use plain python shell; should be used with --shell option'
parser.add_option('-P',
'--plain',
action='store_true',
default=False,
dest='plain',
help=msg)
msg = ('auto import model files; default is False; should be used '
'with --shell option')
parser.add_option('-M',
'--import_models',
action='store_true',
default=False,
dest='import_models',
help=msg)
msg = ('run PYTHON_FILE in web2py environment; '
'should be used with --shell option')
parser.add_option('-R',
'--run',
dest='run',
metavar='PYTHON_FILE',
default='',
help=msg)
msg = ('run scheduled tasks for the specified apps: expects a list of '
'app names as -K app1,app2,app3 '
'or a list of app:groups as -K app1:group1:group2,app2:group1 '
'to override specific group_names. (only strings, no spaces '
           'allowed. Requires a scheduler defined in the models)')
parser.add_option('-K',
'--scheduler',
dest='scheduler',
default=None,
help=msg)
msg = 'run schedulers alongside webserver, needs -K app1 and -a too'
parser.add_option('-X',
'--with-scheduler',
action='store_true',
default=False,
dest='with_scheduler',
help=msg)
msg = ('run doctests in web2py environment; '
'TEST_PATH like a/c/f (c,f optional)')
parser.add_option('-T',
'--test',
dest='test',
metavar='TEST_PATH',
default=None,
help=msg)
msg = 'trigger a cron run manually; usually invoked from a system crontab'
parser.add_option('-C',
'--cron',
action='store_true',
dest='extcron',
default=False,
help=msg)
msg = 'triggers the use of softcron'
parser.add_option('--softcron',
action='store_true',
dest='softcron',
default=False,
help=msg)
parser.add_option('-Y',
'--run-cron',
action='store_true',
dest='runcron',
default=False,
help='start the background cron process')
parser.add_option('-J',
'--cronjob',
action='store_true',
dest='cronjob',
default=False,
help='identify cron-initiated command')
parser.add_option('-L',
'--config',
dest='config',
default='',
help='config file')
parser.add_option('-F',
'--profiler',
dest='profiler_dir',
default=None,
help='profiler dir')
parser.add_option('-t',
'--taskbar',
action='store_true',
dest='taskbar',
default=False,
help='use web2py gui and run in taskbar (system tray)')
parser.add_option('',
'--nogui',
action='store_true',
default=False,
dest='nogui',
help='text-only, no GUI')
msg = ('should be followed by a list of arguments to be passed to script, '
'to be used with -S, -A must be the last option')
parser.add_option('-A',
'--args',
action='store',
dest='args',
default=None,
help=msg)
parser.add_option('--no-banner',
action='store_true',
default=False,
dest='nobanner',
help='Do not print header banner')
msg = ('listen on multiple addresses: '
'"ip1:port1:key1:cert1:ca_cert1;ip2:port2:key2:cert2:ca_cert2;..." '
'(:key:cert:ca_cert optional; no spaces; IPv6 addresses must be in '
'square [] brackets)')
parser.add_option('--interfaces',
action='store',
dest='interfaces',
default=None,
help=msg)
msg = 'runs web2py tests'
parser.add_option('--run_system_tests',
action='store_true',
dest='run_system_tests',
default=False,
help=msg)
msg = ('adds coverage reporting (needs --run_system_tests), '
'python 2.7 and the coverage module installed. '
'You can alter the default path setting the environmental '
'var "COVERAGE_PROCESS_START". '
'By default it takes gluon/tests/coverage.ini')
parser.add_option('--with_coverage',
action='store_true',
dest='with_coverage',
default=False,
help=msg)
if '-A' in sys.argv:
k = sys.argv.index('-A')
elif '--args' in sys.argv:
k = sys.argv.index('--args')
else:
k = len(sys.argv)
sys.argv, other_args = sys.argv[:k], sys.argv[k + 1:]
(options, args) = parser.parse_args()
options.args = [options.run] + other_args
global_settings.cmd_options = options
global_settings.cmd_args = args
if options.gae:
if not os.path.exists('app.yaml'):
name = raw_input("Your GAE app name: ")
content = open(os.path.join('examples', 'app.example.yaml'), 'rb').read()
open('app.yaml', 'wb').write(content.replace("yourappname", name))
else:
print "app.yaml alreday exists in the web2py folder"
if not os.path.exists('gaehandler.py'):
content = open(os.path.join('handlers', 'gaehandler.py'), 'rb').read()
open('gaehandler.py', 'wb').write(content)
else:
print "gaehandler.py alreday exists in the web2py folder"
sys.exit(0)
try:
options.ips = list(set( # no duplicates
[addrinfo[4][0] for addrinfo in getipaddrinfo(socket.getfqdn())
if not is_loopback_ip_address(addrinfo=addrinfo)]))
except socket.gaierror:
options.ips = []
if options.run_system_tests:
run_system_tests(options)
if options.quiet:
capture = cStringIO.StringIO()
sys.stdout = capture
logger.setLevel(logging.CRITICAL + 1)
else:
logger.setLevel(options.debuglevel)
if options.config[-3:] == '.py':
options.config = options.config[:-3]
if options.cronjob:
global_settings.cronjob = True # tell the world
options.plain = True # cronjobs use a plain shell
options.nobanner = True
options.nogui = True
options.folder = os.path.abspath(options.folder)
# accept --interfaces in the form
# "ip1:port1:key1:cert1:ca_cert1;[ip2]:port2;ip3:port3:key3:cert3"
# (no spaces; optional key:cert indicate SSL)
if isinstance(options.interfaces, str):
interfaces = options.interfaces.split(';')
options.interfaces = []
for interface in interfaces:
if interface.startswith('['): # IPv6
ip, if_remainder = interface.split(']', 1)
ip = ip[1:]
if_remainder = if_remainder[1:].split(':')
if_remainder[0] = int(if_remainder[0]) # numeric port
options.interfaces.append(tuple([ip] + if_remainder))
else: # IPv4
interface = interface.split(':')
interface[1] = int(interface[1]) # numeric port
options.interfaces.append(tuple(interface))
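    # For example, --interfaces "127.0.0.1:8000;[::1]:8001" is parsed above
    # into [('127.0.0.1', 8000), ('::1', 8001)] (illustrative values only).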
# accepts --scheduler in the form
# "app:group1,group2,app2:group1"
scheduler = []
options.scheduler_groups = None
if isinstance(options.scheduler, str):
if ':' in options.scheduler:
for opt in options.scheduler.split(','):
scheduler.append(opt.split(':'))
options.scheduler = ','.join([app[0] for app in scheduler])
options.scheduler_groups = scheduler
if options.numthreads is not None and options.minthreads is None:
options.minthreads = options.numthreads # legacy
create_welcome_w2p()
if not options.cronjob:
# If we have the applications package or if we should upgrade
if not os.path.exists('applications/__init__.py'):
write_file('applications/__init__.py', '')
return options, args
def check_existent_app(options, appname):
if os.path.isdir(os.path.join(options.folder, 'applications', appname)):
return True
def get_code_for_scheduler(app, options):
if len(app) == 1 or app[1] is None:
code = "from gluon import current;current._scheduler.loop()"
else:
code = "from gluon import current;current._scheduler.group_names = ['%s'];"
code += "current._scheduler.loop()"
code = code % ("','".join(app[1:]))
app_ = app[0]
if not check_existent_app(options, app_):
print "Application '%s' doesn't exist, skipping" % app_
return None, None
return app_, code
def start_schedulers(options):
try:
from multiprocessing import Process
except:
sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n')
return
processes = []
apps = [(app.strip(), None) for app in options.scheduler.split(',')]
if options.scheduler_groups:
apps = options.scheduler_groups
code = "from gluon import current;current._scheduler.loop()"
logging.getLogger().setLevel(options.debuglevel)
if len(apps) == 1 and not options.with_scheduler:
app_, code = get_code_for_scheduler(apps[0], options)
if not app_:
return
print 'starting single-scheduler for "%s"...' % app_
run(app_, True, True, None, False, code)
return
# Work around OS X problem: http://bugs.python.org/issue9405
import urllib
urllib.getproxies()
for app in apps:
app_, code = get_code_for_scheduler(app, options)
if not app_:
continue
print 'starting scheduler for "%s"...' % app_
args = (app_, True, True, None, False, code)
p = Process(target=run, args=args)
processes.append(p)
print "Currently running %s scheduler processes" % (len(processes))
p.start()
##to avoid bashing the db at the same time
time.sleep(0.7)
print "Processes started"
for p in processes:
try:
p.join()
except (KeyboardInterrupt, SystemExit):
print "Processes stopped"
except:
p.terminate()
p.join()
def start(cron=True):
""" Starts server """
# ## get command line arguments
(options, args) = console()
if not options.nobanner:
print ProgramName
print ProgramAuthor
print ProgramVersion
from pydal.drivers import DRIVERS
if not options.nobanner:
print 'Database drivers available: %s' % ', '.join(DRIVERS)
# ## if -L load options from options.config file
if options.config:
try:
options2 = __import__(options.config, {}, {}, '')
except Exception:
try:
# Jython doesn't like the extra stuff
options2 = __import__(options.config)
except Exception:
print 'Cannot import config file [%s]' % options.config
sys.exit(1)
for key in dir(options2):
if hasattr(options, key):
setattr(options, key, getattr(options2, key))
logfile0 = os.path.join('extras', 'examples', 'logging.example.conf')
if not os.path.exists('logging.conf') and os.path.exists(logfile0):
import shutil
sys.stdout.write("Copying logging.conf.example to logging.conf ... ")
        shutil.copyfile(logfile0, 'logging.conf')
sys.stdout.write("OK\n")
# ## if -T run doctests (no cron)
if hasattr(options, 'test') and options.test:
test(options.test, verbose=options.verbose)
return
# ## if -S start interactive shell (also no cron)
if options.shell:
if options.folder:
os.chdir(options.folder)
if not options.args is None:
sys.argv[:] = options.args
run(options.shell, plain=options.plain, bpython=options.bpython,
import_models=options.import_models, startfile=options.run,
cronjob=options.cronjob)
return
# ## if -C start cron run (extcron) and exit
# ## -K specifies optional apps list (overloading scheduler)
if options.extcron:
logger.debug('Starting extcron...')
global_settings.web2py_crontype = 'external'
if options.scheduler: # -K
apps = [app.strip() for app in options.scheduler.split(
',') if check_existent_app(options, app.strip())]
else:
apps = None
extcron = newcron.extcron(options.folder, apps=apps)
extcron.start()
extcron.join()
return
# ## if -K
if options.scheduler and not options.with_scheduler:
try:
start_schedulers(options)
except KeyboardInterrupt:
pass
return
# ## if -H cron is enabled in this *process*
# ## if --softcron use softcron
# ## use hardcron in all other cases
if cron and options.runcron and options.softcron:
print 'Using softcron (but this is not very efficient)'
global_settings.web2py_crontype = 'soft'
elif cron and options.runcron:
logger.debug('Starting hardcron...')
global_settings.web2py_crontype = 'hard'
newcron.hardcron(options.folder).start()
# ## if no password provided and havetk start Tk interface
# ## or start interface if we want to put in taskbar (system tray)
try:
options.taskbar
except:
options.taskbar = False
if options.taskbar and os.name != 'nt':
print 'Error: taskbar not supported on this platform'
sys.exit(1)
root = None
if not options.nogui and options.password == '<ask>':
try:
import Tkinter
havetk = True
try:
root = Tkinter.Tk()
except:
pass
except (ImportError, OSError):
logger.warn(
'GUI not available because Tk library is not installed')
havetk = False
options.nogui = True
if root:
root.focus_force()
# Mac OS X - make the GUI window rise to the top
if os.path.exists("/usr/bin/osascript"):
applescript = """
tell application "System Events"
set proc to first process whose unix id is %d
set frontmost of proc to true
end tell
""" % (os.getpid())
os.system("/usr/bin/osascript -e '%s'" % applescript)
master = web2pyDialog(root, options)
signal.signal(signal.SIGTERM, lambda a, b: master.quit())
try:
root.mainloop()
except:
master.quit()
sys.exit()
# ## if no tk and no password, ask for a password
if not root and options.password == '<ask>':
options.password = getpass.getpass('choose a password:')
if not options.password and not options.nobanner:
print 'no password, no admin interface'
# ##-X (if no tk, the widget takes care of it himself)
if not root and options.scheduler and options.with_scheduler:
t = threading.Thread(target=start_schedulers, args=(options,))
t.start()
# ## start server
# Use first interface IP and port if interfaces specified, since the
# interfaces option overrides the IP (and related) options.
if not options.interfaces:
(ip, port) = (options.ip, int(options.port))
else:
first_if = options.interfaces[0]
(ip, port) = first_if[0], first_if[1]
# Check for non default value for ssl inputs
if (len(options.ssl_certificate) > 0) or (len(options.ssl_private_key) > 0):
proto = 'https'
else:
proto = 'http'
url = get_url(ip, proto=proto, port=port)
if not options.nobanner:
message = '\nplease visit:\n\t%s\n' % url
if sys.platform.startswith('win'):
message += 'use "taskkill /f /pid %i" to shutdown the web2py server\n\n' % os.getpid()
else:
message += 'use "kill -SIGTERM %i" to shutdown the web2py server\n\n' % os.getpid()
print message
# enhance linecache.getline (used by debugger) to look at the source file
# if the line was not found (under py2exe & when file was modified)
import linecache
py2exe_getline = linecache.getline
def getline(filename, lineno, *args, **kwargs):
line = py2exe_getline(filename, lineno, *args, **kwargs)
if not line:
try:
f = open(filename, "r")
try:
for i, line in enumerate(f):
if lineno == i + 1:
break
else:
line = None
finally:
f.close()
except (IOError, OSError):
line = None
return line
linecache.getline = getline
server = main.HttpServer(ip=ip,
port=port,
password=options.password,
pid_filename=options.pid_filename,
log_filename=options.log_filename,
profiler_dir=options.profiler_dir,
ssl_certificate=options.ssl_certificate,
ssl_private_key=options.ssl_private_key,
ssl_ca_certificate=options.ssl_ca_certificate,
min_threads=options.minthreads,
max_threads=options.maxthreads,
server_name=options.server_name,
request_queue_size=options.request_queue_size,
timeout=options.timeout,
socket_timeout=options.socket_timeout,
shutdown_timeout=options.shutdown_timeout,
path=options.folder,
interfaces=options.interfaces)
try:
server.start()
except KeyboardInterrupt:
server.stop()
try:
t.join()
except:
pass
logging.shutdown()
|
agentConnMeasure.py
|
# Copyright (c) 2011-2013 Peng Sun. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYRIGHT file.
# agentConnMeasure.py
# Measure connection-level statistics (TCP, via web10G)
import time
import traceback
from threading import Thread
from uuid import getnode as get_mac
import agentManager
import agent_web10g_measure as web10g
from agentUtil import *
web10g_types_dict = {
'ThruOctetsReceived' : 'ThruOctetsReceived',
'Rwnd' : 'CurRwinRcvd',
'OtherReductionsCM' : 'OtherReductionsCM',
'RTT' : 'SampleRTT',
'OtherReductionsCV' : 'OtherReductionsCV',
'BytesRetran' : 'OctetsRetrans',
'MaxRTT' : 'MaxRTT',
'SoftErrorReason' : 'SoftErrorReason',
'SndLimTimeSnd' : 'SndLimTimeSnd',
'MaxRTO' : 'MaxRTO',
'Rcvbuf' : 'Rcvbuf',
'SndLimTransRwin' : 'SndLimTransRwin',
'NonRecovDAEpisodes' : 'NonRecovDAEpisodes',
'IpTtl' : 'IpTtl',
'CongSignals' : 'CongSignals',
'DataSegsOut' : 'DataSegsOut',
'LimRwin' : 'LimRwin',
'MSSRcvd' : 'MSSRcvd',
'WinScaleSent' : 'WinScaleSent',
'ZeroRwinRcvd' : 'ZeroRwinRcvd',
'StartTimeSecs' : 'StartTimeSecs',
'AbruptTimeouts' : 'AbruptTimeouts',
'NonRecovDA' : 'NonRecovDA',
'ECNsignals' : 'ECNsignals',
'Sndbuf' : 'Sndbuf',
'SpuriousFrDetected' : 'SpuriousFrDetected',
'IpTosOut' : 'IpTosOut',
'State' : 'State',
'Nagle' : 'Nagle',
'CurReasmQueue' : 'CurReasmQueue',
'ECESent' : 'ECESent',
'DupAcksOut' : 'DupAcksOut',
'SndLimTimeCwnd' : 'SndLimTimeCwnd',
'srcPort' : 'LocalPort',
'CurSsthresh' : 'CurSsthresh',
'MSSSent' : 'MSSSent',
'SpuriousRtoDetected' : 'SpuriousRtoDetected',
'CurAppRQueue' : 'CurAppRQueue',
'DupAcksIn' : 'DupAcksIn',
'LimCwnd' : 'LimCwnd',
'TimeStamps' : 'TimeStamps',
'MinSsthresh' : 'MinSsthresh',
'RcvRTT' : 'RcvRTT',
'SACKsRcvd' : 'SACKsRcvd',
'SendStall' : 'SendStall',
'MaxMSS' : 'MaxMSS',
'SndLimTimeRwin' : 'SndLimTimeRwin',
'SegsIn' : 'SegsIn',
'RTTVar' : 'RTTVar',
'SndLimTransCwnd' : 'SndLimTransCwnd',
'CurAppWQueue' : 'CurAppWQueue',
'OtherReductions' : 'OtherReductions',
'IpTosIn' : 'IpTosIn',
'SndInitial' : 'SndInitial',
'MaxPipeSize' : 'MaxPipeSize',
'WinScaleRcvd' : 'WinScaleRcvd',
'PreCongSumCwnd' : 'PreCongSumCwnd',
'InRecovery' : 'InRecovery',
'RetranThresh' : 'RetranThresh',
'SubsequentTimeouts' : 'SubsequentTimeouts',
'PreCongSumRTT' : 'PreCongSumRTT',
'ElapsedMicroSecs' : 'ElapsedMicroSecs',
'LocalAddressType' : 'LocalAddressType',
'DSACKDups' : 'DSACKDups',
'MinRTO' : 'MinRTO',
'MinMSS' : 'MinMSS',
'WillSendSACK' : 'WillSendSACK',
'ECN' : 'ECN',
'MaxSsthresh' : 'MaxSsthresh',
'PipeSize' : 'PipeSize',
'SumOctetsReordered' : 'SumOctetsReordered',
'MinRTT' : 'MinRTT',
'MaxCaCwnd' : 'MaxCaCwnd',
'SumRTT' : 'SumRTT',
'PostCongSumRTT' : 'PostCongSumRTT',
'RecInitial' : 'RecInitial',
'DupAckEpisodes' : 'DupAckEpisodes',
'SACKBlocksRcvd' : 'SACKBlocksRcvd',
'WillUseSACK' : 'WillUseSACK',
'srcIP' : 'LocalAddress',
'ThruOctetsAcked' : 'ThruOctetsAcked',
'MaxRwinSent' : 'MaxRwinSent',
'MaxRwinRcvd' : 'MaxRwinRcvd',
'SlowStart' : 'SlowStart',
'MaxSsCwnd' : 'MaxSsCwnd',
'SegsRetrans' : 'SegsRetrans',
'CongOverCount' : 'CongOverCount',
'LimMSS' : 'LimMSS',
'CurRTO' : 'CurRTO',
'CERcvd' : 'CERcvd',
'ElapsedSecs' : 'ElapsedSecs',
'CurRetxQueue' : 'CurRetxQueue',
'MaxAppRQueue' : 'MaxAppRQueue',
'Cwnd' : 'CurCwnd',
'SoftErrors' : 'SoftErrors',
'SndLimTransSnd' : 'SndLimTransSnd',
'CountRTT' : 'CountRTT',
'PostCongCountRTT' : 'PostCongCountRTT',
'BytesSentOut' : 'DataOctetsOut',
'StartTimeMicroSecs' : 'StartTimeMicroSecs',
'SmoothedRTT' : 'SmoothedRTT',
'RcvNxt' : 'RcvNxt',
'dstPort' : 'RemPort',
'CongAvoid' : 'CongAvoid',
'ZeroRwinSent' : 'ZeroRwinSent',
'Timeouts' : 'Timeouts',
'dstIP' : 'RemAddress',
'SndMax' : 'SndMax',
'SegmentsOut' : 'SegsOut',
'SndUna' : 'SndUna',
'MaxRetxQueue' : 'MaxRetxQueue',
'CurRwinSent' : 'CurRwinSent',
'FastRetran' : 'FastRetran',
'BytesReceived' : 'DataOctetsIn',
'LimSsthresh' : 'LimSsthresh',
'SndNxt' : 'SndNxt',
'RemAddressType' : 'RemAddressType',
'ActiveOpen' : 'ActiveOpen',
'CurTimeoutCount' : 'CurTimeoutCount',
'MaxAppWQueue' : 'MaxAppWQueue',
'MaxReasmQueue' : 'MaxReasmQueue',
'DataSegsIn' : 'DataSegsIn',
'CurMSS' : 'CurMSS'
}
web10g_var_location = {
'LocalAddressType' : 0,
'LocalAddress' : 1,
'LocalPort' : 2,
'RemAddressType' : 3,
'RemAddress' : 4,
'RemPort' : 5,
'SegsOut' : 6,
'DataSegsOut' : 7,
'DataOctetsOut' : 8,
'SegsRetrans' : 9,
'OctetsRetrans' : 10,
'SegsIn' : 11,
'DataSegsIn' : 12,
'DataOctetsIn' : 13,
'ElapsedSecs' : 14,
'ElapsedMicroSecs' : 15,
'CurMSS' : 16,
'PipeSize' : 17,
'MaxPipeSize' : 18,
'SmoothedRTT' : 19,
'CurRTO' : 20,
'CongSignals' : 21,
'CurCwnd' : 22,
'CurSsthresh' : 23,
'Timeouts' : 24,
'CurRwinSent' : 25,
'MaxRwinSent' : 26,
'ZeroRwinSent' : 27,
'CurRwinRcvd' : 28,
'MaxRwinRcvd' : 29,
'ZeroRwinRcvd' : 30,
'SndLimTransRwin' : 31,
'SndLimTransCwnd' : 32,
'SndLimTransSnd' : 33,
'SndLimTimeRwin' : 34,
'SndLimTimeCwnd' : 35,
'SndLimTimeSnd' : 36,
'SendStall' : 37,
'RetranThresh' : 38,
'NonRecovDAEpisodes' : 39,
'SumOctetsReordered' : 40,
'NonRecovDA' : 41,
'SampleRTT' : 42,
'RTTVar' : 43,
'MaxRTT' : 44,
'MinRTT' : 45,
'SumRTT' : 46,
'CountRTT' : 47,
'MaxRTO' : 48,
'MinRTO' : 49,
'IpTtl' : 50,
'IpTosIn' : 51,
'IpTosOut' : 52,
'PreCongSumCwnd' : 53,
'PreCongSumRTT' : 54,
'PostCongSumRTT' : 55,
'PostCongCountRTT' : 56,
'ECNsignals' : 57,
'DupAckEpisodes' : 58,
'RcvRTT' : 59,
'DupAcksOut' : 60,
'CERcvd' : 61,
'ECESent' : 62,
'ActiveOpen' : 63,
'MSSSent' : 64,
'MSSRcvd' : 65,
'WinScaleSent' : 66,
'WinScaleRcvd' : 67,
'TimeStamps' : 68,
'ECN' : 69,
'WillSendSACK' : 70,
'WillUseSACK' : 71,
'State' : 72,
'Nagle' : 73,
'MaxSsCwnd' : 74,
'MaxCaCwnd' : 75,
'MaxSsthresh' : 76,
'MinSsthresh' : 77,
'InRecovery' : 78,
'DupAcksIn' : 79,
'SpuriousFrDetected' : 80,
'SpuriousRtoDetected' : 81,
'SoftErrors' : 82,
'SoftErrorReason' : 83,
'SlowStart' : 84,
'CongAvoid' : 85,
'OtherReductions' : 86,
'CongOverCount' : 87,
'FastRetran' : 88,
'SubsequentTimeouts' : 89,
'CurTimeoutCount' : 90,
'AbruptTimeouts' : 91,
'SACKsRcvd' : 92,
'SACKBlocksRcvd' : 93,
'DSACKDups' : 94,
'MaxMSS' : 95,
'MinMSS' : 96,
'SndInitial' : 97,
'RecInitial' : 98,
'CurRetxQueue' : 99,
'MaxRetxQueue' : 100,
'CurReasmQueue' : 101,
'MaxReasmQueue' : 102,
'SndUna' : 103,
'SndNxt' : 104,
'SndMax' : 105,
'ThruOctetsAcked' : 106,
'RcvNxt' : 107,
'ThruOctetsReceived' : 108,
'CurAppWQueue' : 109,
'MaxAppWQueue' : 110,
'CurAppRQueue' : 111,
'MaxAppRQueue' : 112,
'LimCwnd' : 113,
'LimSsthresh' : 114,
'LimRwin' : 115,
'LimMSS' : 116,
'OtherReductionsCV' : 117,
'OtherReductionsCM' : 118,
'StartTimeSecs' : 119,
'StartTimeMicroSecs' : 120,
'Sndbuf' : 121,
'Rcvbuf' : 122
}
web10g_string_type_var = [1, 4]
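# Illustrative helper (an added sketch, not called anywhere in this file): it shows how a
# requested stat name such as 'Cwnd' resolves through web10g_types_dict and
# web10g_var_location to a snapshot column index, and how one '#'-separated snapshot
# line returned by the measurement step is decoded. The helper name and sample-line
# handling mirror the parsing done in connMeasureRun below; columns listed in
# web10g_string_type_var stay strings, everything else becomes int, and field 0 is the cid.
def _decode_snapshot_line(stat_names, snapshot_line):
    cols = sorted(web10g_var_location[web10g_types_dict[name]]
                  for name in stat_names if name in web10g_types_dict)
    fields = snapshot_line.split('#')
    cid = int(fields[0])
    values = {}
    for i in range(len(cols)):
        raw = fields[i + 1]
        values[cols[i]] = str(raw) if cols[i] in web10g_string_type_var else int(raw)
    return cid, values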
def connMeasureRun(jobFlowToM, nothing):
#debugLog('conn', 'job flow to measure in conn:', jobFlowToM)
connMeasureTimestamp = 'Begin${0:6f}'.format(time.time())
#EvalLog('{0:6f},91,start connMeasure of jobFlows: {1}'.format(time.time(), jobFlowToM))
skToMByCid = web10g.IntStringDict()
skToMByTuple = web10g.StringStringDict()
skWithJobFlow = {}
statsToM = web10g.IntList()
statsToMPy = {}
for jobFlow in jobFlowToM:
#debugLog('conn', 'jobFlow: ', jobFlow, 'sk list:', agentManager.sourceJobSkList[jobFlow])
if jobFlow in agentManager.sourceJobSkList:
for sockfd in agentManager.sourceJobSkList[jobFlow]:
sk = agentManager.socketTable[sockfd]
if sk.cid:
skToMByCid[sk.cid] = sk.sockfd
else:
theTuple = sk.GetTuple()
skToMByTuple[theTuple] = sk.sockfd
if sockfd not in skWithJobFlow:
skWithJobFlow[sockfd] = []
skWithJobFlow[sockfd].append(jobFlow)
sourceJob = agentManager.sourceJobTable[jobFlow]
for name in sourceJob.measureStats:
if name in web10g_types_dict:
statsToM.append(web10g_var_location[web10g_types_dict[name]])
statsToMPy[web10g_var_location[web10g_types_dict[name]]] = None
#debugLog('conn', 'skToMByCid: ', skToMByCid, 'skToMByTuple:', skToMByTuple)
# take snapshot via web10G
statsToMPy = sorted(statsToMPy.keys())
connMeasureTimestamp += '#DoneFindSk${0:6f}${1}'.format(time.time(), (skToMByCid.size() + skToMByTuple.size()))
agentManager.evalTimestamp += '#DoneFindSk${0:6f}${1}'.format(time.time(), (skToMByCid.size() + skToMByTuple.size()))
if IsLazyTableEnabled():
if skToMByCid.size() or skToMByTuple.size():
skSnapshot = web10g.measure(skToMByCid, skToMByTuple, statsToM)
else:
skSnapshot = {}
else:
skSnapshot = web10g.measure(skToMByCid, skToMByTuple, statsToM)
#EvalLog('{0:6f},109,no lazy m: number of sockets for measurement: {1}'.format(time.time(), len(skSnapshot.keys())))
connMeasureTimestamp += '#DoneWeb10GMeasure${0:6f}${1}'.format(time.time(), (skToMByCid.size() + skToMByTuple.size()))
agentManager.evalTimestamp += '#DoneWeb10GMeasure${0:6f}${1}'.format(time.time(), (skToMByCid.size() + skToMByTuple.size()))
# generate measure results for runJobs
sockStats = {}
for jobFlow in jobFlowToM:
#EvalLog('{0:6f},115,start job data {1}'.format(time.time(), jobFlow))
measureResults = []
sourceJob = agentManager.sourceJobTable[jobFlow]
if jobFlow in agentManager.sourceJobSkList:
for sockfd in agentManager.sourceJobSkList[jobFlow]:
if (sockfd in skSnapshot):
if sockfd not in sockStats:
sockStats[sockfd] = {}
data = skSnapshot[sockfd].split('#')
agentManager.socketTable[sockfd].setCid(int(data[0]))
for i in range(len(statsToMPy)):
if statsToMPy[i] in web10g_string_type_var:
sockStats[sockfd][statsToMPy[i]] = str(data[i+1])
else:
sockStats[sockfd][statsToMPy[i]] = int(data[i+1])
#debugLog('conn', 'got snapshot: ', sockfd, \
# 'current time:', time.time(), \
# 'snapshot time:', (snapshot['StartTimeSecs']+snapshot['ElapsedSecs']+float(snapshot['StartTimeMicroSecs'])/1000000.0+float(snapshot['ElapsedMicroSecs'])/1000000.0))
result = []
for name in sourceJob.measureStats:
if name == 'BytesWritten':
result.append(agentManager.socketTable[sockfd].bytesWritten)
elif name == 'app':
result.append(agentManager.socketTable[sockfd].app)
elif name == 'srcHost':
result.append(str(get_mac()))
elif name == 'CurrentTime':
result.append(time.time())
#elif name == 'all':
# for value in snapshot.itervalues():
# result.append(value)
else:
result.append(sockStats[sockfd][web10g_var_location[web10g_types_dict[name]]])
measureResults.append(result)
#EvalLog('{0:6f},116,done job data {1}'.format(time.time(), jobFlow))
if measureResults:
(jobId, flowId) = decomposeKey(jobFlow)
(_, goFunc) = agentManager.eventAndGoFunc[jobId][flowId]
goThread = Thread(target=runGo, args=(goFunc, measureResults, jobId, flowId))
goThread.daemon = True
goThread.start()
#evalTime += '#{0:6f}'.format(time.time())
#EvalLog('{0:6f},96,done one round of conn measurement for jobFlows {1}'.format(time.time(), jobFlowToM))
connMeasureTimestamp += '#DoneOneRoundConnMeasure${0:6f}'.format(time.time())
agentManager.measureLatency += '#DoneOneRoundConnMeasure${0:6f}'.format(time.time())
LogUtil.EvalLog('OneRoundOfConnMeasure', connMeasureTimestamp)
def runGo(goFunc, data, jobId, flowId):
agentManager.evalTimestamp += '#StartRunGoOfJobFlow${0:6f}${1}${2}'.format(time.time(), jobId, flowId)
try:
#EvalLog('{0:6f},94,start go function for jobId {1} flowId {2}'.format(time.time(), jobId, flowId))
goFunc(data)
#evalTime += '#{0:6f}'.format(time.time())
except Exception, msg:
logging.warning('go thread of jobId {0} flowId {1} caught exception: {2}'.format(jobId, flowId, msg))
print 'go thread caught exception'
print msg
traceback.print_exc()
finally:
#EvalLog('{0:6f},95,done go function for jobId {1} flowId {2}'.format(time.time(), jobId, flowId))
#evalTime += '#{0:6f}'.format(time.time())
#EvalLog('{0:6f},118,{1}'.format(time.time(), evalTime))
#WriteLogs()
LogUtil.OutputEvalLog()
if __name__ == '__main__':
for key in web10g_types_dict.iterkeys():
print '\'{0}\','.format(key)
|
train_pg.py
|
import numpy as np
import tensorflow as tf
import gym
import logz
import scipy.signal
import os
import time
import inspect
from multiprocessing import Process
#============================================================================================#
# Utilities
#============================================================================================#
def build_mlp(
input_placeholder,
output_size,
scope,
n_layers=2,
size=64,
activation=tf.tanh,
output_activation=None
):
#========================================================================================#
# ----------SECTION 3----------
# Network building
#
# Your code should make a feedforward neural network (also called a multilayer perceptron)
# with 'n_layers' hidden layers of size 'size' units.
#
# The output layer should have size 'output_size' and activation 'output_activation'.
#
# Hint: use tf.layers.dense
#========================================================================================#
with tf.variable_scope(scope):
# MY_CODE_HERE
hidden = input_placeholder
for i in range(n_layers):
hidden = tf.layers.dense(hidden, size, activation, name='blah' + str(i))
return tf.layers.dense(hidden, output_size, output_activation)
def pathlength(path):
return len(path["reward"])
def reward_to_q(rewards, gamma, reward_to_go):
q = np.zeros_like(rewards)
T = len(rewards)
if reward_to_go:
q += rewards
for i in range(1, T):
            q[:(T - i)] += (gamma ** i) * rewards[i:T]  # add discounted future rewards; the old in-place q slice dropped discount factors
else:
r = 0
for i in range(T - 1, -1, -1):
r = rewards[i] + gamma * r
q = r * np.ones_like(q)
return q
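# Added sanity check (a hedged, illustrative helper; defined but never called).
# With rewards [1, 1, 1] and gamma = 0.5 the two modes should give:
#   reward_to_go=True  -> [1 + 0.5 + 0.25, 1 + 0.5, 1] = [1.75, 1.5, 1.0]
#   reward_to_go=False -> the full discounted return 1.75 repeated at every step.
def _reward_to_q_example():
    rewards = np.array([1.0, 1.0, 1.0])
    assert np.allclose(reward_to_q(rewards, 0.5, True), [1.75, 1.5, 1.0])
    assert np.allclose(reward_to_q(rewards, 0.5, False), [1.75, 1.75, 1.75])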
#============================================================================================#
# Policy Gradient
#============================================================================================#
# batch_size is more natural for PG as we need to take average over paths.
# timesteps_per_batch is more relevant for Q-learning as learning is done step by step.
# CartPole
# Here is a good run
# python train_pg.py CartPole-v0 --n_layers 4 --target_reward 200 --learning_rate 1e-2 --nn_baseline --batch_size 10
# ********** Iteration 8 ************
# total trials: 90
# ----------------------------------------
# | Time | 31.1 |
# | Iteration | 8 |
# | AverageReturn | 200 |
# | StdReturn | 0 |
# | MaxReturn | 200 |
# | MinReturn | 200 |
# | EpLenMean | 200 |
# | EpLenStd | 0 |
# | TimestepsThisBatch | 2e+03 |
# | TimestepsSoFar | 1.15e+04 |
# ----------------------------------------
#
# MountainCar
# Working poorly. It seems some good exploration is needed to get any positive path.
#
# Acrobot
# Similar to MountainCar, but it is possible to randomly get a positive path,
# and then the model starts to learn.
# I can get to about 90 steps. What is the "solve" criterion?
# https://github.com/jonholifield/Acrobot-v1
# Box2D
# https://github.com/pybox2d/pybox2d/blob/master/INSTALL.md
# 'sudo' python setup.py install: should not use sudo in venv, it complains about setuptools not found
# LunarLander
# It does not do that well but works to some extent.
def train_PG(exp_name='',
env_name='CartPole-v0',
n_iter=100,
gamma=1.0,
# min_timesteps_per_batch=1000,
batch_size=20,
max_path_length=None,
learning_rate=5e-3,
reward_to_go=True,
animate=True,
logdir=None,
normalize_advantages=True,
nn_baseline=False,
seed=0,
# network arguments
n_layers=1,
size=32,
target_reward=None
):
start = time.time()
TODO = 1
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
locals_ = locals()
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
# Make the gym environment
env = gym.make(env_name)
# Is this env continuous, or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
assert discrete, 'only discrete is implemented'
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
#========================================================================================#
# Notes on notation:
#
# Symbolic variables have the prefix sy_, to distinguish them from the numerical values
# that are computed later in the function
#
# Prefixes and suffixes:
# ob - observation
# ac - action
# _no - this tensor should have shape (batch size /n/, observation dim)
# _na - this tensor should have shape (batch size /n/, action dim)
# _n - this tensor should have shape (batch size /n/)
#
# Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
# is None
#========================================================================================#
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# ----------SECTION 4----------
# Placeholders
#
# Need these for batch observations / actions / advantages in policy gradient loss function.
#========================================================================================#
sy_ob_no = tf.placeholder(shape=[None, ob_dim], name="ob", dtype=tf.float32)
if discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, ac_dim], name="ac", dtype=tf.float32)
# Define a placeholder for advantages
sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
#========================================================================================#
# ----------SECTION 4----------
# Networks
#
# Make symbolic operations for
# 1. Policy network outputs which describe the policy distribution.
# a. For the discrete case, just logits for each action.
#
# b. For the continuous case, the mean / log std of a Gaussian distribution over
# actions.
#
# Hint: use the 'build_mlp' function you defined in utilities.
#
# Note: these ops should be functions of the placeholder 'sy_ob_no'
#
# 2. Producing samples stochastically from the policy distribution.
# a. For the discrete case, an op that takes in logits and produces actions.
#
# Should have shape [None]
#
# b. For the continuous case, use the reparameterization trick:
# The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
#
# mu + sigma * z, z ~ N(0, I)
#
# This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
#
# Should have shape [None, ac_dim]
#
# Note: these ops should be functions of the policy network output ops.
#
# 3. Computing the log probability of a set of actions that were actually taken,
# according to the policy.
#
# Note: these ops should be functions of the placeholder 'sy_ac_na', and the
# policy network output ops.
#
#========================================================================================#
if discrete:
# MY_CODE_HERE
sy_logits_na = build_mlp(
sy_ob_no,
ac_dim,
"nn_policy",
n_layers=n_layers,
size=size)
sy_sampled_ac = tf.multinomial(sy_logits_na, 1) # Hint: Use the tf.multinomial op
sy_logprob_n = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=sy_logits_na, labels=sy_ac_na)
else:
# YOUR_CODE_HERE
sy_mean = TODO
sy_logstd = TODO # logstd should just be a trainable variable, not a network output.
sy_sampled_ac = TODO
sy_logprob_n = TODO # Hint: Use the log probability under a multivariate gaussian.
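        # Added sketch (hedged): one conventional way to fill in the TODOs above for a
        # diagonal-Gaussian policy. It is never exercised here, because only discrete
        # action spaces are asserted earlier in this function.
        sy_mean = build_mlp(sy_ob_no, ac_dim, "nn_policy", n_layers=n_layers, size=size)
        sy_logstd = tf.get_variable("logstd", shape=[ac_dim], dtype=tf.float32)
        sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * tf.random_normal(tf.shape(sy_mean))
        # Negative log probability (up to an additive constant), matching the sign
        # convention of the cross-entropy used in the discrete branch above.
        sy_logprob_n = 0.5 * tf.reduce_sum(
            tf.square((sy_ac_na - sy_mean) / tf.exp(sy_logstd)), axis=1) + tf.reduce_sum(sy_logstd)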
#========================================================================================#
# ----------SECTION 4----------
# Loss Function and Training Operation
#========================================================================================#
# MY_CODE_HERE
# Loss function that we'll differentiate to get the policy gradient.
# TODO: reduce_mean is not really correct here
loss = tf.reduce_mean(sy_logprob_n * sy_adv_n)
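    # Added note: with reduce_mean the gradient is averaged over every timestep in the
    # batch instead of summed per trajectory and averaged over trajectories, so the
    # estimate is smaller by roughly a factor of the mean episode length. The ascent
    # direction is unchanged; only the effective learning-rate scale differs.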
update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
#========================================================================================#
# ----------SECTION 5----------
# Optional Baseline
#========================================================================================#
if nn_baseline:
baseline_prediction = tf.squeeze(build_mlp(
sy_ob_no,
1,
"nn_baseline",
n_layers=n_layers,
size=size))
# Define placeholders for targets, a loss function and an update op for fitting a
# neural network baseline. These will be used to fit the neural network baseline.
# MY_CODE_HERE
sy_q_n = tf.placeholder(shape=[None], name='q', dtype=tf.float32)
baseline_loss = tf.nn.l2_loss(baseline_prediction - sy_q_n)
baseline_update_op = tf.train.AdamOptimizer(learning_rate).minimize(baseline_loss)
#========================================================================================#
# Tensorflow Engineering: Config, Session, Variable initialization
#========================================================================================#
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
sess = tf.Session(config=tf_config)
sess.__enter__() # equivalent to `with sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
tf_board = os.path.join('/tmp/gube/hw2')
writer = tf.summary.FileWriter(os.path.join(tf_board, str(int(time.time()))))
writer.add_graph(sess.graph)
merged_summary = tf.summary.merge_all()
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
total_trials = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
trials_this_batch = 0
paths = []
while True:
ob = env.reset()
obs, acs, rewards = [], [], []
animate_this_episode=(len(paths)==0 and (itr % 5 == 0) and animate)
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.05)
obs.append(ob)
ac = sess.run(sy_sampled_ac, feed_dict={sy_ob_no : ob[None]})
ac = ac[0][0] # was ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > max_path_length:
break
total_trials += 1
trials_this_batch += 1
path = {"observation" : np.array(obs),
"reward" : np.array(rewards),
"action" : np.array(acs)}
paths.append(path)
timesteps_this_batch += pathlength(path)
# if timesteps_this_batch > min_timesteps_per_batch:
# break
if trials_this_batch == batch_size:
break
total_timesteps += timesteps_this_batch
print('total trials:', total_trials)
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
#====================================================================================#
# ----------SECTION 4----------
# Computing Q-values
#
# Your code should construct numpy arrays for Q-values which will be used to compute
# advantages (which will in turn be fed to the placeholder you defined above).
#
# Recall that the expression for the policy gradient PG is
#
# PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
#
# where
#
# tau=(s_0, a_0, ...) is a trajectory,
# Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
# and b_t is a baseline which may depend on s_t.
#
# You will write code for two cases, controlled by the flag 'reward_to_go':
#
# Case 1: trajectory-based PG
#
# (reward_to_go = False)
#
# Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
# entire trajectory (regardless of which time step the Q-value should be for).
#
# For this case, the policy gradient estimator is
#
# E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
#
# where
#
# Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
#
# Thus, you should compute
#
# Q_t = Ret(tau)
#
# Case 2: reward-to-go PG
#
# (reward_to_go = True)
#
# Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
# from time step t. Thus, you should compute
#
# Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
#
#
# Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
# like the 'ob_no' and 'ac_na' above.
#
#====================================================================================#
# MY_CODE_HERE
q_n = np.concatenate([reward_to_q(path['reward'], gamma, reward_to_go) for path in paths])
#====================================================================================#
# ----------SECTION 5----------
# Computing Baselines
#====================================================================================#
if nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current or previous batch of Q-values. (Goes with Hint
# #bl2 below.)
# MY_CODE_HERE
# The bootstrap version uses r_t + v(s_{t+1}) - v(s_t), which is biased
b_n = sess.run(baseline_prediction, feed_dict={sy_ob_no: ob_no})
adv_n = q_n - b_n
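            # Added, hedged alternative for hint #bl1: rescale the raw prediction to the
            # Q-value statistics of this batch before subtracting, for example
            #   b_n = np.mean(q_n) + np.std(q_n) * (b_n - np.mean(b_n)) / (np.std(b_n) + 1e-8)
            # This file fits the baseline to raw q_n below, so the raw b_n is used here.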
else:
adv_n = q_n.copy()
#====================================================================================#
# ----------SECTION 4----------
# Advantage Normalization
#====================================================================================#
if normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
# MY_CODE_HERE
adv_mu = np.mean(adv_n)
adv_std = np.std(adv_n)
# Could be more robust than this
if adv_std == 0.0:
return
# The normalization could be problematic.
# For environments like CartPole, the reward is an integer and is capped at 200.
            # When not using a baseline, adv_n could all be 200 and adv_std = 0.
adv_n = (adv_n - adv_mu) / adv_std
#====================================================================================#
# ----------SECTION 5----------
# Optimizing Neural Network Baseline
#====================================================================================#
if nn_baseline:
# ----------SECTION 5----------
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 above.)
# MY_CODE_HERE
# TODO: what is the right way to fit?
# 1. Using fixed number of steps.
# It might not balance the good vs bad paths well, but 100 seems pretty good.
# 2. Using timesteps as number of steps. This is CartPole specific.
print('timesteps:', timesteps_this_batch)
for i in range(100):
sess.run(baseline_update_op, feed_dict={sy_ob_no: ob_no, sy_q_n: q_n})
#====================================================================================#
# ----------SECTION 4----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
# MY_CODE_HERE
sess.run(update_op, feed_dict={sy_ob_no: ob_no,
sy_ac_na: ac_na,
sy_adv_n: adv_n})
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
# This stopping criterion is not robust when the batch size is small.
if target_reward is not None:
if np.mean([path["reward"].sum() for path in paths]) >= target_reward:
return
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=1)
parser.add_argument('--size', '-s', type=int, default=32)
parser.add_argument('--target_reward', type=float, default=None)
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
# min_timesteps_per_batch=args.batch_size,
batch_size=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
size=args.size,
target_reward=args.target_reward
)
        # Awkward, hacky use of separate processes, because Tensorflow does not like
        # repeatedly calling train_PG in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
p.join()
if __name__ == "__main__":
main()
|
battleships.py
|
"""
Copyright (c) 2021 Matthew Nickson, All rights reserved
You may use, distribute and modify this code under the
terms of the MIT license which can be found in the
project root.
You should have received a copy of the MIT license with
this file. If not, please write to :
mnickson@sidingsmedia.com
or visit:
https://raw.githubusercontent.com/Computroniks/python-battleships/main/LICENSE
Python Battleships
------------------
A one sided game of battleships that was written for a
school project.
"""
#Import required modules
import platform # To get current system
import os #For file handling
import pickle #For saving game maps to disk
import hmac, hashlib #To sign pickle files to prevent remote code execution
import sys #To exit the program
import shutil #To get terminal size
import threading, itertools, time #For the spinner
import urllib.request, distutils.version #To download the help files
import json #For reading score and settings files
import string #To verify filenames
import random #To generate board
import copy #To copy nested dictionaries
#Import platform specific module for 'press any key' prompt
if(platform.system() == 'Windows'):
import msvcrt
elif(platform.system() == 'Darwin' or platform.system() == 'Linux'):
import termios
else:
sys.exit('This software only works on Windows or Unix operating systems')
class Helpers():
"""Class to hold all related helper functions
Methods
-------
    anyKey()
This function blocks the main thread until any key
is pressed
clearScreen()
This function runs the platform specific command to clear
the terminal window
"""
def anyKey(message:str = 'Press any key to continue...') -> None:
"""Waits for any key to be pressed
Blocks the main thread until a key is pressed
Parameters
----------
message : str, optional
The message that is displayed at the prompt.
Returns
-------
None
"""
if ('idlelib.run' in sys.modules):
input('Press enter to continue...')
elif(platform.system() == 'Windows'):
print(message, end='\r')
msvcrt.getch()
elif(platform.system() == 'Darwin' or platform.system() == 'Linux'):
print(message, end='\r')
fd = sys.stdin.fileno()
oldterm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
try:
result = sys.stdin.read(1)
except IOError:
pass
finally:
termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
else:
sys.exit('This software only works on Windows or Unix operating systems')
def clearScreen() -> None:
"""Clears the current console window
Runs the correct system command to clear the current
console window. Note this will not work in IDLE as it
runs system commands.
Returns
-------
None
"""
if ('idlelib.run' in sys.modules):
for i in range(3): #Avoid idle squeezing the text
print('\n'*49)
elif(platform.system() == 'Windows'):
os.system('cls')
elif(platform.system() == 'Darwin' or platform.system() == 'Linux'):
os.system('clear')
else:
print('\n'*100)
return
def formatFileName(unsafeFileName:str) -> str:
"""Take a string and return a valid filename constructed from the string.
        Uses a whitelist approach: any characters not present in validChars are
        removed.
Parameters
----------
unsafeFileName : string
            This is the user input to be sanitized and formatted
Returns
-------
string
            The sanitized and formatted file name
"""
validChars = "-_.() %s%s" % (string.ascii_letters, string.digits)
safeFileName = ''.join(c for c in unsafeFileName if c in validChars)
safeFileName = safeFileName.replace(' ','_') # I don't like spaces in filenames.
return safeFileName
#End class Helpers()
class Spinner():
"""This class handles the spinning icon
The little nice looking spinning icon at the end of the download message
is controlled by this class.
Methods
-------
writeNext(message, delay)
Writes the next spinner icon to the screen
removeSpinner(cleanup)
Removes the spinner
spinnerTask()
        The main controller for the spinner
"""
def __init__(self, message:str, delay:float=0.1) -> None:
"""
Parameters
----------
message : str
The message to be displayed before the spinner
delay : float, optional
            The delay in seconds between each step of the spinner's cycle (default = 0.1)
Returns
-------
None
"""
self.spinner = itertools.cycle(['-', '/', '|', '\\'])
self.delay = delay
self.busy = False
self.spinnerVisible = False
sys.stdout.write(message)
return
def writeNext(self) -> None:
"""Writes the next step of spinner
Writes the next step of the spinners cycle to the screen
Returns
-------
None
"""
with self._screen_lock:
if not self.spinnerVisible:
sys.stdout.write(next(self.spinner))
self.spinnerVisible = True
sys.stdout.flush()
return
def removeSpinner(self, cleanup:bool=False) -> None:
"""Removes the spinner
Removes the spinner from the screen
Parameters
----------
cleanup : bool
Whether to cleanup the spinner
"""
with self._screen_lock:
if self.spinnerVisible:
sys.stdout.write('\b')
self.spinnerVisible = False
if cleanup:
sys.stdout.write(' ') # overwrite spinner with blank
sys.stdout.write('\r') # move to next line
sys.stdout.flush()
return
def spinnerTask(self) -> None:
"""Controls the spinner
This method controls the function of the spinner and increments
        its position
Returns
-------
None
"""
while self.busy:
self.writeNext()
time.sleep(self.delay)
self.removeSpinner()
return
def __enter__(self) -> None:
if sys.stdout.isatty():
self._screen_lock = threading.Lock()
self.busy = True
self.thread = threading.Thread(target=self.spinnerTask)
self.thread.start()
return
def __exit__(self, exception, value, tb) -> None:
if sys.stdout.isatty():
self.busy = False
self.removeSpinner(cleanup=True)
else:
sys.stdout.write('\r')
return
#End class Spinner()
# Define custom exceptions
class Error(Exception):
"""Base class for other exceptions
This is the base class on which all custom errors are based.
Attributes
----------
message : str, optional
An explanation of the error
"""
def __init__(self, message: str = "An unexpected error occured") -> None:
"""
Calls parent class with specified message to print out
error to screen
Returns
-------
None
"""
self.message = message
super().__init__(self.message)
return None
#End class Error()
class PositionAlreadyPopulatedError(Error):
"""Raised when position is already populated
    This error is raised when an attempt is made to place a ship in
    a position which is already populated by another ship.
Attributes
----------
message : str, optional
An explanation of the error
"""
def __init__(self, message: str = "This position is already populated") -> None:
"""
Calls parent class with specified message to print out
error to screen
Returns
-------
None
"""
self.message: str = message
super().__init__(self.message)
return
#End class PositionAlreadyPopulatedError()
class OutOfBoundsError(Error):
"""Raised when position out of bounds
    This error is raised when an attempt is made to place a ship in
    a position which is not within the bounds of the game board.
Attributes
----------
message : str, optional
An explanation of the error
"""
def __init__(self, message: str = "This position is out of bounds") -> None:
"""
Calls parent class with specified message to print out
error to screen
Returns
-------
None
"""
self.message: str = message
super().__init__(self.message)
return
#End class OutOfBoundsError()
class Settings():
"""This class handles all settings files
Anything to do with settings is dealt by this class. This includes
saveLocation.
Attributes
----------
self.saveLocation : str
This is the file path for the save location
self.settingsData : dict
This is the contents of settings.json
Methods
-------
"""
def __init__(self) -> None:
"""
Establishes current system to generate correct file path before
checking that correct directories and files exist and creating
them if they don't
Returns
-------
None
"""
#Establish what platform we are on to get correct file location
if(platform.system() == 'Windows'):
self.saveLocation = os.path.expandvars("%LOCALAPPDATA%/battleships")
elif(platform.system() == 'Darwin'):
self.saveLocation = os.path.expanduser('~/Library/battleships')
elif(platform.system() == 'Linux'):
self.saveLocation = os.path.expanduser('~/.battleships')
else:
self.saveLocation = './'
#Directories and files to create
self.dirs = [
'saved_games'
]
self.files = [
'score.json',
'settings.json',
'scores.json',
'saved_games/saves.json'
]
if(os.path.exists(self.saveLocation) == False):
try:
os.mkdir(self.saveLocation)
except OSError:
sys.exit(f"Creation of directory {self.saveLocation} failed.\n Please create this directory manually and try again.")
#Iterate through dirs and create missing
for i in self.dirs:
if (os.path.exists(os.path.join(self.saveLocation, i)) == False):
try:
os.mkdir(os.path.join(self.saveLocation, i))
except OSError:
sys.exit(f"Creation of directory {os.path.join(self.saveLocation, i)} failed.\n Please create this directory manually and try again.")
#Iterate through files and create missing
for i in self.files:
if (os.path.exists(os.path.join(self.saveLocation, i)) == False):
try:
f = open(os.path.join(self.saveLocation, i), 'w')
f.write('{}')
f.close()
except OSError:
sys.exit(f"Creation of file {os.path.join(self.saveLocation, i)} failed.\n Please create this file manually and try again.")
#Load settings.json
with open(os.path.join(self.saveLocation, 'settings.json'), 'r') as data:
self.settingsData = json.load(data)
return
def changeSetting(self, setting:str, value) -> None:
"""Changes the setting and writes change to disk
Takes the settings to change and value to change it to
and changes it in the dictionary before writing the
changes to disk.
Parameters
----------
setting : str
The setting that is to be changed
value
The value that the setting should be changed to
Returns
-------
None
"""
self.settingsData[setting] = value
with open(os.path.join(self.saveLocation, 'settings.json'), 'w') as data:
json.dump(self.settingsData, data)
return
#End class Settings()
class Board():
"""A class that handles anything to do with the game board
Attributes
----------
map : list
a 2d list that is the game board
currentShips : dict
A dictionary of all ships currently on the board and how many
        hits they have received
hits : list
A list of coordinates that have been engaged
sunkShips : list
A list of ships that have been sunk
Methods
-------
generateBoard(x, y)
Generates a board of size `x` `y`
addShip(size, posX, posY, rotDir, maxX, maxY, symbol)
Adds a ship of size `size` starting at `posX` `posY`
addRandom(x, y)
Adds all the required ships in random positions on the board
printBoard()
Prints the game board
printBoardHidden()
Prints the gameboard but hides all except hits and misses
engage(posX, posY)
Engages at a specific position
won()
Checks if all ships have been destroyed
"""
def __init__(self) -> None:
"""
Returns
-------
None
"""
self.hits: list[tuple[int, int]] = []
self.map = None
self.sunkShips:list[str] = []
self.ships = {
'A':{
'name':'Aircraft Carrier',
'size':5,
'hits':0
},
'B':{
'name':'Battleship',
'size':4,
'hits':0
},
'C':{
'name':'Cruiser',
'size':3,
'hits':0
},
'S':{
'name':'Submarine',
'size':3,
'hits':0
},
'D':{
'name':'Destroyer',
'size':2,
'hits':0
},
}
return
def generateBoard(self, x:int = 10, y:int = 10) -> None:
"""Creates a board
Creates a board of size `x` `y` and set self.map to
the generated board
Parameters
----------
x : int, optional
The width of the game board (default is 10)
y : int, optional
The height of the game board (default is 10)
Returns
-------
None
"""
self.currentShips = copy.deepcopy(self.ships) #Don't use dict.copy() as it is shallow so doesn't account for nested items
self.sunkShips:list[str] = []
self.hits:list = []
# self.hitShip:list = []
self.map: list = [[0 for i in range(x)] for j in range(y)]
return
def addShip(self, size: int, posX: int, posY: int, rotDir: bool, maxX: int, maxY: int, symbol: str) -> None:
"""Adds a ship of specified size to board starting at specified coordinates
Parameters
----------
size : int
The size of the ship.
posX : int
The x coordinate for the start of the ship
posY : int
The y coordinate for the start of the ship
rotDir : bool
The direction of the ship. True is vertical. False is horizontal.
maxX : int
The width of the board
maxY : int
The height of the board
symbol : string
The symbol to be placed on the board
Raises
------
PositionAlreadyPopulatedError
If position for ship is already taken.
OutOfBoundsError
If the position for the ship is not within the confines of the
game board.
Returns
-------
None
"""
#Check that way is clear for ship
if rotDir:
            #Two separate for loops to avoid only half placing ships
for i in range(posY, posY+size):
try:
if self.map[i][posX] != 0:
raise PositionAlreadyPopulatedError
except IndexError:
raise OutOfBoundsError
for i in range(posY, posY+size):
self.map[i][posX] = symbol
else:
for i in range(posX, posX+size):
try:
if self.map[posY][i] != 0:
raise PositionAlreadyPopulatedError
except IndexError:
raise OutOfBoundsError
for i in range(posX, posX+size):
self.map[posY][i] = symbol
return
def addRandom(self, x:int, y:int) -> None:
for key in self.ships:
while True:
self.startPos = (random.randint(0,x), random.randint(0, y))
self.rotDirection = bool(random.getrandbits(1))
try:
self.addShip(self.ships[key]['size'], self.startPos[0], self.startPos[1], self.rotDirection, x, y, key)
break
except (PositionAlreadyPopulatedError, OutOfBoundsError):
continue
def printBoard(self) -> None:
"""Prints the game board
Outputs the game board with X and Y headers
Returns
-------
None
"""
# Print x heading
print(f"|{' ':^3}|", end='')
for i in range(len(self.map[0])):
print(f'{i+1:^3}|', end='')
# Print rows with y heading
for i in range(len(self.map)):
print(f'\n|{i+1:^3}|', end='')
for j in range(len(self.map[i])):
print(f'{self.map[i][j]:^3}|', end='')
return
def printBoardHidden(self) -> None:
"""Prints the game board
This function prints out the gameboard but all items except for hits and
misses are redacted.
Returns
-------
None
"""
#temporary for debugging. remove for production
self.printBoard()
return
# Print x heading
print(f"|{' ':^3}|", end='')
for i in range(len(self.map[0])):
print(f'{i+1:^3}|', end='')
# Print rows with y heading
for i in range(len(self.map)):
print(f'\n|{i+1:^3}|', end='')
for j in range(len(self.map[i])):
if (self.map[i][j] == 'H' or self.map[i][j] == 'M'):
print(f'{self.map[i][j]:^3}|', end='')
else:
print(f"{'#':^3}|", end='')
return
def engage(self, posX: int, posY: int) -> str:
"""Engages a ship at specified position
Engages the position specified. This checks if the position has
        already been engaged and, if not, engages the position. It then
returns the result of that engagement as a string.
Parameters
----------
posX : int
The x coordinate to engage
posY : int
The y coordinate to engage
Returns
-------
string
The type of ship that has been hit
"""
posX -= 1 #Account for list starting at 0 but board starting at 1
posY -= 1
if [posX, posY] in self.hits:
print('You have already engaged this position!')
return 'AE'
else:
self.hits.append([posX, posY])
self.hitShip = self.map[posY][posX]
if self.hitShip == 0:
self.map[posY][posX] = 'M'
return 'miss'
else:
self.map[posY][posX] = 'H'
self.currentShips[self.hitShip]['hits'] += 1
return self.hitShip
def isSunk(self, ship:str) -> bool:
"""Checks if ship has been sunk
Checks if the specified ship has been sunk and returns it
as a boolean value.
Parameters
----------
ship : string
The ship to check
Returns
-------
boolean
If the specified ship has been sunk or not
"""
if self.currentShips[ship]['size'] == self.currentShips[ship]['hits']:
self.sunkShips.append(ship)
return True
else:
return False
def won(self) -> bool:
"""Checks if all ships have been sunk
Checks if all the ships on the board have been sunk and
        returns the status as a boolean value.
Returns
-------
boolean
If all of the ships on the board have been sunk
"""
if len(self.sunkShips) >= 5:
return True
else:
return False
#End class Board()
class Scoring():
"""This class handles the scoring and saving of scores
Attributes
----------
score : int
        The current user's score
Methods
-------
showScores()
print a list of top 10 scores
"""
def __init__(self, saveLocation:str) -> None:
self.score = 0
with open(os.path.join(saveLocation, 'scores.json'), 'r') as data:
self.scoresSave = json.load(data)
return
def getScores(self, ordered:bool = False) -> dict:
if ordered:
            return {k: v for k, v in sorted(self.scoresSave.items(), key=lambda item: item[1], reverse=True)} #Highest score first
else:
return self.scoresSave
def showScores(self) -> None:
"""Prints a list of the top 10 scores
Reads the contents of scores.json and then sorts by highest
before printing it to screen.
Returns
-------
None
"""
self.tempScore = dict(itertools.islice(self.getScores(True).items(), 10))
i = 0
print('Scores:')
for key in self.tempScore:
i +=1
print(f'[{i}] {key}: {self.scoresSave[key]}')
Helpers.anyKey()
return
def addScore(self, name:str, saveLocation:str, force:bool = False) -> dict:
"""Adds a score to scores file
Adds a score to the scores file with the users name as
the key. If force is not set then it checks to see if
it is going to overwrite an existing score. It returns
        a dict that contains the return status and, on error, an
        error code.
Parameters
-----------
name : string
The name to write the score under
saveLocation : string
The path to the current save location
force : bool, optional
Bypass overwriting check (default false)
Returns
-------
dict : {'status':bool, 'errCd':str}
A simple success or fail indicator. If fail returns
status false and the appropriate error code. If
success returns status true and appropriate error
code.
Error Codes
-----------
ok
Success
ovrwrt
This action will overwrite a pre-existing entry
"""
if force:
pass
else:
if name in self.scoresSave:
return {'status':False, 'errCd':'ovrwrt'}
self.scoresSave[name] = self.score
with open(os.path.join(saveLocation, 'scores.json'), 'w') as data:
json.dump(self.scoresSave, data)
return {'status':True, 'errCd':'ok'}
#End class Scoring()
class GameSave():
"""This class handles the saving and loading of game files
Methods
-------
listSave()
return a list of all saved games
saveGame()
Saves the current game to disk
loadGame()
Loads a game from disk
deleteGame()
Deletes a game from disk
"""
def __init__(self, saveLocation:str) -> None:
"""
Parameters
----------
saveLocation : string
The path to the current save location
Returns
-------
None
"""
self.defaultReturn:tuple = (None, 0, None, None, None)
with open(os.path.join(saveLocation, 'saved_games/saves.json'), 'r') as data:
self.savesFile = json.load(data)
self.saveKey:bytes = bytes('6P5OajyXaEURcLI0URJb', 'ascii') #Key for testing HMAC. Should be stored more securely
return
def listSave(self, saveLocation:str) -> list:
"""Get a list of all saved games
Parameters
----------
saveLocation : string
The path to the battleships directory
Returns
-------
list
a list of all saved games
"""
self.savedGames:list = []
for key in self.savesFile:
self.savedGames.append(key)
return self.savedGames
def saveGame(self, board:list, saveLocation:str, score:int, currentShips:dict, hits:list, sunkShips:list) -> None:
"""Saves the current gameboard
Pickles provided gameboard and then signs data using HMAC before
saving to file
Parameters
----------
board : list
The game map in list form
saveLocation : string
The path to the battleships directory
score : int
The current game score
currentShips : dict
A dictionary containing all the ships currently on the game board
hits : list
A list of all positions that have been engaged
sunkShips : list
A list of all ships that have been sunk
Returns
-------
None
"""
self.name = input('Please enter a name for this game: ')
self.pickledData = pickle.dumps(board)
self.digest = hmac.new(self.saveKey, self.pickledData, hashlib.sha256).hexdigest()
# self.savesFile[self.name] = {'fileName': Helpers.formatFileName(self.name), 'score':score, 'hash':self.digest, 'currentShips':currentShips}
self.savesFile[self.name] = {
'fileName': Helpers.formatFileName(self.name),
'score':score,
'hash':self.digest,
'currentShips':currentShips,
'hits':hits,
'sunkShips':sunkShips
}
with open(os.path.join(saveLocation, 'saved_games', f'{Helpers.formatFileName(self.name)}.pkl'), 'wb') as data:
data.write(self.pickledData)
data.close()
with open(os.path.join(saveLocation, 'saved_games/saves.json'), 'w') as data:
json.dump(self.savesFile, data)
return
def loadGame(self, saveLocation:str) -> tuple:
"""Loads a game file
        Loads the relevant game files and checks the pickled
        data's signature to verify it hasn't been modified
Parameters
----------
saveLocation : str
The path to the battleships directory
Returns
-------
tuple
list
The game map loaded from file
int
The score loaded from json file
dict
A dictionary containing all the current ships on the board
list
A list of all positions that have been engaged
list
A list of all ships that have been sunk
"""
while True:
self.fileName = input('Please enter the name of the game you wish to load or input \'view\' to view all saved games: ')
if (self.fileName == 'view'):
self.saves:list = self.listSave(saveLocation)
print('Saved Games:')
for i in range(len(self.saves)):
print(f'[{i+1}] {self.saves[i]}')
else:
break
if (self.fileName in self.savesFile):
self.recvdDigest = self.savesFile[self.fileName]['hash']
with open(os.path.join(saveLocation, 'saved_games', f'{Helpers.formatFileName(self.fileName)}.pkl'), 'rb') as data:
self.pickledData = data.read()
data.close()
self.newDigest = hmac.new(self.saveKey, self.pickledData, hashlib.sha256).hexdigest()
if (self.recvdDigest != self.newDigest):
print('Integrity check failed. Game files have been modified.')
return self.defaultReturn
else:
return (
pickle.loads(self.pickledData),
self.savesFile[self.fileName]['score'],
self.savesFile[self.fileName]['currentShips'],
self.savesFile[self.fileName]['hits'],
self.savesFile[self.fileName]['sunkShips']
)
else:
print('Failed to load game files')
return self.defaultReturn
def deleteGame(self, saveLocation:str) -> bool:
"""Deletes a game file from disk
Parameters
----------
saveLocation : string
The path to the current save location
Returns
-------
bool
Success or fail of deletion
"""
while True:
self.fileName = input('Please enter the name of the game you wish to delete or input \'view\' to view all saved games: ')
if (self.fileName == 'view'):
self.saves:list = self.listSave(saveLocation)
print('Saved Games:')
for i in range(len(self.saves)):
print(f'[{i+1}] {self.saves[i]}')
else:
break
if(input(f'Are you sure you want to delete {self.fileName}? [y/N]: ').replace(' ', '').lower() == 'y'):
self.temp = self.savesFile.pop(self.fileName, None)
with open(os.path.join(saveLocation, 'saved_games/saves.json'), 'w') as data:
json.dump(self.savesFile, data)
if (self.temp is not None):
                if(os.path.exists(os.path.join(saveLocation, 'saved_games', f'{Helpers.formatFileName(self.fileName)}.pkl'))):
                    try:
                        os.remove(os.path.join(saveLocation, 'saved_games', f'{Helpers.formatFileName(self.fileName)}.pkl'))
return True
except OSError:
return False
else:
return False
else:
return False
#End class GameSave()
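# Added, standalone sketch of the sign-then-verify pattern used by GameSave above.
# The key, payload and helper name are illustrative placeholders rather than the real
# game data; hmac.compare_digest is used because it compares digests in constant time.
def _hmac_roundtrip_demo() -> bool:
    key = b'example-key'
    payload = pickle.dumps({'demo': [0, 1, 2]})
    digest = hmac.new(key, payload, hashlib.sha256).hexdigest()
    # On load, recompute the digest over the stored bytes and compare the two.
    recomputed = hmac.new(key, payload, hashlib.sha256).hexdigest()
    return hmac.compare_digest(digest, recomputed)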
class Game():
"""This class handles the gameplay and controls all aspects of the game
Methods
-------
mainMenu()
shows the main menu
play()
The main game loop
createNew()
Generates a new game
loadGame()
loads a game from file
settings()
open the settings menu
showHelp()
show the help prompt
quit()
exit the program
"""
def __init__(self) -> None:
self.settings = Settings()
self.saveLocation:str = self.settings.saveLocation
self.scoreKeep = Scoring(self.saveLocation)
self.savedGames = GameSave(self.saveLocation)
self.gameboard = Board()
self.mainMenu()
def mainMenu(self) -> None:
"""Show the main menu"""
self.choiceMap = {
1: self.play,
2: self.createNew,
3: self.loadGame,
4: self.deleteSave,
5: self.showSave,
6: self.scoreKeep.showScores,
7: self.settingsOptions,
8: self.showHelp,
9: self.quit
}
while True:
print('Welcome to Battle Ships\nPlease choose an option:')
self.choice:int = 0
print('[1] Play\n[2] Start A New Game\n[3] Load a saved game\n[4] Delete a saved game\n[5] View saved games\n[6] View Scores\n[7] Settings\n[8] Help and troubleshooting\n[9] Quit')
while self.choice not in range(1,10):
try:
self.choice = int(input('Please choose an option [1-9]: '))
except ValueError:
pass
Helpers.clearScreen()
self.choiceMap[self.choice]()
Helpers.clearScreen()
def play(self) -> None:
"""The main game loop
This is the main game loop for battleships. This is where
all of the main game logic is.
Returns
-------
None
"""
#If no game loaded create new one
if(self.gameboard.map == None):
if self.createNew():
pass
else:
return
#Get gameboard height and width
self.xy = [len(self.gameboard.map[0]), len(self.gameboard.map)]
print('To exit press CTRL + C at any time')
#Game loop
while True:
if self.gameboard.won():
                print("All of the enemy's ships have been destroyed. You win!")
print(f'Your score is {self.scoreKeep.score}')
if input('Would you like to save your score? [y/N]: ').lower().replace(' ', '') == 'y':
self.name = input('Please enter your name: ')
self.saveResponse = self.scoreKeep.addScore(self.name, self.saveLocation)
if self.saveResponse['status']:
print('Score saved successfully')
elif self.saveResponse['status'] == False and self.saveResponse['errCd'] == 'ovrwrt':
if input('You are about to overwrite an existing entry! Are you sure you want to proceed? [y/N]: ').lower().replace(' ', '') == 'y':
self.scoreKeep.addScore(self.name, self.saveLocation, True)
else:
pass
else:
pass
Helpers.anyKey()
Helpers.clearScreen()
break
try:
print(f'Current score: {self.scoreKeep.score}')
self.gameboard.printBoardHidden()
print('')
#Get coordinates to engage
self.error = False
while True:
self.coordinates:list = input('Please enter the X and Y coordinates you wish to engage seperated by a comma: ').replace(' ', '').split(',')
                    if (len(self.coordinates) != 2):
                        print('Invalid coordinates')
                        continue
for i in range(len(self.coordinates)):
try:
self.coordinates[i] = int(self.coordinates[i])
except ValueError:
self.error = True
                        if (not (self.coordinates[i] in range(1, self.xy[i]+1))):
self.error = True
if (self.error):
self.error = False
print('Invalid coordinates')
continue
else:
break
self.engageResult = self.gameboard.engage(self.coordinates[0], self.coordinates[1])
if self.engageResult is not None:
if self.engageResult == 'miss':
print('Miss')
self.scoreKeep.score -= 1
elif self.engageResult == 'AE':
pass
else:
if self.gameboard.isSunk(self.engageResult):
print(f'You sunk a {self.gameboard.ships[self.engageResult]["name"]}')
else:
print(f'You hit a {self.gameboard.ships[self.engageResult]["name"]}')
except KeyboardInterrupt:
Helpers.clearScreen()
print('[1] Save and exit\n[2] Exit without saving\n[3] Return to game')
while True:
try:
self.choice = int(input('Please enter an option [1-3]: '))
break
except ValueError:
pass
if (self.choice == 1):
self.savedGames.saveGame(
self.gameboard.map,
self.settings.saveLocation,
self.scoreKeep.score,
self.gameboard.currentShips,
self.gameboard.hits,
self.gameboard.sunkShips
)
print('Game saved')
Helpers.anyKey()
return
elif (self.choice == 2):
if (input('Are you sure? [y/N]: ').replace(' ', '').lower() == 'y'):
return
else:
pass
time.sleep(2)
Helpers.clearScreen()
return
    def createNew(self) -> bool:
"""Create a new game
        Creates a new game board according to the user's specifications
Returns
-------
        bool
            True if a new game was created, otherwise False
"""
while True:
try:
self.width: int = int(input('Please enter the board width: '))
if (self.width < 1):
print('The minimum board width is 1')
continue
elif (len(str(abs(self.width))) > 3):
print('The maximum board width is 999')
continue
else:
break
except ValueError:
print('Please enter a valid number!')
while True:
try:
self.height: int = int(
input('Please enter the board height: '))
if (self.height < 1):
print('The minimum board height is 1')
continue
elif (len(str(abs(self.height))) > 3):
print('The maximum board height is 999')
continue
else:
break
except ValueError:
print('Please enter a valid number!')
if (self.width < 5 or self.height < 5) and (self.width * self.height < 20):
print('Board is too small!')
Helpers.anyKey()
Helpers.clearScreen()
            return False
with Spinner('Placing Ships'):
self.error = False
self.gameboard.generateBoard(self.width, self.height)
self.gameboard.addRandom(self.width, self.height)
self.scoreKeep.score = (self.width + self.height) * 2
if self.error:
print('Failed to place ships.\nTry making the board larger.')
self.error = False
Helpers.anyKey()
Helpers.clearScreen()
return False
else:
print('\nGame created')
Helpers.anyKey()
Helpers.clearScreen()
return True
def loadGame(self) -> None:
"""Load a game file from disk
Loads specified game from disk.
Returns
-------
None
"""
self.gameMap, self.scoreKeep.score, self.gameboard.currentShips, self.gameboard.hits, self.gameboard.sunkShips = self.savedGames.loadGame(self.saveLocation)
if (self.gameMap == None):
pass
else:
self.gameboard.map = self.gameMap
print('Loaded game files')
Helpers.anyKey()
Helpers.clearScreen()
return
def showSave(self) -> None:
"""Prints a list of saved games
Prints a list of all games in `saveLocation/saved_games`
Returns
-------
None
"""
self.saves:list = self.savedGames.listSave(self.saveLocation)
print('Saved Games:')
for i in range(len(self.saves)):
            print(f'[{i+1}] {self.saves[i]}')#FIXME: outputs file extension
Helpers.anyKey()
Helpers.clearScreen()
return
def deleteSave(self) -> None:
if(self.savedGames.deleteGame(self.saveLocation)):
print('Game deleted')
else:
print('Failed to delete game')
Helpers.anyKey()
Helpers.clearScreen()
def settingsOptions(self) -> None: #TODO: Add ability to adjust settings
"""Show the settings dialog
        Opens the settings dialog with options to set `saveLocation`
Returns
-------
None
"""
print('Change Settings')
print('Settings should only be changed by experienced users. CHANGING THEM MAY BREAK YOUR GAME!')
if input('Are you sure you want to continue? [y/N]: ').lower().replace(' ', '') != 'y':
pass
elif len(self.settings.settingsData) == 0:
print('There are no settings to change')
else:
while True:
self.choice = input('Please enter the name of the setting you wish to change or enter `view` to view all settings: ').replace(' ', '')
if self.choice == 'view':
print('{: <20} {: <20}'.format('Setting', 'Value'))
for key in self.settings.settingsData:
print(f'{key: <20} {self.settings.settingsData[key]: <20}')
if self.choice in self.settings.settingsData:
self.settingVal = input('Please enter the value you wish to change the setting to: ')
self.settings.changeSetting(self.choice, self.settingVal)
print('Setting changed')
break
else:
print('Setting does not exist')
Helpers.anyKey()
Helpers.clearScreen()
pass
def showHelp(self) -> None:
"""Output the help text
Downloads help file if not already downloaded and then displays it
page by page.
Returns
-------
None
"""
self.error = False
with Spinner("Getting current help version"):
try:
self.response = urllib.request.urlopen('https://raw.githubusercontent.com/Computroniks/python-battleships/main/assets/HELPVER')
self.newHelpVer = self.response.read().decode('utf-8')
except urllib.error.URLError:
self.newHelpVer = '1.0.0'
Helpers.clearScreen()
if ('helpVer' in self.settings.settingsData):
self.currentHelpVer = self.settings.settingsData['helpVer']
else:
self.currentHelpVer = '1.0.0'
if(os.path.exists(os.path.join(self.saveLocation, 'help.txt')) == False) or (distutils.version.LooseVersion(self.newHelpVer) > distutils.version.LooseVersion(self.currentHelpVer)):
self.settings.changeSetting('helpVer', self.newHelpVer)
with Spinner('Downloading help files'):
try:
time.sleep(0.1)
urllib.request.urlretrieve('https://raw.githubusercontent.com/Computroniks/python-battleships/main/assets/help.txt', os.path.join(self.saveLocation, 'help.txt'))
time.sleep(0.1)
print('\nDone')
except urllib.error.URLError:
self.error = True
if (self.error):
print('\nFailed to download help files. Please make sure you are connected to the internet.')
Helpers.anyKey()
Helpers.clearScreen()
return
print('Help and troubleshooting')
print('To continue to the next page press any key.')
Helpers.anyKey()
Helpers.clearScreen()
with open(os.path.join(self.saveLocation, 'help.txt')) as rfile:
self.helpContent = rfile.readlines()
rfile.close()
self.columns, self.rows = shutil.get_terminal_size()
self.oldRows = self.rows
for i in range(len(self.helpContent)):
print(self.helpContent[i], end=(''))
if(i == self.rows):
self.rows += self.oldRows
Helpers.anyKey('--MORE--')
print(' '*15, end='\r')#Make sure that --MORE-- is removed even if line is blank space
print()
Helpers.anyKey('--END--')
Helpers.clearScreen()
return
def quit(self) -> None:
"""Confirm and exit the program
Asks the user if they really want to quit. Defaults to no.
Returns
-------
None
"""
while True:
self.quitC:str = input('Are you sure you want to quit? [y/N]: ').lower().replace(' ', '')
if (self.quitC == 'y'):
print('Bye')
sys.exit()
else:
Helpers.clearScreen()
return
#End class Game()
if __name__ == '__main__':
Helpers.clearScreen()
app = Game()
|
learn.py
|
# # Unity ML-Agents Toolkit
# ## ML-Agent Learning
import logging
import os
import multiprocessing
import numpy as np
from docopt import docopt
from unitytrainers.trainer_controller import TrainerController
from unitytrainers.exception import TrainerError
def run_training(sub_id, use_seed, options):
# Docker Parameters
if options['--docker-target-name'] == 'Empty':
docker_target_name = ''
else:
docker_target_name = options['--docker-target-name']
# General parameters
run_id = options['--run-id']
num_runs = int(options['--num-runs'])
seed = int(options['--seed'])
load_model = options['--load']
train_model = options['--train']
save_freq = int(options['--save-freq'])
env_path = options['<env>']
keep_checkpoints = int(options['--keep-checkpoints'])
worker_id = int(options['--worker-id'])
curriculum_file = str(options['--curriculum'])
if curriculum_file == "None":
curriculum_file = None
lesson = int(options['--lesson'])
fast_simulation = not bool(options['--slow'])
no_graphics = options['--no-graphics']
# Constants
# Assumption that this yaml is present in same dir as this file
base_path = os.path.dirname(__file__)
TRAINER_CONFIG_PATH = os.path.abspath(os.path.join(base_path, "trainer_config.yaml"))
if env_path is None and num_runs > 1:
raise TrainerError("It is not possible to launch more than one concurrent training session "
"when training from the editor")
tc = TrainerController(env_path, run_id + "-" + str(sub_id), save_freq, curriculum_file, fast_simulation,
load_model, train_model, worker_id + sub_id, keep_checkpoints, lesson, use_seed,
docker_target_name, TRAINER_CONFIG_PATH, no_graphics)
tc.start_learning()
if __name__ == '__main__':
print('''
▄▄▄▓▓▓▓
╓▓▓▓▓▓▓█▓▓▓▓▓
,▄▄▄m▀▀▀' ,▓▓▓▀▓▓▄ ▓▓▓ ▓▓▌
▄▓▓▓▀' ▄▓▓▀ ▓▓▓ ▄▄ ▄▄ ,▄▄ ▄▄▄▄ ,▄▄ ▄▓▓▌▄ ▄▄▄ ,▄▄
▄▓▓▓▀ ▄▓▓▀ ▐▓▓▌ ▓▓▌ ▐▓▓ ▐▓▓▓▀▀▀▓▓▌ ▓▓▓ ▀▓▓▌▀ ^▓▓▌ ╒▓▓▌
▄▓▓▓▓▓▄▄▄▄▄▄▄▄▓▓▓ ▓▀ ▓▓▌ ▐▓▓ ▐▓▓ ▓▓▓ ▓▓▓ ▓▓▌ ▐▓▓▄ ▓▓▌
▀▓▓▓▓▀▀▀▀▀▀▀▀▀▀▓▓▄ ▓▓ ▓▓▌ ▐▓▓ ▐▓▓ ▓▓▓ ▓▓▓ ▓▓▌ ▐▓▓▐▓▓
^█▓▓▓ ▀▓▓▄ ▐▓▓▌ ▓▓▓▓▄▓▓▓▓ ▐▓▓ ▓▓▓ ▓▓▓ ▓▓▓▄ ▓▓▓▓`
'▀▓▓▓▄ ^▓▓▓ ▓▓▓ └▀▀▀▀ ▀▀ ^▀▀ `▀▀ `▀▀ '▀▀ ▐▓▓▌
▀▀▀▀▓▄▄▄ ▓▓▓▓▓▓, ▓▓▓▓▀
`▀█▓▓▓▓▓▓▓▓▓▌
¬`▀▀▀█▓
''')
logger = logging.getLogger("unityagents")
_USAGE = '''
Usage:
learn (<env>) [options]
learn [options]
learn --help
Options:
--curriculum=<file> Curriculum json file for environment [default: None].
--keep-checkpoints=<n> How many model checkpoints to keep [default: 5].
--lesson=<n> Start learning from this lesson [default: 0].
--load Whether to load the model or randomly initialize [default: False].
--run-id=<path> The sub-directory name for model and summary statistics [default: ppo].
--num-runs=<n> Number of concurrent training sessions [default: 1].
--save-freq=<n> Frequency at which to save model [default: 50000].
--seed=<n> Random seed used for training [default: -1].
--slow Whether to run the game at training speed [default: False].
--train Whether to train model, or only run inference [default: False].
--worker-id=<n> Number to add to communication port (5005). Used for multi-environment [default: 0].
--docker-target-name=<dt> Docker Volume to store curriculum, executable and model files [default: Empty].
--no-graphics Whether to run the Unity simulator in no-graphics mode [default: False].
'''
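# Illustrative invocation (the environment path below is a placeholder, not a
# build shipped with this script):
#   python learn.py builds/3DBall --train --run-id=ball_run --num-runs=2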
options = docopt(_USAGE)
logger.info(options)
num_runs = int(options['--num-runs'])
seed = int(options['--seed'])
jobs = []
# if seed == -1:
# use_seed = np.random.randint(0, 9999)
# else:
# use_seed = seed
# run_training(0,use_seed)
for i in range(num_runs):
if seed == -1:
use_seed = np.random.randint(0, 9999)
else:
use_seed = seed
p = multiprocessing.Process(target=run_training, args=(i, use_seed, options))
jobs.append(p)
p.start()
|
ble2lsl.py
|
"""Interfacing between Bluetooth Low Energy and Lab Streaming Layer protocols.
Interfacing with devices over Bluetooth Low Energy (BLE) is achieved using the
`Generic Attribute Profile`_ (GATT) standard procedures for data transfer.
Reading and writing of GATT descriptors is provided by the `pygatt`_ module.
All classes streaming data through an LSL outlet should subclass
`BaseStreamer`.
Also includes dummy streamer objects, which do not acquire data over BLE but
pass local data through an LSL outlet, e.g. for testing.
TODO:
* AttrDict for attribute-like dict access from device PARAMS?
.. _Generic Attribute Profile:
https://www.bluetooth.com/specifications/gatt/generic-attributes-overview
.. _pygatt:
https://github.com/peplin/pygatt
"""
from queue import Queue
from struct import error as StructError
import threading
import time
from warnings import warn
import numpy as np
import pygatt
from pygatt.backends.bgapi.exceptions import ExpectedResponseTimeout
import pylsl as lsl
import serial
INFO_ARGS = ['type', 'channel_count', 'nominal_srate', 'channel_format']
class BaseStreamer:
"""Base class for streaming data through an LSL outlet.
Prepares `pylsl.StreamInfo` and `pylsl.StreamOutlet` objects as well as
data buffers for handling of incoming chunks.
Subclasses must implement `start` and `stop` methods for stream control.
TODO:
* Public access to outlets and stream info?
* Push chunks, not samples (have to generate intra-chunk timestamps anyway)
"""
def __init__(self, device, subscriptions=None, time_func=time.time,
ch_names=None, **kwargs):
"""Construct a `BaseStreamer` object.
Args:
device: A device module in `ble2lsl.devices`.
time_func (function): Function for generating timestamps.
subscriptions (Iterable[str]): Types of device data to stream.
Some subset of `SUBSCRIPTION_NAMES`.
ch_names (dict[Iterable[str]]): User-defined channel names.
e.g. `{'EEG': ('Ch1', 'Ch2', 'Ch3', 'Ch4')}`.
"""
self._device = device
if subscriptions is None:
subscriptions = get_default_subscriptions(device)
self._subscriptions = tuple(subscriptions)
self._time_func = time_func
self._user_ch_names = ch_names if ch_names is not None else {}
self._stream_params = self._device.PARAMS['streams']
self._chunk_idxs = stream_idxs_zeros(self._subscriptions)
self._chunks = empty_chunks(self._stream_params,
self._subscriptions)
# StreamOutlet.push_chunk doesn't like single-sample chunks...
# but want to keep using push_chunk for intra-chunk timestamps
# doing this beforehand to avoid a chunk size check for each push
chunk_size = self._stream_params["chunk_size"]
self._push_func = {name: (self._push_chunk_as_sample
if chunk_size[name] == 1
else self._push_chunk)
for name in self._subscriptions}
def start(self):
"""Begin streaming through the LSL outlet."""
raise NotImplementedError()
def stop(self):
"""Stop/pause streaming through the LSL outlet."""
raise NotImplementedError()
def _init_lsl_outlets(self):
"""Call in subclass after acquiring address."""
self._info = {}
self._outlets = {}
for name in self._subscriptions:
info = {arg: self._stream_params[arg][name] for arg in INFO_ARGS}
outlet_name = '{}-{}'.format(self._device_id, name)
self._info[name] = lsl.StreamInfo(outlet_name, **info,
source_id=self._device_id)
self._add_device_info(name)
chunk_size = self._stream_params["chunk_size"][name]
self._outlets[name] = lsl.StreamOutlet(self._info[name],
chunk_size=chunk_size,
max_buffered=360)
def _push_chunk(self, name, timestamp):
self._outlets[name].push_chunk(self._chunks[name].tolist(),
timestamp)
def _push_chunk_as_sample(self, name, timestamp):
self._outlets[name].push_sample(self._chunks[name].tolist()[0],
timestamp)
def _add_device_info(self, name):
"""Adds device-specific parameters to `info`."""
desc = self._info[name].desc()
try:
desc.append_child_value("manufacturer", self._device.MANUFACTURER)
except KeyError:
warn("Manufacturer not specified in device file")
desc.append_child_value("address", self._address)
channels = desc.append_child("channels")
try:
ch_names = self._stream_params["ch_names"][name]
# use user-specified ch_names if available and right no. channels
if name in self._user_ch_names:
user_ch_names = self._user_ch_names[name]
if len(user_ch_names) == len(ch_names):
if len(user_ch_names) == len(set(user_ch_names)):
ch_names = user_ch_names
else:
print("Non-unique names in user-defined {} ch_names; "
.format(name), "using default ch_names.")
else:
print("Wrong # of channels in user-defined {} ch_names; "
.format(name), "using default ch_names.")
for c, ch_name in enumerate(ch_names):
unit = self._stream_params["units"][name][c]
type_ = self._stream_params["type"][name]
channels.append_child("channel") \
.append_child_value("label", ch_name) \
.append_child_value("unit", unit) \
.append_child_value("type", type_)
except KeyError:
raise ValueError("Channel names, units, or types not specified")
@property
def subscriptions(self):
"""The names of the subscribed streams."""
return self._subscriptions
class Streamer(BaseStreamer):
"""Streams data to an LSL outlet from a BLE device.
TODO:
* Try built-in LSL features for intra-chunk timestamps (StreamOutlet)
* initialize_timestamping: should indices be reset to 0 mid-streaming?
"""
def __init__(self, device, address=None, backend='bgapi', interface=None,
autostart=True, scan_timeout=10.5, internal_timestamps=False,
**kwargs):
"""Construct a `Streamer` instance for a given device.
Args:
device (dict): A device module in `ble2lsl.devices`.
For example, `ble2lsl.devices.muse2016`.
Provides info on BLE characteristics and device metadata.
address (str): Device MAC address for establishing connection.
By default, this is acquired automatically using device name.
backend (str): Which `pygatt` backend to use.
Allowed values are `'bgapi'` or `'gatt'`. The `'gatt'` backend
only works on Linux under the BlueZ protocol stack.
interface (str): The identifier for the BLE adapter interface.
When `backend='gatt'`, defaults to `'hci0'`.
autostart (bool): Whether to start streaming on instantiation.
scan_timeout (float): Seconds before timeout of BLE adapter scan.
internal_timestamps (bool): Use internal timestamping.
If `False` (default), uses initial timestamp, nominal sample
rate, and device-provided sample ID to determine timestamp.
If `True` (or when sample IDs not provided), generates
timestamps at the time of chunk retrieval, only using
the nominal sample rate as needed to determine timestamps within
chunks.
"""
BaseStreamer.__init__(self, device=device, **kwargs)
self._transmit_queue = Queue()
self._ble_params = self._device.PARAMS["ble"]
self._address = address
# use internal timestamps if requested, or if stream is variable rate
# (LSL uses nominal_srate=0.0 for variable rates)
nominal_srates = self._stream_params["nominal_srate"]
self._internal_timestamps = {name: (internal_timestamps
if nominal_srates[name] else True)
for name in device.STREAMS}
self._start_time = stream_idxs_zeros(self._subscriptions)
self._first_chunk_idxs = stream_idxs_zeros(self._subscriptions)
# initialize gatt adapter
if backend == 'bgapi':
self._adapter = pygatt.BGAPIBackend(serial_port=interface)
elif backend in ['gatt', 'bluez']:
# only works on Linux
interface = interface or 'hci0'
self._adapter = pygatt.GATTToolBackend(interface)
else:
raise(ValueError("Invalid backend specified; use bgapi or gatt."))
self._backend = backend
self._scan_timeout = scan_timeout
self._transmit_thread = threading.Thread(target=self._transmit_chunks)
if autostart:
self.connect()
self.start()
def _init_timestamp(self, name, chunk_idx):
"""Set the starting timestamp and chunk index for a subscription."""
self._first_chunk_idxs[name] = chunk_idx
self._start_time[name] = self._time_func()
def start(self):
"""Start streaming by writing to the send characteristic."""
self._transmit_thread.start()
self._ble_device.char_write(self._ble_params['send'],
value=self._ble_params['stream_on'],
wait_for_response=False)
def stop(self):
"""Stop streaming by writing to the send characteristic."""
self._ble_device.char_write(self._ble_params["send"],
value=self._ble_params["stream_off"],
wait_for_response=False)
def send_command(self, value):
"""Write some value to the send characteristic."""
self._ble_device.char_write(self._ble_params["send"],
value=value,
wait_for_response=False)
def disconnect(self):
"""Disconnect from the BLE device and stop the adapter.
Note:
After disconnection, `start` will not resume streaming.
TODO:
* enable device reconnect with `connect`
"""
self.stop() # stream_off command
self._ble_device.disconnect() # BLE disconnect
self._adapter.stop()
def connect(self, max_attempts=20):
"""Establish connection to BLE device (prior to `start`).
Starts the `pygatt` adapter, resolves the device address if necessary,
connects to the device, and subscribes to the channels specified in the
device parameters.
"""
for _ in range(max_attempts):
try:
self._adapter.start()
break
except pygatt.exceptions.NotConnectedError as notconnected_error:
# dongle not connected
continue
except (ExpectedResponseTimeout, StructError):
continue
except OSError as os_error:
if os_error.errno == 6:
# "device not configured"
print(os_error)
continue
else:
raise os_error
except serial.serialutil.SerialException as serial_exception:
# NOTE: some of these may be raised (apparently harmlessly) by
# the self._adapter._receiver thread, which can't be captured
# here; maybe there is a way to prevent writing to stdout though
if serial_exception.errno == 6:
# "couldn't open port"
print(serial_exception)
continue
else:
raise serial_exception
except pygatt.backends.bgapi.exceptions.BGAPIError as bgapi_error:
# adapter not connected?
continue
time.sleep(0.1)
if self._address is None:
# get the device address if none was provided
self._device_id, self._address = \
self._resolve_address(self._device.NAME)
try:
self._ble_device = self._adapter.connect(self._address,
address_type=self._ble_params['address_type'],
interval_min=self._ble_params['interval_min'],
interval_max=self._ble_params['interval_max'])
except pygatt.exceptions.NotConnectedError:
e_msg = "Unable to connect to device at address {}" \
.format(self._address)
raise(IOError(e_msg))
# initialize LSL outlets and packet handler
self._init_lsl_outlets()
self._packet_handler = self._device.PacketHandler(self)
# subscribe to receive characteristic notifications
process_packet = self._packet_handler.process_packet
for name in self._subscriptions:
try:
uuids = [self._ble_params[name] + '']
except TypeError:
uuids = self._ble_params[name]
for uuid in uuids:
if uuid:
self._ble_device.subscribe(uuid, callback=process_packet)
# subscribe to receive simblee command from ganglion doc
def _resolve_address(self, name):
list_devices = self._adapter.scan(timeout=self._scan_timeout)
for device in list_devices:
if name in device['name']:
return device['name'], device['address']
raise(ValueError("No devices found with name `{}`".format(name)))
def _transmit_chunks(self):
"""TODO: missing chunk vs. missing sample"""
# nominal duration of chunks for progressing non-internal timestamps
chunk_period = {name: (self._stream_params["chunk_size"][name]
/ self._stream_params["nominal_srate"][name])
for name in self._subscriptions
if not self._internal_timestamps[name]}
first_idx = self._first_chunk_idxs
while True:
name, chunk_idx, chunk = self._transmit_queue.get()
self._chunks[name][:, :] = chunk
# update chunk index records and report missing chunks
# passing chunk_idx=-1 to the queue averts this (ex. status stream)
if not chunk_idx == -1:
if self._chunk_idxs[name] == 0:
self._init_timestamp(name, chunk_idx)
self._chunk_idxs[name] = chunk_idx - 1
if not chunk_idx == self._chunk_idxs[name] + 1:
print("Missing {} chunk {}: {}"
.format(name, chunk_idx, self._chunk_idxs[name]))
self._chunk_idxs[name] = chunk_idx
else:
# track number of received chunks for non-indexed streams
self._chunk_idxs[name] += 1
# generate timestamp; either internally or from the start time and nominal rate
if self._internal_timestamps[name]:
timestamp = self._time_func()
else:
timestamp = chunk_period[name] * (chunk_idx - first_idx[name])
timestamp += self._start_time[name]
self._push_func[name](name, timestamp)
@property
def backend(self):
"""The name of the `pygatt` backend used by the instance."""
return self._backend
@property
def address(self):
"""The MAC address of the device."""
return self._address
class Dummy(BaseStreamer):
"""Mimicks a device and pushes local data into an LSL outlet.
TODO:
* verify timestamps/delays (seems too fast in plot.Lines)
"""
def __init__(self, device, chunk_iterator=None, subscriptions=None,
autostart=True, **kwargs):
"""Construct a `Dummy` instance.
Args:
device: BLE device to impersonate (i.e. from `ble2lsl.devices`).
chunk_iterator (generator): Class that iterates through chunks.
autostart (bool): Whether to start streaming on instantiation.
"""
nominal_srate = device.PARAMS["streams"]["nominal_srate"]
if subscriptions is None:
subscriptions = get_default_subscriptions(device)
subscriptions = {name for name in subscriptions
if nominal_srate[name] > 0}
BaseStreamer.__init__(self, device=device, subscriptions=subscriptions,
**kwargs)
self._device_id = "{}-DUMMY".format(device.NAME)
self._address = "DUMMY"
self._init_lsl_outlets()
chunk_shapes = {name: self._chunks[name].shape
for name in self._subscriptions}
self._delays = {name: 1 / (nominal_srate[name] / chunk_shapes[name][1])
for name in self._subscriptions}
# generate or load fake data
if chunk_iterator is None:
chunk_iterator = NoisySinusoids
self._chunk_iter = {name: chunk_iterator(chunk_shapes[name],
nominal_srate[name])
for name in self._subscriptions}
# threads to mimic incoming BLE data
self._threads = {name: threading.Thread(target=self._stream,
kwargs=dict(name=name))
for name in self._subscriptions}
if autostart:
self.start()
def start(self):
"""Start pushing data into the LSL outlet."""
self._proceed = True
for name in self._subscriptions:
self._threads[name].start()
def stop(self):
"""Stop pushing data. Ends execution of chunk streaming threads.
Restart requires a new `Dummy` instance.
"""
self._proceed = False
def _stream(self, name):
"""Run in thread to mimic periodic hardware input."""
for chunk in self._chunk_iter[name]:
if not self._proceed:
# dummy has received stop signal
break
self._chunks[name] = chunk
timestamp = time.time()
self._push_func[name](name, timestamp)
delay = self._delays[name]
# some threads may have long delays;
# subdivide these so threads can stop within ~1 s
while delay > 1 and self._proceed:
time.sleep(1)
delay -= 1
time.sleep(delay % 1)
def make_chunk(self, chunk_ind):
"""Prepare a chunk from the totality of local data.
TODO:
* replaced when using an iterator
"""
self._chunks
# TODO: more realistic timestamps
timestamp = self._time_func()
self._timestamps = np.array([timestamp]*self._chunk_size)
def stream_idxs_zeros(subscriptions):
"""Initialize an integer index for each subscription."""
idxs = {name: 0 for name in subscriptions}
return idxs
def empty_chunks(stream_params, subscriptions):
"""Initialize an empty chunk array for each subscription."""
chunks = {name: np.zeros((stream_params["chunk_size"][name],
stream_params["channel_count"][name]),
dtype=stream_params["numpy_dtype"][name])
for name in subscriptions}
return chunks
def get_default_subscriptions(device, pos_rate=False):
# look for default list; if unavailable, subscribe to all
try:
subscriptions = device.DEFAULT_SUBSCRIPTIONS
except AttributeError:
subscriptions = device.STREAMS
if pos_rate:
subscriptions = [name for name in subscriptions
if device.PARAMS['streams']['nominal_srate'][name] > 0]
return subscriptions
class ChunkIterator:
"""Generator object (i.e. iterator) that yields chunks.
Placeholder until I figure out how this might work as a base class.
"""
def __init__(self, chunk_shape, srate):
self._chunk_shape = chunk_shape
self._srate = srate
class NoisySinusoids(ChunkIterator):
"""Iterator class to provide noisy sinusoidal chunks of data."""
def __init__(self, chunk_shape, srate, freqs=[5, 10, 12, 20], noise_std=1):
super().__init__(chunk_shape=chunk_shape, srate=srate)
self._ang_freqs = 2 * np.pi * np.array(freqs)
self._speriod = 1 / self._srate
self._chunk_t_incr = (1 + chunk_shape[0]) / self._srate
self._freq_amps = np.random.randint(1, 5, len(freqs))
self._noise_std = noise_std
def __iter__(self):
self._t = (np.arange(self._chunk_shape[0]).reshape((-1, 1))
* self._speriod)
return self
def __next__(self):
# start with noise
chunk = np.random.normal(0, self._noise_std, self._chunk_shape)
# sum frequencies with random amplitudes
for i, freq in enumerate(self._ang_freqs):
chunk += self._freq_amps[i] * np.sin(freq * self._t)
self._t += self._chunk_t_incr
return chunk
|
test_rpc.py
|
import os
import time
import socket
import dgl
import backend as F
import unittest, pytest
import multiprocessing as mp
from numpy.testing import assert_array_equal
if os.name != 'nt':
import fcntl
import struct
INTEGER = 2
STR = 'hello world!'
HELLO_SERVICE_ID = 901231
TENSOR = F.zeros((10, 10), F.int64, F.cpu())
def get_local_usable_addr():
"""Get local usable IP and port
Returns
-------
str
IP address and port separated by a space, e.g., '192.168.8.12 50051'
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
sock.connect(('10.255.255.255', 1))
ip_addr = sock.getsockname()[0]
except ValueError:
ip_addr = '127.0.0.1'
finally:
sock.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
sock.listen(1)
port = sock.getsockname()[1]
sock.close()
return ip_addr + ' ' + str(port)
def foo(x, y):
assert x == 123
assert y == "abc"
class MyRequest(dgl.distributed.Request):
def __init__(self):
self.x = 123
self.y = "abc"
self.z = F.randn((3, 4))
self.foo = foo
def __getstate__(self):
return self.x, self.y, self.z, self.foo
def __setstate__(self, state):
self.x, self.y, self.z, self.foo = state
def process_request(self, server_state):
pass
class MyResponse(dgl.distributed.Response):
def __init__(self):
self.x = 432
def __getstate__(self):
return self.x
def __setstate__(self, state):
self.x = state
def simple_func(tensor):
return tensor
class HelloResponse(dgl.distributed.Response):
def __init__(self, hello_str, integer, tensor):
self.hello_str = hello_str
self.integer = integer
self.tensor = tensor
def __getstate__(self):
return self.hello_str, self.integer, self.tensor
def __setstate__(self, state):
self.hello_str, self.integer, self.tensor = state
class HelloRequest(dgl.distributed.Request):
def __init__(self, hello_str, integer, tensor, func):
self.hello_str = hello_str
self.integer = integer
self.tensor = tensor
self.func = func
def __getstate__(self):
return self.hello_str, self.integer, self.tensor, self.func
def __setstate__(self, state):
self.hello_str, self.integer, self.tensor, self.func = state
def process_request(self, server_state):
assert self.hello_str == STR
assert self.integer == INTEGER
new_tensor = self.func(self.tensor)
res = HelloResponse(self.hello_str, self.integer, new_tensor)
return res
def start_server(num_clients, ip_config, server_id=0):
print("Sleep 5 seconds to test client re-connect.")
time.sleep(5)
server_state = dgl.distributed.ServerState(None, local_g=None, partition_book=None)
dgl.distributed.register_service(HELLO_SERVICE_ID, HelloRequest, HelloResponse)
print("Start server {}".format(server_id))
dgl.distributed.start_server(server_id=server_id,
ip_config=ip_config,
num_servers=1,
num_clients=num_clients,
server_state=server_state)
def start_client(ip_config):
dgl.distributed.register_service(HELLO_SERVICE_ID, HelloRequest, HelloResponse)
dgl.distributed.connect_to_server(ip_config=ip_config, num_servers=1)
req = HelloRequest(STR, INTEGER, TENSOR, simple_func)
# test send and recv
dgl.distributed.send_request(0, req)
res = dgl.distributed.recv_response()
assert res.hello_str == STR
assert res.integer == INTEGER
assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
# test remote_call
target_and_requests = []
for i in range(10):
target_and_requests.append((0, req))
res_list = dgl.distributed.remote_call(target_and_requests)
for res in res_list:
assert res.hello_str == STR
assert res.integer == INTEGER
assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
# test send_request_to_machine
dgl.distributed.send_request_to_machine(0, req)
res = dgl.distributed.recv_response()
assert res.hello_str == STR
assert res.integer == INTEGER
assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
# test remote_call_to_machine
target_and_requests = []
for i in range(10):
target_and_requests.append((0, req))
res_list = dgl.distributed.remote_call_to_machine(target_and_requests)
for res in res_list:
assert res.hello_str == STR
assert res.integer == INTEGER
assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
def test_serialize():
os.environ['DGL_DIST_MODE'] = 'distributed'
from dgl.distributed.rpc import serialize_to_payload, deserialize_from_payload
SERVICE_ID = 12345
dgl.distributed.register_service(SERVICE_ID, MyRequest, MyResponse)
req = MyRequest()
data, tensors = serialize_to_payload(req)
req1 = deserialize_from_payload(MyRequest, data, tensors)
req1.foo(req1.x, req1.y)
assert req.x == req1.x
assert req.y == req1.y
assert F.array_equal(req.z, req1.z)
res = MyResponse()
data, tensors = serialize_to_payload(res)
res1 = deserialize_from_payload(MyResponse, data, tensors)
assert res.x == res1.x
def test_rpc_msg():
os.environ['DGL_DIST_MODE'] = 'distributed'
from dgl.distributed.rpc import serialize_to_payload, deserialize_from_payload, RPCMessage
SERVICE_ID = 32452
dgl.distributed.register_service(SERVICE_ID, MyRequest, MyResponse)
req = MyRequest()
data, tensors = serialize_to_payload(req)
rpcmsg = RPCMessage(SERVICE_ID, 23, 0, 1, data, tensors)
assert rpcmsg.service_id == SERVICE_ID
assert rpcmsg.msg_seq == 23
assert rpcmsg.client_id == 0
assert rpcmsg.server_id == 1
assert len(rpcmsg.data) == len(data)
assert len(rpcmsg.tensors) == 1
assert F.array_equal(rpcmsg.tensors[0], req.z)
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
def test_rpc():
os.environ['DGL_DIST_MODE'] = 'distributed'
ip_config = open("rpc_ip_config.txt", "w")
ip_addr = get_local_usable_addr()
ip_config.write('%s\n' % ip_addr)
ip_config.close()
ctx = mp.get_context('spawn')
pserver = ctx.Process(target=start_server, args=(1, "rpc_ip_config.txt"))
pclient = ctx.Process(target=start_client, args=("rpc_ip_config.txt",))
pserver.start()
time.sleep(1)
pclient.start()
pserver.join()
pclient.join()
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
def test_multi_client():
os.environ['DGL_DIST_MODE'] = 'distributed'
ip_config = open("rpc_ip_config_mul_client.txt", "w")
ip_addr = get_local_usable_addr()
ip_config.write('%s\n' % ip_addr)
ip_config.close()
ctx = mp.get_context('spawn')
pserver = ctx.Process(target=start_server, args=(10, "rpc_ip_config_mul_client.txt"))
pclient_list = []
for i in range(10):
pclient = ctx.Process(target=start_client, args=("rpc_ip_config_mul_client.txt",))
pclient_list.append(pclient)
pserver.start()
for i in range(10):
pclient_list[i].start()
for i in range(10):
pclient_list[i].join()
pserver.join()
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
def test_multi_thread_rpc():
os.environ['DGL_DIST_MODE'] = 'distributed'
ip_config = open("rpc_ip_config_multithread.txt", "w")
num_servers = 2
for _ in range(num_servers):  # one address line per server
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
ctx = mp.get_context('spawn')
pserver_list = []
for i in range(num_servers):
pserver = ctx.Process(target=start_server, args=(1, "rpc_ip_config_multithread.txt", i))
pserver.start()
pserver_list.append(pserver)
def start_client_multithread(ip_config):
import threading
dgl.distributed.connect_to_server(ip_config=ip_config, num_servers=1)
dgl.distributed.register_service(HELLO_SERVICE_ID, HelloRequest, HelloResponse)
req = HelloRequest(STR, INTEGER, TENSOR, simple_func)
dgl.distributed.send_request(0, req)
def subthread_call(server_id):
req = HelloRequest(STR, INTEGER, TENSOR+ server_id, simple_func)
dgl.distributed.send_request(server_id, req)
subthread = threading.Thread(target=subthread_call, args=(1,))
subthread.start()
subthread.join()
res0 = dgl.distributed.recv_response()
res1 = dgl.distributed.recv_response()
assert_array_equal(F.asnumpy(res0.tensor), F.asnumpy(TENSOR))
assert_array_equal(F.asnumpy(res1.tensor), F.asnumpy(TENSOR+1))
dgl.distributed.exit_client()
start_client_multithread("rpc_ip_config_multithread.txt")
for pserver in pserver_list:
    pserver.join()
if __name__ == '__main__':
test_serialize()
test_rpc_msg()
test_rpc()
test_multi_client()
test_multi_thread_rpc()
|
evecat.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-06-30 15:50:54
# @Author : umm233
import requests, threading, os
from bs4 import BeautifulSoup
def download_pics(url, pic_dir, n):
pic_path = pic_dir + str(n) + '.gif'
if not os.path.isfile(pic_path):
r = requests.get(url, headers=headers)
with open(pic_path, 'wb') as f:
f.write(r.content)
# Download finished; release the lock
print('No.{} pic downloaded successfully.'.format(n))
else:
print('No.{} pic already exists => skipping'.format(n))
thread_lock.release()
def get_max_pic_id():
url = 'http://motions.cat/top.html'
html = requests.get(url).content
soup = BeautifulSoup(html, 'lxml')
a0 = soup.find_all("a", "eoc-image-link")[0]
return (int)(a0.attrs['id'])
def main(pic_dir):
if not os.path.exists(pic_dir):
os.mkdir("pics")
# Get the id of the most recent picture
max_id = get_max_pic_id()
# Download with multiple threads
for n in range(1, max_id + 1):
pic_id = (4 - len(str(n))) * '0' + str(n)
url = "http://motions.cat/gif/nhn/{}.gif".format(pic_id)
print('downloading No.{} pic ...'.format(n))
# Acquire the lock
thread_lock.acquire()
t = threading.Thread(target=download_pics, args=(url, pic_dir, n))
t.start()
print('--- End! ---')
if __name__ == '__main__':
headers = {
'user-agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'
}
# Set the maximum number of threads; blocks once 5 threads are running
thread_lock = threading.BoundedSemaphore(value=5)
# Set the download directory
pic_dir = "pics/"
main(pic_dir)
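# The throttling used above, as a standalone sketch (names here are
# illustrative and not part of this script): a BoundedSemaphore caps the
# number of live download threads at five.
#
#   import threading
#   sem = threading.BoundedSemaphore(value=5)
#   def work(i):
#       try:
#           pass                                  # download would go here
#       finally:
#           sem.release()                         # always free the slot
#   for i in range(20):
#       sem.acquire()                             # blocks while 5 workers are busy
#       threading.Thread(target=work, args=(i,)).start()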
|
nastro_mindvision.py
|
import ctypes
import cv2
import numpy as np
import threading
import mvsdk
import time
import gc
def get_first_available_camera():
devList = mvsdk.CameraEnumerateDevice()
nDev = len(devList)
if nDev < 1:
print("No camera was found!")
return None
devInfo = devList[0]
print(devInfo)
return devInfo
Tlock = threading.RLock()
camera_instance = None
class MindVisionCamera():
def __init__(self):
global camera_instance
camera_instance = self
self.continuous_acquire_run_flag = False
self.img_list = []
self.frameBuffer = None
self.hcam = 0
self.grabber = ctypes.c_voidp(0)
self.dev = get_first_available_camera()#tSdkCameraDevInfo()
self.new_frame = False
if self.dev:
try:
self.hcam = mvsdk.CameraInit(self.dev, -1, -1)
self.cap = mvsdk.CameraGetCapability(self.hcam)
self.monoCamera = (self.cap.sIspCapacity.bMonoSensor != 0)
frameBufferSize = self.cap.sResolutionRange.iWidthMax * self.cap.sResolutionRange.iHeightMax * (1 if self.monoCamera else 3)
self.frameBuffer = mvsdk.CameraAlignMalloc(frameBufferSize, 16)
mvsdk.CameraPlay(self.hcam)
#mvsdk.CameraSetTriggerMode(self.hcam, 1)
#mvsdk.CameraSetCallbackFunction(self.hcam, GrabImageCallback, 0)
except mvsdk.CameraException as e:
print("CameraInit Failed({}): {}".format(e.error_code, e.message) )
return
else:
print("mindvision_sdk.py - MindVisionCamera.__init__ - Error. No camera found!")
def set_exposure(self, exposure_microseconds):
mvsdk.CameraSetAeState(self.hcam, 0)
mvsdk.CameraSetExposureTime(self.hcam, exposure_microseconds)
def start(self):
t = threading.Thread(target = self.continuous_acquire)
t.daemon = True
t.start()
#threading.Timer(1, self.continuous_acquire).start()
#frameBufferSize = self.cap.sResolutionRange.iWidthMax * self.cap.sResolutionRange.iHeightMax * (1 if self.monoCamera else 3)
#self.frameBuffer = mvsdk.CameraAlignMalloc(frameBufferSize, 16)
#mvsdk.CameraPlay(self.hcam)
def continuous_acquire(self):
global Tlock
self.continuous_acquire_run_flag = True
#mvsdk.CameraPlay(self.hcam)
while self.continuous_acquire_run_flag:
try:
self.trigger_software()
#print("new continuous image")
except mvsdk.CameraException as e:
print(e.message)
if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
print("CameraGetImageBuffer failed({}): {}".format(e.error_code, e.message) )
def stop(self):
self.continuous_acquire_run_flag = False
def get_image_buf(self):
global Tlock
if len(self.img_list)<1:
return None
#print(len(self.img_list))
gc.collect()
with Tlock:
#img = self.img_list[-1]
#self.img_list = [img,]
#img = img.reshape(512, 1280, 3 )
#ret, jpeg = cv2.imencode('.jpg', img)
#return jpeg
buf = self.img_list[:]
self.img_list = []
return buf
def set_io_state(self, n, state):
mvsdk.CameraSetIOState(self.hcam, n, state)
def set_trigger_mode(self, mode): #0- continuous, 1-software, 2-hardware
mvsdk.CameraSetTriggerMode(self.hcam, mode)
def trigger_software(self):
"""
mvsdk.CameraSoftTrigger(self.hcam)
self.new_frame = False
t = time.time()
while not self.new_frame and ((time.time()-t)<1):
time.sleep(0.001)
#mvsdk.CameraClearBuffer(self.hcam)
"""
t = time.time()
if mvsdk.CameraSoftTrigger(self.hcam) == 0:
CAMERA_GET_IMAGE_PRIORITY_OLDEST = 0  # get the oldest frame in the cache
CAMERA_GET_IMAGE_PRIORITY_NEWEST = 1  # get the latest frame in the cache (all older frames are discarded)
CAMERA_GET_IMAGE_PRIORITY_NEXT = 2  # discard all cached frames and wait for the next exposure (not supported on some camera models, where it behaves like CAMERA_GET_IMAGE_PRIORITY_OLDEST)
pRawData, frameHead = mvsdk.CameraGetImageBufferPriority(self.hcam, 1000, CAMERA_GET_IMAGE_PRIORITY_NEWEST)
#pRawData, frameHead = mvsdk.CameraGetImageBuffer(self.hcam, 1000)
mvsdk.CameraImageProcess(self.hcam, pRawData, self.frameBuffer, frameHead)
mvsdk.CameraReleaseImageBuffer(self.hcam, pRawData)
frame_data = (mvsdk.c_ubyte * frameHead.uBytes).from_address(self.frameBuffer)
frame = np.frombuffer(frame_data, dtype=np.uint8)
frame = frame.reshape((frameHead.iHeight, frameHead.iWidth, 1 if frameHead.uiMediaType == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3) )
#filename = "%s.jpg"%str(time.time())
#cv2.imwrite("./immagini_processo/" + filename, frame)
#time.sleep(0.01)
with Tlock:
self.img_list.append([t,frame.copy()])
#self.img_list.append([time.time(),cv2.flip(frame, 1)])
def close(self):
mvsdk.CameraUnInit(self.hcam)
mvsdk.CameraAlignFree(self.frameBuffer)
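# Illustrative usage of the class above (a sketch; the exposure value and
# trigger mode are example choices, not requirements):
#   cam = MindVisionCamera()
#   cam.set_exposure(20000)        # 20 ms exposure, auto-exposure disabled
#   cam.set_trigger_mode(1)        # 1 = software trigger (see set_trigger_mode)
#   cam.start()                    # background thread triggers and buffers frames
#   frames = cam.get_image_buf()   # list of [timestamp, frame] pairs, or None
#   cam.stop()
#   cam.close()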
@ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_int, ctypes.POINTER(mvsdk.tSdkFrameHead), ctypes.c_voidp)
#def GrabImageCallback(CameraHandle hCamera, BYTE *pFrameBuffer, tSdkFrameHead* pFrameHead,PVOID pContext):
def GrabImageCallback(hCamera, pRawData, pFrameHead, pContext):
global Tlock, camera_instance
print("new frame")
with Tlock:
#print("new image start")
mvsdk.CameraImageProcess(camera_instance.hcam, pRawData, camera_instance.frameBuffer, pFrameHead.contents)
#mvsdk.CameraReleaseImageBuffer(camera_instance.hcam, pRawData)
frame_data = (mvsdk.c_ubyte * pFrameHead.contents.uBytes).from_address(camera_instance.frameBuffer)
frame = np.frombuffer(frame_data, dtype=np.uint8)
frame = frame.reshape((pFrameHead.contents.iHeight, pFrameHead.contents.iWidth, 1 if pFrameHead.contents.uiMediaType == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3) )
camera_instance.img_list.append([time.time(),frame])
camera_instance.new_frame = True
#cv2.imwrite("./img.png", frame)
#print("new image end %sx%s"%(pFrameHead.contents.iWidth, pFrameHead.contents.iHeight))
#sdk.CameraSetMediaType(camera_instance.hcam, pFrameBuffer, camera_instance.frameBuffer, pFrameHead)
#CameraSaveImage(m_hCamera, strFileName.GetBuffer(), pImageData, pImageHead, FILE_BMP, 100);
|
cluster.py
|
# Future
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import ast
# Standard
import importlib
import signal
import socket
import traceback
import uuid
from multiprocessing import Event, Process, Value, current_process
from time import sleep
# external
import arrow
# Django
from django import db
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
# Local
import django_q.tasks
from django_q.brokers import get_broker
from django_q.conf import Conf, logger, psutil, get_ppid, error_reporter
from django_q.humanhash import humanize
from django_q.models import Task, Success, Schedule
from django_q.queues import Queue
from django_q.signals import pre_execute
from django_q.signing import SignedPackage, BadSignature
from django_q.status import Stat, Status
class Cluster(object):
def __init__(self, broker=None):
self.broker = broker or get_broker()
self.sentinel = None
self.stop_event = None
self.start_event = None
self.pid = current_process().pid
self.cluster_id = uuid.uuid4()
self.host = socket.gethostname()
self.timeout = Conf.TIMEOUT
signal.signal(signal.SIGTERM, self.sig_handler)
signal.signal(signal.SIGINT, self.sig_handler)
def start(self):
# Start Sentinel
self.stop_event = Event()
self.start_event = Event()
self.sentinel = Process(
target=Sentinel,
args=(
self.stop_event,
self.start_event,
self.cluster_id,
self.broker,
self.timeout,
),
)
self.sentinel.start()
logger.info(_(f"Q Cluster {self.name} starting."))
while not self.start_event.is_set():
sleep(0.1)
return self.pid
def stop(self):
if not self.sentinel.is_alive():
return False
logger.info(_(f"Q Cluster {self.name} stopping."))
self.stop_event.set()
self.sentinel.join()
logger.info(_(f"Q Cluster {self.name} has stopped."))
self.start_event = None
self.stop_event = None
return True
def sig_handler(self, signum, frame):
logger.debug(
_(
f'{current_process().name} got signal {Conf.SIGNAL_NAMES.get(signum, "UNKNOWN")}'
)
)
self.stop()
@property
def stat(self):
if self.sentinel:
return Stat.get(pid=self.pid, cluster_id=self.cluster_id)
return Status(pid=self.pid, cluster_id=self.cluster_id)
@property
def name(self):
return humanize(self.cluster_id.hex)
@property
def is_starting(self):
return self.stop_event and self.start_event and not self.start_event.is_set()
@property
def is_running(self):
return self.stop_event and self.start_event and self.start_event.is_set()
@property
def is_stopping(self):
return (
self.stop_event
and self.start_event
and self.start_event.is_set()
and self.stop_event.is_set()
)
@property
def has_stopped(self):
return self.start_event is None and self.stop_event is None and self.sentinel
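# A minimal sketch of driving the class above directly (in practice the
# cluster is usually launched via Django's `qcluster` management command):
#   c = Cluster()
#   c.start()    # spawns the sentinel and blocks until it signals readiness
#   c.stat       # Stat/Status snapshot for monitoring
#   c.stop()     # sets the stop event and joins the sentinel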
class Sentinel(object):
def __init__(
self,
stop_event,
start_event,
cluster_id,
broker=None,
timeout=Conf.TIMEOUT,
start=True,
):
# Make sure we catch signals for the pool
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
self.pid = current_process().pid
self.cluster_id = cluster_id
self.parent_pid = get_ppid()
self.name = current_process().name
self.broker = broker or get_broker()
self.reincarnations = 0
self.tob = timezone.now()
self.stop_event = stop_event
self.start_event = start_event
self.pool_size = Conf.WORKERS
self.pool = []
self.timeout = timeout
self.task_queue = (
Queue(maxsize=Conf.QUEUE_LIMIT) if Conf.QUEUE_LIMIT else Queue()
)
self.result_queue = Queue()
self.event_out = Event()
self.monitor = None
self.pusher = None
if start:
self.start()
def start(self):
self.broker.ping()
self.spawn_cluster()
self.guard()
def status(self):
if not self.start_event.is_set() and not self.stop_event.is_set():
return Conf.STARTING
elif self.start_event.is_set() and not self.stop_event.is_set():
if self.result_queue.empty() and self.task_queue.empty():
return Conf.IDLE
return Conf.WORKING
elif self.stop_event.is_set() and self.start_event.is_set():
if self.monitor.is_alive() or self.pusher.is_alive() or len(self.pool) > 0:
return Conf.STOPPING
return Conf.STOPPED
def spawn_process(self, target, *args):
"""
:type target: function or class
"""
p = Process(target=target, args=args)
p.daemon = True
if target == worker:
p.daemon = Conf.DAEMONIZE_WORKERS
p.timer = args[2]
self.pool.append(p)
p.start()
return p
def spawn_pusher(self):
return self.spawn_process(pusher, self.task_queue, self.event_out, self.broker)
def spawn_worker(self):
self.spawn_process(
worker, self.task_queue, self.result_queue, Value("f", -1), self.timeout
)
def spawn_monitor(self):
return self.spawn_process(monitor, self.result_queue, self.broker)
def reincarnate(self, process):
"""
:param process: the process to reincarnate
:type process: Process or None
"""
close_old_django_connections()
if process == self.monitor:
self.monitor = self.spawn_monitor()
logger.error(_(f"reincarnated monitor {process.name} after sudden death"))
elif process == self.pusher:
self.pusher = self.spawn_pusher()
logger.error(_(f"reincarnated pusher {process.name} after sudden death"))
else:
self.pool.remove(process)
self.spawn_worker()
if process.timer.value == 0:
# only need to terminate on timeout, otherwise we risk destabilizing the queues
process.terminate()
logger.warn(_(f"reincarnated worker {process.name} after timeout"))
elif int(process.timer.value) == -2:
logger.info(_(f"recycled worker {process.name}"))
else:
logger.error(_(f"reincarnated worker {process.name} after death"))
self.reincarnations += 1
def spawn_cluster(self):
self.pool = []
Stat(self).save()
close_old_django_connections()
# spawn worker pool
for __ in range(self.pool_size):
self.spawn_worker()
# spawn auxiliary
self.monitor = self.spawn_monitor()
self.pusher = self.spawn_pusher()
# set worker cpu affinity if needed
if psutil and Conf.CPU_AFFINITY:
set_cpu_affinity(Conf.CPU_AFFINITY, [w.pid for w in self.pool])
def guard(self):
logger.info(
_(
f"{current_process().name} guarding cluster {humanize(self.cluster_id.hex)}"
)
)
self.start_event.set()
Stat(self).save()
logger.info(_(f"Q Cluster {humanize(self.cluster_id.hex)} running."))
counter = 0
cycle = Conf.GUARD_CYCLE # guard loop sleep in seconds
# Guard loop. Runs at least once
while not self.stop_event.is_set() or not counter:
# Check Workers
for p in self.pool:
with p.timer.get_lock():
# Are you alive?
if not p.is_alive() or p.timer.value == 0:
self.reincarnate(p)
continue
# Decrement timer if work is being done
if p.timer.value > 0:
p.timer.value -= cycle
# Check Monitor
if not self.monitor.is_alive():
self.reincarnate(self.monitor)
# Check Pusher
if not self.pusher.is_alive():
self.reincarnate(self.pusher)
# Call scheduler once a minute (or so)
counter += cycle
if counter >= 30 and Conf.SCHEDULER:
counter = 0
scheduler(broker=self.broker)
# Save current status
Stat(self).save()
sleep(cycle)
self.stop()
def stop(self):
Stat(self).save()
name = current_process().name
logger.info(_(f"{name} stopping cluster processes"))
# Stopping pusher
self.event_out.set()
# Wait for it to stop
while self.pusher.is_alive():
sleep(0.1)
Stat(self).save()
# Put poison pills in the queue
for __ in range(len(self.pool)):
self.task_queue.put("STOP")
self.task_queue.close()
# wait for the task queue to empty
self.task_queue.join_thread()
# Wait for all the workers to exit
while len(self.pool):
for p in self.pool:
if not p.is_alive():
self.pool.remove(p)
sleep(0.1)
Stat(self).save()
# Finally stop the monitor
self.result_queue.put("STOP")
self.result_queue.close()
# Wait for the result queue to empty
self.result_queue.join_thread()
logger.info(_(f"{name} waiting for the monitor."))
# Wait for everything to close or time out
count = 0
if not self.timeout:
self.timeout = 30
while self.status() == Conf.STOPPING and count < self.timeout * 10:
sleep(0.1)
Stat(self).save()
count += 1
# Final status
Stat(self).save()
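# The module-level functions below form the cluster's data path, as wired up
# by Sentinel.spawn_cluster(): `pusher` pulls signed task packages off the
# broker into task_queue, each `worker` executes them and places results on
# result_queue, and `monitor` saves and acknowledges the finished tasks.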
def pusher(task_queue, event, broker=None):
"""
Pulls tasks off the broker and puts them in the task queue
:type task_queue: multiprocessing.Queue
:type event: multiprocessing.Event
"""
if not broker:
broker = get_broker()
logger.info(_(f"{current_process().name} pushing tasks at {current_process().pid}"))
while True:
try:
task_set = broker.dequeue()
except Exception as e:
logger.error(e, traceback.format_exc())
# broker probably crashed. Let the sentinel handle it.
sleep(10)
break
if task_set:
for task in task_set:
ack_id = task[0]
# unpack the task
try:
task = SignedPackage.loads(task[1])
except (TypeError, BadSignature) as e:
logger.error(e, traceback.format_exc())
broker.fail(ack_id)
continue
task["ack_id"] = ack_id
task_queue.put(task)
logger.debug(_(f"queueing from {broker.list_key}"))
if event.is_set():
break
logger.info(_(f"{current_process().name} stopped pushing tasks"))
def monitor(result_queue, broker=None):
"""
Gets finished tasks from the result queue and saves them to Django
:type result_queue: multiprocessing.Queue
"""
if not broker:
broker = get_broker()
name = current_process().name
logger.info(_(f"{name} monitoring at {current_process().pid}"))
for task in iter(result_queue.get, "STOP"):
# save the result
if task.get("cached", False):
save_cached(task, broker)
else:
save_task(task, broker)
# acknowledge result
ack_id = task.pop("ack_id", False)
if ack_id and (task["success"] or task.get("ack_failure", False)):
broker.acknowledge(ack_id)
# log the result
if task["success"]:
# log success
logger.info(_(f"Processed [{task['name']}]"))
else:
# log failure
logger.error(_(f"Failed [{task['name']}] - {task['result']}"))
logger.info(_(f"{name} stopped monitoring results"))
def worker(task_queue, result_queue, timer, timeout=Conf.TIMEOUT):
"""
Takes a task from the task queue, tries to execute it and puts the result back in the result queue
:type task_queue: multiprocessing.Queue
:type result_queue: multiprocessing.Queue
:type timer: multiprocessing.Value
"""
name = current_process().name
logger.info(_(f"{name} ready for work at {current_process().pid}"))
task_count = 0
if timeout is None:
timeout = -1
# Start reading the task queue
for task in iter(task_queue.get, "STOP"):
result = None
timer.value = -1 # Idle
task_count += 1
# Get the function from the task
logger.info(_(f'{name} processing [{task["name"]}]'))
f = task["func"]
# if it's not an instance try to get it from the string
if not callable(task["func"]):
try:
module, func = f.rsplit(".", 1)
m = importlib.import_module(module)
f = getattr(m, func)
except (ValueError, ImportError, AttributeError) as e:
result = (e, False)
if error_reporter:
error_reporter.report()
# We're still going
if not result:
close_old_django_connections()
timer_value = task.pop("timeout", timeout)
# signal execution
pre_execute.send(sender="django_q", func=f, task=task)
# execute the payload
timer.value = timer_value # Busy
try:
res = f(*task["args"], **task["kwargs"])
result = (res, True)
except Exception as e:
result = (f"{e} : {traceback.format_exc()}", False)
if error_reporter:
error_reporter.report()
with timer.get_lock():
# Process result
task["result"] = result[0]
task["success"] = result[1]
task["stopped"] = timezone.now()
result_queue.put(task)
timer.value = -1 # Idle
# Recycle
if task_count == Conf.RECYCLE:
timer.value = -2 # Recycled
break
logger.info(_(f"{name} stopped doing work"))
def save_task(task, broker):
"""
Saves the task package to Django or the cache
"""
# SAVE LIMIT < 0 : Don't save success
if not task.get("save", Conf.SAVE_LIMIT >= 0) and task["success"]:
return
# enqueues next in a chain
if task.get("chain", None):
django_q.tasks.async_chain(
task["chain"],
group=task["group"],
cached=task["cached"],
sync=task["sync"],
broker=broker,
)
# SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning
close_old_django_connections()
try:
if task["success"] and 0 < Conf.SAVE_LIMIT <= Success.objects.count():
Success.objects.last().delete()
# check if this task has previous results
if Task.objects.filter(id=task["id"], name=task["name"]).exists():
existing_task = Task.objects.get(id=task["id"], name=task["name"])
# only update the result if it hasn't succeeded yet
if not existing_task.success:
existing_task.stopped = task["stopped"]
existing_task.result = task["result"]
existing_task.success = task["success"]
existing_task.save()
else:
Task.objects.create(
id=task["id"],
name=task["name"],
func=task["func"],
hook=task.get("hook"),
args=task["args"],
kwargs=task["kwargs"],
started=task["started"],
stopped=task["stopped"],
result=task["result"],
group=task.get("group"),
success=task["success"],
)
except Exception as e:
logger.error(e)
def save_cached(task, broker):
task_key = f'{broker.list_key}:{task["id"]}'
timeout = task["cached"]
if timeout is True:
timeout = None
try:
group = task.get("group", None)
iter_count = task.get("iter_count", 0)
# if it's a group append to the group list
if group:
group_key = f"{broker.list_key}:{group}:keys"
group_list = broker.cache.get(group_key) or []
# if it's an iter group, check if we are ready
if iter_count and len(group_list) == iter_count - 1:
group_args = f"{broker.list_key}:{group}:args"
# collate the results into a Task result
results = [
SignedPackage.loads(broker.cache.get(k))["result"]
for k in group_list
]
results.append(task["result"])
task["result"] = results
task["id"] = group
task["args"] = SignedPackage.loads(broker.cache.get(group_args))
task.pop("iter_count", None)
task.pop("group", None)
if task.get("iter_cached", None):
task["cached"] = task.pop("iter_cached", None)
save_cached(task, broker=broker)
else:
save_task(task, broker)
broker.cache.delete_many(group_list)
broker.cache.delete_many([group_key, group_args])
return
# save the group list
group_list.append(task_key)
broker.cache.set(group_key, group_list, timeout)
# async_task next in a chain
if task.get("chain", None):
django_q.tasks.async_chain(
task["chain"],
group=group,
cached=task["cached"],
sync=task["sync"],
broker=broker,
)
# save the task
broker.cache.set(task_key, SignedPackage.dumps(task), timeout)
except Exception as e:
logger.error(e)
def scheduler(broker=None):
"""
Creates a task from a schedule at the scheduled time and schedules next run
"""
if not broker:
broker = get_broker()
close_old_django_connections()
try:
with db.transaction.atomic():
for s in (
Schedule.objects.select_for_update()
.exclude(repeats=0)
.filter(next_run__lt=timezone.now())
):
args = ()
kwargs = {}
# get args, kwargs and hook
if s.kwargs:
try:
# eval should be safe here because dict()
kwargs = eval(f"dict({s.kwargs})")
except SyntaxError:
kwargs = {}
if s.args:
args = ast.literal_eval(s.args)
# single value won't eval to tuple, so:
if type(args) != tuple:
args = (args,)
q_options = kwargs.get("q_options", {})
if s.hook:
q_options["hook"] = s.hook
# set up the next run time
if not s.schedule_type == s.ONCE:
next_run = arrow.get(s.next_run)
while True:
if s.schedule_type == s.MINUTES:
next_run = next_run.shift(minutes=+(s.minutes or 1))
elif s.schedule_type == s.HOURLY:
next_run = next_run.shift(hours=+1)
elif s.schedule_type == s.DAILY:
next_run = next_run.shift(days=+1)
elif s.schedule_type == s.WEEKLY:
next_run = next_run.shift(weeks=+1)
elif s.schedule_type == s.MONTHLY:
next_run = next_run.shift(months=+1)
elif s.schedule_type == s.QUARTERLY:
next_run = next_run.shift(months=+3)
elif s.schedule_type == s.YEARLY:
next_run = next_run.shift(years=+1)
if Conf.CATCH_UP or next_run > arrow.utcnow():
break
s.next_run = next_run.datetime
s.repeats += -1
# send it to the cluster
q_options["broker"] = broker
q_options["group"] = q_options.get("group", s.name or s.id)
kwargs["q_options"] = q_options
s.task = django_q.tasks.async_task(s.func, *args, **kwargs)
# log it
if not s.task:
logger.error(
_(
f"{current_process().name} failed to create a task from schedule [{s.name or s.id}]"
)
)
else:
logger.info(
_(
f"{current_process().name} created a task from schedule [{s.name or s.id}]"
)
)
# default behavior is to delete a ONCE schedule
if s.schedule_type == s.ONCE:
if s.repeats < 0:
s.delete()
continue
# but not if it has a positive repeats
s.repeats = 0
# save the schedule
s.save()
except Exception as e:
logger.error(e)
def close_old_django_connections():
"""
Close django connections unless running with sync=True.
"""
if Conf.SYNC:
logger.warning(
"Preserving django database connections because sync=True. Beware "
"that tasks are now injected in the calling context/transactions "
"which may result in unexpected bahaviour."
)
else:
db.close_old_connections()
def set_cpu_affinity(n, process_ids, actual=not Conf.TESTING):
"""
Sets the cpu affinity for the supplied processes.
Requires the optional psutil module.
:param int n: affinity
:param list process_ids: a list of pids
:param bool actual: Test workaround for Travis not supporting cpu affinity
"""
# check if we have the psutil module
if not psutil:
logger.warning("Skipping cpu affinity because psutil was not found.")
return
# check if the platform supports cpu_affinity
if actual and not hasattr(psutil.Process(process_ids[0]), "cpu_affinity"):
logger.warning(
"Faking cpu affinity because it is not supported on this platform"
)
actual = False
# get the available processors
cpu_list = list(range(psutil.cpu_count()))
# an affinity of 0, or one >= cpu_count, amounts to no affinity
if not n or n >= len(cpu_list):
return
# spread the workers over the available processors.
index = 0
for pid in process_ids:
affinity = []
for k in range(n):
if index == len(cpu_list):
index = 0
affinity.append(cpu_list[index])
index += 1
if psutil.pid_exists(pid):
p = psutil.Process(pid)
if actual:
p.cpu_affinity(affinity)
logger.info(_(f"{pid} will use cpu {affinity}"))
|
test_tensorflow2_autolog.py
|
# pep8: disable=E501
import collections
import os
import pickle
import sys
from unittest.mock import patch
import json
import numpy as np
import pandas as pd
import pytest
import tensorflow as tf
from tensorflow import estimator as tf_estimator
from packaging.version import Version
from tensorflow.keras import layers
import yaml
import mlflow
import mlflow.keras
import mlflow.tensorflow
from mlflow.models import Model
from mlflow.models.utils import _read_example
from mlflow.tensorflow._autolog import _TensorBoard, __MLflowTfKeras2Callback
from mlflow.tracking.client import MlflowClient
from mlflow.utils.autologging_utils import BatchMetricsLogger, autologging_is_disabled
np.random.seed(1337)
SavedModelInfo = collections.namedtuple(
"SavedModelInfo",
["path", "meta_graph_tags", "signature_def_key", "inference_df", "expected_results_df"],
)
@pytest.fixture(autouse=True)
def clear_session():
yield
tf.keras.backend.clear_session()
@pytest.fixture
def random_train_data():
return np.random.random((150, 4))
@pytest.fixture
def random_one_hot_labels():
n, n_class = (150, 3)
classes = np.random.randint(0, n_class, n)
labels = np.zeros((n, n_class))
labels[np.arange(n), classes] = 1
return labels
@pytest.fixture
def random_train_dict_mapping(random_train_data):
def _generate_features(pos):
return [v[pos] for v in random_train_data]
features = {
"a": np.array(_generate_features(0)),
"b": np.array(_generate_features(1)),
"c": np.array(_generate_features(2)),
"d": np.array(_generate_features(3)),
}
return features
def _create_model_for_dict_mapping():
model = tf.keras.Sequential()
model.add(
layers.DenseFeatures(
[
tf.feature_column.numeric_column("a"),
tf.feature_column.numeric_column("b"),
tf.feature_column.numeric_column("c"),
tf.feature_column.numeric_column("d"),
]
)
)
model.add(layers.Dense(16, activation="relu", input_shape=(4,)))
model.add(layers.Dense(3, activation="softmax"))
model.compile(
optimizer=tf.keras.optimizers.Adam(), loss="categorical_crossentropy", metrics=["accuracy"]
)
return model
@pytest.fixture
def fashion_mnist_tf_dataset():
train, _ = tf.keras.datasets.fashion_mnist.load_data()
images, labels = train
images = images / 255.0
labels = labels.astype(np.int32)
fmnist_train_ds = tf.data.Dataset.from_tensor_slices((images, labels))
fmnist_train_ds = fmnist_train_ds.shuffle(5000).batch(32)
return fmnist_train_ds
def _create_fashion_mnist_model():
model = tf.keras.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(10)])
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"],
)
return model
@pytest.fixture
def keras_data_gen_sequence(random_train_data, random_one_hot_labels):
class DataGenerator(tf.keras.utils.Sequence):
def __len__(self):
return 128
def __getitem__(self, index):
x = random_train_data
y = random_one_hot_labels
return x, y
return DataGenerator()
@pytest.fixture
def clear_tf_keras_imports():
"""
Simulates a state where `tensorflow` and `keras` are not imported by removing these
libraries from the `sys.modules` dictionary. This is useful for testing the interaction
between TensorFlow / Keras and the fluent `mlflow.autolog()` API because it will cause import
hooks to be re-triggered upon re-import after `mlflow.autolog()` is enabled.
"""
sys.modules.pop("tensorflow", None)
sys.modules.pop("keras", None)
@pytest.fixture(autouse=True)
def clear_fluent_autologging_import_hooks():
"""
Clears import hooks for MLflow fluent autologging (`mlflow.autolog()`) between tests
to ensure that interactions between fluent autologging and TensorFlow / tf.keras can
be tested successfully
"""
mlflow.utils.import_hooks._post_import_hooks.pop("tensorflow", None)
mlflow.utils.import_hooks._post_import_hooks.pop("keras", None)
def create_tf_keras_model():
model = tf.keras.Sequential()
model.add(layers.Dense(16, activation="relu", input_shape=(4,)))
model.add(layers.Dense(3, activation="softmax"))
model.compile(
optimizer=tf.keras.optimizers.Adam(), loss="categorical_crossentropy", metrics=["accuracy"]
)
return model
@pytest.mark.large
def test_tf_keras_autolog_ends_auto_created_run(random_train_data, random_one_hot_labels):
mlflow.tensorflow.autolog()
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
model.fit(data, labels, epochs=10)
assert mlflow.active_run() is None
@pytest.mark.large
@pytest.mark.parametrize("log_models", [True, False])
def test_tf_keras_autolog_log_models_configuration(
random_train_data, random_one_hot_labels, log_models
):
# pylint: disable=unused-argument
mlflow.tensorflow.autolog(log_models=log_models)
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
model.fit(data, labels, epochs=10)
client = mlflow.tracking.MlflowClient()
run_id = client.list_run_infos(experiment_id="0")[0].run_id
artifacts = client.list_artifacts(run_id)
artifacts = map(lambda x: x.path, artifacts)
assert ("model" in artifacts) == log_models
@pytest.mark.large
def test_tf_keras_autolog_persists_manually_created_run(random_train_data, random_one_hot_labels):
mlflow.tensorflow.autolog()
with mlflow.start_run() as run:
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
model.fit(data, labels, epochs=10)
assert mlflow.active_run()
assert mlflow.active_run().info.run_id == run.info.run_id
@pytest.fixture
def tf_keras_random_data_run(random_train_data, random_one_hot_labels, initial_epoch):
# pylint: disable=unused-argument
mlflow.tensorflow.autolog()
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
history = model.fit(
data, labels, epochs=initial_epoch + 10, steps_per_epoch=1, initial_epoch=initial_epoch
)
client = mlflow.tracking.MlflowClient()
return client.get_run(client.list_run_infos(experiment_id="0")[0].run_id), history
@pytest.mark.large
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_logs_expected_data(tf_keras_random_data_run):
run, history = tf_keras_random_data_run
data = run.data
assert "accuracy" in data.metrics
assert "loss" in data.metrics
# Testing explicitly passed parameters are logged correctly
assert "epochs" in data.params
assert data.params["epochs"] == str(history.epoch[-1] + 1)
assert "steps_per_epoch" in data.params
assert data.params["steps_per_epoch"] == "1"
# Testing default parameters are logged correctly
assert "initial_epoch" in data.params
assert data.params["initial_epoch"] == str(history.epoch[0])
# Testing unwanted parameters are not logged
assert "callbacks" not in data.params
assert "validation_data" not in data.params
# Testing optimizer parameters are logged
assert "opt_name" in data.params
assert data.params["opt_name"] == "Adam"
assert "opt_learning_rate" in data.params
assert "opt_decay" in data.params
assert "opt_beta_1" in data.params
assert "opt_beta_2" in data.params
assert "opt_epsilon" in data.params
assert "opt_amsgrad" in data.params
assert data.params["opt_amsgrad"] == "False"
client = mlflow.tracking.MlflowClient()
all_epoch_acc = client.get_metric_history(run.info.run_id, "accuracy")
num_of_epochs = len(history.history["loss"])
assert len(all_epoch_acc) == num_of_epochs == 10
artifacts = client.list_artifacts(run.info.run_id)
artifacts = map(lambda x: x.path, artifacts)
assert "model_summary.txt" in artifacts
@pytest.mark.large
def test_tf_keras_autolog_records_metrics_for_last_epoch(random_train_data, random_one_hot_labels):
every_n_iter = 5
num_training_epochs = 17
mlflow.tensorflow.autolog(every_n_iter=every_n_iter)
model = create_tf_keras_model()
with mlflow.start_run() as run:
model.fit(
random_train_data,
random_one_hot_labels,
epochs=num_training_epochs,
initial_epoch=0,
)
client = mlflow.tracking.MlflowClient()
run_metrics = client.get_run(run.info.run_id).data.metrics
assert "accuracy" in run_metrics
all_epoch_acc = client.get_metric_history(run.info.run_id, "accuracy")
assert set([metric.step for metric in all_epoch_acc]) == set([0, 5, 10, 15])
@pytest.mark.large
def test_tf_keras_autolog_logs_metrics_for_single_epoch_training(
random_train_data, random_one_hot_labels
):
"""
tf.Keras exhibits inconsistent epoch indexing behavior in comparison with other
TF2 APIs (e.g., tf.Estimator). tf.Keras uses zero-indexing for epochs,
while other APIs use one-indexing. Accordingly, this test verifies that metrics are
produced in the boundary case where a model is trained for a single epoch, ensuring
that we don't miss the zero index in the tf.Keras case.
"""
mlflow.tensorflow.autolog(every_n_iter=5)
model = create_tf_keras_model()
with mlflow.start_run() as run:
model.fit(random_train_data, random_one_hot_labels, epochs=1)
client = mlflow.tracking.MlflowClient()
run_metrics = client.get_run(run.info.run_id).data.metrics
assert "accuracy" in run_metrics
assert "loss" in run_metrics
@pytest.mark.large
def test_tf_keras_autolog_names_positional_parameters_correctly(
random_train_data, random_one_hot_labels
):
mlflow.tensorflow.autolog(every_n_iter=5)
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
with mlflow.start_run():
# Pass `batch_size` as a positional argument for testing purposes
model.fit(data, labels, 8, epochs=10, steps_per_epoch=1)
run_id = mlflow.active_run().info.run_id
client = mlflow.tracking.MlflowClient()
run_info = client.get_run(run_id)
assert run_info.data.params.get("batch_size") == "8"
@pytest.mark.large
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_model_can_load_from_artifact(tf_keras_random_data_run, random_train_data):
run, _ = tf_keras_random_data_run
client = mlflow.tracking.MlflowClient()
artifacts = client.list_artifacts(run.info.run_id)
artifacts = map(lambda x: x.path, artifacts)
assert "model" in artifacts
assert "tensorboard_logs" in artifacts
model = mlflow.keras.load_model("runs:/" + run.info.run_id + "/model")
model.predict(random_train_data)
def get_tf_keras_random_data_run_with_callback(
random_train_data,
random_one_hot_labels,
callback,
restore_weights,
patience,
initial_epoch,
):
# pylint: disable=unused-argument
mlflow.tensorflow.autolog(every_n_iter=1)
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
if callback == "early":
# min_delta is set as such to guarantee early stopping
callback = tf.keras.callbacks.EarlyStopping(
monitor="loss",
patience=patience,
min_delta=99999999,
restore_best_weights=restore_weights,
verbose=1,
)
else:
class CustomCallback(tf.keras.callbacks.Callback):
def on_train_end(self, logs=None):
pass
callback = CustomCallback()
history = model.fit(
data, labels, epochs=initial_epoch + 10, callbacks=[callback], initial_epoch=initial_epoch
)
client = mlflow.tracking.MlflowClient()
return client.get_run(client.list_run_infos(experiment_id="0")[0].run_id), history, callback
@pytest.fixture
def tf_keras_random_data_run_with_callback(
random_train_data,
random_one_hot_labels,
callback,
restore_weights,
patience,
initial_epoch,
):
return get_tf_keras_random_data_run_with_callback(
random_train_data,
random_one_hot_labels,
callback,
restore_weights,
patience,
initial_epoch,
)
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [True])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [0, 1, 5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_early_stop_logs(tf_keras_random_data_run_with_callback, initial_epoch):
run, history, callback = tf_keras_random_data_run_with_callback
metrics = run.data.metrics
params = run.data.params
assert "patience" in params
assert params["patience"] == str(callback.patience)
assert "monitor" in params
assert params["monitor"] == "loss"
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" in metrics
assert "restored_epoch" in metrics
restored_epoch = int(metrics["restored_epoch"])
# In this test, the best epoch is always the first epoch because the early stopping callback
# never observes a loss improvement due to an extremely large `min_delta` value
assert restored_epoch == initial_epoch
assert "loss" in history.history
client = mlflow.tracking.MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check that MLflow has logged the metrics of the "best" model, in addition to per-epoch metrics
loss = history.history["loss"]
assert len(metric_history) == len(loss) + 1
steps, values = map(list, zip(*[(m.step, m.value) for m in metric_history]))
# Check that MLflow has logged the correct steps
assert steps == [*history.epoch, callback.stopped_epoch + 1]
# Check that MLflow has logged the correct metric values
np.testing.assert_allclose(values, [*loss, callback.best])
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [True])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [0, 1, 5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_batch_metrics_logger_logs_expected_metrics(
callback,
restore_weights,
patience,
initial_epoch,
random_train_data,
random_one_hot_labels,
):
patched_metrics_data = []
# Mock patching BatchMetricsLogger.record_metrics()
# to ensure that expected metrics are being logged.
original = BatchMetricsLogger.record_metrics
with patch(
"mlflow.utils.autologging_utils.BatchMetricsLogger.record_metrics", autospec=True
) as record_metrics_mock:
def record_metrics_side_effect(self, metrics, step=None):
patched_metrics_data.extend(metrics.items())
original(self, metrics, step)
record_metrics_mock.side_effect = record_metrics_side_effect
run, _, callback = get_tf_keras_random_data_run_with_callback(
random_train_data,
random_one_hot_labels,
callback,
restore_weights,
patience,
initial_epoch,
)
patched_metrics_data = dict(patched_metrics_data)
original_metrics = run.data.metrics
for metric_name in original_metrics:
assert metric_name in patched_metrics_data
restored_epoch = int(patched_metrics_data["restored_epoch"])
assert restored_epoch == initial_epoch
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [True])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [11])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_early_stop_no_stop_does_not_log(tf_keras_random_data_run_with_callback):
run, history, callback = tf_keras_random_data_run_with_callback
metrics = run.data.metrics
params = run.data.params
assert "patience" in params
assert params["patience"] == str(callback.patience)
assert "monitor" in params
assert params["monitor"] == "loss"
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" not in metrics
assert "restored_epoch" not in metrics
assert "loss" in history.history
num_of_epochs = len(history.history["loss"])
client = mlflow.tracking.MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check the test epoch numbers are correct
assert num_of_epochs == 10
assert len(metric_history) == num_of_epochs
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [False])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_early_stop_no_restore_doesnt_log(tf_keras_random_data_run_with_callback):
run, history, callback = tf_keras_random_data_run_with_callback
metrics = run.data.metrics
params = run.data.params
assert "patience" in params
assert params["patience"] == str(callback.patience)
assert "monitor" in params
assert params["monitor"] == "loss"
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" in metrics
assert "restored_epoch" not in metrics
assert "loss" in history.history
num_of_epochs = len(history.history["loss"])
client = mlflow.tracking.MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check the test epoch numbers are correct
assert num_of_epochs == callback.patience + 1
assert len(metric_history) == num_of_epochs
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [False])
@pytest.mark.parametrize("callback", ["not-early"])
@pytest.mark.parametrize("patience", [5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_non_early_stop_callback_no_log(tf_keras_random_data_run_with_callback):
run, history = tf_keras_random_data_run_with_callback[:-1]
metrics = run.data.metrics
params = run.data.params
assert "patience" not in params
assert "monitor" not in params
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" not in metrics
assert "restored_epoch" not in metrics
assert "loss" in history.history
num_of_epochs = len(history.history["loss"])
client = mlflow.tracking.MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check the test epoch numbers are correct
assert num_of_epochs == 10
assert len(metric_history) == num_of_epochs
@pytest.mark.parametrize("positional", [True, False])
def test_tf_keras_autolog_does_not_mutate_original_callbacks_list(
tmpdir, random_train_data, random_one_hot_labels, positional
):
"""
TensorFlow autologging passes new callbacks to the `fit()` / `fit_generator()` function. If
    user-defined callbacks already exist, the new callbacks are added to the
    user-specified ones. This test verifies that the new callbacks are added without
permanently mutating the original list of callbacks.
"""
mlflow.tensorflow.autolog()
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=tmpdir)
callbacks = [tensorboard_callback]
model = create_tf_keras_model()
data = random_train_data
labels = random_one_hot_labels
if positional:
model.fit(data, labels, None, 10, 1, callbacks)
else:
model.fit(data, labels, epochs=10, callbacks=callbacks)
assert len(callbacks) == 1
assert callbacks == [tensorboard_callback]
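# A minimal, hedged sketch of the non-mutating pattern the test above verifies.
# It is illustrative only and not MLflow's patched fit() implementation: build
# a new list containing the user's callbacks plus the autologging ones instead
# of appending to the caller's list in place.
def _demo_with_autolog_callbacks(user_callbacks, autolog_callbacks):
    return list(user_callbacks or []) + list(autolog_callbacks)
# Usage sketch: the caller's list stays untouched.
# combined = _demo_with_autolog_callbacks(user_cbs, [autolog_cb])
# assert user_cbs still contains exactly the callbacks the caller supplied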
@pytest.mark.large
def test_tf_keras_autolog_does_not_delete_logging_directory_for_tensorboard_callback(
tmpdir, random_train_data, random_one_hot_labels
):
tensorboard_callback_logging_dir_path = str(tmpdir.mkdir("tb_logs"))
tensorboard_callback = tf.keras.callbacks.TensorBoard(
tensorboard_callback_logging_dir_path, histogram_freq=0
)
mlflow.tensorflow.autolog()
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
model.fit(data, labels, epochs=10, callbacks=[tensorboard_callback])
assert os.path.exists(tensorboard_callback_logging_dir_path)
@pytest.mark.large
def test_tf_keras_autolog_logs_to_and_deletes_temporary_directory_when_tensorboard_callback_absent(
tmpdir, random_train_data, random_one_hot_labels
):
from unittest import mock
from mlflow.tensorflow import _TensorBoardLogDir
mlflow.tensorflow.autolog()
mock_log_dir_inst = _TensorBoardLogDir(location=str(tmpdir.mkdir("tb_logging")), is_temp=True)
with mock.patch("mlflow.tensorflow._TensorBoardLogDir", autospec=True) as mock_log_dir_class:
mock_log_dir_class.return_value = mock_log_dir_inst
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
model.fit(data, labels, epochs=10)
assert not os.path.exists(mock_log_dir_inst.location)
def create_tf_estimator_model(directory, export, training_steps=100, use_v1_estimator=False):
CSV_COLUMN_NAMES = ["SepalLength", "SepalWidth", "PetalLength", "PetalWidth", "Species"]
train = pd.read_csv(
os.path.join(os.path.dirname(__file__), "iris_training.csv"),
names=CSV_COLUMN_NAMES,
header=0,
)
train_y = train.pop("Species")
def input_fn(features, labels, training=True, batch_size=256):
"""An input function for training or evaluating"""
# Convert the inputs to a Dataset.
dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
# Shuffle and repeat if you are in training mode.
if training:
dataset = dataset.shuffle(1000).repeat()
return dataset.batch(batch_size)
my_feature_columns = []
for key in train.keys():
my_feature_columns.append(tf.feature_column.numeric_column(key=key))
feature_spec = {}
for feature in CSV_COLUMN_NAMES:
feature_spec[feature] = tf.Variable([], dtype=tf.float64, name=feature)
receiver_fn = tf_estimator.export.build_raw_serving_input_receiver_fn(feature_spec)
run_config = tf_estimator.RunConfig(
# Emit loss metrics to TensorBoard every step
save_summary_steps=1,
)
    # If the flag is set to true, use the v1 classifier that extends Estimator;
    # if set to false, use the v2 classifier that extends EstimatorV2
if use_v1_estimator:
classifier = tf.compat.v1.estimator.DNNClassifier(
feature_columns=my_feature_columns,
# Two hidden layers of 10 nodes each.
hidden_units=[30, 10],
# The model must choose between 3 classes.
n_classes=3,
model_dir=directory,
config=run_config,
)
else:
classifier = tf_estimator.DNNClassifier(
feature_columns=my_feature_columns,
# Two hidden layers of 10 nodes each.
hidden_units=[30, 10],
# The model must choose between 3 classes.
n_classes=3,
model_dir=directory,
config=run_config,
)
classifier.train(input_fn=lambda: input_fn(train, train_y, training=True), steps=training_steps)
if export:
classifier.export_saved_model(directory, receiver_fn)
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
def test_tf_estimator_autolog_ends_auto_created_run(tmpdir, export):
directory = tmpdir.mkdir("test")
mlflow.tensorflow.autolog()
create_tf_estimator_model(str(directory), export)
assert mlflow.active_run() is None
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
def test_tf_estimator_autolog_persists_manually_created_run(tmpdir, export):
directory = tmpdir.mkdir("test")
with mlflow.start_run() as run:
create_tf_estimator_model(str(directory), export)
assert mlflow.active_run()
assert mlflow.active_run().info.run_id == run.info.run_id
@pytest.fixture
def tf_estimator_random_data_run(tmpdir, export):
# pylint: disable=unused-argument
directory = tmpdir.mkdir("test")
mlflow.tensorflow.autolog()
create_tf_estimator_model(str(directory), export)
client = mlflow.tracking.MlflowClient()
return client.get_run(client.list_run_infos(experiment_id="0")[0].run_id)
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
@pytest.mark.parametrize("use_v1_estimator", [True, False])
def test_tf_estimator_autolog_logs_metrics(tmpdir, export, use_v1_estimator):
directory = tmpdir.mkdir("test")
mlflow.tensorflow.autolog(every_n_iter=5)
with mlflow.start_run():
create_tf_estimator_model(
str(directory), export, use_v1_estimator=use_v1_estimator, training_steps=17
)
run_id = mlflow.active_run().info.run_id
client = mlflow.tracking.MlflowClient()
run = client.get_run(run_id)
assert "loss" in run.data.metrics
assert "steps" in run.data.params
metrics = client.get_metric_history(run_id, "loss")
assert set([metric.step for metric in metrics]) == set([1, 6, 11, 16])
@pytest.mark.large
@pytest.mark.parametrize("export", [True])
def test_tf_estimator_v1_autolog_can_load_from_artifact(tmpdir, export):
directory = tmpdir.mkdir("test")
mlflow.tensorflow.autolog()
create_tf_estimator_model(str(directory), export, use_v1_estimator=True)
client = mlflow.tracking.MlflowClient()
tf_estimator_v1_run = client.get_run(client.list_run_infos(experiment_id="0")[0].run_id)
artifacts = client.list_artifacts(tf_estimator_v1_run.info.run_id)
artifacts = map(lambda x: x.path, artifacts)
assert "model" in artifacts
mlflow.tensorflow.load_model("runs:/" + tf_estimator_v1_run.info.run_id + "/model")
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
def test_tf_estimator_autolog_logs_tensorboard_logs(tf_estimator_random_data_run):
client = mlflow.tracking.MlflowClient()
artifacts = client.list_artifacts(tf_estimator_random_data_run.info.run_id)
assert any(["tensorboard_logs" in a.path and a.is_dir for a in artifacts])
@pytest.mark.large
def test_tf_estimator_autolog_logs_metrics_in_exclusive_mode(tmpdir):
mlflow.tensorflow.autolog(exclusive=True)
create_tf_estimator_model(tmpdir, export=False)
client = mlflow.tracking.MlflowClient()
tf_estimator_run = client.get_run(client.list_run_infos(experiment_id="0")[0].run_id)
assert "loss" in tf_estimator_run.data.metrics
assert "steps" in tf_estimator_run.data.params
metrics = client.get_metric_history(tf_estimator_run.info.run_id, "loss")
assert len(metrics) == 100
@pytest.mark.large
def test_tf_estimator_autolog_logs_metrics_for_single_epoch_training(tmpdir):
"""
    Epoch indexing behavior is inconsistent across TensorFlow 2 APIs: tf.Keras uses
    zero-indexing for epochs, while other APIs (e.g., tf.Estimator) use one-indexing.
This test verifies that metrics are produced for tf.Estimator training sessions
    in the boundary case where a model is trained for a single epoch, ensuring that
we capture metrics from the first epoch at index 1.
"""
mlflow.tensorflow.autolog()
with mlflow.start_run() as run:
create_tf_estimator_model(str(tmpdir), export=False, training_steps=1)
client = mlflow.tracking.MlflowClient()
metrics = client.get_metric_history(run.info.run_id, "loss")
assert len(metrics) == 1
assert metrics[0].step == 1
@pytest.mark.large
@pytest.mark.parametrize("export", [True])
def test_tf_estimator_autolog_model_can_load_from_artifact(tf_estimator_random_data_run):
client = mlflow.tracking.MlflowClient()
artifacts = client.list_artifacts(tf_estimator_random_data_run.info.run_id)
artifacts = map(lambda x: x.path, artifacts)
assert "model" in artifacts
mlflow.tensorflow.load_model("runs:/" + tf_estimator_random_data_run.info.run_id + "/model")
@pytest.mark.large
def test_flush_queue_is_thread_safe():
"""
Autologging augments TensorBoard event logging hooks with MLflow `log_metric` API
calls. To prevent these API calls from blocking TensorBoard event logs, `log_metric`
API calls are scheduled via `_flush_queue` on a background thread. Accordingly, this test
verifies that `_flush_queue` is thread safe.
"""
from threading import Thread
from mlflow.entities import Metric
from mlflow.tensorflow import _flush_queue, _metric_queue_lock
client = mlflow.tracking.MlflowClient()
run = client.create_run(experiment_id="0")
metric_queue_item = (run.info.run_id, Metric("foo", 0.1, 100, 1))
mlflow.tensorflow._metric_queue.append(metric_queue_item)
# Verify that, if another thread holds a lock on the metric queue leveraged by
# _flush_queue, _flush_queue terminates and does not modify the queue
_metric_queue_lock.acquire()
flush_thread1 = Thread(target=_flush_queue)
flush_thread1.start()
flush_thread1.join()
assert len(mlflow.tensorflow._metric_queue) == 1
assert mlflow.tensorflow._metric_queue[0] == metric_queue_item
_metric_queue_lock.release()
# Verify that, if no other thread holds a lock on the metric queue leveraged by
# _flush_queue, _flush_queue flushes the queue as expected
flush_thread2 = Thread(target=_flush_queue)
flush_thread2.start()
flush_thread2.join()
assert len(mlflow.tensorflow._metric_queue) == 0
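# A hedged sketch of the locking behavior exercised above (illustrative names,
# not mlflow.tensorflow's actual implementation): the flusher acquires the
# queue lock without blocking and simply returns if another thread already
# holds it, so TensorBoard event logging is never blocked on metric uploads.
import threading as _demo_threading
_demo_metric_queue = []
_demo_metric_queue_lock = _demo_threading.Lock()
def _demo_flush_queue(send):
    # Non-blocking acquire: bail out if the lock is held elsewhere.
    if not _demo_metric_queue_lock.acquire(blocking=False):
        return
    try:
        while _demo_metric_queue:
            send(_demo_metric_queue.pop(0))
    finally:
        _demo_metric_queue_lock.release()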
def get_text_vec_model(train_samples):
# Taken from: https://github.com/mlflow/mlflow/issues/3910
# pylint: disable=no-name-in-module
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
VOCAB_SIZE = 10
SEQUENCE_LENGTH = 16
EMBEDDING_DIM = 16
vectorizer_layer = TextVectorization(
input_shape=(1,),
max_tokens=VOCAB_SIZE,
output_mode="int",
output_sequence_length=SEQUENCE_LENGTH,
)
vectorizer_layer.adapt(train_samples)
model = tf.keras.Sequential(
[
vectorizer_layer,
tf.keras.layers.Embedding(
VOCAB_SIZE,
EMBEDDING_DIM,
name="embedding",
mask_zero=True,
input_shape=(1,),
),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(16, activation="relu"),
tf.keras.layers.Dense(1, activation="tanh"),
]
)
model.compile(optimizer="adam", loss="mse", metrics="mae")
return model
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.3.0"),
reason=(
"Deserializing a model with `TextVectorization` and `Embedding`"
"fails in tensorflow < 2.3.0. See this issue:"
"https://github.com/tensorflow/tensorflow/issues/38250"
),
)
def test_autolog_text_vec_model(tmpdir):
"""
Verifies autolog successfully saves a model that can't be saved in the H5 format
"""
mlflow.tensorflow.autolog()
train_samples = np.array(["this is an example", "another example"])
train_labels = np.array([0.4, 0.2])
model = get_text_vec_model(train_samples)
# Saving in the H5 format should fail
with pytest.raises(NotImplementedError, match="is not supported in h5"):
model.save(tmpdir.join("model.h5").strpath, save_format="h5")
with mlflow.start_run() as run:
model.fit(train_samples, train_labels, epochs=1)
loaded_model = mlflow.keras.load_model("runs:/" + run.info.run_id + "/model")
np.testing.assert_array_equal(loaded_model.predict(train_samples), model.predict(train_samples))
def test_fit_generator(random_train_data, random_one_hot_labels):
mlflow.tensorflow.autolog()
model = create_tf_keras_model()
def generator():
while True:
yield random_train_data, random_one_hot_labels
with mlflow.start_run() as run:
model.fit_generator(generator(), epochs=10, steps_per_epoch=1)
run = mlflow.tracking.MlflowClient().get_run(run.info.run_id)
params = run.data.params
metrics = run.data.metrics
assert "epochs" in params
assert params["epochs"] == "10"
assert "steps_per_epoch" in params
assert params["steps_per_epoch"] == "1"
assert "accuracy" in metrics
assert "loss" in metrics
@pytest.mark.large
def test_tf_keras_model_autolog_registering_model(random_train_data, random_one_hot_labels):
registered_model_name = "test_autolog_registered_model"
mlflow.tensorflow.autolog(registered_model_name=registered_model_name)
with mlflow.start_run():
model = create_tf_keras_model()
model.fit(random_train_data, random_one_hot_labels, epochs=10)
registered_model = MlflowClient().get_registered_model(registered_model_name)
assert registered_model.name == registered_model_name
@pytest.mark.large
@pytest.mark.usefixtures("clear_tf_keras_imports")
def test_fluent_autolog_with_tf_keras_logs_expected_content(
random_train_data, random_one_hot_labels
):
"""
Guards against previously-exhibited issues where using the fluent `mlflow.autolog()` API with
`tf.keras` Models did not work due to conflicting patches set by both the
`mlflow.tensorflow.autolog()` and the `mlflow.keras.autolog()` APIs.
"""
mlflow.autolog()
model = create_tf_keras_model()
with mlflow.start_run() as run:
model.fit(random_train_data, random_one_hot_labels, epochs=10)
client = mlflow.tracking.MlflowClient()
run_data = client.get_run(run.info.run_id).data
assert "accuracy" in run_data.metrics
assert "epochs" in run_data.params
artifacts = client.list_artifacts(run.info.run_id)
artifacts = map(lambda x: x.path, artifacts)
assert "model" in artifacts
def test_callback_is_picklable():
cb = __MLflowTfKeras2Callback(
log_models=True, metrics_logger=BatchMetricsLogger(run_id="1234"), log_every_n_steps=5
)
pickle.dumps(cb)
tb = _TensorBoard()
pickle.dumps(tb)
@pytest.mark.large
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.1.0"), reason="This test requires tensorflow >= 2.1.0"
)
def test_tf_keras_autolog_distributed_training(random_train_data, random_one_hot_labels):
# Ref: https://www.tensorflow.org/tutorials/distribute/keras
mlflow.tensorflow.autolog()
with tf.distribute.MirroredStrategy().scope():
model = create_tf_keras_model()
fit_params = {"epochs": 10, "batch_size": 10}
with mlflow.start_run() as run:
model.fit(random_train_data, random_one_hot_labels, **fit_params)
client = mlflow.tracking.MlflowClient()
assert client.get_run(run.info.run_id).data.params.keys() >= fit_params.keys()
@pytest.mark.large
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.6.0"),
reason=("TensorFlow only has a hard dependency on Keras in version >= 2.6.0"),
)
@pytest.mark.usefixtures("clear_tf_keras_imports")
def test_fluent_autolog_with_tf_keras_preserves_v2_model_reference():
"""
Verifies that, in TensorFlow >= 2.6.0, `tensorflow.keras.Model` refers to the correct class in
the correct module after `mlflow.autolog()` is called, guarding against previously identified
compatibility issues between recent versions of TensorFlow and MLflow's internal utility for
setting up autologging import hooks.
"""
mlflow.autolog()
import tensorflow.keras
from keras.api._v2.keras import Model as ModelV2
assert tensorflow.keras.Model is ModelV2
@pytest.mark.usefixtures("clear_tf_keras_imports")
def test_import_tensorflow_with_fluent_autolog_enables_tf_autologging():
mlflow.autolog()
import tensorflow # pylint: disable=unused-variable,unused-import,reimported
assert not autologging_is_disabled(mlflow.tensorflow.FLAVOR_NAME)
# NB: In Tensorflow >= 2.6, we redirect keras autologging to tensorflow autologging
# so the original keras autologging is disabled
if Version(tf.__version__) >= Version("2.6"):
import keras # pylint: disable=unused-variable,unused-import
assert autologging_is_disabled(mlflow.keras.FLAVOR_NAME)
@pytest.mark.large
@pytest.mark.usefixtures("clear_tf_keras_imports")
def test_import_tf_keras_with_fluent_autolog_enables_tf_autologging():
mlflow.autolog()
import tensorflow.keras # pylint: disable=unused-variable,unused-import
assert not autologging_is_disabled(mlflow.tensorflow.FLAVOR_NAME)
# NB: In Tensorflow >= 2.6, we redirect keras autologging to tensorflow autologging
# so the original keras autologging is disabled
if Version(tf.__version__) >= Version("2.6"):
# NB: For TF >= 2.6, import tensorflow.keras will trigger importing keras
assert autologging_is_disabled(mlflow.keras.FLAVOR_NAME)
@pytest.mark.large
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.6.0"),
reason=("TensorFlow autologging is not used for vanilla Keras models in Keras < 2.6.0"),
)
@pytest.mark.usefixtures("clear_tf_keras_imports")
def test_import_keras_with_fluent_autolog_enables_tensorflow_autologging():
mlflow.autolog()
import keras # pylint: disable=unused-variable,unused-import
assert not autologging_is_disabled(mlflow.tensorflow.FLAVOR_NAME)
assert autologging_is_disabled(mlflow.keras.FLAVOR_NAME)
def _assert_keras_autolog_infers_model_signature_correctly(run, input_sig_spec, output_sig_spec):
artifacts_dir = run.info.artifact_uri.replace("file://", "")
client = mlflow.tracking.MlflowClient()
artifacts = [x.path for x in client.list_artifacts(run.info.run_id, "model")]
ml_model_filename = "MLmodel"
assert str(os.path.join("model", ml_model_filename)) in artifacts
ml_model_path = os.path.join(artifacts_dir, "model", ml_model_filename)
with open(ml_model_path, "r") as f:
data = yaml.load(f, Loader=yaml.FullLoader)
assert data is not None
assert "signature" in data
signature = data["signature"]
assert signature is not None
assert "inputs" in signature
assert "outputs" in signature
assert json.loads(signature["inputs"]) == input_sig_spec
assert json.loads(signature["outputs"]) == output_sig_spec
def _assert_keras_autolog_input_example_load_and_predict_with_nparray(run, random_train_data):
model_path = os.path.join(run.info.artifact_uri, "model")
model_conf = Model.load(os.path.join(model_path, "MLmodel"))
input_example = _read_example(model_conf, model_path)
np.testing.assert_array_almost_equal(input_example, random_train_data[:5])
pyfunc_model = mlflow.pyfunc.load_model(os.path.join(run.info.artifact_uri, "model"))
pyfunc_model.predict(input_example)
@pytest.mark.large
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.6.0"),
reason="TensorFlow autologging is not used for vanilla Keras models in Keras < 2.6.0",
)
def test_keras_autolog_input_example_load_and_predict_with_nparray(
random_train_data, random_one_hot_labels
):
mlflow.tensorflow.autolog(log_input_examples=True)
initial_model = create_tf_keras_model()
with mlflow.start_run() as run:
initial_model.fit(random_train_data, random_one_hot_labels)
_assert_keras_autolog_input_example_load_and_predict_with_nparray(run, random_train_data)
@pytest.mark.large
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.6.0"),
reason="TensorFlow autologging is not used for vanilla Keras models in Keras < 2.6.0",
)
def test_keras_autolog_infers_model_signature_correctly_with_nparray(
random_train_data, random_one_hot_labels
):
mlflow.tensorflow.autolog()
initial_model = create_tf_keras_model()
with mlflow.start_run() as run:
initial_model.fit(random_train_data, random_one_hot_labels)
_assert_keras_autolog_infers_model_signature_correctly(
run,
[{"type": "tensor", "tensor-spec": {"dtype": "float64", "shape": [-1, 4]}}],
[{"type": "tensor", "tensor-spec": {"dtype": "float32", "shape": [-1, 3]}}],
)
@pytest.mark.large
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.6.0"),
reason="TensorFlow autologging is not used for vanilla Keras models in Keras < 2.6.0",
)
def test_keras_autolog_input_example_load_and_predict_with_tf_dataset(fashion_mnist_tf_dataset):
mlflow.tensorflow.autolog(log_input_examples=True)
fashion_mnist_model = _create_fashion_mnist_model()
with mlflow.start_run() as run:
fashion_mnist_model.fit(fashion_mnist_tf_dataset)
model_path = os.path.join(run.info.artifact_uri, "model")
model_conf = Model.load(os.path.join(model_path, "MLmodel"))
input_example = _read_example(model_conf, model_path)
pyfunc_model = mlflow.pyfunc.load_model(os.path.join(run.info.artifact_uri, "model"))
pyfunc_model.predict(input_example)
@pytest.mark.large
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.6.0"),
reason="TensorFlow autologging is not used for vanilla Keras models in Keras < 2.6.0",
)
def test_keras_autolog_infers_model_signature_correctly_with_tf_dataset(fashion_mnist_tf_dataset):
mlflow.tensorflow.autolog()
fashion_mnist_model = _create_fashion_mnist_model()
with mlflow.start_run() as run:
fashion_mnist_model.fit(fashion_mnist_tf_dataset)
_assert_keras_autolog_infers_model_signature_correctly(
run,
[{"type": "tensor", "tensor-spec": {"dtype": "float64", "shape": [-1, 28, 28]}}],
[{"type": "tensor", "tensor-spec": {"dtype": "float32", "shape": [-1, 10]}}],
)
@pytest.mark.large
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.6.0"),
reason="TensorFlow autologging is not used for vanilla Keras models in Keras < 2.6.0",
)
def test_keras_autolog_input_example_load_and_predict_with_dict(
random_train_dict_mapping, random_one_hot_labels
):
mlflow.tensorflow.autolog(log_input_examples=True)
model = _create_model_for_dict_mapping()
with mlflow.start_run() as run:
model.fit(random_train_dict_mapping, random_one_hot_labels)
model_path = os.path.join(run.info.artifact_uri, "model")
model_conf = Model.load(os.path.join(model_path, "MLmodel"))
input_example = _read_example(model_conf, model_path)
for k, v in random_train_dict_mapping.items():
np.testing.assert_array_almost_equal(input_example[k], np.take(v, range(0, 5)))
pyfunc_model = mlflow.pyfunc.load_model(os.path.join(run.info.artifact_uri, "model"))
pyfunc_model.predict(input_example)
@pytest.mark.large
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.6.0"),
reason="TensorFlow autologging is not used for vanilla Keras models in Keras < 2.6.0",
)
def test_keras_autolog_infers_model_signature_correctly_with_dict(
random_train_dict_mapping, random_one_hot_labels
):
mlflow.tensorflow.autolog()
model = _create_model_for_dict_mapping()
with mlflow.start_run() as run:
model.fit(random_train_dict_mapping, random_one_hot_labels)
_assert_keras_autolog_infers_model_signature_correctly(
run,
[
{"name": "a", "type": "tensor", "tensor-spec": {"dtype": "float64", "shape": [-1]}},
{"name": "b", "type": "tensor", "tensor-spec": {"dtype": "float64", "shape": [-1]}},
{"name": "c", "type": "tensor", "tensor-spec": {"dtype": "float64", "shape": [-1]}},
{"name": "d", "type": "tensor", "tensor-spec": {"dtype": "float64", "shape": [-1]}},
],
[{"type": "tensor", "tensor-spec": {"dtype": "float32", "shape": [-1, 3]}}],
)
@pytest.mark.large
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.6.0"),
reason="TensorFlow autologging is not used for vanilla Keras models in Keras < 2.6.0",
)
def test_keras_autolog_input_example_load_and_predict_with_keras_sequence(keras_data_gen_sequence):
mlflow.tensorflow.autolog(log_input_examples=True)
model = create_tf_keras_model()
with mlflow.start_run() as run:
model.fit(keras_data_gen_sequence)
_assert_keras_autolog_input_example_load_and_predict_with_nparray(
run, keras_data_gen_sequence[:][0][:5]
)
@pytest.mark.large
@pytest.mark.skipif(
Version(tf.__version__) < Version("2.6.0"),
reason="TensorFlow autologging is not used for vanilla Keras models in Keras < 2.6.0",
)
def test_keras_autolog_infers_model_signature_correctly_with_keras_sequence(
keras_data_gen_sequence,
):
mlflow.tensorflow.autolog()
initial_model = create_tf_keras_model()
with mlflow.start_run() as run:
initial_model.fit(keras_data_gen_sequence)
_assert_keras_autolog_infers_model_signature_correctly(
run,
[{"type": "tensor", "tensor-spec": {"dtype": "float64", "shape": [-1, 4]}}],
[{"type": "tensor", "tensor-spec": {"dtype": "float32", "shape": [-1, 3]}}],
)
|
tests.py
|
from __future__ import unicode_literals
import threading
import warnings
from datetime import datetime, timedelta
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, DatabaseError, connections
from django.db.models.fields import Field
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.manager import BaseManager
from django.db.models.query import EmptyQuerySet, QuerySet
from django.test import (
TestCase, TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from django.utils import six
from django.utils.translation import ugettext_lazy
from .models import Article, ArticleSelectOnSave, SelfRef
class ModelInstanceCreationTests(TestCase):
def test_object_is_not_written_to_database_until_save_was_called(self):
a = Article(
id=None,
headline='Area man programs in Python',
pub_date=datetime(2005, 7, 28),
)
self.assertIsNone(a.id)
self.assertEqual(Article.objects.all().count(), 0)
# Save it into the database. You have to call save() explicitly.
a.save()
self.assertIsNotNone(a.id)
self.assertEqual(Article.objects.all().count(), 1)
def test_can_initialize_model_instance_using_positional_arguments(self):
"""
You can initialize a model instance using positional arguments,
which should match the field order as defined in the model.
"""
a = Article(None, 'Second article', datetime(2005, 7, 29))
a.save()
self.assertEqual(a.headline, 'Second article')
self.assertEqual(a.pub_date, datetime(2005, 7, 29, 0, 0))
def test_can_create_instance_using_kwargs(self):
a = Article(
id=None,
headline='Third article',
pub_date=datetime(2005, 7, 30),
)
a.save()
self.assertEqual(a.headline, 'Third article')
self.assertEqual(a.pub_date, datetime(2005, 7, 30, 0, 0))
def test_autofields_generate_different_values_for_each_instance(self):
a1 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a2 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a3 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
self.assertNotEqual(a3.id, a1.id)
self.assertNotEqual(a3.id, a2.id)
def test_can_mix_and_match_position_and_kwargs(self):
# You can also mix and match position and keyword arguments, but
# be sure not to duplicate field information.
a = Article(None, 'Fourth article', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Fourth article')
def test_cannot_create_instance_with_invalid_kwargs(self):
six.assertRaisesRegex(
self,
TypeError,
"'foo' is an invalid keyword argument for this function",
Article,
id=None,
headline='Some headline',
pub_date=datetime(2005, 7, 31),
foo='bar',
)
def test_can_leave_off_value_for_autofield_and_it_gets_value_on_save(self):
"""
You can leave off the value for an AutoField when creating an
object, because it'll get filled in automatically when you save().
"""
a = Article(headline='Article 5', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Article 5')
self.assertNotEqual(a.id, None)
def test_leaving_off_a_field_with_default_set_the_default_will_be_saved(self):
a = Article(pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Default headline')
def test_for_datetimefields_saves_as_much_precision_as_was_given(self):
"""as much precision in *seconds*"""
a1 = Article(
headline='Article 7',
pub_date=datetime(2005, 7, 31, 12, 30),
)
a1.save()
self.assertEqual(Article.objects.get(id__exact=a1.id).pub_date,
datetime(2005, 7, 31, 12, 30))
a2 = Article(
headline='Article 8',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a2.save()
self.assertEqual(Article.objects.get(id__exact=a2.id).pub_date,
datetime(2005, 7, 31, 12, 30, 45))
def test_saving_an_object_again_does_not_create_a_new_object(self):
a = Article(headline='original', pub_date=datetime(2014, 5, 16))
a.save()
current_id = a.id
a.save()
self.assertEqual(a.id, current_id)
a.headline = 'Updated headline'
a.save()
self.assertEqual(a.id, current_id)
def test_querysets_checking_for_membership(self):
headlines = [
'Area man programs in Python', 'Second article', 'Third article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
a = Article(headline='Some headline', pub_date=some_pub_date)
a.save()
# You can use 'in' to test for membership...
self.assertIn(a, Article.objects.all())
# ... but there will often be more efficient ways if that is all you need:
self.assertTrue(Article.objects.filter(id=a.id).exists())
class ModelTest(TestCase):
def test_objects_attribute_is_only_available_on_the_class_itself(self):
six.assertRaisesRegex(
self,
AttributeError,
"Manager isn't accessible via Article instances",
getattr,
Article(),
"objects",
)
self.assertFalse(hasattr(Article(), 'objects'))
self.assertTrue(hasattr(Article, 'objects'))
def test_queryset_delete_removes_all_items_in_that_queryset(self):
headlines = [
'An article', 'Article One', 'Amazing article', 'Boring article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'),
["<Article: Amazing article>",
"<Article: An article>",
"<Article: Article One>",
"<Article: Boring article>"])
Article.objects.filter(headline__startswith='A').delete()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'),
["<Article: Boring article>"])
def test_not_equal_and_equal_operators_behave_as_expected_on_instances(self):
some_pub_date = datetime(2014, 5, 16, 12, 1)
a1 = Article.objects.create(headline='First', pub_date=some_pub_date)
a2 = Article.objects.create(headline='Second', pub_date=some_pub_date)
self.assertNotEqual(a1, a2)
self.assertEqual(a1, Article.objects.get(id__exact=a1.id))
self.assertNotEqual(Article.objects.get(id__exact=a1.id), Article.objects.get(id__exact=a2.id))
@skipUnlessDBFeature('supports_microsecond_precision')
def test_microsecond_precision(self):
# In PostgreSQL, microsecond-level precision is available.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(Article.objects.get(pk=a9.pk).pub_date,
datetime(2005, 7, 31, 12, 30, 45, 180))
@skipIfDBFeature('supports_microsecond_precision')
def test_microsecond_precision_not_supported(self):
# In MySQL, microsecond-level precision isn't available. You'll lose
# microsecond-level precision once the data is saved.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(Article.objects.get(id__exact=a9.id).pub_date,
datetime(2005, 7, 31, 12, 30, 45))
def test_manually_specify_primary_key(self):
# You can manually specify the primary key when creating a new object.
a101 = Article(
id=101,
headline='Article 101',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a101.save()
a101 = Article.objects.get(pk=101)
self.assertEqual(a101.headline, 'Article 101')
def test_create_method(self):
# You can create saved objects in a single step
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
self.assertEqual(Article.objects.get(headline="Article 10"), a10)
def test_year_lookup_edge_case(self):
# Edge-case test: A year lookup should retrieve all objects in
# the given year, including Jan. 1 and Dec. 31.
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertQuerysetEqual(Article.objects.filter(pub_date__year=2008),
["<Article: Article 11>", "<Article: Article 12>"])
def test_unicode_data(self):
# Unicode data works, too.
a = Article(
headline='\u6797\u539f \u3081\u3050\u307f',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.get(pk=a.id).headline,
'\u6797\u539f \u3081\u3050\u307f')
def test_hash_function(self):
# Model instances have a hash function, so they can be used in sets
# or as dictionary keys. Two models compare as equal if their primary
# keys are equal.
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a11 = Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
a12 = Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
s = {a10, a11, a12}
self.assertIn(Article.objects.get(headline='Article 11'), s)
def test_field_ordering(self):
"""
Field instances have a `__lt__` comparison function to define an
ordering based on their creation. Prior to #17851 this ordering
comparison relied on the now unsupported `__cmp__` and was assuming
compared objects were both Field instances raising `AttributeError`
when it should have returned `NotImplemented`.
"""
f1 = Field()
f2 = Field(auto_created=True)
f3 = Field()
self.assertLess(f2, f1)
self.assertGreater(f3, f1)
self.assertIsNotNone(f1)
self.assertNotIn(f2, (None, 1, ''))
def test_extra_method_select_argument_with_dashes_and_values(self):
# The 'select' argument to extra() supports names with dashes in
# them, as long as you use values().
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
dicts = Article.objects.filter(
pub_date__year=2008).extra(
select={'dashed-value': '1'}).values('headline', 'dashed-value')
self.assertEqual([sorted(d.items()) for d in dicts],
[[('dashed-value', 1), ('headline', 'Article 11')], [('dashed-value', 1), ('headline', 'Article 12')]])
def test_extra_method_select_argument_with_dashes(self):
# If you use 'select' with extra() and names containing dashes on a
# query that's *not* a values() query, those extra 'select' values
# will silently be ignored.
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
articles = Article.objects.filter(
pub_date__year=2008).extra(select={'dashed-value': '1', 'undashedvalue': '2'})
self.assertEqual(articles[0].undashedvalue, 2)
def test_create_relation_with_ugettext_lazy(self):
"""
Test that ugettext_lazy objects work when saving model instances
through various methods. Refs #10498.
"""
notlazy = 'test'
lazy = ugettext_lazy(notlazy)
Article.objects.create(headline=lazy, pub_date=datetime.now())
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# test that assign + save works with Promise objects
article.headline = lazy
article.save()
self.assertEqual(article.headline, notlazy)
# test .update()
Article.objects.update(headline=lazy)
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# still test bulk_create()
Article.objects.all().delete()
Article.objects.bulk_create([Article(headline=lazy, pub_date=datetime.now())])
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
def test_emptyqs(self):
# Can't be instantiated
with self.assertRaises(TypeError):
EmptyQuerySet()
self.assertIsInstance(Article.objects.none(), EmptyQuerySet)
def test_emptyqs_values(self):
# test for #15959
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
qs = Article.objects.none().values_list('pk')
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(len(qs), 0)
def test_emptyqs_customqs(self):
# A hacky test for custom QuerySet subclass - refs #17271
Article.objects.create(headline='foo', pub_date=datetime.now())
class CustomQuerySet(QuerySet):
def do_something(self):
return 'did something'
qs = Article.objects.all()
qs.__class__ = CustomQuerySet
qs = qs.none()
with self.assertNumQueries(0):
self.assertEqual(len(qs), 0)
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(qs.do_something(), 'did something')
def test_emptyqs_values_order(self):
# Tests for ticket #17712
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().values_list('id').order_by('id')), 0)
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().filter(
id__in=Article.objects.values_list('id', flat=True))), 0)
@skipUnlessDBFeature('can_distinct_on_fields')
def test_emptyqs_distinct(self):
# Tests for #19426
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().distinct('headline', 'pub_date')), 0)
def test_ticket_20278(self):
sr = SelfRef.objects.create()
with self.assertRaises(ObjectDoesNotExist):
SelfRef.objects.get(selfref=sr)
def test_eq(self):
self.assertEqual(Article(id=1), Article(id=1))
self.assertNotEqual(Article(id=1), object())
self.assertNotEqual(object(), Article(id=1))
a = Article()
self.assertEqual(a, a)
self.assertNotEqual(Article(), a)
def test_hash(self):
# Value based on PK
self.assertEqual(hash(Article(id=1)), hash(1))
with self.assertRaises(TypeError):
# No PK value -> unhashable (because save() would then change
# hash)
hash(Article())
class ModelLookupTest(TestCase):
def setUp(self):
# Create an Article.
self.a = Article(
id=None,
headline='Area woman programs in Python',
pub_date=datetime(2005, 7, 28),
)
# Save it into the database. You have to call save() explicitly.
self.a.save()
def test_all_lookup(self):
# Change values by changing the attributes, then calling save().
self.a.headline = 'Area man programs in Python'
self.a.save()
# Article.objects.all() returns all the articles in the database.
self.assertQuerysetEqual(Article.objects.all(),
['<Article: Area man programs in Python>'])
def test_rich_lookup(self):
# Django provides a rich database lookup API.
self.assertEqual(Article.objects.get(id__exact=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline__startswith='Area woman'), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7, pub_date__day=28), self.a)
self.assertEqual(Article.objects.get(pub_date__week_day=5), self.a)
def test_equal_lookup(self):
# The "__exact" lookup type can be omitted, as a shortcut.
self.assertEqual(Article.objects.get(id=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline='Area woman programs in Python'), self.a)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005),
['<Article: Area woman programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2004),
[],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005, pub_date__month=7),
['<Article: Area woman programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=5),
['<Article: Area woman programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=6),
[],
)
def test_does_not_exist(self):
# Django raises an Article.DoesNotExist exception for get() if the
# parameters don't match any object.
six.assertRaisesRegex(
self,
ObjectDoesNotExist,
"Article matching query does not exist.",
Article.objects.get,
id__exact=2000,
)
        # To avoid dict-ordering related errors, check only one lookup
        # in a single assert.
self.assertRaises(
ObjectDoesNotExist,
Article.objects.get,
pub_date__year=2005,
pub_date__month=8,
)
six.assertRaisesRegex(
self,
ObjectDoesNotExist,
"Article matching query does not exist.",
Article.objects.get,
pub_date__week_day=6,
)
def test_lookup_by_primary_key(self):
# Lookup by a primary key is the most common case, so Django
# provides a shortcut for primary-key exact lookups.
# The following is identical to articles.get(id=a.id).
self.assertEqual(Article.objects.get(pk=self.a.id), self.a)
# pk can be used as a shortcut for the primary key name in any query.
self.assertQuerysetEqual(Article.objects.filter(pk__in=[self.a.id]),
["<Article: Area woman programs in Python>"])
# Model instances of the same type and same ID are considered equal.
a = Article.objects.get(pk=self.a.id)
b = Article.objects.get(pk=self.a.id)
self.assertEqual(a, b)
def test_too_many(self):
# Create a very similar object
a = Article(
id=None,
headline='Area man programs in Python',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.count(), 2)
# Django raises an Article.MultipleObjectsReturned exception if the
# lookup matches more than one object
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned 2!",
Article.objects.get,
headline__startswith='Area',
)
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned 2!",
Article.objects.get,
pub_date__year=2005,
)
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned 2!",
Article.objects.get,
pub_date__year=2005,
pub_date__month=7,
)
class ConcurrentSaveTests(TransactionTestCase):
available_apps = ['basic']
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_concurrent_delete_with_save(self):
"""
Test fetching, deleting and finally saving an object - we should get
an insert in this case.
"""
a = Article.objects.create(headline='foo', pub_date=datetime.now())
exceptions = []
def deleter():
try:
                # Do not delete the object `a` directly - doing so alters its state.
Article.objects.filter(pk=a.pk).delete()
except Exception as e:
exceptions.append(e)
finally:
connections[DEFAULT_DB_ALIAS].close()
self.assertEqual(len(exceptions), 0)
t = threading.Thread(target=deleter)
t.start()
t.join()
a.save()
self.assertEqual(Article.objects.get(pk=a.pk).headline, 'foo')
class ManagerTest(TestCase):
QUERYSET_PROXY_METHODS = [
'none',
'count',
'dates',
'datetimes',
'distinct',
'extra',
'get',
'get_or_create',
'update_or_create',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'earliest',
'latest',
'first',
'last',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
'_insert',
'_update',
'raw',
]
def test_manager_methods(self):
"""
This test ensures that the correct set of methods from `QuerySet`
are copied onto `Manager`.
It's particularly useful to prevent accidentally leaking new methods
into `Manager`. New `QuerySet` methods that should also be copied onto
`Manager` will need to be added to `ManagerTest.QUERYSET_PROXY_METHODS`.
"""
self.assertEqual(
sorted(BaseManager._get_queryset_methods(QuerySet).keys()),
sorted(self.QUERYSET_PROXY_METHODS),
)
class SelectOnSaveTests(TestCase):
def test_select_on_save(self):
a1 = Article.objects.create(pub_date=datetime.now())
with self.assertNumQueries(1):
a1.save()
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(2):
asos.save()
with self.assertNumQueries(1):
asos.save(force_update=True)
Article.objects.all().delete()
with self.assertRaises(DatabaseError):
with self.assertNumQueries(1):
asos.save(force_update=True)
def test_select_on_save_lying_update(self):
"""
Test that select_on_save works correctly if the database
doesn't return correct information about matched rows from
UPDATE.
"""
        # Change the manager to not return "row matched" for update().
        # We are going to change the Article's _base_manager class
        # dynamically. This is a bit of a hack, but it seems hard to
        # test this properly otherwise. We patch Article's manager (rather
        # than ArticleSelectOnSave's) because proxy models use their parent
        # model's _base_manager.
orig_class = Article._base_manager.__class__
class FakeQuerySet(QuerySet):
# Make sure the _update method below is in fact called.
called = False
def _update(self, *args, **kwargs):
FakeQuerySet.called = True
super(FakeQuerySet, self)._update(*args, **kwargs)
return 0
class FakeManager(orig_class):
def get_queryset(self):
return FakeQuerySet(self.model)
try:
Article._base_manager.__class__ = FakeManager
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(3):
asos.save()
self.assertTrue(FakeQuerySet.called)
# This is not wanted behavior, but this is how Django has always
# behaved for databases that do not return correct information
# about matched rows for UPDATE.
with self.assertRaises(DatabaseError):
asos.save(force_update=True)
with self.assertRaises(DatabaseError):
asos.save(update_fields=['pub_date'])
finally:
Article._base_manager.__class__ = orig_class
class ModelRefreshTests(TestCase):
def _truncate_ms(self, val):
# MySQL < 5.6.4 removes microseconds from the datetimes which can cause
# problems when comparing the original value to that loaded from DB
return val - timedelta(microseconds=val.microsecond)
def test_refresh(self):
a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
Article.objects.filter(pk=a.pk).update(headline='new headline')
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.headline, 'new headline')
orig_pub_date = a.pub_date
new_pub_date = a.pub_date + timedelta(10)
Article.objects.update(headline='new headline 2', pub_date=new_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db(fields=['headline'])
self.assertEqual(a.headline, 'new headline 2')
self.assertEqual(a.pub_date, orig_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.pub_date, new_pub_date)
def test_refresh_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create()
s3 = SelfRef.objects.create(selfref=s1)
s3_copy = SelfRef.objects.get(pk=s3.pk)
s3_copy.selfref.touched = True
s3.selfref = s2
s3.save()
with self.assertNumQueries(1):
s3_copy.refresh_from_db()
with self.assertNumQueries(1):
            # The old related instance was thrown away (the selfref_id has
            # changed). It needs to be reloaded on access, so one query is
            # executed.
self.assertFalse(hasattr(s3_copy.selfref, 'touched'))
self.assertEqual(s3_copy.selfref, s2)
def test_refresh_unsaved(self):
pub_date = self._truncate_ms(datetime.now())
a = Article.objects.create(pub_date=pub_date)
a2 = Article(id=a.pk)
with self.assertNumQueries(1):
a2.refresh_from_db()
self.assertEqual(a2.pub_date, pub_date)
self.assertEqual(a2._state.db, "default")
def test_refresh_no_fields(self):
a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
with self.assertNumQueries(0):
a.refresh_from_db(fields=[])
class TestRelatedObjectDeprecation(TestCase):
def test_field_related_deprecation(self):
field = SelfRef._meta.get_field('selfref')
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
self.assertIsInstance(field.related, ForeignObjectRel)
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns.pop().message),
'Usage of field.related has been deprecated. Use field.rel instead.'
)
|
Hiwin_RT605_ArmCommand_Socket_20190627192719.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
## multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
Socket = 0
data = '0' # initial value of the data to transmit
Arm_feedback = 1 # assume the arm is busy
NAME = 'socket_server'
arm_mode_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # end the generator; under PEP 479, raising StopIteration here would become a RuntimeError
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
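# Illustrative usage sketch (not part of the original file): the switch class
# above emulates a C-style switch statement via "for case in switch(value)".
# The handler names below are hypothetical.
#
#     for case in switch(command):
#         if case(1):
#             handle_move()        # hypothetical handler for command 1
#             break
#         if case(2, 3):           # several values may share one branch
#             handle_stop()
#             break
#         if case():               # an empty case() acts as the default branch
#             handle_unknown()
#             break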
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self,ArmState,SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(0,0)
class client():
def __init__(self):
self.get_connect()
def get_connect(self):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect(('192.168.0.1', 8080))
def send(self, msg):
        self.s.send(msg.encode('utf-8')) # encode with utf-8; other encodings exist, but utf-8 works for str
def get_recieve(self):
        data = self.s.recv(1024) # 1024 is the buffer size, i.e. how much is received at a time
        data = data.decode('utf-8')
return data
def point_data(x,y,z,pitch,roll,yaw): ## receive pose data sent from the strategy side
pos.x = x
pos.y = y
pos.z = z
pos.pitch = pitch
pos.roll = roll
pos.yaw = yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive arm mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = action
socket_cmd.grip = grip
socket_cmd.ra = ra
socket_cmd.setvel = setvel
socket_cmd.setboth = setboth
arm_mode_flag = True
#Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive speed mode data sent from the strategy side
socket_cmd.Speedmode = speedmode
def socket_talker(): ## create the server node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(10) # 10hz
print ("Ready to connect")
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = [state_feedback.ArmState,state_feedback.SentFlag]
pub.publish(state)
rate.sleep()
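# Illustrative sketch (not part of the original file): a strategy-side node can
# read the [ArmState, SentFlag] pair published above by subscribing to the same
# 'chatter' topic; the callback name here is hypothetical.
#
#     def on_state(msg):                   # msg is a std_msgs/Int32MultiArray
#         arm_state, sent_flag = msg.data
#         print(arm_state, sent_flag)
#
#     rospy.Subscriber('chatter', Int32MultiArray, on_state)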
##---------- socket packet transmission --------------##
##--------------- send arm commands over the socket -----------------
def Socket_command():
global Socket,arm_mode_flag,data
if arm_mode_flag == True:
arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
            #------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
            #------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
            #------- set arm rapid / safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
        socket_cmd.action = 6 ## switch back to the initial mode state
        print(data)
        print("Socket:", Socket)
        Socket.send(data.encode('utf-8')) # send the command string over the socket
##-----------socket client--------
def socket_client():
global Socket
try:
        # Socket = client()  # unused: the instance was immediately overwritten by the raw socket below
        Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        Socket.connect(('192.168.0.1', 8080)) # iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
print('Connection has been successful')
except socket.error as msg:
print(msg)
sys.exit(1)
#print('Connection has been successful')
print(Socket.recv(1024))
Socket_feedback(Socket)
    # while 1:
    #     feedback_str = Socket.recv(1024)
    #     # the arm side reports the arm state
    #     if str(feedback_str[2]) == '48':  # F: arm is Ready and can accept the next motion command
    #         state_feedback.ArmState = 0
    #     if str(feedback_str[2]) == '49':  # T: arm is busy and cannot execute the next motion command
    #         state_feedback.ArmState = 1
    #     if str(feedback_str[2]) == '54':  # 6: strategy finished
    #         state_feedback.ArmState = 6
    #         print("shutdown")
    #     # check the sent flag
    #     if str(feedback_str[4]) == '48':  # returned 0 (false)
    #         state_feedback.SentFlag = 0
    #     if str(feedback_str[4]) == '49':  # returned 1 (true)
    #         state_feedback.SentFlag = 1
    #     ## --------------- socket arm-command transmission end -----------------
    #     if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
    #         break
rospy.on_shutdown(myhook)
Socket.close()
def Socket_feedback(s):
Socket = s
while 1:
feedback_str = Socket.recv(1024)
        # the arm side reports the arm state
        if str(feedback_str[2]) == '48':  # F: arm is Ready and can accept the next motion command
state_feedback.ArmState = 0
        if str(feedback_str[2]) == '49':  # T: arm is busy and cannot execute the next motion command
state_feedback.ArmState = 1
        if str(feedback_str[2]) == '54':  # 6: strategy finished
state_feedback.ArmState = 6
print("shutdown")
        # check the sent flag
        if str(feedback_str[4]) == '48':  # returned 0 (false)
state_feedback.SentFlag = 0
        if str(feedback_str[4]) == '49':  # returned 1 (true)
state_feedback.SentFlag = 1
        ## --------------- socket arm-command transmission end -----------------
if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
break
##-----------socket client end--------
##------------- socket packet transmission end --------------##
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 6 ## reset to the initial mode state
    ## multithreading
t = threading.Thread(target=socket_client)
    t.start() # start the thread
#time.sleep(1)
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
    ## multithreading end
|
youtube-dl-server.py
|
from __future__ import unicode_literals
import json
import os
import subprocess
from queue import Queue
from bottle import route, run, Bottle, request, static_file, template
from threading import Thread
import youtube_dl
from pathlib import Path
from collections import ChainMap
from os import listdir
from os.path import isfile, join
app = Bottle()
app_defaults = {
'YDL_FORMAT': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]',
'YDL_EXTRACT_AUDIO_FORMAT': None,
'YDL_EXTRACT_AUDIO_QUALITY': '192',
'YDL_RECODE_VIDEO_FORMAT': None,
'YDL_OUTPUT_TEMPLATE': '/youtube-dl/%(title)s [%(id)s].%(ext)s',
'YDL_ARCHIVE_FILE': None,
'YDL_SERVER_HOST': '0.0.0.0',
'YDL_SERVER_PORT': 8080,
}
@app.route('/youtube-dl')
def dl_queue_list():
return static_file('index.html', root='./')
@app.route('/youtube-dl/pub', method='GET')
def list_completed():
downloadPath = '/youtube-dl'
completed = ["<p><a href='/youtube-dl/download/{}'>{}</a></p>".format(f,f) for f in listdir(downloadPath) if isfile(join(downloadPath, f))]
#return { "success" : True, "files" : list(completed) }
return template('{{name}}', name=completed)
@app.route('/youtube-dl/download/:filename', method='GET')
def download_file(filename):
return static_file(filename, root='/youtube-dl/')
@app.route('/youtube-dl/static/:filename#.*#')
def server_static(filename):
return static_file(filename, root='./static')
@app.route('/youtube-dl/q', method='GET')
def q_size():
return {"success": True, "size": json.dumps(list(dl_q.queue))}
@app.route('/youtube-dl/q', method='POST')
def q_put():
url = request.forms.get("url")
options = {
'format': request.forms.get("format")
}
if not url:
return {"success": False, "error": "/q called without a 'url' query param"}
dl_q.put((url, options))
print("Added url " + url + " to the download queue")
return {"success": True, "url": url, "options": options}
@app.route("/youtube-dl/update", method="GET")
def update():
command = ["pip", "install", "--upgrade", "youtube-dl"]
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = proc.communicate()
return {
"output": output.decode('ascii'),
"error": error.decode('ascii')
}
def dl_worker():
while not done:
url, options = dl_q.get()
download(url, options)
dl_q.task_done()
def get_ydl_options(request_options):
request_vars = {
'YDL_EXTRACT_AUDIO_FORMAT': None,
'YDL_RECODE_VIDEO_FORMAT': None,
}
requested_format = request_options.get('format', 'bestvideo')
if requested_format in ['aac', 'flac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
request_vars['YDL_EXTRACT_AUDIO_FORMAT'] = requested_format
elif requested_format == 'bestaudio':
request_vars['YDL_EXTRACT_AUDIO_FORMAT'] = 'best'
elif requested_format in ['mp4', 'flv', 'webm', 'ogg', 'mkv', 'avi']:
request_vars['YDL_RECODE_VIDEO_FORMAT'] = requested_format
ydl_vars = ChainMap(request_vars, os.environ, app_defaults)
postprocessors = []
if(ydl_vars['YDL_EXTRACT_AUDIO_FORMAT']):
postprocessors.append({
'key': 'FFmpegExtractAudio',
'preferredcodec': ydl_vars['YDL_EXTRACT_AUDIO_FORMAT'],
'preferredquality': ydl_vars['YDL_EXTRACT_AUDIO_QUALITY'],
})
if(ydl_vars['YDL_RECODE_VIDEO_FORMAT']):
postprocessors.append({
'key': 'FFmpegVideoConvertor',
'preferedformat': ydl_vars['YDL_RECODE_VIDEO_FORMAT'],
})
return {
'format': ydl_vars['YDL_FORMAT'],
'postprocessors': postprocessors,
'outtmpl': ydl_vars['YDL_OUTPUT_TEMPLATE'],
'download_archive': ydl_vars['YDL_ARCHIVE_FILE']
}
def download(url, request_options):
with youtube_dl.YoutubeDL(get_ydl_options(request_options)) as ydl:
ydl.download([url])
dl_q = Queue()
done = False
dl_thread = Thread(target=dl_worker)
dl_thread.start()
print("Updating youtube-dl to the newest version")
updateResult = update()
print(updateResult["output"])
print(updateResult["error"])
print("Started download thread")
app_vars = ChainMap(os.environ, app_defaults)
app.run(host=app_vars['YDL_SERVER_HOST'], port=app_vars['YDL_SERVER_PORT'], debug=True)
done = True
dl_thread.join()
|
processify.py
|
# https://gist.github.com/Chiron1991/8199fc1a41c2107982053aba809838c6
#
# tests functions from the gist were moved to utilities.tests.test_processify
# so they can be picked up by our test runner
import sys
import traceback
from functools import wraps
from multiprocessing import Process, Queue
def processify(func):
"""
Decorator to run a function as a process.
Be sure that every argument and the return value
is *pickable*.
The created process is joined, so the code does not
run in parallel.
"""
def process_func(q, *args, **kwargs):
try:
ret = func(*args, **kwargs)
except Exception:
ex_type, ex_value, tb = sys.exc_info()
error = ex_type, ex_value, ''.join(traceback.format_tb(tb))
ret = None
else:
error = None
q.put((ret, error))
# register original function with different name
# in sys.modules so it is pickable
process_func.__name__ = func.__name__ + 'processify_func'
setattr(sys.modules[__name__], process_func.__name__, process_func)
@wraps(func)
def wrapper(*args, **kwargs):
q = Queue()
p = Process(target=process_func, args=(q,) + args, kwargs=kwargs)
p.start()
ret, error = q.get()
p.join()
if error:
ex_type, ex_value, tb_str = error
message = f'{str(ex_value)} (in subprocess)\n{tb_str}'
raise ex_type(message)
return ret
return wrapper
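# Illustrative usage sketch (not part of the original gist): the decorated
# function runs in a child process and its return value travels back through
# the queue; the function below is hypothetical.
#
#     @processify
#     def compute(x):
#         return x * x
#
#     if __name__ == '__main__':
#         print(compute(4))  # prints 16, computed in a separate process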
|
_app.py
|
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
"""
WebSocketApp provides higher level APIs.
"""
import select
import sys
import threading
import time
import traceback
import ChromeREPL.libs.six as six
from ._abnf import ABNF
from ._core import WebSocket, getdefaulttimeout
from ._exceptions import *
import ChromeREPL.libs.websocket._logging as _logging
__all__ = ["WebSocketApp"]
class WebSocketApp(object):
"""
Higher level of APIs are provided.
The interface is like JavaScript WebSocket object.
"""
def __init__(self, url, header=None,
on_open=None, on_message=None, on_error=None,
on_close=None, on_ping=None, on_pong=None,
on_cont_message=None,
keep_running=True, get_mask_key=None, cookie=None,
subprotocols=None,
on_data=None):
"""
url: websocket url.
header: custom header for websocket handshake.
on_open: callable object which is called at opening websocket.
this function has one argument. The argument is this class object.
        on_message: callable object which is called when data is received.
on_message has 2 arguments.
The 1st argument is this class object.
The 2nd argument is utf-8 string which we get from the server.
on_error: callable object which is called when we get error.
on_error has 2 arguments.
The 1st argument is this class object.
The 2nd argument is exception object.
        on_close: callable object which is called when the connection is closed.
this function has one argument. The argument is this class object.
on_cont_message: callback object which is called when receive continued
frame data.
on_cont_message has 3 arguments.
The 1st argument is this class object.
The 2nd argument is utf-8 string which we get from the server.
          The 3rd argument is a continue flag; if 0, the data continues
          in the next frame.
on_data: callback object which is called when a message received.
This is called before on_message or on_cont_message,
and then on_message or on_cont_message is called.
on_data has 4 argument.
The 1st argument is this class object.
The 2nd argument is utf-8 string which we get from the server.
          The 3rd argument is the data type: ABNF.OPCODE_TEXT or ABNF.OPCODE_BINARY will be passed.
          The 4th argument is a continue flag; if 0, the data continues in the next frame.
        keep_running: this parameter is obsolete and is ignored.
get_mask_key: a callable to produce new mask keys,
see the WebSocket.set_mask_key's docstring for more information
subprotocols: array of available sub protocols. default is None.
"""
self.url = url
self.header = header if header is not None else []
self.cookie = cookie
self.on_open = on_open
self.on_message = on_message
self.on_data = on_data
self.on_error = on_error
self.on_close = on_close
self.on_ping = on_ping
self.on_pong = on_pong
self.on_cont_message = on_cont_message
self.keep_running = False
self.get_mask_key = get_mask_key
self.sock = None
self.last_ping_tm = 0
self.last_pong_tm = 0
self.subprotocols = subprotocols
def send(self, data, opcode=ABNF.OPCODE_TEXT):
"""
send message.
data: message to send. If you set opcode to OPCODE_TEXT,
data must be utf-8 string or unicode.
opcode: operation code of data. default is OPCODE_TEXT.
"""
if not self.sock or self.sock.send(data, opcode) == 0:
raise WebSocketConnectionClosedException(
"Connection is already closed.")
def close(self, **kwargs):
"""
close websocket connection.
"""
self.keep_running = False
if self.sock:
self.sock.close(**kwargs)
def _send_ping(self, interval, event):
while not event.wait(interval):
self.last_ping_tm = time.time()
if self.sock:
try:
self.sock.ping()
except Exception as ex:
_logging.warning("send_ping routine terminated: {}".format(ex))
break
def run_forever(self, sockopt=None, sslopt=None,
ping_interval=0, ping_timeout=None,
http_proxy_host=None, http_proxy_port=None,
http_no_proxy=None, http_proxy_auth=None,
skip_utf8_validation=False,
host=None, origin=None):
"""
run event loop for WebSocket framework.
        This is an infinite loop that stays alive while the websocket is available.
sockopt: values for socket.setsockopt.
sockopt must be tuple
and each element is argument of sock.setsockopt.
sslopt: ssl socket optional dict.
        ping_interval: automatically send a "ping" command
          every specified period (seconds).
          If set to 0, pings are not sent automatically.
ping_timeout: timeout(second) if the pong message is not received.
http_proxy_host: http proxy host name.
http_proxy_port: http proxy port. If not set, set to 80.
        http_no_proxy: host names that should not go through the proxy.
skip_utf8_validation: skip utf8 validation.
host: update host header.
origin: update origin header.
"""
if not ping_timeout or ping_timeout <= 0:
ping_timeout = None
if ping_timeout and ping_interval and ping_interval <= ping_timeout:
raise WebSocketException("Ensure ping_interval > ping_timeout")
if sockopt is None:
sockopt = []
if sslopt is None:
sslopt = {}
if self.sock:
raise WebSocketException("socket is already opened")
thread = None
close_frame = None
self.keep_running = True
try:
self.sock = WebSocket(
self.get_mask_key, sockopt=sockopt, sslopt=sslopt,
fire_cont_frame=self.on_cont_message and True or False,
skip_utf8_validation=skip_utf8_validation)
self.sock.settimeout(getdefaulttimeout())
self.sock.connect(
self.url, header=self.header, cookie=self.cookie,
http_proxy_host=http_proxy_host,
http_proxy_port=http_proxy_port, http_no_proxy=http_no_proxy,
http_proxy_auth=http_proxy_auth, subprotocols=self.subprotocols,
host=host, origin=origin)
self._callback(self.on_open)
if ping_interval:
event = threading.Event()
thread = threading.Thread(
target=self._send_ping, args=(ping_interval, event))
thread.setDaemon(True)
thread.start()
while self.sock.connected:
r, w, e = select.select(
(self.sock.sock, ), (), (), ping_timeout or 10) # Use a 10 second timeout to avoid to wait forever on close
if not self.keep_running:
break
if r:
op_code, frame = self.sock.recv_data_frame(True)
if op_code == ABNF.OPCODE_CLOSE:
close_frame = frame
break
elif op_code == ABNF.OPCODE_PING:
self._callback(self.on_ping, frame.data)
elif op_code == ABNF.OPCODE_PONG:
self.last_pong_tm = time.time()
self._callback(self.on_pong, frame.data)
elif op_code == ABNF.OPCODE_CONT and self.on_cont_message:
self._callback(self.on_data, frame.data,
frame.opcode, frame.fin)
self._callback(self.on_cont_message,
frame.data, frame.fin)
else:
data = frame.data
if six.PY3 and op_code == ABNF.OPCODE_TEXT:
data = data.decode("utf-8")
self._callback(self.on_data, data, frame.opcode, True)
self._callback(self.on_message, data)
if ping_timeout and self.last_ping_tm \
and time.time() - self.last_ping_tm > ping_timeout \
and self.last_ping_tm - self.last_pong_tm > ping_timeout:
raise WebSocketTimeoutException("ping/pong timed out")
except (Exception, KeyboardInterrupt, SystemExit) as e:
self._callback(self.on_error, e)
if isinstance(e, SystemExit):
# propagate SystemExit further
raise
finally:
            if thread and thread.is_alive():
event.set()
thread.join()
self.keep_running = False
self.sock.close()
close_args = self._get_close_args(
close_frame.data if close_frame else None)
self._callback(self.on_close, *close_args)
self.sock = None
def _get_close_args(self, data):
""" this functions extracts the code, reason from the close body
if they exists, and if the self.on_close except three arguments """
import inspect
# if the on_close callback is "old", just return empty list
if sys.version_info < (3, 0):
if not self.on_close or len(inspect.getargspec(self.on_close).args) != 3:
return []
else:
if not self.on_close or len(inspect.getfullargspec(self.on_close).args) != 3:
return []
if data and len(data) >= 2:
code = 256 * six.byte2int(data[0:1]) + six.byte2int(data[1:2])
reason = data[2:].decode('utf-8')
return [code, reason]
return [None, None]
def _callback(self, callback, *args):
if callback:
try:
callback(self, *args)
except Exception as e:
_logging.error("error from callback {}: {}".format(callback, e))
if _logging.isEnabledForDebug():
_, _, tb = sys.exc_info()
traceback.print_tb(tb)
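# Illustrative sketch (not part of the original module): a minimal client built
# on WebSocketApp. The URL and callback body are assumptions for the example;
# note that run_forever() requires ping_interval > ping_timeout when both are set.
#
#     def on_message(ws, message):
#         print(message)
#
#     ws = WebSocketApp("ws://example.com/ws", on_message=on_message)
#     ws.run_forever(ping_interval=30, ping_timeout=10)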
|
generate_REAL.py
|
import imageio
import os
import glob
import numpy as np
import tensorflow as tf
import threading
from time import time
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--labelpath', type=str, dest='labelpath', default='DIV2K_train_HR/*.png')
parser.add_argument('--datapath', type=str, dest='datapath', default='DIV2K_train_REAL_NOISE/*.png')
parser.add_argument('--labelpath2', type=str, dest='labelpath2', default='SIDD/GT/*.PNG')
parser.add_argument('--datapath2', type=str, dest='datapath2', default='SIDD/NOISY/*.PNG')
args=parser.parse_args()
labelpath=args.labelpath
datapath=args.datapath
labelpath2=args.labelpath2
datapath2=args.datapath2
tfrecord_file = "train_REAL_NOISE.tfrecord"
patches=[]
labels=[]
def imread(path):
img = imageio.imread(path)
return img
def gradients(x):
return np.mean(((x[:-1, :-1, :] - x[1:, :-1, :]) ** 2 + (x[:-1, :-1, :] - x[:-1, 1:, :]) ** 2))
def modcrop(imgs, modulo):
sz=imgs.shape
sz=np.asarray(sz)
if len(sz)==2:
sz = sz - sz% modulo
out = imgs[0:sz[0], 0:sz[1]]
elif len(sz)==3:
szt = sz[0:2]
szt = szt - szt % modulo
out = imgs[0:szt[0], 0:szt[1],:]
return out
def patch_generate_list(data_path,label_path,patch_h,patch_w,stride, start_num, end_num, name, grad=True):
label_list=np.sort(np.asarray(glob.glob(label_path)))
img_list = np.sort(np.asarray(glob.glob(data_path)))
offset=0
fileNum=len(label_list)
count=0
for n in range(start_num, end_num):
print('%s [%d/%d]' % (name, (n+1), fileNum))
img=imread(img_list[n])
label=imread(label_list[n])
x,y,ch=label.shape
for i in range(0+offset,x-patch_h+1,stride):
for j in range(0+offset,y-patch_w+1,stride):
patch_d = img[i:i + patch_h, j:j + patch_w]
patch_l = label[i:i + patch_h, j:j + patch_w]
count += 1
if grad:
if np.log(gradients(patch_l.astype(np.float64)/255.)+1e-10) >= -5.8:
patches.append(patch_d.tobytes())
labels.append(patch_l.tobytes())
else:
patches.append(patch_d.tobytes())
labels.append(patch_l.tobytes())
print('Total Patches: ', count)
def patch_to_tfrecord(tfrecord_file, labels, patches):
np.random.seed(36)
np.random.shuffle(labels)
np.random.seed(36)
np.random.shuffle(patches)
print('Selected: ', len(labels), len(patches))
writer = tf.python_io.TFRecordWriter(tfrecord_file)
for i in range(len(patches)):
if i % 10000 ==0:
print('[%d/%d] processed' % ((i+1), len(patches)))
write_to_tfrecord(writer, labels[i], patches[i])
writer.close()
def write_to_tfrecord(writer, label, binary_image):
example = tf.train.Example(features=tf.train.Features(feature={
'label': tf.train.Feature(bytes_list=tf.train.BytesList(value=[label])),
'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[binary_image]))
}))
writer.write(example.SerializeToString())
return
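# Illustrative sketch (not part of the original script): records written by
# write_to_tfrecord() can be decoded again with a matching feature spec
# (TensorFlow 1.x API assumed, serialized_example being one record):
#
#     features = tf.parse_single_example(serialized_example, {
#         'label': tf.FixedLenFeature([], tf.string),
#         'image': tf.FixedLenFeature([], tf.string),
#     })
#     label = tf.decode_raw(features['label'], tf.uint8)
#     image = tf.decode_raw(features['image'], tf.uint8)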
t1=time()
threads=[]
for idx in range(8):
thread=threading.Thread(target=patch_generate_list, args=(datapath,labelpath, 256,256,180, idx*100, (idx+1)*100, 'DIV2K', True))
threads.append(thread)
for t in threads:
t.start()
for t in threads:
t.join()
data1_num=len(labels)
t2=time()
print('DIV2K:', data1_num, 'Time: %.4f' % ((t2-t1)))
threads=[]
for idx in range(8):
thread=threading.Thread(target=patch_generate_list, args=(datapath2,labelpath2, 256,256,180, idx*40, (idx+1)*40, 'SIDD', False))
threads.append(thread)
for t in threads:
t.start()
for t in threads:
t.join()
t3=time()
print('SIDD:', len(labels)-data1_num, 'Time: %.4f' % ((t3-t2)))
print('*********** Patch To TFRecord ************')
patch_to_tfrecord(tfrecord_file, labels, patches)
t4=time()
print('TFRecord Time: %.4f, Overall Time: %.4f' % ((t4-t3), (t4-t1)))
print('Done')
|
record_audio.py
|
# coding: utf8
import pyaudio
import wave
from threading import Thread
import load_file as lf
import tensorflow as tf
import numpy as np
import speech2text as sp2t
import threading
import requests
from pydub import AudioSegment
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
RECORD_SECONDS = 1
RECORD_OUTPUT_NAME = "record-"
RECORD_OUTPUT_FOLDER = "./record/"
SPEECH_TO_TEXT_RECORD_FOLDER = "./speech/"
SPEECH_TO_TEXT_RECORD_NAME = "speech"
list_ = []
file_name = [""]
n_dim = 80
n_classes = 3
sd = 1 / np.sqrt(n_dim)
# GradientDescentOptimizer
n_hidden_units_one = 280
n_hidden_units_two = 350
# AdamOptimizer
n_hidden_1 = 256
n_hidden_2 = 256
n_hidden_3 = 256
lock = threading.Lock()
def join_file(list_file, count):
"""
Join files in list, save new file in folder "Speech"
:param list_file:
:param count:
:return: Path of new file
"""
data = []
for infile, _ in list_file:
w = wave.open(infile, 'rb')
data.append([w.getparams(), w.readframes(w.getnframes())])
w.close()
name = SPEECH_TO_TEXT_RECORD_FOLDER + SPEECH_TO_TEXT_RECORD_NAME + "-" + str(list_file[0][1]) + str(count) + ".wav"
output = wave.open(name, 'wb')
output.setparams(data[0][0])
num = len(data)
for i in range(0, num):
output.writeframes(data[i][1])
output.close()
return name
def check_silence(dir):
audio = AudioSegment.from_wav(dir)
vol = audio.rms
print dir + ": " + str(vol)
if vol < 102:
return True, vol
else:
return False, vol
# def classifier(features_test):
# """
# :param features_test: mfcc feature of file
# Feed data to restored model
# :return: Predict label of data
# """
# features_test = features_test.reshape(1, 80)
# weights = {
# 'h1': tf.Variable(tf.random_normal([n_dim, n_hidden_1], mean=0, stddev=sd)),
# 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], mean=0, stddev=sd)),
# 'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3], mean=0, stddev=sd)),
# 'out': tf.Variable(tf.random_normal([n_hidden_3, n_classes], mean=0, stddev=sd))
# }
# biases = {
# 'b1': tf.Variable(tf.random_normal([n_hidden_1], mean=0, stddev=sd)),
# 'b2': tf.Variable(tf.random_normal([n_hidden_2], mean=0, stddev=sd)),
# 'b3': tf.Variable(tf.random_normal([n_hidden_3], mean=0, stddev=sd)),
# 'out': tf.Variable(tf.random_normal([n_classes], mean=0, stddev=sd))
# }
# X = tf.placeholder(tf.float32, [None, n_dim])
# Y = tf.placeholder(tf.float32, [None, n_classes])
#
# layer_1 = tf.add(tf.matmul(X, weights['h1']), biases['b1'])
# layer_1 = tf.nn.tanh(layer_1)
# # Hidden layer with RELU activation
# layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
# layer_2 = tf.nn.sigmoid(layer_2)
# # Hidden layer with RELU activation
# layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
# layer_3 = tf.nn.sigmoid(layer_3)
# # Output layer with linear activation
# out_layer = tf.matmul(layer_3, weights['out']) + biases['out']
# y_ = tf.nn.softmax(out_layer)
# sess = tf.InteractiveSession()
# sess.run(tf.global_variables_initializer())
# # Restore the model
# tf.train.Saver().restore(sess, "./adammodel/data")
# y_pred = sess.run(tf.argmax(y_, 1), feed_dict={X: features_test})
# y_tt = sess.run(y_, feed_dict={X: features_test})
# users = open("USER.txt", "r")
# list_users = []
# for user in users:
# user = user.split("\n")[0]
# list_users.append(user)
# return list_users[y_pred[0]], y_tt
def classifier(features_test):
"""
:param features_test: mfcc feature of file
Feed data to restored model
:return: Predict label of data
"""
features_test = features_test.reshape(1, 80)
# X is the input array, containing mfccs data
X = tf.placeholder(tf.float32, [None, n_dim])
# Y contains true labels output
Y = tf.placeholder(tf.float32, [None, n_classes])
# Multi-layer neural network
W_1 = tf.Variable(tf.random_normal([n_dim, n_hidden_units_one], mean=0, stddev=sd))
b_1 = tf.Variable(tf.random_normal([n_hidden_units_one], mean=0, stddev=sd))
h_1 = tf.nn.tanh(tf.matmul(X, W_1) + b_1)
W_2 = tf.Variable(tf.random_normal([n_hidden_units_one, n_hidden_units_two], mean=0, stddev=sd))
b_2 = tf.Variable(tf.random_normal([n_hidden_units_two], mean=0, stddev=sd))
h_2 = tf.nn.sigmoid(tf.matmul(h_1, W_2) + b_2)
W = tf.Variable(tf.random_normal([n_hidden_units_two, n_classes], mean=0, stddev=sd))
b = tf.Variable(tf.random_normal([n_classes], mean=0, stddev=sd))
# Output calc(Result)
y_ = tf.nn.softmax(tf.matmul(h_2, W) + b)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
# Restore the model
tf.train.Saver().restore(sess, "./model/data")
y_pred = sess.run(tf.argmax(y_, 1), feed_dict={X: features_test})
y_tt = sess.run(y_, feed_dict={X: features_test})
users = open("USER.txt", "r")
list_users = []
for user in users:
user = user.split("\n")[0]
list_users.append(user)
return list_users[y_pred[0]], y_tt
def post_text(name, text):
"""
POST json file to web service
:param name: Name of speaker
:param text:
:return: None
"""
item = {"name": name, "text": text}
print item
respond = requests.post('https://example.com', json=item)
if respond.status_code != 201:
print respond.status_code
# def save_file(frame, p, count):
# """Save recorded file and check list"""
# if count == -1:
# print("Start Record!!!")
# return 0
# lock.acquire()
# # Save file
# name = RECORD_OUTPUT_FOLDER + RECORD_OUTPUT_NAME + str(count) + ".wav"
# wf = wave.open(name, 'wb')
# wf.setnchannels(CHANNELS)
# wf.setsampwidth(p)
# wf.setframerate(RATE)
# wf.writeframes(b''.join(frame))
# wf.close()
# # print "File saved in: " + name
# """
# check list: if list null, add current file to list (file_name, class, count)
# else if: sub(current_count,first_element_count) < 2, append current file to list
# else if: sub(current_count,first_element_count) >= 2, check equal(current_class, first_element_class)
# if True: append current file to list, join all files in list and convert to text, list = []
# if False: join all files in list and convert to text, list = [], add current file to list
# """
# # Check silence
# vol = check_silence(name)
#
# # Get mfcc
# features_test = lf.extract_feature(name)
#
# if len(list_) == 0:
# label, o = classifier(features_test)
# list_.append([name, label, count])
# else:
# first_element_count = list_[0][2]
# if (count - first_element_count) < 2:
# label, o = classifier(features_test)
# list_.append([name, label, count])
# else:
# label, o = classifier(features_test)
# first_element_class = list_[0][1]
# if label == first_element_class:
# list_.append([name, label, count])
# conversations = open("log.txt", "a")
# conversations.write(str(list_))
# conversations.write('\n\n')
# conversations.write(str(o))
# conversations.write('\n\n')
# conversations.write(str(count)+": "+str(vol))
# conversations.write("####################")
# conversations.write('\n\n')
# # join 3 file in list
# speech = join_file(list_, count)
# text = sp2t.speech_2_text(speech)
# # clear list
# list_[:] = []
#
# else:
# second_element_class = list_[1][1]
# if first_element_class == second_element_class:
# # join 2 file in list
# speech = join_file(list_, count)
# text = sp2t.speech_2_text(speech)
# # clear list
# list_[:] = []
# list_.append([name, label, count])
# else:
# list_.append([name, label, count])
# conversations = open("log.txt", "a")
# conversations.write(str(list_))
# conversations.write('\n\n')
# conversations.write(str(o))
# conversations.write('\n\n')
# conversations.write(str(count)+": "+str(vol))
# conversations.write("####################")
# conversations.write('\n\n')
# speech = join_file(list_, count)
# text = sp2t.speech_2_text(speech)
# first_element_class = second_element_class
# list_[:] = []
#
# result = first_element_class + ": " + text.encode("utf8")
# # result = unicode(result, errors='ignore')
# post_text(first_element_class, text)
# # Write speech-text to file
# conversations = open("CONVERSATIONS.txt", "a")
# conversations.write(result)
# conversations.write('\n\n')
# # Write list_ log to file
# conversations = open("log.txt", "a")
# conversations.write(str(list_))
# conversations.write('\n\n')
# conversations.write(str(o))
# conversations.write('\n\n')
# conversations.write(str(count)+": "+str(vol))
# conversations.write("####################")
# conversations.write('\n\n')
# lock.release()
def save_file(frame, sample_size, count):
"""Save recorded file and check list"""
if count == -1:
print("Start Record!!!")
return 0
lock.acquire()
# Save file
name = RECORD_OUTPUT_FOLDER + RECORD_OUTPUT_NAME + str(count) + ".wav"
wf = wave.open(name, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(sample_size)
wf.setframerate(RATE)
wf.writeframes(b''.join(frame))
wf.close()
# Check silence
vol, val = check_silence(name)
    # If silence was detected: when more than one 1-second chunk is buffered,
    # join the buffer, run speech-to-text and classify the speaker; otherwise clear it.
    # If not silent: append the current chunk while fewer than 10 are buffered,
    # else flush the buffer first and then start a new one with the current chunk.
if vol:
if len(list_) > 1:
speech = join_file(list_, count)
text = sp2t.speech_2_text(speech)
list_[:] = []
features = lf.extract_feature(speech)
label, o = classifier(features)
result_text = str(label) + " :" + str(text)
# Write speech-text to file
conversations = open("CONVERSATIONS.txt", "a")
conversations.write(result_text)
conversations.write('\n\n')
conversations.write(str(o))
conversations.write('\n\n')
else:
list_[:] = []
else:
if len(list_) < 10:
list_.append([name, count])
conversations = open("log.txt", "a")
conversations.write(str(list_))
conversations.write('\n\n')
conversations.write(str(count) + ": " + str(val))
conversations.write("####################")
conversations.write('\n\n')
else:
speech = join_file(list_, count)
text = sp2t.speech_2_text(speech)
list_[:] = []
features = lf.extract_feature(speech)
label, o = classifier(features)
result_text = str(label) + " :" + str(text)
# Write speech-text to file
conversations = open("CONVERSATIONS.txt", "a")
conversations.write(result_text)
conversations.write('\n\n')
conversations.write(str(o))
conversations.write('\n\n')
list_.append([name, count])
conversations = open("log.txt", "a")
conversations.write(str(list_))
conversations.write('\n\n')
conversations.write(str(count) + ": " + str(val))
conversations.write("####################")
conversations.write('\n\n')
lock.release()
def record():
"""Start recording, save to file every X seconds"""
range_ = int(RATE / CHUNK * RECORD_SECONDS)
p = pyaudio.PyAudio()
sample_size = p.get_sample_size(FORMAT)
count = -1
#wf = wave.open("1_speech-4245.wav",'rb')
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
while True:
frames = []
for i in range(0, range_):
data = stream.read(CHUNK)
frames.append(data)
new_frame = frames
savefile = Thread(name="savefile", target=save_file, args=(new_frame, sample_size, count,))
savefile.setDaemon(True)
savefile.start()
count = count + 1
stream.stop_stream()
stream.close()
p.terminate()
if __name__ == '__main__':
record()
|
build.py
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build file for production version of Oppia. Minifies JS and CSS."""
from __future__ import absolute_import
from __future__ import unicode_literals
import argparse
import collections
import fnmatch
import hashlib
import json
import os
import re
import shutil
import subprocess
import threading
import python_utils
from scripts import common
from scripts import servers
ASSETS_DEV_DIR = os.path.join('assets', '')
ASSETS_OUT_DIR = os.path.join('build', 'assets', '')
THIRD_PARTY_STATIC_DIR = os.path.join('third_party', 'static')
THIRD_PARTY_GENERATED_DEV_DIR = os.path.join('third_party', 'generated', '')
THIRD_PARTY_GENERATED_OUT_DIR = os.path.join(
'build', 'third_party', 'generated', '')
THIRD_PARTY_JS_RELATIVE_FILEPATH = os.path.join('js', 'third_party.js')
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH = os.path.join(
'js', 'third_party.min.js')
THIRD_PARTY_CSS_RELATIVE_FILEPATH = os.path.join('css', 'third_party.css')
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH = os.path.join(
'css', 'third_party.min.css')
WEBFONTS_RELATIVE_DIRECTORY_PATH = os.path.join('webfonts', '')
EXTENSIONS_DIRNAMES_TO_DIRPATHS = {
'dev_dir': os.path.join('extensions', ''),
'staging_dir': os.path.join('backend_prod_files', 'extensions', ''),
'out_dir': os.path.join('build', 'extensions', '')
}
TEMPLATES_DEV_DIR = os.path.join('templates', '')
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS = {
'dev_dir': os.path.join('core', 'templates', ''),
'staging_dir': os.path.join('backend_prod_files', 'templates', ''),
'out_dir': os.path.join('build', 'templates', '')
}
WEBPACK_DIRNAMES_TO_DIRPATHS = {
'staging_dir': os.path.join('backend_prod_files', 'webpack_bundles', ''),
'out_dir': os.path.join('build', 'webpack_bundles', '')
}
# This json file contains a json object. The object's keys are file paths and
# the values are the corresponding hash values. The paths need to be in posix
# style, as this file is interpreted by the `url-interpolation` service, which
# treats the paths in this file as URLs.
HASHES_JSON_FILENAME = 'hashes.json'
HASHES_JSON_FILEPATH = os.path.join('assets', HASHES_JSON_FILENAME)
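# For illustration only (not part of the original file): hashes.json maps
# posix-style file paths to their content hashes, roughly
# {"images/logo.png": "<hash>", "i18n/en.json": "<hash>"}; the concrete keys
# shown here are hypothetical.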
MANIFEST_FILE_PATH = os.path.join('manifest.json')
REMOVE_WS = re.compile(r'\s{2,}').sub
YUICOMPRESSOR_DIR = os.path.join(
os.pardir, 'oppia_tools', 'yuicompressor-2.4.8', 'yuicompressor-2.4.8.jar')
PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
UGLIFY_FILE = os.path.join('node_modules', 'uglify-js', 'bin', 'uglifyjs')
WEBPACK_FILE = os.path.join('node_modules', 'webpack', 'bin', 'webpack.js')
WEBPACK_DEV_CONFIG = 'webpack.dev.config.ts'
WEBPACK_DEV_SOURCE_MAPS_CONFIG = 'webpack.dev.sourcemap.config.ts'
WEBPACK_PROD_CONFIG = 'webpack.prod.config.ts'
WEBPACK_PROD_SOURCE_MAPS_CONFIG = 'webpack.prod.sourcemap.config.ts'
WEBPACK_TERSER_CONFIG = 'webpack.terser.config.ts'
# Files with these extensions shouldn't be moved to build directory.
FILE_EXTENSIONS_TO_IGNORE = ('.py', '.pyc', '.stylelintrc', '.ts', '.gitkeep')
# Files with these name patterns shouldn't be moved to build directory, and will
# not be served in production. (This includes protractor.js files in
# /extensions.)
JS_FILENAME_SUFFIXES_TO_IGNORE = ('Spec.js', 'protractor.js')
JS_FILENAME_SUFFIXES_NOT_TO_MINIFY = ('.bundle.js',)
GENERAL_FILENAMES_TO_IGNORE = ('.pyc', '.stylelintrc', '.DS_Store')
JS_FILEPATHS_NOT_TO_BUILD = (
os.path.join(
'core', 'templates', 'expressions', 'parser.js'),
os.path.join('extensions', 'ckeditor_plugins', 'pre', 'plugin.js')
)
# These filepaths shouldn't be renamed (i.e. the filepath shouldn't contain
# hash).
# This is because these files don't need cache invalidation, are referenced
# from third party files or should not be moved to the build directory.
# Statically served pages from app.yaml should be here too since they don't
# need cache invalidation.
FILEPATHS_NOT_TO_RENAME = (
'*.py',
'third_party/generated/js/third_party.min.js.map',
'third_party/generated/webfonts/*',
'*.bundle.js',
'*.bundle.js.map',
'webpack_bundles/*',
)
PAGES_IN_APP_YAML = (
'webpack_bundles/about-page.mainpage.html',
'webpack_bundles/contact-page.mainpage.html',
'webpack_bundles/donate-page.mainpage.html',
'webpack_bundles/get-started-page.mainpage.html',
'webpack_bundles/license.mainpage.html',
'webpack_bundles/login-page.mainpage.html',
'webpack_bundles/logout-page.mainpage.html',
'webpack_bundles/partnerships-page.mainpage.html',
'webpack_bundles/privacy-page.mainpage.html',
'webpack_bundles/playbook.mainpage.html',
'webpack_bundles/teach-page.mainpage.html',
'webpack_bundles/terms-page.mainpage.html',
'webpack_bundles/thanks-page.mainpage.html'
)
# NOTE: These pages manage user sessions. Thus, we should never reject or
# replace them when running in maintenance mode; otherwise admins will be unable
# to access the site.
AUTH_PAGE_PATHS = (
'webpack_bundles/login-page.mainpage.html',
'webpack_bundles/logout-page.mainpage.html',
)
# Hashes for files with these paths should be provided to the frontend in
# JS hashes object.
FILEPATHS_PROVIDED_TO_FRONTEND = (
'images/*', 'videos/*', 'i18n/*', '*.component.html',
'*_directive.html', '*.directive.html',
'*.template.html', '*.png', '*.json', '*.webp')
HASH_BLOCK_SIZE = 2**20
APP_DEV_YAML_FILEPATH = 'app_dev.yaml'
APP_YAML_FILEPATH = 'app.yaml'
_PARSER = argparse.ArgumentParser(
description="""
Creates a third-party directory where all the JS and CSS dependencies are
built and stored. Depending on the options passed to the script, might also
minify third-party libraries and/or generate a build directory.
""")
_PARSER.add_argument(
'--prod_env', action='store_true', default=False, dest='prod_env')
_PARSER.add_argument(
'--deploy_mode', action='store_true', default=False, dest='deploy_mode')
_PARSER.add_argument(
'--minify_third_party_libs_only', action='store_true', default=False,
dest='minify_third_party_libs_only')
_PARSER.add_argument(
'--deparallelize_terser',
action='store_true',
default=False,
dest='deparallelize_terser',
help='Disable parallelism on terser plugin in webpack. Use with prod_env.')
_PARSER.add_argument(
'--maintenance_mode',
action='store_true',
default=False,
dest='maintenance_mode',
help=(
'Enable maintenance mode, '
'meaning that only super admins can access the site.'
)
)
_PARSER.add_argument(
'--source_maps',
action='store_true',
default=False,
dest='source_maps',
help='Build webpack with source maps.')
def generate_app_yaml(deploy_mode=False, maintenance_mode=False):
"""Generate app.yaml from app_dev.yaml.
Args:
deploy_mode: bool. Whether the script is being called from deploy
script.
maintenance_mode: bool. Whether the site should be put into
maintenance mode.
"""
prod_file_prefix = 'build/'
maintenance_page_path = 'webpack_bundles/maintenance-page.mainpage.html'
content = '# THIS FILE IS AUTOGENERATED, DO NOT MODIFY\n'
with python_utils.open_file(APP_DEV_YAML_FILEPATH, 'r') as yaml_file:
content += yaml_file.read()
for file_path in PAGES_IN_APP_YAML:
if maintenance_mode and file_path not in AUTH_PAGE_PATHS:
content = content.replace(
file_path, prod_file_prefix + maintenance_page_path)
else:
content = content.replace(
file_path, prod_file_prefix + file_path)
if deploy_mode:
# The version: default line is required to run jobs on a local server (
# both in prod & non-prod env). This line is not required when app.yaml
# is generated during deployment. So, we remove this if the build
# process is being run from the deploy script.
content = content.replace('version: default', '')
# The FIREBASE_AUTH_EMULATOR_HOST environment variable is only needed to
# test locally, and MUST NOT be included in the deployed file.
content = re.sub(' FIREBASE_AUTH_EMULATOR_HOST: ".*"\n', '', content)
if os.path.isfile(APP_YAML_FILEPATH):
os.remove(APP_YAML_FILEPATH)
with python_utils.open_file(APP_YAML_FILEPATH, 'w+') as prod_yaml_file:
prod_yaml_file.write(content)
def modify_constants(
prod_env=False, emulator_mode=True, maintenance_mode=False):
"""Modify constants.ts and feconf.py.
Args:
prod_env: bool. Whether the server is started in prod mode.
emulator_mode: bool. Whether the server is started in emulator mode.
maintenance_mode: bool. Whether the site should be put into
the maintenance mode.
"""
dev_mode_variable = (
'"DEV_MODE": false' if prod_env else '"DEV_MODE": true')
common.inplace_replace_file(
common.CONSTANTS_FILE_PATH,
r'"DEV_MODE": (true|false)',
dev_mode_variable)
emulator_mode_variable = (
'"EMULATOR_MODE": true' if emulator_mode else '"EMULATOR_MODE": false')
common.inplace_replace_file(
common.CONSTANTS_FILE_PATH,
r'"EMULATOR_MODE": (true|false)',
emulator_mode_variable
)
enable_maintenance_mode_variable = (
'ENABLE_MAINTENANCE_MODE = %s' % python_utils.UNICODE(maintenance_mode))
common.inplace_replace_file(
common.FECONF_PATH,
r'ENABLE_MAINTENANCE_MODE = (True|False)',
enable_maintenance_mode_variable)
def set_constants_to_default():
"""Set variables in constants.ts and feconf.py to default values."""
modify_constants(prod_env=False, emulator_mode=True, maintenance_mode=False)
def _minify(source_path, target_path):
"""Runs the given file through a minifier and outputs it to target_path.
Args:
source_path: str. Absolute path to file to be minified.
target_path: str. Absolute path to location where to copy
the minified file.
"""
# The -Xmxn argument is an attempt to limit the max memory used when the
# minification process is running on CircleCI. Note that, from local
# experiments, 18m seems to work, but 12m is too small and results in an
# out-of-memory error.
# https://circleci.com/blog/how-to-handle-java-oom-errors/
# Use relative path to avoid java command line parameter parse error on
# Windows. Convert to posix style path because the java program requires
# the filepath arguments to be in posix path style.
target_path = common.convert_to_posixpath(
os.path.relpath(target_path))
source_path = common.convert_to_posixpath(
os.path.relpath(source_path))
yuicompressor_dir = common.convert_to_posixpath(YUICOMPRESSOR_DIR)
cmd = 'java -Xmx24m -jar %s -o %s %s' % (
yuicompressor_dir, target_path, source_path)
subprocess.check_call(cmd, shell=True)
def write_to_file_stream(file_stream, content):
"""Write to a file object using provided content.
Args:
file_stream: file. A stream handling object to do write operation on.
content: str. String content to write to file object.
"""
file_stream.write(python_utils.UNICODE(content))
def _join_files(source_paths, target_file_stream):
"""Writes multiple files into one file.
Args:
source_paths: list(str). Paths to files to join together.
target_file_stream: file. A stream object of target file.
"""
for source_path in source_paths:
with python_utils.open_file(source_path, 'r') as source_file:
write_to_file_stream(target_file_stream, source_file.read())
def _minify_and_create_sourcemap(source_path, target_file_path):
"""Minifies and generates source map for a JS file. This function is only
meant to be used with third_party.min.js.
Args:
source_path: str. Path to JS file to minify.
target_file_path: str. Path to location of the minified file.
"""
python_utils.PRINT('Minifying and creating sourcemap for %s' % source_path)
source_map_properties = 'includeSources,url=\'third_party.min.js.map\''
cmd = '%s %s %s -c -m --source-map %s -o %s ' % (
common.NODE_BIN_PATH, UGLIFY_FILE, source_path,
source_map_properties, target_file_path)
subprocess.check_call(cmd, shell=True)
def _generate_copy_tasks_for_fonts(source_paths, target_path):
"""Queue up a copy task for each font file.
Args:
source_paths: list(str). Paths to fonts.
target_path: str. Path where the fonts should be copied.
Returns:
deque(Thread). A deque that contains all copy tasks queued to be
processed.
"""
copy_tasks = collections.deque()
for font_path in source_paths:
copy_task = threading.Thread(
target=shutil.copy,
args=(font_path, target_path,))
copy_tasks.append(copy_task)
return copy_tasks
def _insert_hash(filepath, file_hash):
"""Inserts hash into filepath before the file extension.
Args:
filepath: str. Path where the hash should be inserted.
file_hash: str. Hash to be inserted into the path.
Returns:
str. Filepath with hash inserted.
"""
filepath, file_extension = os.path.splitext(filepath)
return '%s.%s%s' % (filepath, file_hash, file_extension)
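# For example (illustration only, not part of the original file):
# _insert_hash('css/oppia.css', 'a1b2c3') returns 'css/oppia.a1b2c3.css'.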
def ensure_directory_exists(filepath):
"""Ensures if directory tree exists, if not creates the directories.
Args:
filepath: str. Path to file located in directory that we want to ensure
exists.
"""
directory = os.path.dirname(filepath)
if not os.path.exists(directory):
os.makedirs(directory)
def safe_delete_directory_tree(directory_path):
"""Recursively delete a directory tree. If directory tree does not exist,
create the directories first then delete the directory tree.
Args:
directory_path: str. Directory path to be deleted.
"""
ensure_directory_exists(directory_path)
shutil.rmtree(directory_path)
def _ensure_files_exist(filepaths):
"""Ensures that files exist at the given filepaths.
Args:
filepaths: list(str). Paths to files that we want to ensure exist.
Raises:
OSError. One or more of the files does not exist.
"""
for filepath in filepaths:
if not os.path.isfile(filepath):
raise OSError('File %s does not exist.' % filepath)
def safe_copy_file(source_filepath, target_filepath):
"""Copy a file (no metadata) after ensuring the file exists at the given
source filepath.
    NOTE: shutil.copyfile does not accept directory paths as arguments.
Args:
source_filepath: str. Path to source file that we want to copy from.
target_filepath: str. Path to target file that we want to copy to.
"""
_ensure_files_exist([source_filepath])
shutil.copyfile(source_filepath, target_filepath)
def safe_delete_file(filepath):
"""Delete a file after ensuring the provided file actually exists.
Args:
filepath: str. Filepath to be deleted.
"""
_ensure_files_exist([filepath])
os.remove(filepath)
def get_file_count(directory_path):
"""Count total number of file in the given directory, ignoring any files
with extensions in FILE_EXTENSIONS_TO_IGNORE or files that should not be
built.
Args:
directory_path: str. Directory to be walked.
Returns:
int. Total number of files minus ignored files.
"""
total_file_count = 0
for root, _, filenames in os.walk(directory_path):
for filename in filenames:
# Ignore files with certain extensions.
filepath = os.path.join(root, filename)
if should_file_be_built(filepath) and not any(
filename.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
total_file_count += 1
return total_file_count
def _compare_file_count(
first_dir_list, second_dir_list):
"""Ensure that the total count of files in all directories in the first
list matches the count of files in all the directories in the second list.
Args:
first_dir_list: list(str). List of directories to compare.
second_dir_list: list(str). List of directories to compare.
Raises:
ValueError. The source directory list does not have the same file
count as the target directory list.
"""
file_counts = [0, 0]
for first_dir_path in first_dir_list:
file_counts[0] += get_file_count(first_dir_path)
for second_dir_path in second_dir_list:
file_counts[1] += get_file_count(second_dir_path)
if file_counts[0] != file_counts[1]:
python_utils.PRINT(
'Comparing %s vs %s' % (first_dir_list, second_dir_list))
raise ValueError(
'%s files in first dir list != %s files in second dir list' % (
file_counts[0], file_counts[1]))
def process_html(source_file_stream, target_file_stream):
"""Remove whitespaces and add hashes to filepaths in the HTML file stream
object.
Args:
source_file_stream: file. The stream object of the HTML file to be
read from.
target_file_stream: file. The stream object to write the minified HTML
file to.
"""
write_to_file_stream(
target_file_stream, REMOVE_WS(' ', source_file_stream.read()))
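# For example (illustration only, not part of the original file): REMOVE_WS
# collapses runs of two or more whitespace characters into a single space, so
# REMOVE_WS(' ', '<p>  Hello\n\n  world</p>') yields '<p> Hello world</p>'.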
def get_dependency_directory(dependency):
"""Get dependency directory from dependency dictionary.
Args:
dependency: dict(str, str). Dictionary representing single dependency
from manifest.json.
Returns:
str. Dependency directory.
"""
if 'targetDir' in dependency:
dependency_dir = dependency['targetDir']
else:
dependency_dir = dependency['targetDirPrefix'] + dependency['version']
return os.path.join(THIRD_PARTY_STATIC_DIR, dependency_dir)
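# Illustrative example of the resolution above (values are hypothetical but
# shaped like the manifest.json entries handled here): a dependency such as
#   {"version": "3.3.4", "targetDirPrefix": "bootstrap-"}
# resolves to a directory like third_party/static/bootstrap-3.3.4, while a
# dependency with an explicit "targetDir" uses that directory name directly.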
def get_css_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency css filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to css files that need to be copied.
"""
css_files = dependency_bundle.get('css', [])
return [os.path.join(dependency_dir, css_file) for css_file in css_files]
def get_js_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency js filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to js files that need to be copied.
"""
js_files = dependency_bundle.get('js', [])
return [os.path.join(dependency_dir, js_file) for js_file in js_files]
def get_font_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency font filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to font files that need to be copied.
"""
if 'fontsPath' not in dependency_bundle:
# Skip dependency bundles in manifest.json that do not have
# fontsPath property.
return []
fonts_path = dependency_bundle['fontsPath']
# Obtain directory path to /font inside dependency folder.
# E.g. third_party/static/bootstrap-3.3.4/fonts/.
font_dir = os.path.join(dependency_dir, fonts_path)
font_filepaths = []
# Walk the directory and add all font files to list.
for root, _, filenames in os.walk(font_dir):
for filename in filenames:
font_filepaths.append(os.path.join(root, filename))
return font_filepaths
def get_dependencies_filepaths():
"""Extracts dependencies filepaths from manifest.json file into
a dictionary.
Returns:
dict(str, list(str)). A dict mapping file types to lists of filepaths.
The dict has three keys: 'js', 'css' and 'fonts'. Each of the
corresponding values is a full list of dependency file paths of the
given type.
"""
filepaths = {
'js': [],
'css': [],
'fonts': []
}
with python_utils.open_file(MANIFEST_FILE_PATH, 'r') as json_file:
manifest = json.loads(
json_file.read(), object_pairs_hook=collections.OrderedDict)
frontend_dependencies = manifest['dependencies']['frontend']
for dependency in frontend_dependencies.values():
if 'bundle' in dependency:
dependency_dir = get_dependency_directory(dependency)
filepaths['css'].extend(
get_css_filepaths(dependency['bundle'], dependency_dir))
filepaths['js'].extend(
get_js_filepaths(dependency['bundle'], dependency_dir))
filepaths['fonts'].extend(
get_font_filepaths(dependency['bundle'], dependency_dir))
_ensure_files_exist(filepaths['js'])
_ensure_files_exist(filepaths['css'])
_ensure_files_exist(filepaths['fonts'])
return filepaths
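# Rough sketch of the manifest.json structure that the parsing above expects
# (the dependency name, version and filenames are hypothetical):
#   {
#     "dependencies": {
#       "frontend": {
#         "someLib": {
#           "version": "1.0.0",
#           "targetDirPrefix": "someLib-",
#           "bundle": {
#             "js": ["someLib.min.js"],
#             "css": ["someLib.min.css"],
#             "fontsPath": "fonts/"
#           }
#         }
#       }
#     }
#   }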
def minify_third_party_libs(third_party_directory_path):
"""Minify third_party.js and third_party.css and remove un-minified
files.
"""
third_party_js_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_JS_RELATIVE_FILEPATH)
third_party_css_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_CSS_RELATIVE_FILEPATH)
minified_third_party_js_filepath = os.path.join(
third_party_directory_path, MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH)
minified_third_party_css_filepath = os.path.join(
third_party_directory_path, MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH)
_minify_and_create_sourcemap(
third_party_js_filepath, minified_third_party_js_filepath)
_minify(third_party_css_filepath, minified_third_party_css_filepath)
# Clean up un-minified third_party.js and third_party.css.
safe_delete_file(third_party_js_filepath)
safe_delete_file(third_party_css_filepath)
def build_third_party_libs(third_party_directory_path):
"""Joins all third party css files into single css file and js files into
single js file. Copies both files and all fonts into third party folder.
"""
python_utils.PRINT(
'Building third party libs at %s' % third_party_directory_path)
third_party_js_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_JS_RELATIVE_FILEPATH)
third_party_css_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_CSS_RELATIVE_FILEPATH)
webfonts_dir = os.path.join(
third_party_directory_path, WEBFONTS_RELATIVE_DIRECTORY_PATH)
dependency_filepaths = get_dependencies_filepaths()
ensure_directory_exists(third_party_js_filepath)
with python_utils.open_file(
third_party_js_filepath, 'w+') as third_party_js_file:
_join_files(dependency_filepaths['js'], third_party_js_file)
ensure_directory_exists(third_party_css_filepath)
with python_utils.open_file(
third_party_css_filepath, 'w+') as third_party_css_file:
_join_files(dependency_filepaths['css'], third_party_css_file)
ensure_directory_exists(webfonts_dir)
_execute_tasks(
_generate_copy_tasks_for_fonts(
dependency_filepaths['fonts'], webfonts_dir))
def build_using_webpack(config_path):
"""Execute webpack build process. This takes all TypeScript files we have in
/templates and generates JS bundles according the require() imports
and also compiles HTML pages into the /backend_prod_files/webpack_bundles
folder. The files are later copied into /build/webpack_bundles.
Args:
config_path: str. Webpack config to be used for building.
"""
python_utils.PRINT('Building webpack')
managed_webpack_compiler = servers.managed_webpack_compiler(
config_path=config_path, max_old_space_size=4096)
with managed_webpack_compiler as p:
p.wait()
def hash_should_be_inserted(filepath):
"""Returns if the file should be renamed to include hash in
the path.
Args:
filepath: str. Path relative to directory we are currently building.
Returns:
bool. True if filepath should contain hash else False.
"""
return not any(
fnmatch.fnmatch(filepath, pattern) for pattern
in FILEPATHS_NOT_TO_RENAME)
def should_file_be_built(filepath):
"""Determines if the file should be built.
- JS files: Returns False if filepath matches with pattern in
JS_FILENAME_SUFFIXES_TO_IGNORE or is in JS_FILEPATHS_NOT_TO_BUILD,
else returns True.
- Python files: Returns False if filepath ends with _test.py, else
returns True
- TS files: Returns False.
- Other files: Returns False if filepath matches with pattern in
GENERAL_FILENAMES_TO_IGNORE, else returns True.
Args:
        filepath: str. Path relative to the directory we are currently
            building.
Returns:
bool. True if filepath should be built, else False.
"""
if filepath.endswith('.js'):
return all(
not filepath.endswith(p) for p in JS_FILENAME_SUFFIXES_TO_IGNORE)
elif filepath.endswith('_test.py'):
return False
elif filepath.endswith('.ts'):
return False
else:
return not any(
filepath.endswith(p) for p in GENERAL_FILENAMES_TO_IGNORE)
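# A few illustrative classifications following the rules above (the paths are
# hypothetical):
#   should_file_be_built('core/foo.ts')           -> False (TS is compiled by
#       webpack instead)
#   should_file_be_built('core/foo_test.py')      -> False (test file)
#   should_file_be_built('core/templates/foo.js') -> True, unless the filename
#       ends with a suffix listed in JS_FILENAME_SUFFIXES_TO_IGNORE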
def generate_copy_tasks_to_copy_from_source_to_target(
source, target, file_hashes):
"""Generate copy task for each file in source directory, excluding files
with extensions in FILE_EXTENSIONS_TO_IGNORE. Insert hash from hash dict
into the destination filename.
Args:
source: str. Path relative to /oppia directory of directory
containing files and directories to be copied.
target: str. Path relative to /oppia directory of directory where
to copy the files and directories.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
deque(Thread). A deque that contains all copy tasks queued
to be processed.
"""
python_utils.PRINT('Processing %s' % os.path.join(os.getcwd(), source))
python_utils.PRINT('Copying into %s' % os.path.join(os.getcwd(), target))
copy_tasks = collections.deque()
for root, dirnames, filenames in os.walk(os.path.join(os.getcwd(), source)):
for directory in dirnames:
python_utils.PRINT('Copying %s' % os.path.join(root, directory))
for filename in filenames:
source_path = os.path.join(root, filename)
# Python files should not be copied to final build directory.
if not any(
source_path.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
target_path = source_path
# The path in hashes.json file is in posix style,
# see the comment above HASHES_JSON_FILENAME for details.
relative_path = common.convert_to_posixpath(
os.path.relpath(source_path, source))
if (hash_should_be_inserted(source + relative_path) and
relative_path in file_hashes):
relative_path = (
_insert_hash(relative_path, file_hashes[relative_path]))
target_path = os.path.join(os.getcwd(), target, relative_path)
ensure_directory_exists(target_path)
copy_task = threading.Thread(
target=safe_copy_file,
args=(source_path, target_path,))
copy_tasks.append(copy_task)
return copy_tasks
def is_file_hash_provided_to_frontend(filepath):
"""Returns if the hash for the filepath should be provided to the frontend.
Args:
filepath: str. Relative path to the file.
Returns:
bool. True if file hash should be provided to the frontend else False.
"""
return any(
fnmatch.fnmatch(filepath, pattern) for pattern
in FILEPATHS_PROVIDED_TO_FRONTEND)
def generate_md5_hash(filepath):
"""Returns md5 hash of file.
Args:
filepath: str. Absolute path to the file.
Returns:
str. Hexadecimal hash of specified file.
"""
m = hashlib.md5()
with python_utils.open_file(filepath, 'rb', encoding=None) as f:
while True:
buf = f.read(HASH_BLOCK_SIZE)
if not buf:
break
m.update(buf)
return m.hexdigest()
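# Illustrative usage (the path and the returned hash are hypothetical):
#   generate_md5_hash('assets/images/logo.png')
#   -> 'd41d8cd98f00b204e9800998ecf8427e'
# The file is read in HASH_BLOCK_SIZE chunks so that large files do not have
# to be loaded into memory all at once.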
def get_filepaths_by_extensions(source_dir, file_extensions):
"""Return list of filepaths in a directory with certain extensions,
excluding filepaths that should not be built.
Args:
source_dir: str. Root directory to be walked.
file_extensions: tuple(str). Tuple of file extensions.
Returns:
list(str). List of filepaths with specified extensions.
"""
filepaths = []
for root, _, filenames in os.walk(source_dir):
for filename in filenames:
filepath = os.path.join(root, filename)
relative_filepath = os.path.relpath(filepath, source_dir)
if should_file_be_built(filepath) and any(
filename.endswith(p) for p in file_extensions):
filepaths.append(relative_filepath)
return filepaths
def get_file_hashes(directory_path):
"""Returns hashes of all files in directory tree, excluding files with
extensions in FILE_EXTENSIONS_TO_IGNORE or files that should not be built.
Args:
directory_path: str. Root directory of the tree.
Returns:
dict(str, str). Dictionary with keys specifying file paths and values
specifying file hashes.
"""
file_hashes = dict()
python_utils.PRINT(
'Computing hashes for files in %s'
% os.path.join(os.getcwd(), directory_path))
for root, _, filenames in os.walk(
os.path.join(os.getcwd(), directory_path)):
for filename in filenames:
filepath = os.path.join(root, filename)
if should_file_be_built(filepath) and not any(
filename.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
# The path in hashes.json file is in posix style,
# see the comment above HASHES_JSON_FILENAME for details.
complete_filepath = common.convert_to_posixpath(
os.path.join(root, filename))
relative_filepath = common.convert_to_posixpath(os.path.relpath(
complete_filepath, directory_path))
file_hashes[relative_filepath] = generate_md5_hash(
complete_filepath)
return file_hashes
def filter_hashes(file_hashes):
"""Filters hashes that should be provided to the frontend
and prefixes "/" in front of the keys.
Args:
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
dict(str, str). Filtered dictionary of only filepaths that should be
provided to the frontend.
"""
filtered_hashes = dict()
for filepath, file_hash in file_hashes.items():
if is_file_hash_provided_to_frontend(filepath):
filtered_hashes['/' + filepath] = file_hash
return filtered_hashes
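# Illustrative input/output for filter_hashes (the path and hash are
# hypothetical and assume the path matches FILEPATHS_PROVIDED_TO_FRONTEND):
#   {'images/logo.png': '<md5>'}  ->  {'/images/logo.png': '<md5>'}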
def save_hashes_to_file(file_hashes):
"""Return JS code that loads hashes needed for frontend into variable.
Args:
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
str. JS code loading hashes as JSON into variable.
"""
# Only some of the hashes are needed in the frontend.
filtered_hashes = filter_hashes(file_hashes)
ensure_directory_exists(HASHES_JSON_FILEPATH)
with python_utils.open_file(HASHES_JSON_FILEPATH, 'w+') as hashes_json_file:
hashes_json_file.write(
python_utils.UNICODE(
json.dumps(filtered_hashes, ensure_ascii=False)))
hashes_json_file.write(u'\n')
def minify_func(source_path, target_path, filename):
"""Call the appropriate functions to handle different types of file
formats:
    - HTML files: Remove whitespace and save the edited file at the target
        directory.
    - CSS or JS files: Minify and save at the target directory.
    - Other files: Copy the file from the source directory to the target
        directory.
    Args:
        source_path: str. Path to the file to be processed.
        target_path: str. Path where the processed file is saved.
        filename: str. Name of the file, used to decide how it is processed.
    """
skip_minify = any(
filename.endswith(p) for p in JS_FILENAME_SUFFIXES_NOT_TO_MINIFY)
if filename.endswith('.html'):
python_utils.PRINT('Building %s' % source_path)
with python_utils.open_file(source_path, 'r+') as source_html_file:
with python_utils.open_file(
target_path, 'w+') as minified_html_file:
process_html(source_html_file, minified_html_file)
elif ((filename.endswith('.css') or filename.endswith('.js')) and
not skip_minify):
python_utils.PRINT('Minifying %s' % source_path)
_minify(source_path, target_path)
else:
python_utils.PRINT('Copying %s' % source_path)
safe_copy_file(source_path, target_path)
def _execute_tasks(tasks, batch_size=24):
"""Starts all tasks and checks the results.
Runs no more than 'batch_size' tasks at a time.
"""
remaining_tasks = collections.deque(tasks)
currently_running_tasks = []
while remaining_tasks or currently_running_tasks:
if currently_running_tasks:
for task in collections.deque(currently_running_tasks):
if not task.is_alive():
currently_running_tasks.remove(task)
while remaining_tasks and len(currently_running_tasks) < batch_size:
task = remaining_tasks.popleft()
currently_running_tasks.append(task)
try:
task.start()
except RuntimeError:
raise OSError('threads can only be started once')
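# Note on the loop above: on every pass, threads that have finished are
# removed from currently_running_tasks and new threads are started from
# remaining_tasks until the batch is full, so at most 'batch_size' threads run
# concurrently; attempting to start an already-started thread surfaces as an
# OSError.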
def generate_build_tasks_to_build_all_files_in_directory(source, target):
"""This function queues up tasks to build all files in a directory,
excluding files that should not be built.
Args:
source: str. Path relative to /oppia of directory containing source
files and directories to be built.
target: str. Path relative to /oppia of directory where the built files
and directories will be saved to.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
python_utils.PRINT('Processing %s' % os.path.join(os.getcwd(), source))
python_utils.PRINT('Generating into %s' % os.path.join(os.getcwd(), target))
build_tasks = collections.deque()
for root, dirnames, filenames in os.walk(os.path.join(os.getcwd(), source)):
for directory in dirnames:
python_utils.PRINT(
'Building directory %s' % os.path.join(root, directory))
for filename in filenames:
source_path = os.path.join(root, filename)
target_path = source_path.replace(source, target)
ensure_directory_exists(target_path)
if should_file_be_built(source_path):
task = threading.Thread(
target=minify_func,
args=(source_path, target_path, filename,))
build_tasks.append(task)
return build_tasks
def generate_build_tasks_to_build_files_from_filepaths(
source_path, target_path, filepaths):
"""This function queues up build tasks to build files from a list of
filepaths, excluding files that should not be built.
Args:
source_path: str. Path relative to /oppia directory of directory
containing files and directories to be copied.
target_path: str. Path relative to /oppia directory of directory where
to copy the files and directories.
filepaths: list(str). List of filepaths to be built.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
build_tasks = collections.deque()
for filepath in filepaths:
source_file_path = os.path.join(source_path, filepath)
target_file_path = os.path.join(target_path, filepath)
ensure_directory_exists(target_file_path)
if should_file_be_built(source_file_path):
task = threading.Thread(
target=minify_func,
args=(
source_file_path, target_file_path, filepath,))
build_tasks.append(task)
return build_tasks
def generate_delete_tasks_to_remove_deleted_files(
source_dir_hashes, staging_directory):
"""This function walks the staging directory and queues up deletion tasks to
remove files that are not in the hash dict i.e. remaining files in staging
directory that have since been deleted from source directory. Files with
extensions in FILE_EXTENSIONS_TO_IGNORE will be excluded.
Args:
source_dir_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
staging_directory: str. Path relative to /oppia directory of directory
containing files and directories to be walked.
Returns:
deque(Thread). A deque that contains all delete tasks
queued to be processed.
"""
python_utils.PRINT(
        'Scanning directory %s to remove deleted files' % staging_directory)
delete_tasks = collections.deque()
for root, _, filenames in os.walk(
os.path.join(os.getcwd(), staging_directory)):
for filename in filenames:
target_path = os.path.join(root, filename)
# Ignore files with certain extensions.
if not any(
target_path.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
                # On Windows the path uses Windows-style separators, while the
                # paths in the hash dict are in posix style, so we need to
                # convert it for the check below to run correctly.
relative_path = common.convert_to_posixpath(
os.path.relpath(target_path, staging_directory))
# Remove file found in staging directory but not in source
# directory, i.e. file not listed in hash dict.
if relative_path not in source_dir_hashes:
python_utils.PRINT(
'Unable to find %s in file hashes, deleting file'
% target_path)
task = threading.Thread(
target=safe_delete_file, args=(target_path,))
delete_tasks.append(task)
return delete_tasks
def get_recently_changed_filenames(source_dir_hashes, out_dir):
"""Compare hashes of source files and built files. Return a list of
filenames that were recently changed. Skips files that are not supposed to
built or already built.
Args:
source_dir_hashes: dict(str, str). Dictionary of hashes of files
to be built.
out_dir: str. Path relative to /oppia where built files are located.
Returns:
list(str). List of filenames expected to be re-hashed.
"""
# Hashes are created based on files' contents and are inserted between
# the filenames and their extensions,
    # e.g. base.240933e7564bd72a4dde42ee23260c5f.html.
# If a file gets edited, a different MD5 hash is generated.
recently_changed_filenames = []
# Currently, Python files and HTML files are always re-built.
file_extensions_not_to_track = ('.html', '.py',)
for filename, md5_hash in source_dir_hashes.items():
# Skip files that are already built or should not be built.
if should_file_be_built(filename) and not any(
filename.endswith(p) for p in file_extensions_not_to_track):
final_filepath = _insert_hash(
os.path.join(out_dir, filename), md5_hash)
if not os.path.isfile(final_filepath):
                # A filename with the provided hash cannot be found, so this
                # file has been changed or created since the last build.
recently_changed_filenames.append(filename)
if recently_changed_filenames:
python_utils.PRINT(
'The following files will be rebuilt due to recent changes: %s'
% recently_changed_filenames)
return recently_changed_filenames
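# Illustrative example of the check above (the filename and hash are
# hypothetical): for a source file 'pages/Base.js' whose hash is
# '240933e7564bd72a4dde42ee23260c5f', the expected built file is
# 'pages/Base.240933e7564bd72a4dde42ee23260c5f.js'; if that file is missing
# from out_dir, the source file is reported as recently changed.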
def generate_build_tasks_to_build_directory(dirnames_dict):
"""This function queues up build tasks to build all files in source
directory if there is no existing staging directory. Otherwise, selectively
queue up build tasks to build recently changed files.
Args:
dirnames_dict: dict(str, str). This dict should contain three keys,
with corresponding values as follows:
- 'dev_dir': the directory that contains source files to be built.
- 'staging_dir': the directory that contains minified files waiting
for final copy process.
- 'out_dir': the final directory that contains built files with hash
inserted into filenames.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
source_dir = dirnames_dict['dev_dir']
staging_dir = dirnames_dict['staging_dir']
out_dir = dirnames_dict['out_dir']
build_tasks = collections.deque()
if not os.path.isdir(staging_dir):
# If there is no staging dir, perform build process on all files.
python_utils.PRINT('Creating new %s folder' % staging_dir)
ensure_directory_exists(staging_dir)
build_tasks += generate_build_tasks_to_build_all_files_in_directory(
source_dir, staging_dir)
else:
# If staging dir exists, rebuild all HTML and Python files.
file_extensions_to_always_rebuild = ('.html', '.py',)
python_utils.PRINT(
'Staging dir exists, re-building all %s files'
% ', '.join(file_extensions_to_always_rebuild))
filenames_to_always_rebuild = get_filepaths_by_extensions(
source_dir, file_extensions_to_always_rebuild)
build_tasks += generate_build_tasks_to_build_files_from_filepaths(
source_dir, staging_dir, filenames_to_always_rebuild)
dev_dir_hashes = get_file_hashes(source_dir)
source_hashes = {}
source_hashes.update(dev_dir_hashes)
# Clean up files in staging directory that cannot be found in file
# hashes dictionary.
_execute_tasks(generate_delete_tasks_to_remove_deleted_files(
source_hashes, staging_dir))
python_utils.PRINT(
'Getting files that have changed between %s and %s'
% (source_dir, out_dir))
recently_changed_filenames = get_recently_changed_filenames(
dev_dir_hashes, out_dir)
if recently_changed_filenames:
python_utils.PRINT(
'Re-building recently changed files at %s' % source_dir)
build_tasks += generate_build_tasks_to_build_files_from_filepaths(
source_dir, staging_dir, recently_changed_filenames)
else:
python_utils.PRINT(
'No changes detected. Using previously built files.')
return build_tasks
def _verify_filepath_hash(relative_filepath, file_hashes):
"""Ensure that hashes in filepaths match with the hash entries in hash
dict.
Args:
relative_filepath: str. Filepath that is relative from /build.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Raises:
ValueError. The hash dict is empty.
        ValueError. The filepath has fewer than 2 partitions after splitting
            by the '.' delimiter.
        ValueError. The filename does not contain a hash.
KeyError. The filename's hash cannot be found in the hash dict.
"""
# Final filepath example:
# pages/base.240933e7564bd72a4dde42ee23260c5f.html.
if not file_hashes:
raise ValueError('Hash dict is empty')
filename_partitions = relative_filepath.split('.')
if len(filename_partitions) < 2:
raise ValueError('Filepath has less than 2 partitions after splitting')
hash_string_from_filename = filename_partitions[-2]
# Ensure hash string obtained from filename follows MD5 hash format.
if not re.search(r'([a-fA-F\d]{32})', relative_filepath):
if relative_filepath not in file_hashes:
return
raise ValueError(
'%s is expected to contain MD5 hash' % relative_filepath)
if hash_string_from_filename not in file_hashes.values():
raise KeyError(
'Hash from file named %s does not match hash dict values' %
relative_filepath)
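# Illustrative walk-through of the verification above, using the example
# filepath from the comment: splitting
# 'pages/base.240933e7564bd72a4dde42ee23260c5f.html' on '.' yields
# ['pages/base', '240933e7564bd72a4dde42ee23260c5f', 'html']; the second-to-
# last partition is treated as the hash and must appear among the values of
# the hash dict.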
def _verify_hashes(output_dirnames, file_hashes):
"""Verify a few metrics after build process finishes:
1) The hashes in filenames belongs to the hash dict.
2) hashes.json, third_party.min.css and third_party.min.js are built and
hashes are inserted.
Args:
output_dirnames: list(str). List of directory paths that contain
built files.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
"""
# Make sure that hashed file name matches with current hash dict.
for built_dir in output_dirnames:
for root, _, filenames in os.walk(built_dir):
for filename in filenames:
parent_dir = os.path.basename(root)
converted_filepath = os.path.join(
THIRD_PARTY_GENERATED_DEV_DIR, parent_dir, filename)
if hash_should_be_inserted(converted_filepath):
# Obtain the same filepath format as the hash dict's key.
relative_filepath = os.path.relpath(
os.path.join(root, filename), built_dir)
_verify_filepath_hash(relative_filepath, file_hashes)
hash_final_filename = _insert_hash(
HASHES_JSON_FILENAME, file_hashes[HASHES_JSON_FILENAME])
# The path in hashes.json (generated via file_hashes) file is in posix
# style, see the comment above HASHES_JSON_FILENAME for details.
third_party_js_final_filename = _insert_hash(
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH,
file_hashes[common.convert_to_posixpath(
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH)])
# The path in hashes.json (generated via file_hashes) file is in posix
# style, see the comment above HASHES_JSON_FILENAME for details.
third_party_css_final_filename = _insert_hash(
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH,
file_hashes[common.convert_to_posixpath(
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH)])
_ensure_files_exist([
os.path.join(ASSETS_OUT_DIR, hash_final_filename),
os.path.join(
THIRD_PARTY_GENERATED_OUT_DIR, third_party_js_final_filename),
os.path.join(
THIRD_PARTY_GENERATED_OUT_DIR, third_party_css_final_filename)])
def generate_hashes():
"""Generates hashes for files."""
# The keys for hashes are filepaths relative to the subfolders of the future
# /build folder. This is so that the replacing inside the HTML files works
# correctly.
hashes = dict()
# Create hashes for all directories and files.
hash_dirs = [
ASSETS_DEV_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['dev_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['dev_dir'],
THIRD_PARTY_GENERATED_DEV_DIR]
for hash_dir in hash_dirs:
hashes.update(get_file_hashes(hash_dir))
# Save hashes as JSON and write the JSON into JS file
# to make the hashes available to the frontend.
save_hashes_to_file(hashes)
# Update hash dict with newly created hashes.json.
hashes.update(
{HASHES_JSON_FILENAME: generate_md5_hash(HASHES_JSON_FILEPATH)})
# Make sure /assets/hashes.json is available to the frontend.
_ensure_files_exist([HASHES_JSON_FILEPATH])
return hashes
def generate_build_directory(hashes):
"""Generates hashes for files. Minifies files and interpolates paths
in HTMLs to include hashes. Renames the files to include hashes and copies
them into build directory.
"""
python_utils.PRINT('Building Oppia in production mode...')
build_tasks = collections.deque()
copy_tasks = collections.deque()
# Build files in /extensions and copy them into staging directory.
build_tasks += generate_build_tasks_to_build_directory(
EXTENSIONS_DIRNAMES_TO_DIRPATHS)
# Minify all template files and copy them into staging directory.
build_tasks += generate_build_tasks_to_build_directory(
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS)
_execute_tasks(build_tasks)
# Copy all files from staging directory to production directory.
copy_input_dirs = [
ASSETS_DEV_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['staging_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['staging_dir'],
THIRD_PARTY_GENERATED_DEV_DIR,
WEBPACK_DIRNAMES_TO_DIRPATHS['staging_dir']]
copy_output_dirs = [
ASSETS_OUT_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['out_dir'],
THIRD_PARTY_GENERATED_OUT_DIR, WEBPACK_DIRNAMES_TO_DIRPATHS['out_dir']]
assert len(copy_input_dirs) == len(copy_output_dirs)
for i in python_utils.RANGE(len(copy_input_dirs)):
safe_delete_directory_tree(copy_output_dirs[i])
copy_tasks += generate_copy_tasks_to_copy_from_source_to_target(
copy_input_dirs[i], copy_output_dirs[i], hashes)
_execute_tasks(copy_tasks)
_verify_hashes(copy_output_dirs, hashes)
source_dirs_for_assets = [ASSETS_DEV_DIR, THIRD_PARTY_GENERATED_DEV_DIR]
output_dirs_for_assets = [ASSETS_OUT_DIR, THIRD_PARTY_GENERATED_OUT_DIR]
_compare_file_count(source_dirs_for_assets, output_dirs_for_assets)
source_dirs_for_third_party = [THIRD_PARTY_GENERATED_DEV_DIR]
output_dirs_for_third_party = [THIRD_PARTY_GENERATED_OUT_DIR]
_compare_file_count(
source_dirs_for_third_party, output_dirs_for_third_party)
source_dirs_for_webpack = [WEBPACK_DIRNAMES_TO_DIRPATHS['staging_dir']]
output_dirs_for_webpack = [WEBPACK_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(
source_dirs_for_webpack, output_dirs_for_webpack)
source_dirs_for_extensions = [
EXTENSIONS_DIRNAMES_TO_DIRPATHS['dev_dir']]
output_dirs_for_extensions = [EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(source_dirs_for_extensions, output_dirs_for_extensions)
source_dirs_for_templates = [
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['dev_dir']]
output_dirs_for_templates = [
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(source_dirs_for_templates, output_dirs_for_templates)
python_utils.PRINT('Build completed.')
def main(args=None):
"""The main method of this script."""
options = _PARSER.parse_args(args=args)
if options.maintenance_mode and not options.prod_env:
raise Exception(
'maintenance_mode should only be enabled in prod build.')
# Regenerate /third_party/generated from scratch.
safe_delete_directory_tree(THIRD_PARTY_GENERATED_DEV_DIR)
build_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
# If minify_third_party_libs_only is set to True, skips the rest of the
# build process once third party libs are minified.
if options.minify_third_party_libs_only:
if options.prod_env:
minify_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
return
else:
raise Exception(
'minify_third_party_libs_only should not be '
'set in non-prod env.')
modify_constants(
prod_env=options.prod_env,
emulator_mode=not options.deploy_mode,
maintenance_mode=options.maintenance_mode)
if options.prod_env:
minify_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
hashes = generate_hashes()
if options.deparallelize_terser:
if options.source_maps:
raise Exception(
'source_maps flag shouldn\'t be used with '
'deparallelize_terser flag.')
build_using_webpack(WEBPACK_TERSER_CONFIG)
elif options.source_maps:
build_using_webpack(WEBPACK_PROD_SOURCE_MAPS_CONFIG)
else:
build_using_webpack(WEBPACK_PROD_CONFIG)
generate_app_yaml(
deploy_mode=options.deploy_mode,
maintenance_mode=options.maintenance_mode)
generate_build_directory(hashes)
save_hashes_to_file(dict())
# The 'no coverage' pragma is used as this line is un-testable. This is because
# it will only be called when build.py is used as a script.
if __name__ == '__main__': # pragma: no cover
main()
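# For reference, the options consumed above (prod_env,
# minify_third_party_libs_only, maintenance_mode, deploy_mode, source_maps,
# deparallelize_terser) come from _PARSER, which is defined elsewhere in this
# script. The overall flow of main() is: rebuild the third-party libs, update
# the constants, and, for prod builds, minify, hash, run webpack, generate
# app.yaml and assemble the build directory.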
|
test_unittest_tools.py
|
# ------------------------------------------------------------------------------
# Copyright (c) 2005-2013, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in /LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
# ------------------------------------------------------------------------------
import threading
import time
import warnings
import six
import six.moves as sm
from traits import _py2to3
from traits.testing.unittest_tools import unittest
from traits.api import (
Bool,
Event,
Float,
HasTraits,
Int,
List,
on_trait_change,
)
from traits.testing.api import UnittestTools
from traits.util.api import deprecated
@deprecated("This function is outdated. Use 'shiny' instead!")
def old_and_dull():
""" A deprecated function, for use in assertDeprecated tests.
"""
pass
class TestObject(HasTraits):
number = Float(2.0)
list_of_numbers = List(Float)
flag = Bool
@on_trait_change("number")
def _add_number_to_list(self, value):
self.list_of_numbers.append(value)
def add_to_number(self, value):
self.number += value
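# For reference, the change handler above means that assigning to ``number``
# also records the new value, e.g. (hypothetical session):
#   obj = TestObject()
#   obj.number = 5.0
#   obj.list_of_numbers  # -> [5.0]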
class UnittestToolsTestCase(unittest.TestCase, UnittestTools):
def setUp(self):
self.test_object = TestObject()
def test_when_using_with(self):
""" Check normal use cases as a context manager.
"""
test_object = self.test_object
# Change event should NOT BE detected
with self.assertTraitDoesNotChange(test_object, "number") as result:
test_object.flag = True
test_object.number = 2.0
msg = "The assertion result is not None: {0}".format(result.event)
self.assertIsNone(result.event, msg=msg)
# Change event should BE detected
with self.assertTraitChanges(test_object, "number") as result:
test_object.flag = False
test_object.number = 5.0
expected = (test_object, "number", 2.0, 5.0)
self.assertSequenceEqual(expected, result.event)
# Change event should BE detected exactly 2 times
with self.assertTraitChanges(test_object, "number", count=2) as result:
test_object.flag = False
test_object.number = 4.0
test_object.number = 3.0
expected = [
(test_object, "number", 5.0, 4.0),
(test_object, "number", 4.0, 3.0),
]
self.assertSequenceEqual(expected, result.events)
self.assertSequenceEqual(expected[-1], result.event)
# Change event should BE detected
with self.assertTraitChanges(test_object, "number") as result:
test_object.flag = True
test_object.add_to_number(10.0)
expected = (test_object, "number", 3.0, 13.0)
self.assertSequenceEqual(expected, result.event)
# Change event should BE detected exactly 3 times
with self.assertTraitChanges(test_object, "number", count=3) as result:
test_object.flag = True
test_object.add_to_number(10.0)
test_object.add_to_number(10.0)
test_object.add_to_number(10.0)
expected = [
(test_object, "number", 13.0, 23.0),
(test_object, "number", 23.0, 33.0),
(test_object, "number", 33.0, 43.0),
]
self.assertSequenceEqual(expected, result.events)
self.assertSequenceEqual(expected[-1], result.event)
def test_assert_multi_changes(self):
test_object = self.test_object
# Change event should NOT BE detected
with self.assertMultiTraitChanges(
[test_object], [], ["flag", "number", "list_of_numbers[]"]
) as results:
test_object.number = 2.0
events = list(filter(bool, (result.event for result in results)))
msg = "The assertion result is not None: {0}".format(", ".join(events))
self.assertFalse(events, msg=msg)
# Change event should BE detected
with self.assertMultiTraitChanges(
[test_object], ["number", "list_of_numbers[]"], ["flag"]
) as results:
test_object.number = 5.0
events = list(filter(bool, (result.event for result in results)))
msg = "The assertion result is None"
self.assertTrue(events, msg=msg)
def test_when_using_functions(self):
test_object = self.test_object
# Change event should BE detected
self.assertTraitChanges(
test_object, "number", 1, test_object.add_to_number, 13.0
)
# Change event should NOT BE detected
self.assertTraitDoesNotChange(
test_object, "flag", test_object.add_to_number, 13.0
)
def test_indirect_events(self):
""" Check catching indirect change events.
"""
test_object = self.test_object
# Change event should BE detected
with self.assertTraitChanges(
test_object, "list_of_numbers[]"
) as result:
test_object.flag = True
test_object.number = -3.0
expected = (test_object, "list_of_numbers_items", [], [-3.0])
self.assertSequenceEqual(expected, result.event)
def test_exception_inside_context(self):
""" Check that exception inside the context statement block are
propagated.
"""
test_object = self.test_object
with self.assertRaises(AttributeError):
with self.assertTraitChanges(test_object, "number"):
test_object.i_do_exist
with self.assertRaises(AttributeError):
with self.assertTraitDoesNotChange(test_object, "number"):
test_object.i_do_exist
def test_non_change_on_failure(self):
""" Check behaviour when assertion should be raised for non trait
change.
"""
test_object = self.test_object
traits = "flag, number"
with self.assertRaises(AssertionError):
with self.assertTraitDoesNotChange(test_object, traits) as result:
test_object.flag = True
test_object.number = -3.0
expected = [
(test_object, "flag", False, True),
(test_object, "number", 2.0, -3.0),
]
self.assertEqual(result.events, expected)
def test_change_on_failure(self):
""" Check behaviour when assertion should be raised for trait change.
"""
test_object = self.test_object
with self.assertRaises(AssertionError):
with self.assertTraitChanges(test_object, "number") as result:
test_object.flag = True
self.assertEqual(result.events, [])
# Change event will not be fired 3 times
with self.assertRaises(AssertionError):
with self.assertTraitChanges(
test_object, "number", count=3
) as result:
test_object.flag = True
test_object.add_to_number(10.0)
test_object.add_to_number(10.0)
expected = [
(test_object, "number", 2.0, 12.0),
(test_object, "number", 12.0, 22.0),
]
self.assertSequenceEqual(expected, result.events)
def test_asserts_in_context_block(self):
""" Make sure that the traits context manager does not stop
regular assertions inside the managed code block from happening.
"""
test_object = TestObject(number=16.0)
with self.assertTraitDoesNotChange(test_object, "number"):
self.assertEqual(test_object.number, 16.0)
with six.assertRaisesRegex(self, AssertionError, r"16\.0 != 12\.0"):
with self.assertTraitDoesNotChange(test_object, "number"):
self.assertEqual(test_object.number, 12.0)
def test_special_case_for_count(self):
""" Count equal to 0 should be valid but it is discouraged.
"""
test_object = TestObject(number=16.0)
with self.assertTraitChanges(test_object, "number", count=0):
test_object.flag = True
def test_assert_trait_changes_async(self):
# Exercise assertTraitChangesAsync.
thread_count = 10
events_per_thread = 1000
class A(HasTraits):
event = Event
a = A()
def thread_target(obj, count):
"Fire obj.event 'count' times."
for _ in sm.range(count):
obj.event = True
threads = [
threading.Thread(target=thread_target, args=(a, events_per_thread))
for _ in sm.range(thread_count)
]
expected_count = thread_count * events_per_thread
with self.assertTraitChangesAsync(
a, "event", expected_count, timeout=60.0
):
for t in threads:
t.start()
for t in threads:
t.join()
def test_assert_trait_changes_async_events(self):
# Check access to the events after the with
# block completes.
thread_count = 10
events_per_thread = 100
class A(HasTraits):
event = Event(Int)
a = A()
def thread_target(obj, count):
"Fire obj.event 'count' times."
for n in sm.range(count):
time.sleep(0.001)
obj.event = n
threads = [
threading.Thread(target=thread_target, args=(a, events_per_thread))
for _ in sm.range(thread_count)
]
expected_count = thread_count * events_per_thread
with self.assertTraitChangesAsync(
a, "event", expected_count, timeout=60.0
) as event_collector:
for t in threads:
t.start()
for t in threads:
t.join()
_py2to3.assertCountEqual(
self,
event_collector.events,
list(sm.range(events_per_thread)) * thread_count,
)
def test_assert_trait_changes_async_failure(self):
# Exercise assertTraitChangesAsync.
thread_count = 10
events_per_thread = 10000
class A(HasTraits):
event = Event
a = A()
def thread_target(obj, count):
"Fire obj.event 'count' times."
for _ in sm.range(count):
obj.event = True
threads = [
threading.Thread(target=thread_target, args=(a, events_per_thread))
for _ in sm.range(thread_count)
]
expected_count = thread_count * events_per_thread
with self.assertRaises(AssertionError):
with self.assertTraitChangesAsync(a, "event", expected_count + 1):
for t in threads:
t.start()
for t in threads:
t.join()
def test_assert_eventually_true_fails_on_timeout(self):
class A(HasTraits):
foo = Bool(False)
a = A()
def condition(a_object):
return a_object.foo
with self.assertRaises(self.failureException):
self.assertEventuallyTrue(
condition=condition, obj=a, trait="foo", timeout=1.0
)
def test_assert_eventually_true_passes_when_condition_becomes_true(self):
class A(HasTraits):
foo = Bool(False)
def condition(a_object):
return a_object.foo
a = A()
def thread_target(a):
time.sleep(1.0)
a.foo = True
t = threading.Thread(target=thread_target, args=(a,))
t.start()
self.assertEventuallyTrue(
condition=condition, obj=a, trait="foo", timeout=10.0
)
t.join()
def test_assert_eventually_true_passes_when_condition_starts_true(self):
class A(HasTraits):
foo = Bool(True)
def condition(a_object):
return a_object.foo
a = A()
self.assertEventuallyTrue(
condition=condition, obj=a, trait="foo", timeout=10.0
)
def test_assert_deprecated(self):
with self.assertDeprecated():
old_and_dull()
def test_assert_deprecated_failures(self):
with self.assertRaises(self.failureException):
with self.assertDeprecated():
pass
def test_assert_deprecated_when_warning_already_issued(self):
# Exercise a problematic case where previous calls to a function or
# method that issues a DeprecationWarning have already polluted the
# __warningregistry__. For this, we need a single call-point to
# old_and_dull, since distinct call-points have separate entries in
# __warningregistry__.
def old_and_dull_caller():
old_and_dull()
# Pollute the registry by pre-calling the function.
old_and_dull_caller()
# Check that we can still detect the DeprecationWarning.
with self.assertDeprecated():
old_and_dull_caller()
def test_assert_not_deprecated_failures(self):
with self.assertRaises(self.failureException):
with self.assertNotDeprecated():
old_and_dull()
def test_assert_not_deprecated(self):
with self.assertNotDeprecated():
pass
def test_assert_not_deprecated_when_warning_already_issued(self):
# Exercise a problematic case where previous calls to a function or
# method that issues a DeprecationWarning have already polluted the
# __warningregistry__. For this, we need a single call-point to
# old_and_dull, since distinct call-points have separate entries in
# __warningregistry__.
def old_and_dull_caller():
old_and_dull()
# Pollute the registry by pre-calling the function.
old_and_dull_caller()
# Check that we can still detect the DeprecationWarning.
with self.assertRaises(self.failureException):
with self.assertNotDeprecated():
old_and_dull_caller()
if __name__ == "__main__":
unittest.main()
|
soak_test_base.py
|
#!/usr/bin/python
"""
(C) Copyright 2019-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import os
import time
from datetime import datetime, timedelta
import multiprocessing
import threading
from apricot import TestWithServers
from general_utils import run_command, DaosTestError, get_log_file
import slurm_utils
from ClusterShell.NodeSet import NodeSet
from getpass import getuser
import socket
from agent_utils import include_local_host
from soak_utils import DDHHMMSS_format, add_pools, get_remote_logs, \
launch_snapshot, launch_exclude_reintegrate, \
create_ior_cmdline, cleanup_dfuse, create_fio_cmdline, \
build_job_script, SoakTestError, launch_server_stop_start, get_harassers, \
create_racer_cmdline, run_event_check, run_monitor_check
class SoakTestBase(TestWithServers):
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-instance-attributes
"""Execute DAOS Soak test cases.
:avocado: recursive
"""
def __init__(self, *args, **kwargs):
"""Initialize a SoakBase object."""
super().__init__(*args, **kwargs)
self.failed_job_id_list = None
self.test_log_dir = None
self.exclude_slurm_nodes = None
self.loop = None
self.log_dir = None
self.outputsoakdir = None
self.test_name = None
self.test_timeout = None
self.end_time = None
self.job_timeout = None
self.nodesperjob = None
self.taskspernode = None
self.soak_results = None
self.srun_params = None
self.harassers = None
self.harasser_results = None
self.all_failed_jobs = None
self.username = None
self.used = None
self.dfuse = []
self.harasser_args = None
self.harasser_loop_time = None
self.all_failed_harassers = None
self.soak_errors = None
self.check_errors = None
def setUp(self):
"""Define test setup to be done."""
self.log.info("<<setUp Started>> at %s", time.ctime())
super().setUp()
self.username = getuser()
# Initialize loop param for all tests
self.loop = 1
self.exclude_slurm_nodes = []
# Setup logging directories for soak logfiles
# self.output dir is an avocado directory .../data/
self.log_dir = get_log_file("soak")
self.outputsoakdir = self.outputdir + "/soak"
# Create the remote log directories on all client nodes
self.test_log_dir = self.log_dir + "/pass" + str(self.loop)
self.local_pass_dir = self.outputsoakdir + "/pass" + str(self.loop)
self.sharedlog_dir = self.tmp + "/soak"
self.sharedsoakdir = self.sharedlog_dir + "/pass" + str(self.loop)
# Initialize dmg cmd
self.dmg_command = self.get_dmg_command()
# Fail if slurm partition is not defined
# NOTE: Slurm reservation and partition are created before soak runs.
# CI uses partition=daos_client and no reservation.
# A21 uses partition=normal/default and reservation=daos-test.
# Partition and reservation names are updated in the yaml file.
# It is assumed that if there is no reservation (CI only), then all
# the nodes in the partition will be used for soak.
if not self.client_partition:
raise SoakTestError(
"<<FAILED: Partition is not correctly setup for daos "
"slurm partition>>")
self.srun_params = {"partition": self.client_partition}
if self.client_reservation:
self.srun_params["reservation"] = self.client_reservation
# Check if the server nodes are in the client list;
# this will happen when only one partition is specified
for host_server in self.hostlist_servers:
if host_server in self.hostlist_clients:
self.hostlist_clients.remove(host_server)
self.exclude_slurm_nodes.append(host_server)
# Include test node for log cleanup; remove from client list
local_host_list = include_local_host(None)
self.exclude_slurm_nodes.extend(local_host_list)
if local_host_list[0] in self.hostlist_clients:
self.hostlist_clients.remove((local_host_list[0]))
if not self.hostlist_clients:
self.fail(
"There are no valid nodes in this partition to run "
"soak. Check partition {} for valid nodes".format(
self.client_partition))
def pre_tear_down(self):
"""Tear down any test-specific steps prior to running tearDown().
Returns:
list: a list of error strings to report after all tear down
steps have been attempted
"""
self.log.info("<<preTearDown Started>> at %s", time.ctime())
errors = []
# clear out any jobs in squeue;
if self.failed_job_id_list:
job_id = " ".join([str(job) for job in self.failed_job_id_list])
self.log.info("<<Cancel jobs in queue with ids %s >>", job_id)
try:
run_command(
"scancel --partition {} -u {} {}".format(
self.client_partition, self.username, job_id))
except DaosTestError as error:
# Exception was raised due to a non-zero exit status
errors.append("Failed to cancel jobs {}: {}".format(
self.failed_job_id_list, error))
if self.all_failed_jobs:
errors.append("SOAK FAILED: The following jobs failed {} ".format(
" ,".join(str(j_id) for j_id in self.all_failed_jobs)))
if self.all_failed_harassers:
errors.extend(self.all_failed_harassers)
if self.soak_errors:
errors.extend(self.soak_errors)
if self.check_errors:
errors.extend(self.check_errors)
# Check if any dfuse mount points need to be cleaned
if self.dfuse:
try:
cleanup_dfuse(self)
except SoakTestError as error:
self.log.info("Dfuse cleanup failed with %s", error)
# daos_agent is always started on this node when start agent is false
if not self.setup_start_agents:
self.hostlist_clients = [socket.gethostname().split('.', 1)[0]]
for error in errors:
self.log.info("<<ERRORS: %s >>\n", error)
return errors
def launch_harasser(self, harasser, pool):
"""Launch any harasser tests if defined in yaml.
Args:
harasser (str): harasser to launch
pool (list): list of TestPool obj
Returns:
status_msg(str): pass/fail status message
"""
# Init the status message
status_msg = None
job = None
results = multiprocessing.Queue()
args = multiprocessing.Queue()
# Launch harasser
self.log.info("\n<<<Launch harasser %s>>>\n", harasser)
if harasser == "snapshot":
method = launch_snapshot
name = "SNAPSHOT"
params = (self, self.pool[0], name)
job = threading.Thread(target=method, args=params, name=name)
elif harasser == "exclude":
method = launch_exclude_reintegrate
name = "EXCLUDE"
params = (self, pool[1], name, results, args)
job = multiprocessing.Process(target=method, args=params, name=name)
elif harasser == "reintegrate":
method = launch_exclude_reintegrate
name = "REINTEGRATE"
params = (self, pool[1], name, results, args)
job = multiprocessing.Process(target=method, args=params, name=name)
elif harasser == "server-stop":
method = launch_server_stop_start
name = "SVR_STOP"
params = (self, pool, name, results, args)
job = multiprocessing.Process(target=method, args=params, name=name)
elif harasser == "server-reintegrate":
method = launch_server_stop_start
name = "SVR_REINTEGRATE"
params = (self, pool, name, results, args)
job = multiprocessing.Process(target=method, args=params, name=name)
else:
raise SoakTestError(
"<<FAILED: Harasser {} is not supported. ".format(
harasser))
# start harasser
job.start()
timeout = self.params.get("harasser_to", "/run/soak_harassers/*", 30)
# Wait for harasser job to join
job.join(timeout)
if job.is_alive():
self.log.error(
"<< ERROR: harasser %s is alive, failed to join>>", job.name)
if name not in ["REBUILD", "SNAPSHOT"]:
job.terminate()
status_msg = "<<FAILED: {} has been terminated.".format(name)
raise SoakTestError(
"<<FAILED: Soak failed while running {} . ".format(name))
if name not in ["REBUILD", "SNAPSHOT"]:
self.harasser_results = results.get()
self.harasser_args = args.get()
# Check if the completed job passed
self.log.info("Harasser results: %s", self.harasser_results)
self.log.info("Harasser args: %s", self.harasser_args)
if not self.harasser_results[name.upper()]:
status_msg = "<< HARASSER {} FAILED in pass {} at {}>> ".format(
name, self.loop, time.ctime())
self.log.error(status_msg)
return status_msg
def harasser_job_done(self, args):
"""Call this function when a job is done.
Args:
            args (dict): dictionary containing the name of the harasser
                ("name"), its completion status ("status") and the variables
                used by the harasser ("vars").
"""
self.harasser_results[args["name"]] = args["status"]
self.harasser_args[args["name"]] = args["vars"]
def job_setup(self, job, pool):
"""Create the cmdline needed to launch job.
Args:
job(str): single job from test params list of jobs to run
pool (obj): TestPool obj
Returns:
job_cmdlist: list cmdline that can be launched
by specified job manager
"""
job_cmdlist = []
commands = []
scripts = []
nodesperjob = []
self.log.info("<<Job_Setup %s >> at %s", self.test_name, time.ctime())
for npj in self.nodesperjob:
# nodesperjob = -1 indicates to use all nodes in client hostlist
if npj < 0:
npj = len(self.hostlist_clients)
if len(self.hostlist_clients)/npj < 1:
raise SoakTestError(
"<<FAILED: There are only {} client nodes for this job. "
"Job requires {}".format(
len(self.hostlist_clients), npj))
nodesperjob.append(npj)
if "ior" in job:
for npj in nodesperjob:
for ppn in self.taskspernode:
commands = create_ior_cmdline(self, job, pool, ppn, npj)
# scripts are single cmdline
scripts = build_job_script(self, commands, job, npj)
job_cmdlist.extend(scripts)
elif "fio" in job:
commands = create_fio_cmdline(self, job, pool)
# scripts are single cmdline
scripts = build_job_script(self, commands, job, 1)
job_cmdlist.extend(scripts)
elif "daos_racer" in job:
self.add_cancel_ticket("DAOS-6938", "daos_racer pool connect")
# Uncomment the following when DAOS-6938 is fixed
# commands = create_racer_cmdline(self, job, pool)
# # scripts are single cmdline
# scripts = build_job_script(self, commands, job, 1)
job_cmdlist.extend(scripts)
else:
raise SoakTestError(
"<<FAILED: Job {} is not supported. ".format(
self.job))
return job_cmdlist
def job_startup(self, job_cmdlist):
"""Submit job batch script.
Args:
job_cmdlist (list): list of jobs to execute
Returns:
job_id_list: IDs of each job submitted to slurm.
"""
self.log.info(
"<<Job Startup - %s >> at %s", self.test_name, time.ctime())
job_id_list = []
# before submitting the jobs to the queue, check the job timeout;
if time.time() > self.end_time:
self.log.info("<< SOAK test timeout in Job Startup>>")
return job_id_list
# job_cmdlist is a list of batch script files
for script in job_cmdlist:
try:
job_id = slurm_utils.run_slurm_script(str(script))
except slurm_utils.SlurmFailed as error:
self.log.error(error)
# Force the test to exit with failure
job_id = None
if job_id:
self.log.info(
"<<Job %s started with %s >> at %s",
job_id, script, time.ctime())
slurm_utils.register_for_job_results(
job_id, self, maxwait=self.test_timeout)
# keep a list of the job_id's
job_id_list.append(int(job_id))
else:
# one of the jobs failed to queue; exit on first fail for now.
err_msg = "Slurm failed to submit job for {}".format(script)
job_id_list = []
raise SoakTestError(
"<<FAILED: Soak {}: {}>>".format(self.test_name, err_msg))
return job_id_list
def job_completion(self, job_id_list):
"""Wait for job completion and cleanup.
Args:
job_id_list: IDs of each job submitted to slurm
Returns:
failed_job_id_list: IDs of each job that failed in slurm
"""
self.log.info(
"<<Job Completion - %s >> at %s", self.test_name, time.ctime())
harasser_interval = 0
failed_harasser_msg = None
harasser_timer = time.time()
check_time = datetime.now()
event_check_messages = []
since = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# loop time exists after the first pass; no harassers in the first pass
if self.harasser_loop_time and self.harassers:
harasser_interval = self.harasser_loop_time / (
len(self.harassers) + 1)
# If there is nothing to do; exit
if job_id_list:
# wait for all the jobs to finish
while len(self.soak_results) < len(job_id_list):
# wait for the jobs to complete.
# enter tearDown before hitting the avocado timeout
if time.time() > self.end_time:
self.log.info(
"<< SOAK test timeout in Job Completion at %s >>",
time.ctime())
for job in job_id_list:
_ = slurm_utils.cancel_jobs(int(job))
# monitor events every 15 min
if datetime.now() > check_time:
run_monitor_check(self)
check_time = datetime.now() + timedelta(minutes=15)
# launch harassers if enabled;
# one harasser at a time starting on pass2
if self.harassers:
if self.loop >= 2 and (
time.time() > (harasser_timer + harasser_interval)):
harasser = self.harassers.pop(0)
harasser_timer += harasser_interval
failed_harasser_msg = self.launch_harasser(
harasser, self.pool)
time.sleep(5)
# check journalctl for events;
until = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
event_check_messages = run_event_check(self, since, until)
self.check_errors.extend(event_check_messages)
run_monitor_check(self)
# init harasser list when all jobs are done
self.harassers = []
if failed_harasser_msg is not None:
self.all_failed_harassers.append(failed_harasser_msg)
# check for JobStatus = COMPLETED or CANCELLED (i.e. TEST TO)
for job, result in list(self.soak_results.items()):
if result in ["COMPLETED", "CANCELLED"]:
job_id_list.remove(int(job))
else:
self.log.info(
"<< Job %s failed with status %s>>", job, result)
# gather all the logfiles for this pass and cleanup test nodes
try:
get_remote_logs(self)
except SoakTestError as error:
self.log.info("Remote copy failed with %s", error)
self.soak_results = {}
return job_id_list
def job_done(self, args):
"""Call this function when a job is done.
Args:
            args (dict): dictionary containing the job ID ("handle") and a
                string indicating the job completion status ("state").
"""
self.soak_results[args["handle"]] = args["state"]
def execute_jobs(self, jobs, pools):
"""Execute the overall soak test.
        Args:
            jobs (list): list of jobs to run, as specified in the test yaml.
            pools (list): list of TestPool obj - self.pool[1:]
Raise:
SoakTestError
"""
cmdlist = []
# unique numbers per pass
self.used = []
# Update the remote log directories from new loop/pass
self.sharedsoakdir = self.sharedlog_dir + "/pass" + str(self.loop)
self.test_log_dir = self.log_dir + "/pass" + str(self.loop)
local_pass_dir = self.outputsoakdir + "/pass" + str(self.loop)
result = slurm_utils.srun(
NodeSet.fromlist(self.hostlist_clients), "mkdir -p {}".format(
self.test_log_dir), self.srun_params)
if result.exit_status > 0:
raise SoakTestError(
"<<FAILED: logfile directory not"
"created on clients>>: {}".format(self.hostlist_clients))
# Create local log directory
os.makedirs(local_pass_dir)
os.makedirs(self.sharedsoakdir)
# Setup cmdlines for job with specified pool
# if len(pools) < len(jobs):
# raise SoakTestError(
# "<<FAILED: There are not enough pools to run this test>>")
# for index, job in enumerate(jobs):
# cmdlist.extend(self.job_setup(job, pools))
for job in jobs:
cmdlist.extend(self.job_setup(job, pools))
# Gather the job_ids
job_id_list = self.job_startup(cmdlist)
# Initialize the failed_job_list to job_list so that any
# unexpected failures will clear the squeue in tearDown
self.failed_job_id_list = job_id_list
# Wait for jobs to finish and cancel/kill jobs if necessary
self.failed_job_id_list = self.job_completion(job_id_list)
# Log the failing job ID
if self.failed_job_id_list:
self.log.info(
"<<FAILED: The following jobs failed %s >>", (" ,".join(
str(j_id) for j_id in self.failed_job_id_list)))
# accumulate failing job IDs
self.all_failed_jobs.extend(self.failed_job_id_list)
# clear out the failed jobs for this pass
self.failed_job_id_list = []
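    # Overall flow of a single soak pass, as implemented above: create the
    # per-pass log directories, build the batch scripts (job_setup), submit
    # them to slurm (job_startup), then wait for completion while launching
    # harassers and monitoring events (job_completion); any jobs that did not
    # complete successfully are accumulated in all_failed_jobs.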
def run_soak(self, test_param):
"""Run the soak test specified by the test params.
Args:
test_param (str): test_params from yaml file
"""
self.soak_results = {}
self.pool = []
self.container = []
self.harasser_results = {}
self.harasser_args = {}
run_harasser = False
self.all_failed_jobs = []
self.all_failed_harassers = []
self.soak_errors = []
self.check_errors = []
test_to = self.params.get("test_timeout", test_param + "*")
self.job_timeout = self.params.get("job_timeout", test_param + "*")
self.test_name = self.params.get("name", test_param + "*")
self.nodesperjob = self.params.get("nodesperjob", test_param + "*")
self.taskspernode = self.params.get("taskspernode", test_param + "*")
single_test_pool = self.params.get(
"single_test_pool", test_param + "*", True)
self.dmg_command.copy_certificates(
get_log_file("daosCA/certs"), self.hostlist_clients)
self.dmg_command.copy_configuration(self.hostlist_clients)
harassers = self.params.get("harasserlist", test_param + "*")
job_list = self.params.get("joblist", test_param + "*")
rank = self.params.get("rank", "/run/container_reserved/*")
obj_class = self.params.get("oclass", "/run/container_reserved/*")
if harassers:
harasserlist = get_harassers(harassers)
self.harassers = harasserlist[:]
run_harasser = True
self.log.info("<< Initial harrasser list = %s>>", " ".join(
self.harassers))
# Create the reserved pool with data
# self.pool is a list of all the pools used in soak
# self.pool[0] will always be the reserved pool
add_pools(self, ["pool_reserved"])
self.pool[0].connect()
# Create the container and populate with a known data
# TO-DO: use IOR to write and later read-verify the data
resv_cont = self.get_container(
self.pool[0], "/run/container_reserved/*", True)
resv_cont.write_objects(rank, obj_class)
# Create pool for jobs
if single_test_pool:
add_pools(self, ["pool_jobs"])
self.log.info(
"Current pools: %s",
" ".join([pool.uuid for pool in self.pool]))
# cleanup soak log directories before test on all nodes
result = slurm_utils.srun(
NodeSet.fromlist(self.hostlist_clients), "rm -rf {}".format(
self.log_dir), self.srun_params)
if result.exit_status > 0:
raise SoakTestError(
"<<FAILED: Soak directories not removed"
"from clients>>: {}".format(self.hostlist_clients))
# cleanup test_node
for log_dir in [self.log_dir, self.sharedlog_dir]:
cmd = "rm -rf {}".format(log_dir)
try:
result = run_command(cmd, timeout=30)
except DaosTestError as error:
raise SoakTestError(
"<<FAILED: Soak directory {} was not removed>>".format(
log_dir)) from error
# Initialize time
start_time = time.time()
self.test_timeout = int(3600 * test_to)
self.end_time = start_time + self.test_timeout
self.log.info("<<START %s >> at %s", self.test_name, time.ctime())
while time.time() < self.end_time:
# Start new pass
start_loop_time = time.time()
self.log.info(
"<<SOAK LOOP %s: time until done %s>>", self.loop,
DDHHMMSS_format(self.end_time - time.time()))
if not single_test_pool:
# Create pool for jobs
add_pools(self, ["pool_jobs"])
self.log.info(
"Current pools: %s",
" ".join([pool.uuid for pool in self.pool]))
# Initialize if harassers
if run_harasser and not self.harassers:
self.harasser_results = {}
self.harasser_args = {}
self.harassers = harasserlist[:]
try:
self.execute_jobs(job_list, self.pool[1])
except SoakTestError as error:
self.fail(error)
# Check space after jobs done
for pool in self.pool:
self.dmg_command.pool_query(pool.uuid)
self.soak_errors.extend(self.destroy_containers(self.container))
self.container = []
# remove the test pools from self.pool; preserving reserved pool
if not single_test_pool:
self.soak_errors.extend(self.destroy_pools(self.pool[1]))
self.pool = [self.pool[0]]
self.log.info(
"Current pools: %s",
" ".join([pool.uuid for pool in self.pool]))
# fail if the pool/containers did not clean up correctly
self.assertEqual(
len(self.soak_errors), 0, "\n".join(self.soak_errors))
# Break out of loop if smoke
if "smoke" in self.test_name:
break
loop_time = time.time() - start_loop_time
self.log.info(
"<<LOOP %s completed in %s at %s>>", self.loop, DDHHMMSS_format(
loop_time), time.ctime())
# Initialize harasser loop time from first pass loop time
if self.loop == 1 and run_harasser:
self.harasser_loop_time = loop_time
self.loop += 1
# TO-DO: use IOR
if not resv_cont.read_objects():
self.soak_errors.append("Data verification error on reserved pool"
" after SOAK completed")
self.container.append(resv_cont)
# gather the daos logs from the client nodes
self.log.info(
"<<<<SOAK TOTAL TEST TIME = %s>>>>", DDHHMMSS_format(
time.time() - start_time))
explorations.py
# explorations.py
# Imports
import os
import csv
import math
import copy
import logging
import operator
import datetime
from functools import reduce
from itertools import combinations
from collections import defaultdict, Counter, OrderedDict
from typing import Dict, List, Tuple, Generator, Optional, DefaultDict
import h5py
import numpy as np
import pandas as pd
import multiprocessing as mp
from tensorflow.keras.models import Model
import matplotlib
matplotlib.use('Agg') # Need this to write images from the GSA servers. Order matters:
import matplotlib.pyplot as plt # First import matplotlib, then use Agg, then import plt
from ml4h.models.legacy_models import make_multimodal_multitask_model
from ml4h.TensorMap import TensorMap, Interpretation, decompress_data
from ml4h.tensor_generators import TensorGenerator, test_train_valid_tensor_generators
from ml4h.tensor_generators import BATCH_INPUT_INDEX, BATCH_OUTPUT_INDEX, BATCH_PATHS_INDEX
from ml4h.plots import plot_histograms_in_pdf, plot_heatmap, SUBPLOT_SIZE
from ml4h.plots import evaluate_predictions, subplot_rocs, subplot_scatters, plot_categorical_tmap_over_time
from ml4h.defines import JOIN_CHAR, MRI_SEGMENTED_CHANNEL_MAP, CODING_VALUES_MISSING, CODING_VALUES_LESS_THAN_ONE
from ml4h.defines import TENSOR_EXT, IMAGE_EXT, ECG_CHAR_2_IDX, ECG_IDX_2_CHAR, PARTNERS_CHAR_2_IDX, PARTNERS_IDX_2_CHAR, PARTNERS_READ_TEXT
CSV_EXT = '.tsv'
def predictions_to_pngs(
predictions: np.ndarray, tensor_maps_in: List[TensorMap], tensor_maps_out: List[TensorMap], data: Dict[str, np.ndarray],
labels: Dict[str, np.ndarray], paths: List[str], folder: str,
) -> None:
# TODO Remove this command line order dependency
input_map = tensor_maps_in[0]
if not os.path.exists(folder):
os.makedirs(folder)
_save_tensor_map_tensors_as_pngs(tensor_maps_in, data, paths, folder)
for y, tm in zip(predictions, tensor_maps_out):
if not isinstance(predictions, list): # When models have a single output model.predict returns a ndarray otherwise it returns a list
y = predictions
for im in tensor_maps_in:
if tm.is_categorical() and im.dependent_map == tm:
input_map = im
elif tm.shape == im.shape:
input_map = im
logging.info(f"Write predictions as PNGs TensorMap:{tm.name}, y shape:{y.shape} labels:{labels[tm.output_name()].shape} folder:{folder}")
if tm.is_mesh():
vmin = np.min(data[input_map.input_name()])
vmax = np.max(data[input_map.input_name()])
for i in range(y.shape[0]):
sample_id = os.path.basename(paths[i]).replace(TENSOR_EXT, '')
if input_map.axes() == 4 and input_map.shape[-1] == 1:
sample_data = data[input_map.input_name()][i, ..., 0]
cols = max(2, int(math.ceil(math.sqrt(sample_data.shape[-1]))))
rows = max(2, int(math.ceil(sample_data.shape[-1] / cols)))
path_prefix = f'{folder}{sample_id}_bbox_batch_{i:02d}{IMAGE_EXT}'
logging.info(f"sample_data shape: {sample_data.shape} cols {cols}, {rows} Predicted BBox: {y[i]}, True BBox: {labels[tm.output_name()][i]} Vmin {vmin} Vmax{vmax}")
_plot_3d_tensor_slices_as_gray(sample_data, path_prefix, cols, rows, bboxes=[labels[tm.output_name()][i], y[i]])
else:
fig, ax = plt.subplots(1)
if input_map.axes() == 3 and input_map.shape[-1] == 1:
ax.imshow(data[input_map.input_name()][i, :, :, 0], cmap='gray', vmin=vmin, vmax=vmax)
elif input_map.axes() == 2:
ax.imshow(data[input_map.input_name()][i, :, :], cmap='gray', vmin=vmin, vmax=vmax)
corner, width, height = _2d_bbox_to_corner_and_size(labels[tm.output_name()][i])
ax.add_patch(matplotlib.patches.Rectangle(corner, width, height, linewidth=1, edgecolor='g', facecolor='none'))
y_corner, y_width, y_height = _2d_bbox_to_corner_and_size(y[i])
ax.add_patch(matplotlib.patches.Rectangle(y_corner, y_width, y_height, linewidth=1, edgecolor='y', facecolor='none'))
logging.info(f"True BBox: {corner}, {width}, {height} Predicted BBox: {y_corner}, {y_width}, {y_height} Vmin {vmin} Vmax{vmax}")
plt.savefig(f"{folder}{sample_id}_bbox_batch_{i:02d}{IMAGE_EXT}")
elif tm.axes() == 2:
fig = plt.figure(figsize=(SUBPLOT_SIZE, SUBPLOT_SIZE * 3))
for i in range(y.shape[0]):
sample_id = os.path.basename(paths[i]).replace(TENSOR_EXT, '')
title = f'{tm.name}_{sample_id}_reconstruction'
for j in range(tm.shape[1]):
plt.subplot(tm.shape[1], 1, j + 1)
plt.plot(labels[tm.output_name()][i, :, j], c='k', linestyle='--', label='original')
plt.plot(y[i, :, j], c='b', label='reconstruction')
if j == 0:
plt.title(title)
plt.legend()
plt.tight_layout()
plt.savefig(os.path.join(folder, title + IMAGE_EXT))
plt.clf()
elif len(tm.shape) == 3:
for i in range(y.shape[0]):
sample_id = os.path.basename(paths[i]).replace(TENSOR_EXT, '')
if tm.is_categorical():
plt.imsave(f"{folder}{sample_id}_{tm.name}_truth_{i:02d}{IMAGE_EXT}", np.argmax(labels[tm.output_name()][i], axis=-1), cmap='plasma')
plt.imsave(f"{folder}{sample_id}_{tm.name}_prediction_{i:02d}{IMAGE_EXT}", np.argmax(y[i], axis=-1), cmap='plasma')
else:
plt.imsave(f'{folder}{sample_id}_{tm.name}_truth_{i:02d}{IMAGE_EXT}', labels[tm.output_name()][i, :, :, 0], cmap='gray')
plt.imsave(f'{folder}{sample_id}_{tm.name}_prediction_{i:02d}{IMAGE_EXT}', y[i, :, :, 0], cmap='gray')
elif len(tm.shape) == 4:
for i in range(y.shape[0]):
sample_id = os.path.basename(paths[i]).replace(TENSOR_EXT, '')
for j in range(y.shape[3]):
image_path_base = f'{folder}{sample_id}_{tm.name}_{i:03d}_{j:03d}'
if tm.is_categorical():
truth = np.argmax(labels[tm.output_name()][i, :, :, j, :], axis=-1)
prediction = np.argmax(y[i, :, :, j, :], axis=-1)
plt.imsave(f'{image_path_base}_truth{IMAGE_EXT}', truth, cmap='plasma')
plt.imsave(f'{image_path_base}_prediction{IMAGE_EXT}', prediction, cmap='plasma')
else:
plt.imsave(f'{image_path_base}_truth{IMAGE_EXT}', labels[tm.output_name()][i, :, :, j, 0], cmap='gray')
plt.imsave(f'{image_path_base}_prediction{IMAGE_EXT}', y[i, :, :, j, 0], cmap='gray')
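# Hypothetical usage sketch for predictions_to_pngs (variable names are assumptions, not defined in this module):
#   predictions = model.predict(test_data, batch_size=batch_size)
#   predictions_to_pngs(predictions, tensor_maps_in, tensor_maps_out, test_data,
#                       test_labels, test_paths, folder=os.path.join(output_folder, 'predictions/'))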
def _save_tensor_map_tensors_as_pngs(tensor_maps_in: List[TensorMap], data: Dict[str, np.ndarray], paths, folder):
for tm in tensor_maps_in:
tensor = data[tm.input_name()]
for i in range(tensor.shape[0]):
sample_id = os.path.basename(paths[i]).replace(TENSOR_EXT, '')
if len(tm.shape) not in [3, 4]:
continue
for j in range(tensor.shape[3]):
if len(tm.shape) == 3:
plt.imsave(f"{folder}{sample_id}_input_{tm.name}_{i:02d}_{j:02d}{IMAGE_EXT}", tensor[i, :, :, j], cmap='gray')
elif len(tm.shape) == 4:
plt.imsave(f"{folder}{sample_id}_input_{tm.name}_{i:02d}_{j:02d}{IMAGE_EXT}", tensor[i, :, :, j, 0], cmap='gray')
def plot_while_learning(
model, tensor_maps_in: List[TensorMap], tensor_maps_out: List[TensorMap],
generate_train: Generator[Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray], Optional[List[str]]], None, None],
test_data: Dict[str, np.ndarray], test_labels: Dict[str, np.ndarray], test_paths: List[str], epochs: int, batch_size: int,
training_steps: int, folder: str, write_pngs: bool,
):
if not os.path.exists(folder):
os.makedirs(folder)
for i in range(epochs):
rocs = []
scatters = []
predictions = model.predict(test_data, batch_size=batch_size)
if len(tensor_maps_out) == 1:
predictions = [predictions]
for y, tm in zip(predictions, tensor_maps_out):
for im in tensor_maps_in:
if im.dependent_map == tm:
break
if not write_pngs:
mri_in = test_data[im.input_name()]
vmin = np.min(mri_in)
vmax = np.max(mri_in)
logging.info(f"epoch:{i} write segmented mris y shape:{y.shape} label shape:{test_labels[tm.output_name()].shape} to folder:{folder}")
if tm.is_categorical() and len(tm.shape) == 3:
for yi in range(y.shape[0]):
plt.imsave(f"{folder}batch_{yi}_truth_epoch_{i:03d}{IMAGE_EXT}", np.argmax(test_labels[tm.output_name()][yi], axis=-1), cmap='gray')
plt.imsave(f"{folder}batch_{yi}_prediction_epoch_{i:03d}{IMAGE_EXT}", np.argmax(y[yi], axis=-1), cmap='gray')
plt.imsave(f"{folder}batch_{yi}_mri_epoch_{i:03d}{IMAGE_EXT}", mri_in[yi, :, :, 0], cmap='gray', vmin=vmin, vmax=vmax)
elif tm.is_categorical() and len(tm.shape) == 4:
for yi in range(y.shape[0]):
for j in range(y.shape[3]):
truth = np.argmax(test_labels[tm.output_name()][yi, :, :, j, :], axis=-1)
prediction = np.argmax(y[yi, :, :, j, :], axis=-1)
true_donut = np.ma.masked_where(truth == 2, mri_in[yi, :, :, j, 0])
predict_donut = np.ma.masked_where(prediction == 2, mri_in[yi, :, :, j, 0])
plt.imsave(f"{folder}batch_{yi}_slice_{j:03d}_prediction_epoch_{i:03d}{IMAGE_EXT}", prediction, cmap='gray')
plt.imsave(f"{folder}batch_{yi}_slice_{j:03d}_p_donut_epoch_{i:03d}{IMAGE_EXT}", predict_donut, cmap='gray', vmin=vmin, vmax=vmax)
if i == 0:
plt.imsave(f"{folder}batch_{yi}_slice_{j:03d}_truth_epoch_{i:03d}{IMAGE_EXT}", truth, cmap='gray')
plt.imsave(f"{folder}batch_{yi}_slice_{j:03d}_t_donut_epoch_{i:03d}{IMAGE_EXT}", true_donut, cmap='gray', vmin=vmin, vmax=vmax)
plt.imsave(f"{folder}batch_{yi}_slice_{j:03d}_mri_epoch_{i:03d}{IMAGE_EXT}", mri_in[yi, :, :, j, 0], cmap='gray', vmin=vmin, vmax=vmax)
else:
logging.warning('Not writing PNGs')
elif write_pngs:
if len(tensor_maps_out) == 1:
y = predictions[0]
evaluate_predictions(tm, y, test_labels[tm.output_name()], f"{tm.name}_epoch_{i:03d}", folder, test_paths, test_labels, rocs=rocs, scatters=scatters)
if len(rocs) > 1:
subplot_rocs(rocs, folder+f"epoch_{i:03d}_")
if len(scatters) > 1:
subplot_scatters(scatters, folder+f"epoch_{i:03d}_")
model.fit_generator(generate_train, steps_per_epoch=training_steps, epochs=1, verbose=1)
def plot_histograms_of_tensors_in_pdf(
run_id: str,
tensor_folder: str,
output_folder: str,
max_samples: int = None,
) -> None:
"""
:param run_id: name for the plotting run
:param tensor_folder: directory with tensor files to plot histograms from
:param output_folder: folder containing the output plot
:param max_samples: specifies how many tensor files to down-sample from; by default all tensors are used
"""
stats, num_tensor_files = _collect_continuous_stats_from_tensor_files(tensor_folder, max_samples)
logging.info(f"Collected continuous stats for {len(stats)} fields. Now plotting histograms of them...")
plot_histograms_in_pdf(stats, num_tensor_files, run_id, output_folder)
def plot_heatmap_of_tensors(
id: str,
tensor_folder: str,
output_folder: str,
min_samples: int,
max_samples: int = None,
) -> None:
"""
:param id: name for the plotting run
:param tensor_folder: directory with tensor files to plot histograms from
:param output_folder: folder containing the output plot
:param min_samples: calculate correlation coefficient only if both fields have values from that many common samples
:param max_samples: specifies how many tensor files to down-sample from; by default all tensors are used
"""
stats, _ = _collect_continuous_stats_from_tensor_files(tensor_folder, max_samples, ['0'], 0)
logging.info(f"Collected continuous stats for {len(stats)} fields. Now plotting a heatmap of their correlations...")
plot_heatmap(stats, id, min_samples, output_folder)
def tabulate_correlations_of_tensors(
run_id: str,
tensor_folder: str,
output_folder: str,
min_samples: int,
max_samples: int = None,
) -> None:
"""
:param run_id: name for the plotting run
:param tensor_folder: directory with tensor files to plot histograms from
:param output_folder: folder containing the output plot
:param min_samples: calculate correlation coefficient only if both fields have values from that many common samples
:param max_samples: specifies how many tensor files to down-sample from; by default all tensors are used
"""
stats, _ = _collect_continuous_stats_from_tensor_files(tensor_folder, max_samples)
logging.info(f"Collected continuous stats for {len(stats)} fields. Now tabulating their cross-correlations...")
_tabulate_correlations(stats, run_id, min_samples, output_folder)
def mri_dates(tensors: str, output_folder: str, run_id: str):
incident_dates = []
prevalent_dates = []
disease = 'hypertension'
disease_date_key = disease + '_date'
data_date_key = 'assessment-date_0_0'
tensor_paths = [tensors + tp for tp in os.listdir(tensors) if os.path.splitext(tp)[-1].lower() == TENSOR_EXT]
for tp in tensor_paths:
try:
with h5py.File(tp, 'r') as hd5:
if data_date_key in hd5 and disease_date_key in hd5:
if int(hd5[disease][0]) == 1:
data_date = str2date(str(hd5[data_date_key][0]))
disease_date = str2date(str(hd5[disease_date_key][0]))
if data_date < disease_date:
incident_dates.append(disease_date)
else:
prevalent_dates.append(disease_date)
except Exception:
logging.exception(f"Broken tensor at:{tp}")
plt.figure(figsize=(12, 12))
plt.xlabel(data_date_key)
plt.hist(incident_dates, bins=60)
plt.savefig(os.path.join(output_folder, run_id, disease+'_'+data_date_key+'_incident'+IMAGE_EXT))
plt.figure(figsize=(12,12))
plt.xlabel(data_date_key)
plt.hist(prevalent_dates, bins=60)
plt.savefig(os.path.join(output_folder, run_id, disease+'_'+data_date_key+'_prevalent'+IMAGE_EXT))
def ecg_dates(tensors: str, output_folder: str, run_id: str):
incident_dates = []
prevalent_dates = []
tensor_paths = [tensors + tp for tp in os.listdir(tensors) if os.path.splitext(tp)[-1].lower()==TENSOR_EXT]
for tp in tensor_paths:
try:
with h5py.File(tp, 'r') as hd5:
if 'ecg_bike_date_0' in hd5 and 'coronary_artery_disease_soft_date' in hd5:
ecg_date = str2date(str(hd5['ecg_bike_date_0'][0]))
cad_date = str2date(str(hd5['coronary_artery_disease_soft_date'][0]))
if ecg_date < cad_date:
incident_dates.append(ecg_date)
else:
prevalent_dates.append(ecg_date)
except Exception:
logging.exception(f"Broken tensor at:{tp}")
plt.figure(figsize=(12, 12))
plt.xlabel('ECG Acquisition Date')
plt.hist(incident_dates, bins=60)
plt.savefig(os.path.join(output_folder, run_id, 'ecg_dates_incident'+IMAGE_EXT))
plt.figure(figsize=(12, 12))
plt.xlabel('ECG Acquisition Date')
plt.hist(prevalent_dates, bins=60)
plt.savefig(os.path.join(output_folder, run_id, 'ecg_dates_prevalent'+IMAGE_EXT))
def str2date(d):
parts = d.split('-')
if len(parts) < 3:
return datetime.datetime.now().date()
return datetime.date(int(parts[0]), int(parts[1]), int(parts[2]))
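# e.g. str2date('2015-03-21') -> datetime.date(2015, 3, 21); strings without a full
# year-month-day fall back to today's date.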
def sample_from_language_model(
language_input: TensorMap, language_output: TensorMap,
model, test_data, max_samples=16, heat=0.7,
):
burn_in = np.zeros((1,) + language_input.shape, dtype=np.float32)
index_2_token = {v: k for k, v in language_output.channel_map.items()}
for i in range(min(max_samples, test_data[language_input.input_name()].shape[0])): # iterate over the batch
burn_in[0] = test_data[language_input.input_name()][i]
sentence = ''.join([index_2_token[np.argmax(one_hot)] for one_hot in burn_in[0]])
logging.info(f' Batch sentence start:{sentence} ------- {i}')
for j in range(max_samples):
burn_in = np.zeros((1,) + language_input.shape, dtype=np.float32)
for k, c in enumerate(sentence[j:]):
burn_in[0, k, language_output.channel_map[c]] = 1.0
cur_test = {language_input.input_name(): burn_in}
prediction = model.predict(cur_test)
next_token = index_2_token[_sample_with_heat(prediction[0, :], heat)]
sentence += next_token
logging.info(f'Model completed sentence:{sentence}')
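# Illustrative sketch (not part of the original module; relies on the numpy and typing
# imports above): how a trailing window of characters could be re-encoded as the one-hot
# burn_in tensor consumed by the sampling loop in sample_from_language_model. The
# sentence, channel map and window size are arbitrary assumptions used only for demonstration.
def _example_one_hot_burn_in(sentence: str, channel_map: Dict[str, int], window_size: int) -> np.ndarray:
    # shape: (batch of 1, window positions, alphabet size)
    burn_in = np.zeros((1, window_size, len(channel_map)), dtype=np.float32)
    for k, c in enumerate(sentence[-window_size:]):
        burn_in[0, k, channel_map[c]] = 1.0
    return burn_in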
def sample_from_char_embed_model(tensor_maps_in: List[TensorMap], char_model: Model, test_batch: Dict[str, np.ndarray], test_paths: List[str]) -> None:
for tm in tensor_maps_in:
if tm.interpretation == Interpretation.LANGUAGE:
language_map = tm
if PARTNERS_READ_TEXT in tm.name:
index_map = PARTNERS_IDX_2_CHAR
char_map = PARTNERS_CHAR_2_IDX
else:
index_map = ECG_IDX_2_CHAR
char_map = ECG_CHAR_2_IDX
elif tm.interpretation == Interpretation.EMBEDDING:
embed_map = tm
try:
embed_map
except NameError:
raise ValueError('Sampling from a character-level model requires an embedding tmap.')
window_size = test_batch[language_map.input_name()].shape[1]
alphabet_size = test_batch[language_map.input_name()].shape[2]
for i in range(test_batch[embed_map.input_name()].shape[0]):
count = 0
sentence = ''
next_char = ''
embed_in = test_batch[embed_map.input_name()][i:i+1, :]
burn_in = np.zeros((1, window_size, alphabet_size), dtype=np.float32)
window_size = burn_in.shape[1]
with h5py.File(test_paths[i], 'r') as hd5:
logging.info(f"\n")
if 'read_' in language_map.name:
caption = decompress_data(data_compressed=hd5[tm.name][()], dtype=hd5[tm.name].attrs['dtype'])
else:
caption = str(tm.hd5_first_dataset_in_group(hd5, tm.hd5_key_guess())[()]).strip()
logging.info(f"Real text: {caption}")
while next_char != '!' and count < 400:
cur_test = {embed_map.input_name(): embed_in, language_map.input_name(): burn_in}
y_pred = char_model.predict(cur_test)
next_char = index_map[_sample_with_heat(y_pred[0, :], 0.7)]
sentence += next_char
burn_in = np.zeros((1,) + test_batch[language_map.input_name()].shape[1:], dtype=np.float32)
for j, c in enumerate(reversed(sentence)):
if j == window_size:
break
burn_in[0, window_size-j-1, char_map[c]] = 1.0
count += 1
logging.info(f"Model text:{sentence}")
def tensors_to_label_dictionary(
categorical_labels: List,
continuous_labels: List,
gene_labels: List,
samples2genes: Dict[str, str],
test_paths: List,
) -> Dict[str, np.ndarray]:
label_dict = {k: np.zeros((len(test_paths))) for k in categorical_labels + continuous_labels + gene_labels}
for i, tp in enumerate(test_paths):
hd5 = h5py.File(tp, 'r')
for k in categorical_labels:
if k in hd5['categorical']:
label_dict[k][i] = 1
elif k in hd5 and hd5[k][0] == 1:
label_dict[k][i] = 1
for mk in continuous_labels:
for k in mk.split('|'):
if k in hd5['continuous']:
label_dict[mk][i] = hd5['continuous'][k][0]
for k in gene_labels:
if tp in samples2genes and samples2genes[tp] == k:
label_dict[k][i] = 1
return label_dict
def test_labels_to_label_map(test_labels: Dict[TensorMap, np.ndarray], examples: int) -> Tuple[Dict[str, np.ndarray], List[str], List[str]]:
label_dict = {tm: np.zeros((examples,)) for tm in test_labels}
categorical_labels = []
continuous_labels = []
for tm in test_labels:
for i in range(examples):
if tm.is_continuous() and tm.axes() == 1:
label_dict[tm][i] = tm.rescale(test_labels[tm][i])
continuous_labels.append(tm)
elif tm.is_categorical() and tm.axes() == 1:
label_dict[tm][i] = np.argmax(test_labels[tm][i])
categorical_labels.append(tm)
return label_dict, categorical_labels, continuous_labels
def infer_with_pixels(args):
stats = Counter()
tensor_paths_inferred = {}
args.num_workers = 0
inference_tsv = os.path.join(args.output_folder, args.id, 'pixel_inference_' + args.id + '.tsv')
tensor_paths = [args.tensors + tp for tp in sorted(os.listdir(args.tensors)) if os.path.splitext(tp)[-1].lower() == TENSOR_EXT]
# hard code batch size to 1 so we can iterate over file names and generated tensors together in the tensor_paths for loop
model = make_multimodal_multitask_model(**args.__dict__)
generate_test = TensorGenerator(
1, args.tensor_maps_in, args.tensor_maps_out, tensor_paths, num_workers=args.num_workers,
cache_size=args.cache_size, keep_paths=True, mixup=args.mixup_alpha,
)
with open(inference_tsv, mode='w') as inference_file:
inference_writer = csv.writer(inference_file, delimiter='\t', quotechar='"', quoting=csv.QUOTE_MINIMAL)
header = ['sample_id']
for ot, otm in zip(args.output_tensors, args.tensor_maps_out):
if len(otm.shape) == 1 and otm.is_continuous():
header.extend([ot+'_prediction', ot+'_actual'])
elif len(otm.shape) == 1 and otm.is_categorical():
channel_columns = []
for k in otm.channel_map:
channel_columns.append(ot + '_' + k + '_prediction')
channel_columns.append(ot + '_' + k + '_actual')
header.extend(channel_columns)
elif otm.name in ['mri_systole_diastole_8_segmented', 'sax_all_diastole_segmented']:
pix_tm = args.tensor_maps_in[1]
header.extend(['pixel_size', 'background_pixel_prediction', 'background_pixel_actual', 'ventricle_pixel_prediction', 'ventricle_pixel_actual', 'myocardium_pixel_prediction', 'myocardium_pixel_actual'])
if otm.name == 'sax_all_diastole_segmented':
header.append('total_b_slices')
inference_writer.writerow(header)
while True:
batch = next(generate_test)
input_data, output_data, tensor_paths = batch[BATCH_INPUT_INDEX], batch[BATCH_OUTPUT_INDEX], batch[BATCH_PATHS_INDEX]
if tensor_paths[0] in tensor_paths_inferred:
logging.info(f"Inference on {stats['count']} tensors finished. Inference TSV file at: {inference_tsv}")
break
prediction = model.predict(input_data)
if len(args.tensor_maps_out) == 1:
prediction = [prediction]
csv_row = [os.path.basename(tensor_paths[0]).replace(TENSOR_EXT, '')] # extract sample id
for y, tm in zip(prediction, args.tensor_maps_out):
if len(tm.shape) == 1 and tm.is_continuous():
csv_row.append(str(tm.rescale(y)[0][0])) # first index into batch then index into the 1x1 structure
if tm.sentinel is not None and tm.sentinel == output_data[tm.output_name()][0][0]:
csv_row.append("NA")
else:
csv_row.append(str(tm.rescale(output_data[tm.output_name()])[0][0]))
elif len(tm.shape) == 1 and tm.is_categorical():
for k in tm.channel_map:
csv_row.append(str(y[0][tm.channel_map[k]]))
csv_row.append(str(output_data[tm.output_name()][0][tm.channel_map[k]]))
elif tm.name in ['mri_systole_diastole_8_segmented', 'sax_all_diastole_segmented']:
csv_row.append(f"{pix_tm.rescale(input_data['input_mri_pixel_width_cine_segmented_sax_inlinevf_continuous'][0][0]):0.3f}")
csv_row.append(f'{np.sum(np.argmax(y, axis=-1) == MRI_SEGMENTED_CHANNEL_MAP["background"]):0.2f}')
csv_row.append(f'{np.sum(output_data[tm.output_name()][..., MRI_SEGMENTED_CHANNEL_MAP["background"]]):0.1f}')
csv_row.append(f'{np.sum(np.argmax(y, axis=-1) == MRI_SEGMENTED_CHANNEL_MAP["ventricle"]):0.2f}')
csv_row.append(f'{np.sum(output_data[tm.output_name()][..., MRI_SEGMENTED_CHANNEL_MAP["ventricle"]]):0.1f}')
csv_row.append(f'{np.sum(np.argmax(y, axis=-1) == MRI_SEGMENTED_CHANNEL_MAP["myocardium"]):0.2f}')
csv_row.append(f'{np.sum(output_data[tm.output_name()][..., MRI_SEGMENTED_CHANNEL_MAP["myocardium"]]):0.1f}')
if tm.name == 'sax_all_diastole_segmented':
background_counts = np.count_nonzero(output_data[tm.output_name()][..., MRI_SEGMENTED_CHANNEL_MAP["background"]] == 0, axis=(0, 1, 2))
csv_row.append(f'{np.count_nonzero(background_counts):0.0f}')
inference_writer.writerow(csv_row)
tensor_paths_inferred[tensor_paths[0]] = True
stats['count'] += 1
if stats['count'] % 250 == 0:
logging.info(f"Wrote:{stats['count']} rows of inference. Last tensor:{tensor_paths[0]}")
def _sample_with_heat(preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
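# Illustrative behaviour of _sample_with_heat (probabilities are hypothetical): with
# preds = [0.7, 0.2, 0.1], a low temperature (e.g. 0.1) returns index 0 on almost every
# draw, while a high temperature (e.g. 2.0) flattens the re-normalized distribution so
# the other indices are sampled far more often.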
def _2d_bbox_to_corner_and_size(bbox):
total_axes = bbox.shape[-1] // 2
lower_left_corner = (bbox[1], bbox[0])
height = bbox[total_axes] - bbox[0]
width = bbox[total_axes+1] - bbox[1]
return lower_left_corner, width, height
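# Example for _2d_bbox_to_corner_and_size (hypothetical bbox): for a bbox array
# [10, 20, 50, 100] laid out as [min_row, min_col, max_row, max_col], it returns
# corner=(20, 10), width=80 and height=40, matching the (x, y) anchor convention of
# matplotlib Rectangle patches.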
def _plot_3d_tensor_slices_as_gray(tensor, figure_path, cols=3, rows=10, bboxes=()):
colors = ['blue', 'red', 'green', 'yellow']
_, axes = plt.subplots(rows, cols, figsize=(cols * 4, rows * 4))
vmin = np.min(tensor)
vmax = np.max(tensor)
for i in range(tensor.shape[-1]):
axes[i // cols, i % cols].imshow(tensor[:, :, i], cmap='gray', vmin=vmin, vmax=vmax)
axes[i // cols, i % cols].set_yticklabels([])
axes[i // cols, i % cols].set_xticklabels([])
for c, bbox in enumerate(bboxes):
corner, width, height = _2d_bbox_to_corner_and_size(bbox)
axes[i // cols, i % cols].add_patch(matplotlib.patches.Rectangle(corner, width, height, linewidth=1, edgecolor=colors[c], facecolor='none'))
if not os.path.exists(os.path.dirname(figure_path)):
os.makedirs(os.path.dirname(figure_path))
plt.savefig(figure_path)
def _tabulate_correlations(
stats: Dict[str, Dict[str, List[float]]],
output_file_name: str,
min_samples: int,
output_folder_path: str,
) -> None:
"""
Tabulate the pairwise correlations of the field values given in 'stats' and save them as a table
:param stats: maps field names (extracted from hd5 dataset names) to a dict of sample id to list of values, one per sample_instance_arrayidx
:param output_file_name: name of the output table file (written with extension CSV_EXT)
:param output_folder_path: directory that output file will be written to
:param min_samples: calculate correlation coefficient only if both fields have values from that many common samples
:return: None
"""
fields = stats.keys()
num_fields = len(fields)
field_pairs = combinations(fields, 2)
table_rows: List[list] = []
logging.info(f"There are {int(num_fields * (num_fields - 1) / 2)} field pairs.")
processed_field_pair_count = 0
nan_counter = Counter() # keep track of whether we've seen a field contain NaNs
for field1, field2 in field_pairs:
if field1 not in nan_counter.keys() and field2 not in nan_counter.keys():
common_samples = set(stats[field1].keys()).intersection(stats[field2].keys())
num_common_samples = len(common_samples)
processed_field_pair_count += 1
if processed_field_pair_count % 50000 == 0:
logging.debug(f"Processed {processed_field_pair_count} field pairs.")
if num_common_samples >= min_samples:
field1_values = reduce(operator.concat, [stats[field1][sample] for sample in common_samples])
field2_values = reduce(operator.concat, [stats[field2][sample] for sample in common_samples])
num_field1_nans = len(list(filter(math.isnan, field1_values)))
num_field2_nans = len(list(filter(math.isnan, field2_values)))
at_least_one_field_has_nans = False
if num_field1_nans != 0:
nan_counter[field1] = True
at_least_one_field_has_nans = True
if num_field2_nans != 0:
nan_counter[field2] = True
at_least_one_field_has_nans = True
if at_least_one_field_has_nans:
continue
if len(field1_values) == len(field2_values):
if len(set(field1_values)) == 1 or len(set(field2_values)) == 1:
logging.debug(
f"Not calculating correlation for fields {field1} and {field2} because at least one of "
f"the fields has all the same values for the {num_common_samples} common samples.",
)
continue
corr = np.corrcoef(field1_values, field2_values)[1, 0]
if not math.isnan(corr):
table_rows.append([field1, field2, corr, corr * corr, num_common_samples])
else:
logging.warning(f"Pearson correlation for fields {field1} and {field2} is NaN.")
else:
logging.debug(
f"Not calculating correlation for fields '{field1}' and '{field2}' "
f"because they have different number of values ({len(field1_values)} vs. {len(field2_values)}).",
)
else:
continue
# Note: NaNs mess up sorting unless they are handled specially by a custom sorting function
sorted_table_rows = sorted(table_rows, key=operator.itemgetter(2), reverse=True)
logging.info(f"Total number of correlations: {len(sorted_table_rows)}")
fields_with_nans = nan_counter.keys()
if len(fields_with_nans) != 0:
logging.warning(f"The {len(fields_with_nans)} fields containing NaNs are: {', '.join(fields_with_nans)}.")
table_path = os.path.join(output_folder_path, output_file_name + CSV_EXT)
table_header = ["Field 1", "Field 2", "Pearson R", "Pearson R^2", "Sample Size"]
df = pd.DataFrame(sorted_table_rows, columns=table_header)
df.to_csv(table_path, index=False)
logging.info(f"Saved correlations table at: {table_path}")
def _collect_continuous_stats_from_tensor_files(
tensor_folder: str,
max_samples: int = None,
instances: List[str] = ['0', '1', '2'],
max_arr_idx: int = None,
) -> Tuple[DefaultDict[str, DefaultDict[str, List[float]]], int]:
if not os.path.exists(tensor_folder):
raise ValueError(f'Source directory does not exist: {tensor_folder}')
all_tensor_files = list(filter(lambda file: file.endswith(TENSOR_EXT), os.listdir(tensor_folder)))
if max_samples is not None:
if len(all_tensor_files) < max_samples:
logging.warning(
f"{max_samples} was specified as number of samples to use but there are only "
f"{len(all_tensor_files)} tensor files in directory '{tensor_folder}'. Proceeding with those...",
)
max_samples = len(all_tensor_files)
tensor_files = np.random.choice(all_tensor_files, max_samples, replace=False)
else:
tensor_files = all_tensor_files
num_tensor_files = len(tensor_files)
logging.info(f"Collecting continuous stats from {num_tensor_files} of {len(all_tensor_files)} tensors at {tensor_folder}...")
# Declare the container to hold {field_1: {sample_1: [values], sample_2: [values], field_2:...}}
stats: DefaultDict[str, DefaultDict[str, List[float]]] = defaultdict(lambda: defaultdict(list))
file_count = 0
for tensor_file in tensor_files:
_collect_continuous_stats_from_tensor_file(tensor_folder, tensor_file, stats, instances, max_arr_idx)
file_count += 1
if file_count % 1000 == 0:
logging.debug(f"Collected continuous stats from {file_count}.")
return stats, num_tensor_files
def _collect_continuous_stats_from_tensor_file(
tensor_folder: str,
tensor_file: str,
stats: DefaultDict[str, DefaultDict[str, List[float]]],
instances: List[str],
max_arr_idx,
) -> None:
# Inlining the method below to be able to reference more from the scope than the arguments of the function
# 'h5py.visititems()' expects. It expects a func(<name>, <object>) => <None or return value>.
def _field_meaning_to_values_dict(_, obj):
if _is_continuous_valid_scalar_hd5_dataset(obj):
value_in_tensor_file = obj[0]
if value_in_tensor_file in CODING_VALUES_LESS_THAN_ONE:
field_value = 0.5
else:
field_value = value_in_tensor_file
dataset_name_parts = os.path.basename(obj.name).split(JOIN_CHAR)
if len(dataset_name_parts) == 4: # e.g. /continuous/1488_Tea-intake_0_0
field_id = dataset_name_parts[0]
field_meaning = dataset_name_parts[1]
instance = dataset_name_parts[2]
array_idx = dataset_name_parts[3]
if instance in instances:
if max_arr_idx is None or int(array_idx) <= max_arr_idx:
stats[f"{field_meaning}{JOIN_CHAR}{field_id}{JOIN_CHAR}{instance}"][sample_id].append(field_value)
else: # e.g. /continuous/VentricularRate
field_meaning = dataset_name_parts[0]
stats[field_meaning][sample_id].append(field_value)
tensor_file_path = os.path.join(tensor_folder, tensor_file)
sample_id = os.path.splitext(tensor_file)[0]
with h5py.File(tensor_file_path, 'r') as hd5_handle:
hd5_handle.visititems(_field_meaning_to_values_dict)
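# Illustrative result of _collect_continuous_stats_from_tensor_file (file and field names
# are hypothetical, assuming JOIN_CHAR is '_'): after visiting a tensor file named
# '1000001' + TENSOR_EXT containing /continuous/1488_Tea-intake_0_0 = 3.0, the shared
# container holds stats['Tea-intake_1488_0']['1000001'] == [3.0]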
def _is_continuous_valid_scalar_hd5_dataset(obj) -> bool:
return obj.name.startswith('/continuous') and \
isinstance(obj, h5py.Dataset) and \
obj[0] not in CODING_VALUES_MISSING and \
len(obj.shape) == 1
def _continuous_explore_header(tm: TensorMap) -> str:
return tm.name
def _categorical_explore_header(tm: TensorMap, channel: str) -> str:
return f'{tm.name} {channel}'
class ExploreParallelWrapper:
def __init__(self, tmaps, paths, num_workers, output_folder, run_id):
self.tmaps = tmaps
self.paths = paths
self.num_workers = num_workers
self.total = len(paths)
self.output_folder = output_folder
self.run_id = run_id
self.chunksize = self.total // num_workers
self.counter = mp.Value('l', 1)
def _hd5_to_disk(self, path, gen_name):
with self.counter.get_lock():
i = self.counter.value
if i % 500 == 0:
logging.info(f"Parsing {i}/{self.total} ({i/self.total*100:.1f}%) done")
self.counter.value += 1
# each worker should write to its own file
pid = mp.current_process().pid
fpath = os.path.join(self.output_folder, self.run_id, f'tensors_all_union_{pid}.csv')
write_header = not os.path.isfile(fpath)
try:
with h5py.File(path, "r") as hd5:
dict_of_tensor_dicts = defaultdict(dict)
# Iterate through each tmap
for tm in self.tmaps:
shape = tm.shape if tm.shape[0] is not None else tm.shape[1:]
try:
tensors = tm.tensor_from_file(tm, hd5)
if tm.shape[0] is not None:
# If not a multi-tensor tensor, wrap in array to loop through
tensors = np.array([tensors])
for i, tensor in enumerate(tensors):
if tensor is None:
break
error_type = ''
try:
tensor = tm.postprocess_tensor(tensor, augment=False, hd5=hd5)
# Append tensor to dict
if tm.channel_map:
for cm in tm.channel_map:
dict_of_tensor_dicts[i][f'{tm.name} {cm}'] = tensor[tm.channel_map[cm]]
else:
# If tensor is a scalar, isolate the value in the array;
# otherwise, retain the value as array
if shape[0] == 1:
if isinstance(tensor, np.ndarray):
tensor = tensor.item()
dict_of_tensor_dicts[i][tm.name] = tensor
except (IndexError, KeyError, ValueError, OSError, RuntimeError) as e:
if tm.channel_map:
for cm in tm.channel_map:
dict_of_tensor_dicts[i][f'{tm.name} {cm}'] = np.nan
else:
dict_of_tensor_dicts[i][tm.name] = np.full(shape, np.nan)[0]
error_type = type(e).__name__
dict_of_tensor_dicts[i][f'error_type_{tm.name}'] = error_type
except (IndexError, KeyError, ValueError, OSError, RuntimeError) as e:
# Most likely error came from tensor_from_file and dict_of_tensor_dicts is empty
if tm.channel_map:
for cm in tm.channel_map:
dict_of_tensor_dicts[0][f'{tm.name} {cm}'] = np.nan
else:
dict_of_tensor_dicts[0][tm.name] = np.full(shape, np.nan)[0]
dict_of_tensor_dicts[0][f'error_type_{tm.name}'] = type(e).__name__
for i in dict_of_tensor_dicts:
dict_of_tensor_dicts[i]['fpath'] = path
dict_of_tensor_dicts[i]['generator'] = gen_name
# write tdicts to disk
if len(dict_of_tensor_dicts) > 0:
keys = dict_of_tensor_dicts[0].keys()
with open(fpath, 'a') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
if write_header:
dict_writer.writeheader()
dict_writer.writerows(dict_of_tensor_dicts.values())
except OSError as e:
logging.info(f"OSError {e}")
def mp_worker(self, worker_idx):
start = worker_idx * self.chunksize
end = start + self.chunksize
if worker_idx == self.num_workers - 1:
end = self.total
for path, gen in self.paths[start:end]:
self._hd5_to_disk(path, gen)
def run(self):
workers = []
for i in range(self.num_workers):
worker = mp.Process(target=self.mp_worker, args=(i,))
worker.start()
workers.append(worker)
for worker in workers:
worker.join()
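# Path chunking in ExploreParallelWrapper.mp_worker (numbers are illustrative): with 10
# paths and 3 workers, chunksize = 10 // 3 = 3, so the workers process paths[0:3],
# paths[3:6] and paths[6:10]; the last worker always picks up the remainder.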
def _tensors_to_df(args):
generators = test_train_valid_tensor_generators(wrap_with_tf_dataset=False, **args.__dict__)
tmaps = [tm for tm in args.tensor_maps_in]
paths = []
for gen, name in zip(generators, ["train", "valid", "test"]):
paths += [(path, name) for path in gen.paths] # TODO: relies on leaky abstraction of TensorGenerator
ExploreParallelWrapper(tmaps, paths, args.num_workers, args.output_folder, args.id).run()
# get columns that should have dtype 'string' instead of dtype 'O'
str_cols = ['fpath', 'generator']
for tm in tmaps:
if tm.interpretation == Interpretation.LANGUAGE:
str_cols.extend([f'{tm.name} {cm}' for cm in tm.channel_map] if tm.channel_map else [tm.name])
str_cols.append(f'error_type_{tm.name}')
str_cols = {key: 'string' for key in str_cols}
# read all temporary files to df
df = pd.DataFrame()
base = os.path.join(args.output_folder, args.id)
temp_files = []
for name in os.listdir(base):
if 'tensors_all_union_' in name:
fpath = os.path.join(base, name)
_df = pd.read_csv(fpath, dtype=str_cols)
logging.debug(f'Loaded {fpath} into memory')
df = pd.concat([df, _df], ignore_index=True)
logging.debug(f'Appended {fpath} to overall dataframe')
temp_files.append(fpath)
logging.info(f"Extracted {len(tmaps)} tmaps from {len(df)} tensors across {len(paths)} hd5 files into DataFrame")
# remove temporary files
for fpath in temp_files:
os.remove(fpath)
logging.debug(f'Deleted {len(temp_files)} temporary files')
return df
def _tmap_error_detect(tmap: TensorMap) -> TensorMap:
"""Modifies tm so it returns it's mean unless previous tensor from file fails"""
new_tm = copy.deepcopy(tmap)
new_tm.shape = (1,)
new_tm.interpretation = Interpretation.CONTINUOUS
new_tm.channel_map = None
def tff(_: TensorMap, hd5: h5py.File, dependents=None):
return tmap.tensor_from_file(tmap, hd5, dependents).mean()
new_tm.tensor_from_file = tff
return new_tm
def _should_error_detect(tm: TensorMap) -> bool:
"""Whether a tmap has to be modified to be used in explore"""
if tm.is_continuous():
return tm.shape not in {(1,), (None, 1)}
if tm.is_categorical():
if tm.shape[0] is None:
return tm.axes() > 2
else:
return tm.axes() > 1
if tm.is_language():
return False
return True
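# Examples of the _should_error_detect rule above (shapes are hypothetical): a continuous
# tmap of shape (1,) is used as-is, while a continuous tmap of shape (600, 12) or a
# categorical tmap with more than one axis is first wrapped by _tmap_error_detect so
# explore only records the tensor mean (or an error type).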
def explore(args):
tmaps = [
_tmap_error_detect(tm) if _should_error_detect(tm) else tm for tm in args.tensor_maps_in
]
args.tensor_maps_in = tmaps
fpath_prefix = "summary_stats"
tsv_style_is_genetics = 'genetics' in args.tsv_style
out_ext = 'tsv' if tsv_style_is_genetics else 'csv'
out_sep = '\t' if tsv_style_is_genetics else ','
# Iterate through tensors, get tmaps, and save to dataframe
df = _tensors_to_df(args)
# By default, remove columns with error_type
if not args.explore_export_errors:
cols = [c for c in df.columns if not c.startswith('error_type_')]
df = df[cols]
if tsv_style_is_genetics:
fid = df['fpath'].str.split('/').str[-1].str.split('.').str[0]
df.insert(0, 'FID', fid)
df.insert(1, 'IID', fid)
# Save dataframe to CSV
fpath = os.path.join(args.output_folder, args.id, f"tensors_all_union.{out_ext}")
df.to_csv(fpath, index=False, sep=out_sep)
fpath = os.path.join(args.output_folder, args.id, f"tensors_all_intersect.{out_ext}")
df.dropna().to_csv(fpath, index=False, sep=out_sep)
logging.info(f"Saved dataframe of tensors (union and intersect) to {fpath}")
# Check if any tmaps are categorical
if Interpretation.CATEGORICAL in [tm.interpretation for tm in tmaps]:
categorical_tmaps = [tm for tm in tmaps if tm.interpretation is Interpretation.CATEGORICAL]
# Iterate through 1) df, 2) df without NaN-containing rows (intersect)
for df_cur, df_str in zip([df, df.dropna()], ["union", "intersect"]):
for tm in categorical_tmaps:
counts = []
counts_missing = []
if tm.channel_map:
for cm in tm.channel_map:
key = f'{tm.name} {cm}'
counts.append(df_cur[key].sum())
counts_missing.append(df_cur[key].isna().sum())
else:
key = tm.name
counts.append(df_cur[key].sum())
counts_missing.append(df_cur[key].isna().sum())
# Append list with missing counts
counts.append(counts_missing[0])
# Append list with total counts
counts.append(sum(counts))
# Create list of row names
cm_names = [cm for cm in tm.channel_map] + ["missing", "total"]
# Transform list into dataframe indexed by channel maps
df_stats = pd.DataFrame(counts, index=cm_names, columns=["counts"])
# Add new column: percent of all counts
df_stats["percent_of_total"] = df_stats["counts"] / df_stats.loc["total"]["counts"] * 100
# Save parent dataframe to CSV on disk
fpath = os.path.join(
args.output_folder, args.id,
f"{fpath_prefix}_{Interpretation.CATEGORICAL}_{tm.name}_{df_str}.csv",
)
df_stats = df_stats.round(2)
df_stats.to_csv(fpath)
logging.info(f"Saved summary stats of {Interpretation.CATEGORICAL} {tm.name} tmaps to {fpath}")
# Plot counts of categorical TMAPs over time
if args.time_tensor and (args.time_tensor in args.input_tensors):
min_plotted_counts = 2
for df_cur, df_str in zip([df, df.dropna()], ["union", "intersect"]):
freq = args.time_frequency # pandas date_range frequency string, e.g. monthly
time_tensors = pd.to_datetime(df_cur[args.time_tensor])
min_date = time_tensors.min()
max_date = time_tensors.max()
date_range = pd.date_range(min_date, max_date, freq=freq)
for tm in categorical_tmaps:
date_range_filtered = [date_range[0]]
prev_date = min_date
tm_counts = defaultdict(list)
for i, date in enumerate(date_range[1:]):
sub_df = df_cur[(time_tensors >= prev_date) & (time_tensors < date)]
channel_sum = 0
for cm in tm.channel_map:
partial_sum = np.sum(sub_df[f'{tm.name} {cm}'])
channel_sum += partial_sum
tm_counts[cm].append(partial_sum)
if channel_sum > min_plotted_counts:
date_range_filtered.append(date)
else:
for cm in tm.channel_map:
tm_counts[cm].pop()
prev_date = date
fpath = os.path.join(args.output_folder, args.id, f'{tm.name}_over_time_{df_str}.png')
plot_categorical_tmap_over_time(tm_counts, tm.name, date_range_filtered, fpath)
# Check if any tmaps are continuous
if Interpretation.CONTINUOUS in [tm.interpretation for tm in tmaps]:
# Iterate through 1) df, 2) df without NaN-containing rows (intersect)
for df_cur, df_str in zip([df, df.dropna()], ["union", "intersect"]):
df_stats = pd.DataFrame()
if df_cur.empty:
logging.info(
f"{df_str} of tensors results in empty dataframe."
f" Skipping calculations of {Interpretation.CONTINUOUS} summary statistics",
)
else:
for tm in [tm for tm in tmaps if tm.interpretation is Interpretation.CONTINUOUS]:
if tm.channel_map:
for cm in tm.channel_map:
stats = dict()
key = f'{tm.name} {cm}'
stats["min"] = df_cur[key].min()
stats["max"] = df_cur[key].max()
stats["mean"] = df_cur[key].mean()
stats["median"] = df_cur[key].median()
mode = df_cur[key].mode()
stats["mode"] = mode[0] if len(mode) != 0 else np.nan
stats["variance"] = df_cur[key].var()
stats["count"] = df_cur[key].count()
stats["missing"] = df_cur[key].isna().sum()
stats["total"] = len(df_cur[key])
stats["missing_percent"] = stats["missing"] / stats["total"] * 100
df_stats = pd.concat([df_stats, pd.DataFrame([stats], index=[f'{tm.name} {cm}'])])
else:
stats = dict()
key = tm.name
stats["min"] = df_cur[key].min()
stats["max"] = df_cur[key].max()
stats["mean"] = df_cur[key].mean()
stats["median"] = df_cur[key].median()
mode = df_cur[key].mode()
stats["mode"] = mode[0] if len(mode) != 0 else np.nan
stats["variance"] = df_cur[key].var()
stats["count"] = df_cur[key].count()
stats["missing"] = df_cur[key].isna().sum()
stats["total"] = len(df_cur[key])
stats["missing_percent"] = stats["missing"] / stats["total"] * 100
df_stats = pd.concat([df_stats, pd.DataFrame([stats], index=[key])])
# Save parent dataframe to CSV on disk
fpath = os.path.join(
args.output_folder, args.id,
f"{fpath_prefix}_{Interpretation.CONTINUOUS}_{df_str}.csv",
)
df_stats = df_stats.round(2)
df_stats.to_csv(fpath)
logging.info(f"Saved summary stats of {Interpretation.CONTINUOUS} tmaps to {fpath}")
# Check if any tmaps are language (strings)
if Interpretation.LANGUAGE in [tm.interpretation for tm in tmaps]:
for df_cur, df_str in zip([df, df.dropna()], ["union", "intersect"]):
df_stats = pd.DataFrame()
if df_cur.empty:
logging.info(
f"{df_str} of tensors results in empty dataframe."
f" Skipping calculations of {Interpretation.LANGUAGE} summary statistics",
)
else:
for tm in [tm for tm in tmaps if tm.interpretation is Interpretation.LANGUAGE]:
if tm.channel_map:
for cm in tm.channel_map:
stats = dict()
key = f'{tm.name} {cm}'
stats["count"] = df_cur[key].count()
stats["count_unique"] = len(df_cur[key].value_counts())
stats["missing"] = df_cur[key].isna().sum()
stats["total"] = len(df_cur[key])
stats["missing_percent"] = stats["missing"] / stats["total"] * 100
df_stats = pd.concat([df_stats, pd.DataFrame([stats], index=[f'{tm.name} {cm}'])])
else:
stats = dict()
key = tm.name
stats["count"] = df_cur[key].count()
stats["count_unique"] = len(df_cur[key].value_counts())
stats["missing"] = df_cur[key].isna().sum()
stats["total"] = len(df_cur[key])
stats["missing_percent"] = stats["missing"] / stats["total"] * 100
df_stats = pd.concat([df_stats, pd.DataFrame([stats], index=[tm.name])])
# Save parent dataframe to CSV on disk
fpath = os.path.join(
args.output_folder, args.id,
f"{fpath_prefix}_{Interpretation.LANGUAGE}_{df_str}.csv",
)
df_stats = df_stats.round(2)
df_stats.to_csv(fpath)
logging.info(f"Saved summary stats of {Interpretation.LANGUAGE} tmaps to {fpath}")
if args.plot_hist == "True":
for tm in args.tensor_maps_in:
if tm.interpretation == Interpretation.CONTINUOUS:
name = tm.name
arr = list(df[name])
plt.figure(figsize=(SUBPLOT_SIZE, SUBPLOT_SIZE))
plt.hist(arr, 50, rwidth=.9)
plt.xlabel(name)
plt.ylabel('Fraction')
plt.rcParams.update({'font.size': 13})
figure_path = os.path.join(args.output_folder, args.id, f"{name}_histogram{IMAGE_EXT}")
plt.savefig(figure_path)
logging.info(f"Saved {name} histogram plot at: {figure_path}")
def cross_reference(args):
"""Cross reference a source cohort with a reference cohort."""
cohort_counts = OrderedDict()
src_path = args.tensors_source
src_name = args.tensors_name
src_join = args.join_tensors
src_time = args.time_tensor
ref_path = args.reference_tensors
ref_name = args.reference_name
ref_join = args.reference_join_tensors
ref_start = args.reference_start_time_tensor
ref_end = args.reference_end_time_tensor
ref_labels = args.reference_labels
number_in_window = args.number_per_window
order_in_window = args.order_in_window
window_names = args.window_name
match_exact_window = order_in_window is not None
match_min_window = not match_exact_window
match_any_window = args.match_any_window
match_every_window = not match_any_window
# parse options
src_cols = list(src_join)
ref_cols = list(ref_join)
if ref_labels is not None:
ref_cols.extend(ref_labels)
def _cols_from_time_windows(time_windows):
return {time_point[0] for time_window in time_windows for time_point in time_window}
use_time = not any(arg is None for arg in [src_time, ref_start, ref_end])
if use_time:
if len(ref_start) != len(ref_end):
raise ValueError(f"Invalid time windows, got {len(ref_start)} starts and {len(ref_end)} ends")
if order_in_window is None:
# if not matching exactly N in time window, order_in_window is None
# make an array of blanks so zip doesn't break later
order_in_window = [''] * len(ref_start)
elif len(order_in_window) != len(ref_start):
raise ValueError(f"Ambiguous time selection in time windows, got {len(order_in_window)} order_in_window for {len(ref_start)} windows")
if window_names is None:
window_names = [str(i) for i in range(len(ref_start))]
elif len(window_names) != len(ref_start):
raise ValueError(f"Ambiguous time window names, got {len(window_names)} names for {len(ref_start)} windows")
# get time columns and ensure time windows are defined
src_cols.append(src_time)
# ref start and end are lists of lists, defining time windows
time_windows = list(zip(ref_start, ref_end))
# each time window is defined by a tuples of two lists,
# where the first list of each tuple defines the start point of the time window
# and the second list of each tuple defines the end point of the time window
for start, end in time_windows:
# each start/end point list is two elements,
# where the first element in the list is the name of the time tensor
# and the second element is the offset to the value of the time tensor
# add day offset of 0 for time points without explicit offset
for time_point in [start, end]:
    if len(time_point) == 1:
        time_point.append(0)
# parse day offset as int
start[1] = int(start[1])
end[1] = int(end[1])
# add unique column names to ref_cols
ref_cols.extend(_cols_from_time_windows(time_windows))
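# Illustrative example (tensor names are assumptions): with
# reference_start_time_tensor = [["surgdt", "-180"]] and reference_end_time_tensor = [["surgdt"]],
# the loop above yields time_windows = [(["surgdt", -180], ["surgdt", 0])], i.e. a single
# window spanning from 180 days before surgdt up to surgdt itself.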
# load data into dataframes
def _load_data(name, path, cols):
if os.path.isdir(path):
logging.debug(f'Assuming {name} is directory of hd5 at {path}')
from ml4h.arguments import tensormap_lookup
# TODO: check if this works!
args.tensor_maps_in = [tensormap_lookup(it) for it in cols]
df = _tensors_to_df(args)[cols]
else:
logging.debug(f'Assuming {name} is a csv at {path}')
df = pd.read_csv(path, usecols=cols, low_memory=False)
return df
src_df = _load_data(src_name, src_path, src_cols)
logging.info(f'Loaded {src_name} into dataframe')
ref_df = _load_data(ref_name, ref_path, ref_cols)
logging.info(f'Loaded {ref_name} into dataframe')
# cleanup time col
if use_time:
src_df[src_time] = pd.to_datetime(src_df[src_time], errors='coerce', infer_datetime_format=True)
src_df.dropna(subset=[src_time], inplace=True)
for ref_time in _cols_from_time_windows(time_windows):
ref_df[ref_time] = pd.to_datetime(ref_df[ref_time], errors='coerce', infer_datetime_format=True)
ref_df.dropna(subset=_cols_from_time_windows(time_windows), inplace=True)
def _add_offset_time(ref_time):
offset = ref_time[1]
ref_time = ref_time[0]
if offset == 0:
return ref_time
ref_time_col = f'{ref_time}_{offset:+}_days'
if ref_time_col not in ref_df:
ref_df[ref_time_col] = ref_df[ref_time].apply(lambda x: x + datetime.timedelta(days=offset))
ref_cols.append(ref_time_col)
return ref_time_col
# convert time windows to tuples of cleaned and parsed column names
time_windows = [(_add_offset_time(start), _add_offset_time(end)) for start, end in time_windows]
logging.info('Cleaned data columns and removed rows that could not be parsed')
# drop duplicates based on cols
src_df.drop_duplicates(subset=src_cols, inplace=True)
ref_df.drop_duplicates(subset=ref_cols, inplace=True)
logging.info('Removed duplicates from dataframes, based on join, time, and label')
cohort_counts[f'{src_name} (total)'] = len(src_df)
cohort_counts[f'{src_name} (unique {" + ".join(src_join)})'] = len(src_df.drop_duplicates(subset=src_join))
cohort_counts[f'{ref_name} (total)'] = len(ref_df)
cohort_counts[f'{ref_name} (unique {" + ".join(ref_join)})'] = len(ref_df.drop_duplicates(subset=ref_join))
# merging on join columns duplicates rows in source if there are duplicate join values in both source and reference
# this is fine, each row in reference needs all associated rows in source
cross_df = src_df.merge(ref_df, how='inner', left_on=src_join, right_on=ref_join).sort_values(src_cols)
logging.info('Cross referenced based on join tensors')
cohort_counts[f'{src_name} in {ref_name} (unique {" + ".join(src_cols)})'] = len(cross_df.drop_duplicates(subset=src_cols))
cohort_counts[f'{src_name} in {ref_name} (unique {" + ".join(src_join)})'] = len(cross_df.drop_duplicates(subset=src_join))
cohort_counts[f'{ref_name} in {src_name} (unique joins + times + labels)'] = len(cross_df.drop_duplicates(subset=ref_cols))
cohort_counts[f'{ref_name} in {src_name} (unique {" + ".join(ref_join)})'] = len(cross_df.drop_duplicates(subset=ref_join))
# dump results and report label distribution
def _report_cross_reference(df, title):
title = title.replace(' ', '_')
if ref_labels is not None:
series = df[ref_labels].astype(str).apply(lambda x: '<>'.join(x), axis=1, raw=True)
label_values, counts = np.unique(series, return_counts=True)
label_values = np.array(list(map(lambda val: val.split('<>'), label_values)))
label_values = np.append(label_values, [['Total']*len(ref_labels)], axis=0)
total = sum(counts)
counts = np.append(counts, [total])
fracs = list(map(lambda f: f'{f:0.5f}', counts / total))
res = pd.DataFrame(data=label_values, columns=ref_labels)
res['count'] = counts
res['fraction total'] = fracs
# save label counts to csv
fpath = os.path.join(args.output_folder, args.id, f'label_counts_{title}.csv')
res.to_csv(fpath, index=False)
logging.info(f'Saved distribution of labels in cross reference to {fpath}')
# save cross reference to csv
fpath = os.path.join(args.output_folder, args.id, f'list_{title}.csv')
df.set_index(src_join, drop=True).to_csv(fpath)
logging.info(f'Saved cross reference to {fpath}')
if use_time:
# count rows across time windows
def _count_time_windows(dfs, title, exact_or_min):
if isinstance(dfs, list):
# Number of pre-op (surgdt -180 days; surgdt) ECG from patients with 1+ ECG in all windows
# Number of distinct pre-op (surgdt -180 days; surgdt) ECG from patients with 1+ ECG in all windows
# Number of distinct pre-op (surgdt -180 days; surgdt) partners_ecg_patientid_clean from patients with 1+ ECG in all windows
# Number of newest pre-op (surgdt -180 days; surgdt) ECG from patients with 1 ECG in all windows
# Number of distinct newest pre-op (surgdt -180 days; surgdt) ECG from patients with 1 ECG in all windows
# Number of distinct newest pre-op (surgdt -180 days; surgdt) partners_ecg_patientid_clean from patients with 1 ECG in all windows
for df, window_name, order, (start, end) in zip(dfs, window_names, order_in_window, time_windows):
order = f'{order} ' if exact_or_min == 'exactly' else ''
start = start.replace('_', ' ')
end = end.replace('_', ' ')
cohort_counts[f'Number of {order}{window_name} ({start}; {end}) {src_name} from patients with {title}'] = len(df)
cohort_counts[f'Number of distinct {order}{window_name} ({start}; {end}) {src_name} from patients with {title}'] = len(df.drop_duplicates(subset=src_cols))
cohort_counts[f'Number of distinct {order}{window_name} ({start}; {end}) {" + ".join(src_join)} from patients with {title}'] = len(df.drop_duplicates(subset=src_join))
else:
# Number of ECGs from patients with 1+ ECG in all windows
# Number of distinct ECGs from patients with 1+ ECG in all windows
# Number of distinct partners_ecg_patientid_clean from patients with 1+ ECG in all windows
df = dfs
cohort_counts[f'Number of {src_name} from patients with {title}'] = len(df)
cohort_counts[f'Number of distinct {src_name} from patients with {title}'] = len(df.drop_duplicates(subset=src_cols))
cohort_counts[f'Number of distinct {" + ".join(src_join)} from patients with {title}'] = len(df.drop_duplicates(subset=src_join))
# aggregate all time windows back into one dataframe with indicator for time window index
def _aggregate_time_windows(time_window_dfs, window_names):
for df, window_name in zip(time_window_dfs, window_names):
if 'time_window' not in df:
df['time_window'] = window_name
aggregated_df = pd.concat(time_window_dfs, ignore_index=True).sort_values(by=src_cols + ['time_window'], ignore_index=True)
return aggregated_df
# get only occurrences for join_tensors that appear in every time window
def _intersect_time_windows(time_window_dfs):
# find the intersection of join_tensors that appear in all time_window_dfs
join_tensor_intersect = reduce(lambda a, b: a.merge(b), [pd.DataFrame(df[src_join].drop_duplicates()) for df in time_window_dfs])
# filter time_window_dfs to only the rows that have join_tensors across all time windows
time_window_dfs_intersect = [df.merge(join_tensor_intersect) for df in time_window_dfs]
return time_window_dfs_intersect
# 1. get data with at least N (default 1) occurrences in all time windows
# 2. within each time window, get only data for join_tensors that have N rows in the time window
# 3. across all time windows, get only data for join_tensors that have data in all time windows
# get df for each time window
dfs_min_in_any_time_window = [
cross_df[(cross_df[start] < cross_df[src_time]) & (cross_df[src_time] < cross_df[end])]
for start, end in time_windows
]
# get at least N occurrences in any time window
dfs_min_in_any_time_window = [df.groupby(src_join+[start, end]).filter(lambda g: len(g) >= number_in_window) for df, (start, end) in zip(dfs_min_in_any_time_window, time_windows)]
if match_min_window and match_any_window:
min_in_any_time_window = _aggregate_time_windows(dfs_min_in_any_time_window, window_names)
logging.info(f"Cross referenced so unique event occurs {number_in_window}+ times in any time window")
title = f'{number_in_window}+ in any window'
_report_cross_reference(min_in_any_time_window, title)
_count_time_windows(dfs_min_in_any_time_window, title, 'at least')
if len(dfs_min_in_any_time_window) > 1:
_count_time_windows(min_in_any_time_window, title, 'at least')
# get at least N occurrences in every time window
if match_min_window and match_every_window:
dfs_min_in_every_time_window = _intersect_time_windows(dfs_min_in_any_time_window)
min_in_every_time_window = _aggregate_time_windows(dfs_min_in_every_time_window, window_names)
logging.info(f"Cross referenced so unique event occurs {number_in_window}+ times in all time windows")
title = f'{number_in_window}+ in all windows'
_report_cross_reference(min_in_every_time_window, title)
_count_time_windows(dfs_min_in_every_time_window, title, 'at least')
if len(dfs_min_in_every_time_window) > 1:
_count_time_windows(min_in_every_time_window, title, 'at least')
# get exactly N occurrences, select based on ordering
def _get_occurrences(df, order, start, end):
if order == 'newest':
df = df.groupby(src_join+[start, end]).tail(number_in_window)
elif order == 'oldest':
df = df.groupby(src_join+[start, end]).head(number_in_window)
elif order == 'random':
df = df.groupby(src_join+[start, end]).apply(lambda g: g.sample(number_in_window))
else:
raise NotImplementedError(f"Ordering for which rows to use in time window unknown: '{order}'")
return df.reset_index(drop=True)
# get exactly N occurrences in any time window
if match_exact_window:
dfs_exact_in_any_time_window = [_get_occurrences(df, order, start, end) for df, order, (start, end) in zip(dfs_min_in_any_time_window, order_in_window, time_windows)]
if match_exact_window and match_any_window:
exact_in_any_time_window = _aggregate_time_windows(dfs_exact_in_any_time_window, window_names)
logging.info(f"Cross referenced so unique event occurs exactly {number_in_window} times in any time window")
title = f'{number_in_window} in any window'
_report_cross_reference(exact_in_any_time_window, title)
_count_time_windows(dfs_exact_in_any_time_window, title, 'exactly')
if len(dfs_exact_in_any_time_window) > 1:
_count_time_windows(exact_in_any_time_window, title, 'exactly')
# get exactly N occurrences in every time window
if match_exact_window and match_every_window:
dfs_exact_in_every_time_window = _intersect_time_windows(dfs_exact_in_any_time_window)
exact_in_every_time_window = _aggregate_time_windows(dfs_exact_in_every_time_window, window_names)
logging.info(f"Cross referenced so unique event occurs exactly {number_in_window} times in all time windows")
title = f'{number_in_window} in all windows'
_report_cross_reference(exact_in_every_time_window, title)
_count_time_windows(dfs_exact_in_every_time_window, title, 'exactly')
if len(dfs_exact_in_every_time_window) > 1:
_count_time_windows(exact_in_every_time_window, title, 'exactly')
else:
_report_cross_reference(cross_df, f'all {src_name} in {ref_name}')
# report counts
fpath = os.path.join(args.output_folder, args.id, 'summary_cohort_counts.csv')
pd.DataFrame.from_dict(cohort_counts, orient='index', columns=['count']).rename_axis('description').to_csv(fpath)
logging.info(f'Saved cohort counts to {fpath}')
def directions_in_latent_space(stratify_column, stratify_thresh, split_column, split_thresh, latent_cols, latent_df):
hit = latent_df.loc[latent_df[stratify_column] >= stratify_thresh][latent_cols].to_numpy()
miss = latent_df.loc[latent_df[stratify_column] < stratify_thresh][latent_cols].to_numpy()
miss_mean_vector = np.mean(miss, axis=0)
hit_mean_vector = np.mean(hit, axis=0)
strat_vector = hit_mean_vector - miss_mean_vector
hit1 = latent_df.loc[(latent_df[stratify_column] >= stratify_thresh)
& (latent_df[split_column] >= split_thresh)][latent_cols].to_numpy()
miss1 = latent_df.loc[(latent_df[stratify_column] < stratify_thresh)
& (latent_df[split_column] >= split_thresh)][latent_cols].to_numpy()
hit2 = latent_df.loc[(latent_df[stratify_column] >= stratify_thresh)
& (latent_df[split_column] < split_thresh)][latent_cols].to_numpy()
miss2 = latent_df.loc[(latent_df[stratify_column] < stratify_thresh)
& (latent_df[split_column] < split_thresh)][latent_cols].to_numpy()
miss_mean_vector1 = np.mean(miss1, axis=0)
hit_mean_vector1 = np.mean(hit1, axis=0)
angle1 = angle_between(miss_mean_vector1, hit_mean_vector1)
miss_mean_vector2 = np.mean(miss2, axis=0)
hit_mean_vector2 = np.mean(hit2, axis=0)
angle2 = angle_between(miss_mean_vector2, hit_mean_vector2)
h1_vector = hit_mean_vector1 - miss_mean_vector1
h2_vector = hit_mean_vector2 - miss_mean_vector2
angle3 = angle_between(h1_vector, h2_vector)
print(f'\n Between {stratify_column}, and splits: {split_column}\n',
f'Angles h1 and m1: {angle1:.2f}, h2 and m2 {angle2:.2f} h1-m1 and h2-m2 {angle3:.2f} degrees.\n'
f'stratify threshold: {stratify_thresh}, split thresh: {split_thresh}, \n'
f'miss_mean_vector1 shape {miss_mean_vector1.shape}, hit_mean_vector2 shape: {hit_mean_vector2.shape} \n'
f'Hit1 shape {hit1.shape}, miss1:{miss1.shape} threshold:{stratify_thresh}\n'
f'Hit2 shape {hit2.shape}, miss2:{miss2.shape}\n')
return hit_mean_vector1, miss_mean_vector1, hit_mean_vector2, miss_mean_vector2
def latent_space_dataframe(infer_hidden_tsv, explore_csv):
df = pd.read_csv(explore_csv)
df['fpath'] = pd.to_numeric(df['fpath'], errors='coerce')
df2 = pd.read_csv(infer_hidden_tsv, sep='\t')
df2['sample_id'] = pd.to_numeric(df2['sample_id'], errors='coerce')
latent_df = pd.merge(df, df2, left_on='fpath', right_on='sample_id', how='inner')
latent_df.info()
return latent_df
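# Usage sketch (illustrative only, not part of the original pipeline): the file
# paths, column names, and thresholds below are hypothetical placeholders. It
# assumes the inference TSV has a 'sample_id' column plus latent columns named
# 'latent_*', that the explore CSV has 'fpath' plus the phenotype columns being
# stratified on, and that angle_between() is defined elsewhere in this module.
def _example_latent_space_usage():
    latent_df = latent_space_dataframe('hidden_inference.tsv', 'explore.csv')
    latent_cols = [c for c in latent_df.columns if c.startswith('latent_')]
    return directions_in_latent_space(
        stratify_column='sex_male', stratify_thresh=0.5,
        split_column='age', split_thresh=65,
        latent_cols=latent_cols, latent_df=latent_df,
    )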
|
acquisitions.py
|
import numpy as np
import multiprocessing
import threading
from inspect import signature
import time
from pycromanager.zmq import deserialize_array, Bridge
from pycromanager.data import Dataset
import warnings
import os.path
import queue
from pycromanager.zmq import JavaObjectShadow
from docstring_inheritance import NumpyDocstringInheritanceMeta
### These functions are defined outside the class to prevent pickling problems when running them in a different process
def _run_acq_event_source(bridge_port, event_port, event_queue, bridge_timeout=Bridge.DEFAULT_TIMEOUT, debug=False):
"""
Pull events from the Python-side event queue and forward them to the Java
acquisition engine over a ZMQ push socket; a None entry in the queue
signals the end of the acquisition.
Parameters
----------
bridge_port :
Port of the Java-side ZMQ server the Bridge connects to
event_port :
Port of the event push socket
event_queue :
Queue of acquisition events (None is the shutdown sentinel)
bridge_timeout :
(Default value = Bridge.DEFAULT_TIMEOUT)
debug :
(Default value = False)
Returns
-------
"""
with Bridge(debug=debug, port=bridge_port, timeout=bridge_timeout) as bridge:
event_socket = bridge._connect_push(event_port)
while True:
events = event_queue.get(block=True)
if debug:
print("got event(s):", events)
if events is None:
# Poison, time to shut down
event_socket.send({"events": [{"special": "acquisition-end"}]})
event_socket.close()
return
event_socket.send({"events": events if type(events) == list else [events]})
if debug:
print("sent events")
def _run_acq_hook(bridge_port, pull_port, push_port, hook_connected_evt, event_queue, hook_fn, debug):
"""
Parameters
----------
pull_port :
push_port :
hook_connected_evt :
event_queue :
hook_fn :
debug :
Returns
-------
"""
with Bridge(debug=debug, port=bridge_port) as bridge:
push_socket = bridge._connect_push(pull_port)
pull_socket = bridge._connect_pull(push_port)
hook_connected_evt.set()
while True:
event_msg = pull_socket.receive()
if "special" in event_msg and event_msg["special"] == "acquisition-end":
push_socket.send({})
push_socket.close()
pull_socket.close()
return
else:
if "events" in event_msg.keys():
event_msg = event_msg["events"] # convert from sequence
params = signature(hook_fn).parameters
if len(params) == 1 or len(params) == 3:
try:
if len(params) == 1:
new_event_msg = hook_fn(event_msg)
elif len(params) == 3:
new_event_msg = hook_fn(event_msg, bridge, event_queue)
except Exception as e:
warnings.warn("exception in acquisition hook: {}".format(e))
continue
else:
raise Exception("Incorrect number of arguments for hook function. Must be 1 or 3")
if isinstance(new_event_msg, list):
new_event_msg = {
"events": new_event_msg
} # convert back to the expected format for a sequence
push_socket.send(new_event_msg)
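# Illustrative sketch (not part of the original module): an acquisition hook
# with the one-argument signature accepted above. Returning the event
# (possibly modified) keeps it in the acquisition pipeline; such a function
# could be passed as any of the *_hook_fn arguments of Acquisition below.
def _example_log_hook(event):
    print("hook saw event:", event)
    return event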
def _run_image_processor(
bridge_port, pull_port, push_port, sockets_connected_evt, process_fn, event_queue, debug
):
"""
Parameters
----------
pull_port :
push_port :
sockets_connected_evt :
process_fn :
event_queue :
debug :
Returns
-------
"""
with Bridge(debug=debug, port=bridge_port) as bridge:
push_socket = bridge._connect_push(pull_port)
pull_socket = bridge._connect_pull(push_port)
if debug:
print("image processing sockets connected")
sockets_connected_evt.set()
def process_and_sendoff(image_tags_tuple, original_dtype):
"""
Parameters
----------
image_tags_tuple :
Returns
-------
"""
if len(image_tags_tuple) != 2:
raise Exception("If image is returned, it must be of the form (pixel, metadata)")
pixels = image_tags_tuple[0]
metadata = image_tags_tuple[1]
# only accepts same pixel type as original
if not np.issubdtype(image_tags_tuple[0].dtype, original_dtype) and not np.issubdtype(
original_dtype, image_tags_tuple[0].dtype
):
raise Exception(
"Processed image pixels must have same dtype as input image pixels, "
"but instead they were {} and {}".format(image_tags_tuple[0].dtype, pixels.dtype)
)
if metadata['PixelType'] == 'RGB32':
if pixels.shape[-1] == 3:
# append 0 for alpha channel because that's what's expected
pixels = np.concatenate([pixels, np.zeros_like(pixels[..., 0])[..., None]], axis=2)
else:
#maybe pixel type was changed by processing?
metadata["PixelType"] = "GRAY8" if pixels.dtype.itemsize == 1 else "GRAY16"
processed_img = {
"pixels": pixels.tobytes(),
"metadata": metadata,
}
push_socket.send(processed_img)
while True:
message = None
while message is None:
message = pull_socket.receive(timeout=30) # check for new message
if "special" in message and message["special"] == "finished":
push_socket.send(message) # Continue propagating the finished signal
push_socket.close()
pull_socket.close()
return
metadata = message["metadata"]
pixels = deserialize_array(message["pixels"])
if metadata['PixelType'] == 'RGB32':
image = np.reshape(pixels, [metadata["Height"], metadata["Width"], 4])[..., :3]
else:
image = np.reshape(pixels, [metadata["Height"], metadata["Width"]])
params = signature(process_fn).parameters
if len(params) == 2 or len(params) == 4:
processed = None
try:
if len(params) == 2:
processed = process_fn(image, metadata)
elif len(params) == 4:
processed = process_fn(image, metadata, bridge, event_queue)
except Exception as e:
warnings.warn("exception in image processor: {}".format(e))
continue
else:
raise Exception(
"Incorrect number of arguments for image processing function, must be 2 or 4"
)
if processed is None:
continue
if type(processed) == list:
for image in processed:
process_and_sendoff(image, pixels.dtype)
else:
process_and_sendoff(processed, pixels.dtype)
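# Illustrative sketch (not part of the original module): an image processor
# with the two-argument signature accepted above. It returns the
# (pixels, metadata) tuple that process_and_sendoff expects; returning None
# instead would divert the image from saving and display. The 'Comment'
# metadata key is a hypothetical example, not a required field. Pass such a
# function as image_process_fn when constructing an Acquisition.
def _example_flip_processor(image, metadata):
    flipped = np.flipud(image).astype(image.dtype)  # preserve the input dtype
    tags = dict(metadata)
    tags['Comment'] = 'flipped'
    return flipped, tags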
class Acquisition(object, metaclass=NumpyDocstringInheritanceMeta):
"""
Base class for Pycro-Manager acquisitions
"""
def __init__(
self,
directory: str=None,
name: str=None,
image_process_fn : callable=None,
event_generation_hook_fn: callable=None,
pre_hardware_hook_fn: callable=None,
post_hardware_hook_fn: callable=None,
post_camera_hook_fn: callable=None,
show_display: bool=True,
image_saved_fn: callable=None,
process: bool=False,
saving_queue_size: int=20,
bridge_timeout: int=500,
port: int=Bridge.DEFAULT_PORT,
debug: bool=False,
core_log_debug: bool=False,
):
"""
Parameters
----------
directory : str
saving directory for this acquisition. Required unless an image process function will be
implemented that diverts images from saving
name : str
Saving name for the acquisition. Required unless an image process function will be
implemented that diverts images from saving
image_process_fn : Callable
image processing function that will be called on each image that gets acquired.
Can either take two arguments (image, metadata), where image is a numpy array and metadata is a dict
containing the corresponding image metadata, or a 4 argument version that accepts (image,
metadata, bridge, queue), where bridge is an instance of the pycromanager.acquire.Bridge
object for the purposes of interacting with arbitrary code on the Java side (such as the micro-manager
core) and queue is a Queue object that holds upcoming acquisition events. Both versions must
return either a (pixels, metadata) tuple, a list of such tuples, or None to divert the image from saving
event_generation_hook_fn : Callable
hook function that will be run as soon as acquisition events are generated (before hardware sequencing optimization
in the acquisition engine). This is useful if one wants to modify acquisition events that they didn't generate
(e.g. those generated by a GUI application). Accepts either one argument (the current acquisition event)
or three arguments (current event, bridge, event Queue)
pre_hardware_hook_fn : Callable
hook function that will be run just before the hardware is updated before acquiring
a new image. In the case of hardware sequencing, it will be run just before a sequence of instructions are
dispatched to the hardware. Accepts either one argument (the current acquisition event) or three arguments
(current event, bridge, event Queue)
post_hardware_hook_fn : Callable
hook function that will be run just after the hardware is updated, before acquiring
a new image. In the case of hardware sequencing, it will be run just after a sequence of instructions are
dispatched to the hardware, but before the camera sequence has been started. Accepts either one argument
(the current acquisition event) or three arguments (current event, bridge, event Queue)
post_camera_hook_fn : Callable
hook function that will be run just after the camera has been triggered to snapImage or
startSequence. A common use case for this hook is when one wants to send TTL triggers to the camera from an
external timing device that synchronizes with other hardware. Accepts either one argument (the current
acquisition event) or three arguments (current event, bridge, event Queue)
show_display : bool
show the image viewer window
image_saved_fn : Callable
function that takes two arguments (the Axes of the image that just finished saving, and the Dataset)
and gets called whenever a new image is written to disk
process : bool
Use multiprocessing instead of multithreading for acquisition hooks and image
processors. This can be used to speed up CPU-bounded processing by eliminating bottlenecks
caused by Python's Global Interpreter Lock, but also creates complications on Windows-based
systems
saving_queue_size : int
The number of images to queue (in memory) while waiting to write to disk. Higher values should
in theory allow sequence acquisitions to go faster, but requires the RAM to hold images while
they are waiting to save
bridge_timeout :
Timeout in ms of all operations going through the Bridge
port :
Allows overriding the default port for using Java side servers on a different port
debug : bool
whether to print debug messages
core_log_debug : bool
Print debug messages on java side in the micro-manager core log
"""
self._bridge_timeout = bridge_timeout
self.bridge = Bridge(debug=debug, port=port, timeout=bridge_timeout)
self._bridge_port = port
self._debug = debug
self._dataset = None
self._finished = False
# Get a dict of all named argument values (or default values when nothing provided)
arg_names = [k for k in signature(Acquisition.__init__).parameters.keys() if k != 'self']
l = locals()
named_args = {arg_name: (l[arg_name] if arg_name in l else
dict(signature(Acquisition.__init__).parameters.items())[arg_name].default)
for arg_name in arg_names }
if directory is not None:
# Expand ~ in the path
directory = os.path.expanduser(directory)
# If path is relative, retain knowledge of the current working directory
named_args['directory'] = os.path.abspath(directory)
self._create_event_queue(**named_args)
self._create_remote_acquisition(**named_args)
self._initialize_image_processor(**named_args)
self._initialize_hooks(**named_args)
self._remote_acq.start()
self._dataset_disk_location = (
self._remote_acq.get_storage().get_disk_location()
if self._remote_acq.get_storage() is not None
else None
)
self._start_events()
if image_saved_fn is not None:
self._dataset = Dataset(remote_storage_monitor=self._remote_acq.get_storage_monitor())
self._storage_monitor_thread = self._dataset._add_storage_monitor_fn(
callback_fn=image_saved_fn, debug=self._debug
)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._event_queue is not None: # Magellan acquisitions don't have this
# this should shut down storage and viewer as appropriate
self._event_queue.put(None)
# now wait on it to finish
self.await_completion()
def _start_events(self, **kwargs):
self.event_port = self._remote_acq.get_event_port()
self._event_thread = threading.Thread(
target=_run_acq_event_source,
args=(self._bridge_port, self.event_port, self._event_queue, self._bridge_timeout, self._debug),
name="Event sending",
)
self._event_thread.start()
def _initialize_image_processor(self, **kwargs):
if kwargs['image_process_fn'] is not None:
java_processor = self.bridge.construct_java_object(
"org.micromanager.remote.RemoteImageProcessor"
)
self._remote_acq.add_image_processor(java_processor)
self._processor_thread = self._start_processor(
java_processor, kwargs['image_process_fn'], self._event_queue, process=kwargs['process'])
def _initialize_hooks(self, **kwargs):
self._hook_threads = []
if kwargs['event_generation_hook_fn'] is not None:
hook = self.bridge.construct_java_object(
"org.micromanager.remote.RemoteAcqHook", args=[self._remote_acq]
)
self._hook_threads.append(self._start_hook(hook, kwargs['event_generation_hook_fn'],
self._event_queue, process=kwargs['process']))
self._remote_acq.add_hook(hook, self._remote_acq.EVENT_GENERATION_HOOK)
if kwargs['pre_hardware_hook_fn'] is not None:
hook = self.bridge.construct_java_object(
"org.micromanager.remote.RemoteAcqHook", args=[self._remote_acq]
)
self._hook_threads.append(self._start_hook(hook,
kwargs['pre_hardware_hook_fn'], self._event_queue,
process=kwargs['process']))
self._remote_acq.add_hook(hook, self._remote_acq.BEFORE_HARDWARE_HOOK)
if kwargs['post_hardware_hook_fn'] is not None:
hook = self.bridge.construct_java_object(
"org.micromanager.remote.RemoteAcqHook", args=[self._remote_acq]
)
self._hook_threads.append(self._start_hook(hook, kwargs['post_hardware_hook_fn'],
self._event_queue, process=kwargs['process']))
self._remote_acq.add_hook(hook, self._remote_acq.AFTER_HARDWARE_HOOK)
if kwargs['post_camera_hook_fn'] is not None:
hook = self.bridge.construct_java_object(
"org.micromanager.remote.RemoteAcqHook", args=[self._remote_acq]
)
self._hook_threads.append(self._start_hook(hook, kwargs['post_camera_hook_fn'],
self._event_queue, process=kwargs['process']))
self._remote_acq.add_hook(hook, self._remote_acq.AFTER_CAMERA_HOOK)
def _create_event_queue(self, **kwargs):
# Create thread safe queue for events so they can be passed from multiple processes
self._event_queue = multiprocessing.Queue() if kwargs['process'] else queue.Queue()
def _create_remote_acquisition(self, **kwargs):
core = self.bridge.get_core()
acq_factory = self.bridge.construct_java_object(
"org.micromanager.remote.RemoteAcquisitionFactory", args=[core]
)
show_viewer = kwargs['show_display'] and (kwargs['directory'] is not None and kwargs['name'] is not None)
self._remote_acq = acq_factory.create_acquisition(
kwargs['directory'],
kwargs['name'],
show_viewer,
kwargs['saving_queue_size'],
kwargs['core_log_debug'],
)
def get_dataset(self):
"""
Get access to the dataset backing this acquisition. If the acquisition is in progress,
return a Dataset object that wraps the java class containing it. If the acquisition is finished,
load the dataset from disk on the Python side for better performance
"""
if self._finished:
if self._dataset is None or self._dataset._remote_storage_monitor is not None:
self._dataset = Dataset(self._dataset_disk_location)
elif self._dataset is None:
# Load remote storage
self._dataset = Dataset(remote_storage_monitor=self._remote_acq.get_storage_monitor())
# Monitor image arrival so they can be loaded on python side, but with no callback function
self._storage_monitor_thread = self._dataset._add_storage_monitor_fn(callback_fn=None, debug=self._debug)
return self._dataset
def await_completion(self):
"""Wait for acquisition to finish and resources to be cleaned up"""
while not self._remote_acq.is_finished():
time.sleep(0.1)
self._remote_acq = None
# Wait on all the other threads to shut down properly
if hasattr(self, '_storage_monitor_thread'):
self._storage_monitor_thread.join()
for hook_thread in self._hook_threads:
hook_thread.join()
if hasattr(self, '_event_thread'):
self._event_thread.join()
self.bridge.close()
self._finished = True
def acquire(self, events: dict or list, keep_shutter_open=False):
"""Submit an event or a list of events for acquisition. Optimizations (i.e. taking advantage of
hardware synchronization, where available), will take place across this list of events, but not
over multiple calls of this method. A single event is a python dictionary with a specific structure
Parameters
----------
events : list, dict
A single acquisition event (a dict) or a list of acquisition events
keep_shutter_open :
(Default value = False)
Returns
-------
"""
if keep_shutter_open and isinstance(events, list):
for e in events:
e["keep_shutter_open"] = True
events.append(
{"keep_shutter_open": False}
) # return to autoshutter, don't acquire an image
elif keep_shutter_open and isinstance(events, dict):
events["keep_shutter_open"] = True
events = [
events,
{"keep_shutter_open": False},
] # return to autoshutter, don't acquire an image
self._event_queue.put(events)
def _start_hook(self, remote_hook : JavaObjectShadow, remote_hook_fn : callable, event_queue, process):
"""
Parameters
----------
remote_hook :
remote_hook_fn :
event_queue :
process :
Returns
-------
"""
hook_connected_evt = multiprocessing.Event() if process else threading.Event()
pull_port = remote_hook.get_pull_port()
push_port = remote_hook.get_push_port()
hook_thread = (multiprocessing.Process if process else threading.Thread)(
target=_run_acq_hook,
name="AcquisitionHook",
args=(
self._bridge_port,
pull_port,
push_port,
hook_connected_evt,
event_queue,
remote_hook_fn,
self._debug,
),
)
# if process else threading.Thread(target=_acq_hook_fn, args=(), name='AcquisitionHook')
hook_thread.start()
hook_connected_evt.wait() # wait for push/pull sockets to connect
return hook_thread
def _start_processor(self, processor, process_fn, event_queue, process):
"""
Parameters
----------
processor :
process_fn :
event_queue :
process :
Returns
-------
"""
# this must start first
processor.start_pull()
sockets_connected_evt = multiprocessing.Event() if process else threading.Event()
pull_port = processor.get_pull_port()
push_port = processor.get_push_port()
processor_thread = (multiprocessing.Process if process else threading.Thread)(
target=_run_image_processor,
args=(
self._bridge_port,
pull_port,
push_port,
sockets_connected_evt,
process_fn,
event_queue,
self._debug,
),
name="ImageProcessor",
)
processor_thread.start()
sockets_connected_evt.wait() # wait for push/pull sockets to connect
processor.start_push()
return processor_thread
class XYTiledAcquisition(Acquisition):
"""
For making tiled images with an XY stage and multiresolution saving
(e.g. for making one large contiguous image of a sample larger than the field of view)
"""
def __init__(
self,
tile_overlap : int or tuple,
directory: str=None,
name: str=None,
max_multi_res_index: int=None,
image_process_fn: callable=None,
pre_hardware_hook_fn: callable=None,
post_hardware_hook_fn: callable=None,
post_camera_hook_fn: callable=None,
show_display: bool=True,
image_saved_fn: callable=None,
process: bool=False,
saving_queue_size: int=20,
bridge_timeout: int=500,
port: int=Bridge.DEFAULT_PORT,
debug: bool=False,
core_log_debug: bool=False,
):
"""
Parameters
----------
tile_overlap : int or tuple of int
If given, XY tiles will be laid out in a grid and multi-resolution saving will be
activated. The argument can be a two element tuple describing the pixel overlaps between adjacent
tiles. i.e. (pixel_overlap_x, pixel_overlap_y), or an integer to use the same overlap for both.
For these features to work, the current hardware configuration must have a valid affine transform
between camera coordinates and XY stage coordinates
max_multi_res_index : int
Maximum index to downsample to in multi-res pyramid mode. 0 is no downsampling,
1 is downsampled up to 2x, 2 is downsampled up to 4x, etc. If not provided, it will be dynamically
calculated and updated from data
"""
self.tile_overlap = tile_overlap
self.max_multi_res_index = max_multi_res_index
# Collect all argument values except the ones specific to XY tiled acquisitions
arg_names = list(signature(self.__init__).parameters.keys())
arg_names.remove('tile_overlap')
arg_names.remove('max_multi_res_index')
l = locals()
named_args = {arg_name: l[arg_name] for arg_name in arg_names}
super().__init__(**named_args)
def _create_remote_acquisition(self, **kwargs):
core = self.bridge.get_core()
acq_factory = self.bridge.construct_java_object(
"org.micromanager.remote.RemoteAcquisitionFactory", args=[core]
)
show_viewer = kwargs['show_display'] and (kwargs['directory'] is not None and kwargs['name'] is not None)
if type(self.tile_overlap) is tuple:
x_overlap, y_overlap = self.tile_overlap
else:
x_overlap = self.tile_overlap
y_overlap = self.tile_overlap
self._remote_acq = acq_factory.create_tiled_acquisition(
kwargs['directory'],
kwargs['name'],
show_viewer,
True,
x_overlap,
y_overlap,
self.max_multi_res_index if self.max_multi_res_index is not None else -1,
kwargs['saving_queue_size'],
kwargs['core_log_debug'],
)
class MagellanAcquisition(Acquisition):
"""
Class used for launching Micro-Magellan acquisitions. Must pass either magellan_acq_index
or magellan_explore as an argument
"""
def __init__(
self,
magellan_acq_index: int=None,
magellan_explore: bool=False,
image_process_fn: callable=None,
event_generation_hook_fn: callable=None,
pre_hardware_hook_fn: callable=None,
post_hardware_hook_fn: callable=None,
post_camera_hook_fn: callable=None,
image_saved_fn: callable=None,
bridge_timeout: int=500,
port: int=Bridge.DEFAULT_PORT,
debug: bool=False,
core_log_debug: bool=False,
):
"""
Parameters
----------
magellan_acq_index : int
run this acquisition using the settings specified at this position in the main
GUI of micro-magellan (micro-manager plugin). This index starts at 0
magellan_explore : bool
Run a Micro-magellan explore acquisition
"""
self.magellan_acq_index = magellan_acq_index
self.magellan_explore = magellan_explore
# Collect all argument values except the ones specific to Magellan
arg_names = list(signature(self.__init__).parameters.keys())
arg_names.remove('magellan_acq_index')
arg_names.remove('magellan_explore')
l = locals()
named_args = {arg_name: l[arg_name] for arg_name in arg_names}
super().__init__(**named_args)
def _start_events(self, **kwargs):
pass # Magellan handles this on Java side
def _create_event_queue(self, **kwargs):
pass # Magellan handles this on Java side
def _create_remote_acquisition(self, **kwargs):
if self.magellan_acq_index is not None:
magellan_api = self.bridge.get_magellan()
self._remote_acq = magellan_api.create_acquisition(self.magellan_acq_index)
self._event_queue = None
elif self.magellan_explore:
magellan_api = self.bridge.get_magellan()
self._remote_acq = magellan_api.create_explore_acquisition()
self._event_queue = None
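# Usage sketch (illustrative, not part of the original module): submit a short
# time lapse to a basic Acquisition. The 'axes'/'exposure' keys follow the
# Pycro-Manager acquisition event convention, the directory/name are
# hypothetical placeholders, and a running Micro-Manager instance is assumed.
def _example_timelapse():
    with Acquisition(directory="/tmp/acq_demo", name="demo") as acq:
        events = [{"axes": {"time": t}, "exposure": 10} for t in range(5)]
        acq.acquire(events)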
|
openolt_device.py
|
#
# Copyright 2018 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import threading
import binascii
import grpc
import structlog
from twisted.internet import reactor
from scapy.layers.l2 import Ether, Dot1Q
from transitions import Machine
from voltha.protos.device_pb2 import Port, Device
from voltha.protos.common_pb2 import OperStatus, AdminState, ConnectStatus
from voltha.protos.logical_device_pb2 import LogicalDevice
from voltha.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
OFPPS_LINK_DOWN, OFPPF_1GB_FD, OFPC_GROUP_STATS, OFPC_PORT_STATS, \
OFPC_TABLE_STATS, OFPC_FLOW_STATS, ofp_switch_features, ofp_port, \
ofp_port_stats, ofp_desc
from voltha.protos.logical_device_pb2 import LogicalPort
from voltha.core.logical_device_agent import mac_str_to_tuple
from voltha.registry import registry
from voltha.adapters.openolt.protos import openolt_pb2_grpc, openolt_pb2
from voltha.protos.bbf_fiber_tcont_body_pb2 import TcontsConfigData
from voltha.protos.bbf_fiber_gemport_body_pb2 import GemportsConfigData
from voltha.protos.bbf_fiber_base_pb2 import VEnetConfig
import voltha.core.flow_decomposer as fd
from voltha.adapters.openolt.openolt_statistics import OpenOltStatisticsMgr
import voltha.adapters.openolt.openolt_platform as platform
from voltha.adapters.openolt.openolt_flow_mgr import OpenOltFlowMgr, \
DEFAULT_MGMT_VLAN
from voltha.adapters.openolt.openolt_alarms import OpenOltAlarmMgr
from voltha.adapters.openolt.openolt_bw import OpenOltBW
from voltha.extensions.alarms.onu.onu_discovery_alarm import OnuDiscoveryAlarm
class OpenoltDevice(object):
"""
OpenoltDevice state machine:
null ----> init ------> connected -----> up -----> down
             ^ ^             |            ^         | |
             | |             |            |         | |
             | +-------------+            +---------+ |
             |                                        |
             +----------------------------------------+
"""
# pylint: disable=too-many-instance-attributes
# pylint: disable=R0904
states = [
'state_null',
'state_init',
'state_connected',
'state_up',
'state_down']
transitions = [
{'trigger': 'go_state_init',
'source': ['state_null', 'state_connected', 'state_down'],
'dest': 'state_init',
'before': 'do_state_init'},
{'trigger': 'go_state_connected',
'source': 'state_init',
'dest': 'state_connected',
'before': 'do_state_connected'},
{'trigger': 'go_state_up',
'source': ['state_connected', 'state_down'],
'dest': 'state_up',
'before': 'do_state_up'},
{'trigger': 'go_state_down',
'source': ['state_up'],
'dest': 'state_down',
'before': 'do_state_down'}]
def __init__(self, **kwargs):
super(OpenoltDevice, self).__init__()
self.adapter_agent = kwargs['adapter_agent']
self.device_num = kwargs['device_num']
device = kwargs['device']
is_reconciliation = kwargs.get('reconciliation', False)
self.device_id = device.id
self.host_and_port = device.host_and_port
self.log = structlog.get_logger(id=self.device_id,
ip=self.host_and_port)
self.proxy = registry('core').get_proxy('/')
# Device already set in the event of reconciliation
if not is_reconciliation:
# It is a new device
# Update device
device.root = True
device.serial_number = self.host_and_port # FIXME
device.connect_status = ConnectStatus.UNREACHABLE
device.oper_status = OperStatus.ACTIVATING
self.adapter_agent.update_device(device)
# If logical device does not exist create it
if not device.parent_id:
dpid = '00:00:' + self.ip_hex(self.host_and_port.split(":")[0])
# Create logical OF device
ld = LogicalDevice(
root_device_id=self.device_id,
switch_features=ofp_switch_features(
n_buffers=256, # TODO fake for now
n_tables=2, # TODO ditto
capabilities=( # TODO and ditto
OFPC_FLOW_STATS
| OFPC_TABLE_STATS
| OFPC_PORT_STATS
| OFPC_GROUP_STATS
)
),
desc=ofp_desc(
serial_num=device.serial_number
)
)
ld_init = self.adapter_agent.create_logical_device(ld,
dpid=dpid)
self.logical_device_id = ld_init.id
else:
# logical device already exists
self.logical_device_id = device.parent_id
if is_reconciliation:
self.adapter_agent.reconcile_logical_device(
self.logical_device_id)
# Initialize the OLT state machine
self.machine = Machine(model=self, states=OpenoltDevice.states,
transitions=OpenoltDevice.transitions,
send_event=True, initial='state_null')
self.go_state_init()
def do_state_init(self, event):
# Initialize gRPC
self.channel = grpc.insecure_channel(self.host_and_port)
self.channel_ready_future = grpc.channel_ready_future(self.channel)
# Catch RuntimeError exception
try:
# Start indications thread
self.indications_thread_handle = threading.Thread(
target=self.indications_thread)
# Mark the indications thread as a daemon so it does not keep the
# interpreter alive; this avoids spurious Jenkins failures of the form
# "Exception in thread Thread-1 (most likely raised during
# interpreter shutdown)". setDaemon() is the legacy getter/setter API.
self.indications_thread_handle.setDaemon(True)
self.indications_thread_handle.start()
except Exception as e:
self.log.exception('do_state_init failed', e=e)
'''
# FIXME - Move to oper_up state without connecting to OLT?
if is_reconciliation:
# Put state machine in state up
reactor.callFromThread(self.go_state_up, reconciliation=True)
'''
self.log.info('openolt-device-created', device_id=self.device_id)
def do_state_connected(self, event):
self.log.debug("do_state_connected")
device = self.adapter_agent.get_device(self.device_id)
device.connect_status = ConnectStatus.REACHABLE
self.adapter_agent.update_device(device)
self.stub = openolt_pb2_grpc.OpenoltStub(self.channel)
self.flow_mgr = OpenOltFlowMgr(self.log, self.stub, self.device_id)
self.alarm_mgr = OpenOltAlarmMgr(self.log, self.adapter_agent,
self.device_id,
self.logical_device_id)
self.stats_mgr = OpenOltStatisticsMgr(self, self.log)
self.bw_mgr = OpenOltBW(self.log, self.proxy)
def do_state_up(self, event):
self.log.debug("do_state_up")
device = self.adapter_agent.get_device(self.device_id)
# Update phys OF device
device.parent_id = self.logical_device_id
device.oper_status = OperStatus.ACTIVE
self.adapter_agent.update_device(device)
def do_state_down(self, event):
self.log.debug("do_state_down")
oper_state = OperStatus.UNKNOWN
connect_state = ConnectStatus.UNREACHABLE
# Propagating to the children
# Children ports
child_devices = self.adapter_agent.get_child_devices(self.device_id)
for onu_device in child_devices:
uni_no = platform.mk_uni_port_num(
onu_device.proxy_address.channel_id,
onu_device.proxy_address.onu_id)
uni_name = self.port_name(uni_no, Port.ETHERNET_UNI,
serial_number=onu_device.serial_number)
self.onu_ports_down(onu_device, uni_no, uni_name, oper_state)
# Children devices
self.adapter_agent.update_child_devices_state(
self.device_id, oper_status=oper_state,
connect_status=connect_state)
# Device Ports
device_ports = self.adapter_agent.get_ports(self.device_id,
Port.ETHERNET_NNI)
logical_ports_ids = [port.label for port in device_ports]
device_ports += self.adapter_agent.get_ports(self.device_id,
Port.PON_OLT)
for port in device_ports:
port.oper_status = oper_state
self.adapter_agent.add_port(self.device_id, port)
# Device logical port
for logical_port_id in logical_ports_ids:
logical_port = self.adapter_agent.get_logical_port(
self.logical_device_id, logical_port_id)
logical_port.ofp_port.state = OFPPS_LINK_DOWN
self.adapter_agent.update_logical_port(self.logical_device_id,
logical_port)
# Device
device = self.adapter_agent.get_device(self.device_id)
device.oper_status = oper_state
device.connect_status = connect_state
self.adapter_agent.update_device(device)
def indications_thread(self):
self.log.debug('starting-indications-thread')
self.log.debug('connecting to olt', device_id=self.device_id)
self.channel_ready_future.result() # blocking call
self.log.info('connected to olt', device_id=self.device_id)
self.go_state_connected()
self.indications = self.stub.EnableIndication(openolt_pb2.Empty())
while True:
try:
# get the next indication from olt
ind = next(self.indications)
except Exception as e:
self.log.warn('gRPC connection lost', error=e)
reactor.callFromThread(self.go_state_down)
reactor.callFromThread(self.go_state_init)
break
else:
self.log.debug("rx indication", indication=ind)
# indication handlers run in the main event loop
if ind.HasField('olt_ind'):
reactor.callFromThread(self.olt_indication, ind.olt_ind)
elif ind.HasField('intf_ind'):
reactor.callFromThread(self.intf_indication, ind.intf_ind)
elif ind.HasField('intf_oper_ind'):
reactor.callFromThread(self.intf_oper_indication,
ind.intf_oper_ind)
elif ind.HasField('onu_disc_ind'):
reactor.callFromThread(self.onu_discovery_indication,
ind.onu_disc_ind)
elif ind.HasField('onu_ind'):
reactor.callFromThread(self.onu_indication, ind.onu_ind)
elif ind.HasField('omci_ind'):
reactor.callFromThread(self.omci_indication, ind.omci_ind)
elif ind.HasField('pkt_ind'):
reactor.callFromThread(self.packet_indication, ind.pkt_ind)
elif ind.HasField('port_stats'):
reactor.callFromThread(
self.stats_mgr.port_statistics_indication,
ind.port_stats)
elif ind.HasField('flow_stats'):
reactor.callFromThread(
self.stats_mgr.flow_statistics_indication,
ind.flow_stats)
elif ind.HasField('alarm_ind'):
reactor.callFromThread(self.alarm_mgr.process_alarms,
ind.alarm_ind)
else:
self.log.warn('unknown indication type')
def olt_indication(self, olt_indication):
if olt_indication.oper_state == "up":
self.go_state_up()
elif olt_indication.oper_state == "down":
self.go_state_down()
def intf_indication(self, intf_indication):
self.log.debug("intf indication", intf_id=intf_indication.intf_id,
oper_state=intf_indication.oper_state)
if intf_indication.oper_state == "up":
oper_status = OperStatus.ACTIVE
else:
oper_status = OperStatus.DISCOVERED
# add_port update the port if it exists
self.add_port(intf_indication.intf_id, Port.PON_OLT, oper_status)
def intf_oper_indication(self, intf_oper_indication):
self.log.debug("Received interface oper state change indication",
intf_id=intf_oper_indication.intf_id,
type=intf_oper_indication.type,
oper_state=intf_oper_indication.oper_state)
if intf_oper_indication.oper_state == "up":
oper_state = OperStatus.ACTIVE
else:
oper_state = OperStatus.DISCOVERED
if intf_oper_indication.type == "nni":
# FIXME - creating logical port for 2nd interface throws exception!
if intf_oper_indication.intf_id != 0:
return
# add_(logical_)port update the port if it exists
port_no, label = self.add_port(intf_oper_indication.intf_id,
Port.ETHERNET_NNI, oper_state)
self.log.debug("int_oper_indication", port_no=port_no, label=label)
self.add_logical_port(port_no, intf_oper_indication.intf_id,
oper_state)
elif intf_oper_indication.type == "pon":
# FIXME - handle PON oper state change
pass
def onu_discovery_indication(self, onu_disc_indication):
intf_id = onu_disc_indication.intf_id
serial_number = onu_disc_indication.serial_number
serial_number_str = self.stringify_serial_number(serial_number)
self.log.debug("onu discovery indication", intf_id=intf_id,
serial_number=serial_number_str)
# Post an ONU Discovery alarm
try:
OnuDiscoveryAlarm(self.alarm_mgr.alarms, pon_id=intf_id, serial_number=serial_number_str).raise_alarm()
except Exception as disc_alarm_error:
self.log.exception("onu-discovery-alarm-error", errmsg=disc_alarm_error.message)
# continue for now.
pir = self.bw_mgr.pir(serial_number_str)
self.log.debug("peak information rate", serial_number=serial_number,
pir=pir)
onu_device = self.adapter_agent.get_child_device(
self.device_id,
serial_number=serial_number_str)
if onu_device is None:
onu_id = self.new_onu_id(intf_id)
try:
self.add_onu_device(
intf_id,
platform.intf_id_to_port_no(intf_id, Port.PON_OLT),
onu_id, serial_number)
self.log.info("activate-onu", intf_id=intf_id, onu_id=onu_id,
serial_number=serial_number_str)
onu = openolt_pb2.Onu(intf_id=intf_id, onu_id=onu_id,
serial_number=serial_number, pir=pir)
self.stub.ActivateOnu(onu)
except Exception as e:
self.log.exception('onu-activation-failed', e=e)
else:
if onu_device.connect_status != ConnectStatus.REACHABLE:
onu_device.connect_status = ConnectStatus.REACHABLE
self.adapter_agent.update_device(onu_device)
onu_id = onu_device.proxy_address.onu_id
if onu_device.oper_status == OperStatus.DISCOVERED \
or onu_device.oper_status == OperStatus.ACTIVATING:
self.log.debug("ignore onu discovery indication, \
the onu has been discovered and should be \
activating shortly", intf_id=intf_id,
onu_id=onu_id, state=onu_device.oper_status)
elif onu_device.oper_status == OperStatus.ACTIVE:
self.log.warn("onu discovery indication whereas onu is \
supposed to be active",
intf_id=intf_id, onu_id=onu_id,
state=onu_device.oper_status)
elif onu_device.oper_status == OperStatus.UNKNOWN:
self.log.info("onu in unknown state, recovering from olt \
reboot, activate onu", intf_id=intf_id,
onu_id=onu_id, serial_number=serial_number_str)
onu_device.oper_status = OperStatus.DISCOVERED
self.adapter_agent.update_device(onu_device)
onu = openolt_pb2.Onu(intf_id=intf_id, onu_id=onu_id,
serial_number=serial_number, pir=pir)
self.stub.ActivateOnu(onu)
else:
self.log.warn('unexpected state', onu_id=onu_id,
onu_device_oper_state=onu_device.oper_status)
def onu_indication(self, onu_indication):
self.log.debug("onu indication", intf_id=onu_indication.intf_id,
onu_id=onu_indication.onu_id,
serial_number=onu_indication.serial_number,
oper_state=onu_indication.oper_state,
admin_state=onu_indication.admin_state)
try:
serial_number_str = self.stringify_serial_number(
onu_indication.serial_number)
except Exception as e:
serial_number_str = None
if serial_number_str is not None:
onu_device = self.adapter_agent.get_child_device(
self.device_id,
serial_number=serial_number_str)
else:
onu_device = self.adapter_agent.get_child_device(
self.device_id,
parent_port_no=platform.intf_id_to_port_no(
onu_indication.intf_id, Port.PON_OLT),
onu_id=onu_indication.onu_id)
if onu_device is None:
self.log.error('onu not found', intf_id=onu_indication.intf_id,
onu_id=onu_indication.onu_id)
return
if onu_device.connect_status != ConnectStatus.REACHABLE:
onu_device.connect_status = ConnectStatus.REACHABLE
self.adapter_agent.update_device(onu_device)
if platform.intf_id_from_pon_port_no(onu_device.parent_port_no) \
!= onu_indication.intf_id:
self.log.warn('ONU-is-on-a-different-intf-id-now',
previous_intf_id=platform.intf_id_from_pon_port_no(
onu_device.parent_port_no),
current_intf_id=onu_indication.intf_id)
# FIXME - handle intf_id mismatch (ONU move?)
if onu_device.proxy_address.onu_id != onu_indication.onu_id:
# FIXME - handle onu id mismatch
self.log.warn('ONU-id-mismatch, can happen if both voltha and '
'the olt rebooted',
expected_onu_id=onu_device.proxy_address.onu_id,
received_onu_id=onu_indication.onu_id)
uni_no = platform.mk_uni_port_num(onu_indication.intf_id,
onu_indication.onu_id)
uni_name = self.port_name(uni_no, Port.ETHERNET_UNI,
serial_number=onu_device.serial_number)
self.log.debug('port-number-ready', uni_no=uni_no, uni_name=uni_name)
# Admin state
if onu_indication.admin_state == 'down':
if onu_indication.oper_state != 'down':
self.log.error('ONU-admin-state-down-and-oper-status-not-down',
oper_state=onu_indication.oper_state)
# Forcing the oper state change code to execute
onu_indication.oper_state = 'down'
# Port and logical port update is taken care of by oper state block
elif onu_indication.admin_state == 'up':
pass
else:
self.log.warn('Invalid-or-not-implemented-admin-state',
received_admin_state=onu_indication.admin_state)
self.log.debug('admin-state-dealt-with')
onu_adapter_agent = \
registry('adapter_loader').get_agent(onu_device.adapter)
if onu_adapter_agent is None:
self.log.error('onu_adapter_agent-could-not-be-retrieved',
onu_device=onu_device)
return
# Operating state
if onu_indication.oper_state == 'down':
# Move to discovered state
self.log.debug('onu-oper-state-is-down')
if onu_device.oper_status != OperStatus.DISCOVERED:
onu_device.oper_status = OperStatus.DISCOVERED
self.adapter_agent.update_device(onu_device)
# Set port oper state to Discovered
self.onu_ports_down(onu_device, uni_no, uni_name,
OperStatus.DISCOVERED)
if onu_device.adapter == 'brcm_openomci_onu':
self.log.debug('using-brcm_openomci_onu')
onu_adapter_agent.update_interface(onu_device, onu_indication)
elif onu_indication.oper_state == 'up':
if onu_device.oper_status != OperStatus.DISCOVERED:
self.log.debug("ignore onu indication",
intf_id=onu_indication.intf_id,
onu_id=onu_indication.onu_id,
state=onu_device.oper_status,
msg_oper_state=onu_indication.oper_state)
return
# Device was in Discovered state, setting it to active
# Prepare onu configuration
# If we are using the old/current broadcom adapter; otherwise
# use the openomci adapter
if onu_device.adapter == 'broadcom_onu':
self.log.debug('using-broadcom_onu')
# onu initialization, base configuration (bridge setup ...)
def onu_initialization():
onu_adapter_agent.adapter.devices_handlers[onu_device.id] \
.message_exchange(cvid=DEFAULT_MGMT_VLAN)
self.log.debug('broadcom-message-exchange-started')
# tcont creation (onu)
tcont = TcontsConfigData()
tcont.alloc_id = platform.mk_alloc_id(onu_indication.onu_id)
# gem port creation
gem_port = GemportsConfigData()
gem_port.gemport_id = platform.mk_gemport_id(
onu_indication.onu_id)
# ports creation/update
def port_config():
# "v_enet" creation (olt)
# add_port update port when it exists
self.adapter_agent.add_port(
self.device_id,
Port(
port_no=uni_no,
label=uni_name,
type=Port.ETHERNET_UNI,
admin_state=AdminState.ENABLED,
oper_status=OperStatus.ACTIVE))
# v_enet creation (onu)
venet = VEnetConfig(name=uni_name)
venet.interface.name = uni_name
onu_adapter_agent.create_interface(onu_device, venet)
# ONU device status update in the datastore
def onu_update_oper_status():
onu_device.oper_status = OperStatus.ACTIVE
onu_device.connect_status = ConnectStatus.REACHABLE
self.adapter_agent.update_device(onu_device)
# FIXME : the asynchronicity has to be taken care of properly
onu_initialization()
reactor.callLater(10, onu_adapter_agent.create_tcont,
device=onu_device, tcont_data=tcont,
traffic_descriptor_data=None)
reactor.callLater(11, onu_adapter_agent.create_gemport,
onu_device, gem_port)
reactor.callLater(12, port_config)
reactor.callLater(12, onu_update_oper_status)
elif onu_device.adapter == 'brcm_openomci_onu':
self.log.debug('using-brcm_openomci_onu')
# tcont creation (onu)
tcont = TcontsConfigData()
tcont.alloc_id = platform.mk_alloc_id(onu_indication.onu_id)
# gem port creation
gem_port = GemportsConfigData()
gem_port.gemport_id = platform.mk_gemport_id(
onu_indication.onu_id)
gem_port.tcont_ref = str(tcont.alloc_id)
self.log.info('inject-tcont-gem-data-onu-handler',
onu_indication=onu_indication, tcont=tcont,
gem_port=gem_port)
onu_adapter_agent.create_tcont(onu_device, tcont,
traffic_descriptor_data=None)
onu_adapter_agent.create_gemport(onu_device, gem_port)
onu_adapter_agent.create_interface(onu_device, onu_indication)
else:
self.log.error('unsupported-openolt-onu-adapter')
else:
self.log.warn('Not-implemented-or-invalid-value-of-oper-state',
oper_state=onu_indication.oper_state)
def onu_ports_down(self, onu_device, uni_no, uni_name, oper_state):
# Set port oper state to Discovered
# add port will update port if it exists
self.adapter_agent.add_port(
self.device_id,
Port(
port_no=uni_no,
label=uni_name,
type=Port.ETHERNET_UNI,
admin_state=onu_device.admin_state,
oper_status=oper_state))
# Disable logical port
onu_ports = self.proxy.get('devices/{}/ports'.format(onu_device.id))
onu_port_id = None
for onu_port in onu_ports:
if onu_port.port_no == uni_no:
onu_port_id = onu_port.label
if onu_port_id is None:
self.log.error('matching-onu-port-label-not-found',
onu_id=onu_device.id, olt_id=self.device_id,
onu_ports=onu_ports)
return
try:
onu_logical_port = self.adapter_agent.get_logical_port(
logical_device_id=self.logical_device_id, port_id=onu_port_id)
onu_logical_port.ofp_port.state = OFPPS_LINK_DOWN
self.adapter_agent.update_logical_port(
logical_device_id=self.logical_device_id,
port=onu_logical_port)
self.log.debug('cascading-oper-state-to-port-and-logical-port')
except KeyError as e:
self.log.error('matching-onu-port-label-invalid',
onu_id=onu_device.id, olt_id=self.device_id,
onu_ports=onu_ports, onu_port_id=onu_port_id,
error=e)
def omci_indication(self, omci_indication):
self.log.debug("omci indication", intf_id=omci_indication.intf_id,
onu_id=omci_indication.onu_id)
onu_device = self.adapter_agent.get_child_device(
self.device_id, onu_id=omci_indication.onu_id)
self.adapter_agent.receive_proxied_message(onu_device.proxy_address,
omci_indication.pkt)
def packet_indication(self, pkt_indication):
self.log.debug("packet indication", intf_id=pkt_indication.intf_id,
gemport_id=pkt_indication.gemport_id,
flow_id=pkt_indication.flow_id)
onu_id = platform.onu_id_from_gemport_id(pkt_indication.gemport_id)
logical_port_num = platform.mk_uni_port_num(pkt_indication.intf_id,
onu_id)
pkt = Ether(pkt_indication.pkt)
kw = dict(logical_device_id=self.logical_device_id,
logical_port_no=logical_port_num)
self.adapter_agent.send_packet_in(packet=str(pkt), **kw)
def packet_out(self, egress_port, msg):
pkt = Ether(msg)
self.log.info('packet out', egress_port=egress_port,
packet=str(pkt).encode("HEX"))
# Find port type
egress_port_type = self.port_type(egress_port)
if egress_port_type == Port.ETHERNET_UNI:
if pkt.haslayer(Dot1Q):
outer_shim = pkt.getlayer(Dot1Q)
if isinstance(outer_shim.payload, Dot1Q):
# If double tag, remove the outer tag
payload = (
Ether(src=pkt.src, dst=pkt.dst, type=outer_shim.type) /
outer_shim.payload
)
else:
payload = pkt
else:
payload = pkt
send_pkt = binascii.unhexlify(str(payload).encode("HEX"))
self.log.debug(
'sending-packet-to-ONU', egress_port=egress_port,
intf_id=platform.intf_id_from_uni_port_num(egress_port),
onu_id=platform.onu_id_from_port_num(egress_port),
packet=str(payload).encode("HEX"))
onu_pkt = openolt_pb2.OnuPacket(
intf_id=platform.intf_id_from_uni_port_num(egress_port),
onu_id=platform.onu_id_from_port_num(egress_port),
pkt=send_pkt)
self.stub.OnuPacketOut(onu_pkt)
elif egress_port_type == Port.ETHERNET_NNI:
self.log.debug('sending-packet-to-uplink', egress_port=egress_port,
packet=str(pkt).encode("HEX"))
send_pkt = binascii.unhexlify(str(pkt).encode("HEX"))
uplink_pkt = openolt_pb2.UplinkPacket(
intf_id=platform.intf_id_from_nni_port_num(egress_port),
pkt=send_pkt)
self.stub.UplinkPacketOut(uplink_pkt)
else:
self.log.warn('Packet-out-to-this-interface-type-not-implemented',
egress_port=egress_port,
port_type=egress_port_type)
def send_proxied_message(self, proxy_address, msg):
omci = openolt_pb2.OmciMsg(intf_id=proxy_address.channel_id,
onu_id=proxy_address.onu_id, pkt=str(msg))
self.stub.OmciMsgOut(omci)
def add_onu_device(self, intf_id, port_no, onu_id, serial_number):
self.log.info("Adding ONU", port_no=port_no, onu_id=onu_id,
serial_number=serial_number)
# NOTE - channel_id of onu is set to intf_id
proxy_address = Device.ProxyAddress(device_id=self.device_id,
channel_id=intf_id, onu_id=onu_id,
onu_session_id=onu_id)
self.log.debug("Adding ONU", proxy_address=proxy_address)
serial_number_str = self.stringify_serial_number(serial_number)
self.adapter_agent.add_onu_device(
parent_device_id=self.device_id, parent_port_no=port_no,
vendor_id=serial_number.vendor_id, proxy_address=proxy_address,
root=True, serial_number=serial_number_str,
admin_state=AdminState.ENABLED)
def port_name(self, port_no, port_type, intf_id=None, serial_number=None):
if port_type is Port.ETHERNET_NNI:
return "nni-" + str(port_no)
elif port_type is Port.PON_OLT:
return "pon" + str(intf_id)
elif port_type is Port.ETHERNET_UNI:
if serial_number is not None:
return serial_number
else:
return "uni-{}".format(port_no)
def port_type(self, port_no):
ports = self.adapter_agent.get_ports(self.device_id)
for port in ports:
if port.port_no == port_no:
return port.type
return None
def add_logical_port(self, port_no, intf_id, oper_state):
self.log.info('adding-logical-port', port_no=port_no)
label = self.port_name(port_no, Port.ETHERNET_NNI)
cap = OFPPF_1GB_FD | OFPPF_FIBER
curr_speed = OFPPF_1GB_FD
max_speed = OFPPF_1GB_FD
if oper_state == OperStatus.ACTIVE:
of_oper_state = OFPPS_LIVE
else:
of_oper_state = OFPPS_LINK_DOWN
ofp = ofp_port(
port_no=port_no,
hw_addr=mac_str_to_tuple('00:00:00:00:00:%02x' % port_no),
name=label, config=0, state=of_oper_state, curr=cap,
advertised=cap, peer=cap, curr_speed=curr_speed,
max_speed=max_speed)
ofp_stats = ofp_port_stats(port_no=port_no)
logical_port = LogicalPort(
id=label, ofp_port=ofp, device_id=self.device_id,
device_port_no=port_no, root_port=True,
ofp_port_stats=ofp_stats)
self.adapter_agent.add_logical_port(self.logical_device_id,
logical_port)
def add_port(self, intf_id, port_type, oper_status):
port_no = platform.intf_id_to_port_no(intf_id, port_type)
label = self.port_name(port_no, port_type, intf_id)
self.log.debug('adding-port', port_no=port_no, label=label,
port_type=port_type)
port = Port(port_no=port_no, label=label, type=port_type,
admin_state=AdminState.ENABLED, oper_status=oper_status)
self.adapter_agent.add_port(self.device_id, port)
return port_no, label
def new_onu_id(self, intf_id):
onu_id = None
onu_devices = self.adapter_agent.get_child_devices(self.device_id)
for i in range(1, 512):
id_not_taken = True
for child_device in onu_devices:
if child_device.proxy_address.onu_id == i:
id_not_taken = False
break
if id_not_taken:
onu_id = i
break
return onu_id
def stringify_vendor_specific(self, vendor_specific):
return ''.join(str(i) for i in [
hex(ord(vendor_specific[0]) >> 4 & 0x0f)[2:],
hex(ord(vendor_specific[0]) & 0x0f)[2:],
hex(ord(vendor_specific[1]) >> 4 & 0x0f)[2:],
hex(ord(vendor_specific[1]) & 0x0f)[2:],
hex(ord(vendor_specific[2]) >> 4 & 0x0f)[2:],
hex(ord(vendor_specific[2]) & 0x0f)[2:],
hex(ord(vendor_specific[3]) >> 4 & 0x0f)[2:],
hex(ord(vendor_specific[3]) & 0x0f)[2:]])
def update_flow_table(self, flows):
if not self.is_state_up() and not self.is_state_connected():
self.log.info('OLT is down, ignore update flow table')
return
device = self.adapter_agent.get_device(self.device_id)
self.log.debug('update flow table', number_of_flows=len(flows))
for flow in flows:
is_down_stream = None
in_port = fd.get_in_port(flow)
assert in_port is not None
# Right now there is only one NNI port. Get the NNI port and
# compare it with the input port number. Need to find a better way.
ports = self.adapter_agent.get_ports(device.id, Port.ETHERNET_NNI)
for port in ports:
if (port.port_no == in_port):
self.log.debug('downstream-flow', in_port=in_port)
is_down_stream = True
break
if is_down_stream is None:
is_down_stream = False
self.log.debug('upstream-flow', in_port=in_port)
for flow in flows:
try:
self.flow_mgr.add_flow(flow, is_down_stream)
except grpc.RpcError as grpc_e:
if grpc_e.code() == grpc.StatusCode.ALREADY_EXISTS:
self.log.warn('flow already exists', e=grpc_e,
flow=flow)
else:
self.log.error('failed to add flow', flow=flow,
e=grpc_e)
except Exception as e:
self.log.error('failed to add flow', flow=flow, e=e)
def ip_hex(self, ip):
octets = ip.split(".")
hex_ip = []
for octet in octets:
octet_hex = hex(int(octet))
octet_hex = octet_hex.split('0x')[1]
octet_hex = octet_hex.rjust(2, '0')
hex_ip.append(octet_hex)
return ":".join(hex_ip)
def stringify_serial_number(self, serial_number):
return ''.join([serial_number.vendor_id,
self.stringify_vendor_specific(
serial_number.vendor_specific)])
def disable(self):
self.log.debug('sending-deactivate-olt-message',
device_id=self.device_id)
try:
# Send grpc call
self.stub.DisableOlt(openolt_pb2.Empty())
# The resulting indication will bring the OLT down
# self.go_state_down()
self.log.info('openolt device disabled')
except Exception as e:
self.log.error('Failure to disable openolt device', error=e)
def delete(self):
self.log.info('deleting-olt', device_id=self.device_id,
logical_device_id=self.logical_device_id)
try:
# Rebooting to reset the state
self.reboot()
# Removing logical device
self.proxy.remove('/logical_devices/{}'.
format(self.logical_device_id))
except Exception as e:
self.log.error('Failure to delete openolt device', error=e)
raise e
else:
self.log.info('successfully-deleted-olt', device_id=self.device_id)
def reenable(self):
self.log.debug('reenabling-olt', device_id=self.device_id)
try:
self.stub.ReenableOlt(openolt_pb2.Empty())
self.log.info('enabling-all-ports', device_id=self.device_id)
self.adapter_agent.enable_all_ports(self.device_id)
except Exception as e:
self.log.error('Failure to reenable openolt device', error=e)
else:
self.log.info('openolt device reenabled')
def disable_child_device(self, child_device):
self.log.debug('sending-disable-onu',
olt_device_id=self.device_id,
onu_device=child_device,
onu_serial_number=child_device.serial_number)
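        # Reconstruct the openolt SerialNumber protobuf from the stringified
        # serial number (hex-encoded vendor id + vendor-specific bytes).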
vendor_id = child_device.vendor_id.encode('hex')
vendor_specific = child_device.serial_number.replace(
child_device.vendor_id, '').encode('hex')
serial_number = openolt_pb2.SerialNumber(
vendor_id=vendor_id, vendor_specific=vendor_specific)
onu = openolt_pb2.Onu(intf_id=child_device.proxy_address.channel_id,
onu_id=child_device.proxy_address.onu_id,
serial_number=serial_number)
self.stub.DeactivateOnu(onu)
def delete_child_device(self, child_device):
self.log.debug('sending-deactivate-onu',
olt_device_id=self.device_id,
onu_device=child_device,
onu_serial_number=child_device.serial_number)
vendor_id = child_device.vendor_id.encode('hex')
vendor_specific = child_device.serial_number.replace(
child_device.vendor_id, '').encode('hex')
serial_number = openolt_pb2.SerialNumber(
vendor_id=vendor_id, vendor_specific=vendor_specific)
onu = openolt_pb2.Onu(intf_id=child_device.proxy_address.channel_id,
onu_id=child_device.proxy_address.onu_id,
serial_number=serial_number)
self.stub.DeleteOnu(onu)
def reboot(self):
        self.log.debug('rebooting openolt device', device_id=self.device_id)
try:
self.stub.Reboot(openolt_pb2.Empty())
except Exception as e:
self.log.error('something went wrong with the reboot', error=e)
else:
self.log.info('device rebooted')
|
runtests.py
|
import os, sys, tempfile, shutil, signal, traceback
from getopt import getopt
from optparse import OptionParser
import multiprocessing, Queue
from test import *
parser = OptionParser(
usage="Usage: %prog [options] tests...",
description="Run specified tests on the project. "
"By default, all tests are run.")
parser.add_option("-l", "--list",
action="store_true", dest="print_list", default=False,
help="Print a list of all available tests and their descriptions")
parser.add_option("-c", "--continue", action="store_true", dest="_continue",
default=False, help="Continue testing after a test failure")
parser.add_option("-n", "--no-copy", action="store_false", dest="local",
default=True,
help="By default, the project is copied to a local temp directory "
"before testing to avoid the poor performance of AFS. This option "
"disables that behavior. This option along with --stop may be "
"useful for debugging, since the project can be examined after a "
"failed test")
parser.add_option("-p", "--project-path", dest="project_path", default=".",
help="Path to the directory containing the project to be tested "
"(default: current directory)")
parser.add_option("-t", "--test-path", dest="tester_path", default="tests",
help="Path to the location of the test files")
parser.add_option("-q", "--quiet", action="store_true", dest="quiet",
default=False)
parser.add_option("-m", "--no-timeout", action="store_true", dest="notimeout",
default=False, help="Ignore timeouts on tests")
parser.add_option("-g", "--gdb", action="store_true", dest="gdb",
default=False, help="Run project executable inside a gdb session. " +
"implies -m")
parser.add_option("-v", "--valgrind", action="store_true", dest="valgrind",
default=False, help="Run project executable inside a valgrind session. ")
parser.add_option("-b", "--no-build", action="store_false", dest="build",
default=True,
help="do not automatically run build test before running other tests")
parser.add_option("-f", "--factor", dest="factor", default=1,
help="multiply all timeout lengths by FACTOR")
def main(build_test, all_tests):
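    # Parse command-line options, optionally copy the project to a temp
    # directory, assemble the list of tests to run, execute each test in a
    # separate process (with an optional timeout), and print a summary.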
(options, args) = parser.parse_args()
if options.gdb:
options.notimeout = True
tempdir = None
if options.local:
tempdir = tempfile.mkdtemp()
if not options.print_list:
shutil.copytree(src=options.project_path, dst=tempdir + "/p", symlinks=True)
project_path = tempdir + "/p"
else:
project_path = options.project_path
log = sys.stdout
if options.quiet:
log = open("/dev/null", "w")
_list = list()
if options.build:
_list.append(build_test)
if len(args) == 0:
_list.extend(all_tests)
for test_name in args:
if test_name == "all":
_list.extend(all_tests)
if test_name == "build":
_list.append(build_test)
else:
match = None
for test in all_tests:
if test.name == test_name:
match = test
break
if match is not None:
_list.append(match)
else:
sys.stderr.write(test_name + " is not a valid test\n")
exit(2)
if options.print_list:
for test in _list:
print test.name, "-", test.description
sys.exit(0)
ran = list()
tests_passed = 0
points = 0
total_points = 0
tests_skipped = 0
quitnow = False
for tester in _list:
test = tester(project_path, log=log, use_gdb=options.gdb,
use_valgrind=options.valgrind, test_path=options.tester_path)
log.write("\n")
log.write("*" * 70 + "\n")
log.write("Start running test " + test.name + "\n")
log.write(test.description + "\n")
log.write("\n")
#log.write("*" * 70 + "\n")
log.flush()
# run the test in a new process
result_queue = multiprocessing.Queue()
p = multiprocessing.Process(target=run_test, args=(test,result_queue))
p.start()
if options.notimeout or test.timeout is None:
timeout = None
else:
timeout = test.timeout * float(options.factor)
try:
# wait for the test result
result = result_queue.get(block=True, timeout=timeout)
p.join()
except Queue.Empty:
test.fail("Timelimit (" + str(timeout) + "s) exceeded")
result = test
except KeyboardInterrupt:
test.fail("User interrupted test")
result = test
quitnow = True
finally:
try:
#os.killpg(os.getpgid(p.pid), signal.SIGTERM)
os.kill(p.pid, signal.SIGTERM)
except OSError as e:
pass
result_queue.close()
try:
result.logfd = log
result.after()
except Exception as e:
(exception_type, value, tb) = sys.exc_info()
traceback.print_exception(exception_type, value, tb)
ran.append(result)
total_points += test.points()
log.flush()
if not result.is_failed():
points += test.points()
tests_passed += 1
log.write("\n")
log.write("\n")
log.write(str(result))
log.write("\n")
log.write("Finished running test " + test.name + "\n")
log.write("^" * 70 + "\n")
if result.is_failed() and not options._continue or quitnow:
log.write("Skipped " + str(len(_list) - len(ran)) + " tests.\n")
log.write("To keep testing after failing a test, use flag '-c' or '--continue'\n")
sys.exit(1)
log.write("\n")
log.write("\n")
log.write("*" * 70 + "\n")
log.write("*" * 70 + "\n")
log.write("** SUMMARY **\n")
log.write("*" * 70 + "\n")
log.write("*" * 70 + "\n")
log.write("\n")
for test in ran:
log.write(str(test) + "\n")
log.write("Passed " + str(tests_passed) + " of " + str(len(ran)) +
" tests.\n")
log.write("Overall " + str(tests_passed) + " of " + str(len(_list)) + "\n")
if total_points > 0:
log.write("Points " + str(points) + " of " + str(total_points) + "\n")
if options.quiet:
for test in ran:
print str(test)
print "Overall " + str(tests_passed) + " of " + str(len(_list))
if total_points > 0:
print "Points " + str(points) + " of " + str(total_points)
if tempdir is not None:
shutil.rmtree(tempdir)
if tests_passed == len(_list):
sys.exit(0)
else:
sys.exit(1)
if __name__ == "__main__":
    # `build_test` and `all_tests` are assumed to be provided by `from test import *`.
    main(build_test, all_tests)
|
test_utilities.py
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from grpc._cython import cygrpc
class SimpleFuture(object):
"""A simple future mechanism."""
def __init__(self, function, *args, **kwargs):
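        # Run `function` on a background thread, capturing either its return
        # value or the exception it raised for later retrieval via result().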
def wrapped_function():
try:
self._result = function(*args, **kwargs)
except Exception as error:
self._error = error
self._result = None
self._error = None
self._thread = threading.Thread(target=wrapped_function)
self._thread.start()
def result(self):
"""The resulting value of this future.
Re-raises any exceptions.
"""
self._thread.join()
if self._error:
# TODO(atash): re-raise exceptions in a way that preserves tracebacks
raise self._error
return self._result
class CompletionQueuePollFuture(SimpleFuture):
def __init__(self, completion_queue, deadline):
super(CompletionQueuePollFuture,
self).__init__(lambda: completion_queue.poll(deadline))
|
nameserver.py
|
import json
import logging
import threading
from pathlib import Path
import Pyro4.naming
logger = logging.getLogger(__name__)
def start_nameserver(ns_ip: str = '127.0.0.1',
ns_port: int = 0,
credentials_file: Path = Path.cwd() / f'HPBenchExpUtils_pyro4_nameserver_0.json',
thread_name: str = 'HPOBenchExpUtils'):
""" ns_port = 0 means a random port """
# Let the nameserver clear its registrations automatically every X seconds
from Pyro4.configuration import config
config.NS_AUTOCLEAN = 30
try:
uri, ns, _ = Pyro4.naming.startNS(host=ns_ip, port=ns_port)
except OSError as e:
logger.warning('Nameserver is already in use.')
raise e
ns_ip, ns_port = uri.location.split(':')
ns_port = int(ns_port)
# save credentials to file
with credentials_file.open('w') as fh:
json.dump([ns_ip, ns_port], fh)
logger.debug(f'The credentials file is here: {credentials_file}')
thread = threading.Thread(target=ns.requestLoop, name=thread_name, daemon=True)
thread.start()
logger.info(f'The nameserver is running on {ns_ip}:{ns_port}')
return ns_ip, ns_port
|
2.4-2.py
|
import threading
import time
import requests
def download_url(url):
print("Downloading the contents of {} from {}".format(url, threading.current_thread().name))
requests.get(url)
print("Download of {} done".format(url))
if __name__ == "__main__":
threads = []
test_dict = {
"Google": "http://www.google.com",
"Python": "http://www.python.org",
"Bing": "http://www.bing.com",
"Yahoo": "http://www.yahoo.com"
}
    # Create one download thread per URL in the dictionary.
    threads = [threading.Thread(target=download_url, args=(url,)) for url in test_dict.values()]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
|
context_test.py
|
# -*- coding: utf-8 -*-
'''
tests.unit.context_test
~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
from __future__ import absolute_import
import tornado.stack_context
import tornado.gen
from tornado.testing import AsyncTestCase, gen_test
import threading
import time
# Import Salt Testing libs
from salttesting import TestCase
from salt.ext.six.moves import range
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt libs
from salt.utils.context import ContextDict, NamespacedDictWrapper
class ContextDictTests(AsyncTestCase):
# how many threads/coroutines to run at a time
num_concurrent_tasks = 5
def setUp(self):
super(ContextDictTests, self).setUp()
self.cd = ContextDict()
# set a global value
self.cd['foo'] = 'global'
def test_threads(self):
'''Verify that ContextDict overrides properly within threads
'''
rets = []
def tgt(x, s):
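            # Each thread clones the shared ContextDict, overrides 'foo' inside
            # the clone, sleeps, and records the values it observed so the main
            # thread can verify per-thread isolation.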
inner_ret = []
over = self.cd.clone()
inner_ret.append(self.cd.get('foo'))
with over:
inner_ret.append(over.get('foo'))
over['foo'] = x
inner_ret.append(over.get('foo'))
time.sleep(s)
inner_ret.append(over.get('foo'))
rets.append(inner_ret)
threads = []
for x in range(0, self.num_concurrent_tasks):
s = self.num_concurrent_tasks - x
t = threading.Thread(target=tgt, args=(x, s))
t.start()
threads.append(t)
for t in threads:
t.join()
for r in rets:
self.assertEqual(r[0], r[1])
self.assertEqual(r[2], r[3])
@gen_test
def test_coroutines(self):
'''Verify that ContextDict overrides properly within coroutines
'''
@tornado.gen.coroutine
def secondary_coroutine(over):
raise tornado.gen.Return(over.get('foo'))
@tornado.gen.coroutine
def tgt(x, s, over):
inner_ret = []
# first grab the global
inner_ret.append(self.cd.get('foo'))
# grab the child's global (should match)
inner_ret.append(over.get('foo'))
# override the global
over['foo'] = x
inner_ret.append(over.get('foo'))
# sleep for some time to let other coroutines do this section of code
yield tornado.gen.sleep(s)
# get the value of the global again.
inner_ret.append(over.get('foo'))
# Call another coroutine to verify that we keep our context
r = yield secondary_coroutine(over)
inner_ret.append(r)
raise tornado.gen.Return(inner_ret)
futures = []
for x in range(0, self.num_concurrent_tasks):
s = self.num_concurrent_tasks - x
over = self.cd.clone()
f = tornado.stack_context.run_with_stack_context(
tornado.stack_context.StackContext(lambda: over), # pylint: disable=W0640
lambda: tgt(x, s/5.0, over), # pylint: disable=W0640
)
futures.append(f)
wait_iterator = tornado.gen.WaitIterator(*futures)
while not wait_iterator.done():
r = yield next(wait_iterator)
            self.assertEqual(r[0], r[1])  # verify that the global value remains
self.assertEqual(r[2], r[3]) # verify that the override sticks locally
self.assertEqual(r[3], r[4]) # verify that the override sticks across coroutines
def test_basic(self):
'''Test that the contextDict is a dict
'''
# ensure we get the global value
self.assertEqual(
dict(self.cd),
{'foo': 'global'},
)
def test_override(self):
over = self.cd.clone()
over['bar'] = 'global'
self.assertEqual(
dict(over),
{'foo': 'global', 'bar': 'global'},
)
self.assertEqual(
dict(self.cd),
{'foo': 'global'},
)
with over:
self.assertEqual(
dict(over),
{'foo': 'global', 'bar': 'global'},
)
self.assertEqual(
dict(self.cd),
{'foo': 'global', 'bar': 'global'},
)
over['bar'] = 'baz'
self.assertEqual(
dict(over),
{'foo': 'global', 'bar': 'baz'},
)
self.assertEqual(
dict(self.cd),
{'foo': 'global', 'bar': 'baz'},
)
self.assertEqual(
dict(over),
{'foo': 'global', 'bar': 'baz'},
)
self.assertEqual(
dict(self.cd),
{'foo': 'global'},
)
def test_multiple_contexts(self):
cds = []
for x in range(0, 10):
cds.append(self.cd.clone(bar=x))
for x, cd in enumerate(cds):
self.assertNotIn('bar', self.cd)
with cd:
self.assertEqual(
dict(self.cd),
{'bar': x, 'foo': 'global'},
)
self.assertNotIn('bar', self.cd)
class NamespacedDictWrapperTests(TestCase):
PREFIX = 'prefix'
def setUp(self):
self._dict = {}
def test_single_key(self):
self._dict['prefix'] = {'foo': 'bar'}
w = NamespacedDictWrapper(self._dict, 'prefix')
self.assertEqual(w['foo'], 'bar')
def test_multiple_key(self):
self._dict['prefix'] = {'foo': {'bar': 'baz'}}
w = NamespacedDictWrapper(self._dict, ('prefix', 'foo'))
self.assertEqual(w['bar'], 'baz')
|
__init__.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Implements context management so that nested/scoped contexts and threaded
contexts work properly and as expected.
"""
from __future__ import absolute_import
import collections
import functools
import logging
import os
import platform
import socket
import stat
import string
import subprocess
import sys
import threading
import time
import socks
from pwnlib.config import register_config
from pwnlib.device import Device
from pwnlib.timeout import Timeout
__all__ = ['context', 'ContextType', 'Thread']
_original_socket = socket.socket
class _devnull(object):
name = None
def write(self, *a, **kw): pass
def read(self, *a, **kw): return ''
def flush(self, *a, **kw): pass
def close(self, *a, **kw): pass
class _defaultdict(dict):
"""
Dictionary which loads missing keys from another dictionary.
    This is necessary because the ``default_factory`` method of
:class:`collections.defaultdict` does not provide the key.
Examples:
>>> a = {'foo': 'bar'}
>>> b = pwnlib.context._defaultdict(a)
>>> b['foo']
'bar'
>>> 'foo' in b
False
>>> b['foo'] = 'baz'
>>> b['foo']
'baz'
>>> del b['foo']
>>> b['foo']
'bar'
>>> a = {'foo': 'bar'}
>>> b = pwnlib.context._defaultdict(a)
>>> b['baz'] #doctest: +ELLIPSIS
Traceback (most recent call last):
...
KeyError: 'baz'
"""
def __init__(self, default=None):
super(_defaultdict, self).__init__()
if default is None:
default = {}
self.default = default
def __missing__(self, key):
return self.default[key]
class _DictStack(object):
"""
Manages a dictionary-like object, permitting saving and restoring from
a stack of states via :func:`push` and :func:`pop`.
The underlying object used as ``default`` must implement ``copy``, ``clear``,
and ``update``.
Examples:
>>> t = pwnlib.context._DictStack(default={})
>>> t['key'] = 'value'
>>> t
{'key': 'value'}
>>> t.push()
>>> t
{'key': 'value'}
>>> t['key'] = 'value2'
>>> t
{'key': 'value2'}
>>> t.pop()
>>> t
{'key': 'value'}
"""
def __init__(self, default):
self._current = _defaultdict(default)
self.__stack = []
def push(self):
self.__stack.append(self._current.copy())
def pop(self):
self._current.clear()
self._current.update(self.__stack.pop())
def copy(self):
return self._current.copy()
# Pass-through container emulation routines
def __len__(self): return self._current.__len__()
def __delitem__(self, k): return self._current.__delitem__(k)
def __getitem__(self, k): return self._current.__getitem__(k)
def __setitem__(self, k, v): return self._current.__setitem__(k, v)
def __contains__(self, k): return self._current.__contains__(k)
def __iter__(self): return self._current.__iter__()
def __repr__(self): return self._current.__repr__()
def __eq__(self, other): return self._current.__eq__(other)
# Required for keyword expansion operator ** to work
def keys(self): return self._current.keys()
def values(self): return self._current.values()
def items(self): return self._current.items()
class _Tls_DictStack(threading.local, _DictStack):
"""
Per-thread implementation of :class:`_DictStack`.
Examples:
>>> t = pwnlib.context._Tls_DictStack({})
>>> t['key'] = 'value'
>>> print t
{'key': 'value'}
>>> def p(): print t
>>> thread = threading.Thread(target=p)
>>> _ = (thread.start(), thread.join())
{}
"""
pass
def _validator(validator):
"""
    Validator that is tightly coupled to the implementation
of the classes here.
This expects that the object has a ._tls property which
is of type _DictStack.
"""
name = validator.__name__
doc = validator.__doc__
def fget(self):
return self._tls[name]
def fset(self, val):
self._tls[name] = validator(self, val)
def fdel(self):
self._tls._current.pop(name,None)
return property(fget, fset, fdel, doc)
class Thread(threading.Thread):
"""
Instantiates a context-aware thread, which inherit its context when it is
instantiated. The class can be accessed both on the context module as
`pwnlib.context.Thread` and on the context singleton object inside the
context module as `pwnlib.context.context.Thread`.
Threads created by using the native :class`threading`.Thread` will have a
clean (default) context.
Regardless of the mechanism used to create any thread, the context
is de-coupled from the parent thread, so changes do not cascade
to child or parent.
Saves a copy of the context when instantiated (at ``__init__``)
and updates the new thread's context before passing control
to the user code via ``run`` or ``target=``.
Examples:
>>> context.clear()
>>> context.update(arch='arm')
>>> def p():
... print context.arch
... context.arch = 'mips'
... print context.arch
>>> # Note that a normal Thread starts with a clean context
>>> # (i386 is the default architecture)
>>> t = threading.Thread(target=p)
>>> _=(t.start(), t.join())
i386
mips
>>> # Note that the main Thread's context is unchanged
>>> print context.arch
arm
>>> # Note that a context-aware Thread receives a copy of the context
>>> t = pwnlib.context.Thread(target=p)
>>> _=(t.start(), t.join())
arm
mips
>>> # Again, the main thread is unchanged
>>> print context.arch
arm
Implementation Details:
        This class is implemented by hooking the private function
        :func:`threading.Thread._Thread__bootstrap`, which is called before
passing control to :func:`threading.Thread.run`.
This could be done by overriding ``run`` itself, but we would have to
ensure that all uses of the class would only ever use the keyword
``target=`` for ``__init__``, or that all subclasses invoke
``super(Subclass.self).set_up_context()`` or similar.
"""
def __init__(self, *args, **kwargs):
super(Thread, self).__init__(*args, **kwargs)
self.old = context.copy()
def __bootstrap(self):
"""
Implementation Details:
This only works because the class is named ``Thread``.
If its name is changed, we have to implement this hook
differently.
"""
context.update(**self.old)
super(Thread, self).__bootstrap()
def _longest(d):
"""
Returns an OrderedDict with the contents of the input dictionary ``d``
sorted by the length of the keys, in descending order.
This is useful for performing substring matching via ``str.startswith``,
as it ensures the most complete match will be found.
>>> data = {'a': 1, 'bb': 2, 'ccc': 3}
>>> pwnlib.context._longest(data) == data
True
>>> for i in pwnlib.context._longest(data):
... print i
ccc
bb
a
"""
return collections.OrderedDict((k,d[k]) for k in sorted(d, key=len, reverse=True))
class TlsProperty(object):
def __get__(self, obj, objtype=None):
return obj._tls
class ContextType(object):
r"""
Class for specifying information about the target machine.
Intended for use as a pseudo-singleton through the global
variable :data:`.context`, available via
``from pwn import *`` as ``context``.
The context is usually specified at the top of the Python file for clarity. ::
#!/usr/bin/env python
context.update(arch='i386', os='linux')
Currently supported properties and their defaults are listed below.
The defaults are inherited from :data:`pwnlib.context.ContextType.defaults`.
Additionally, the context is thread-aware when using
:class:`pwnlib.context.Thread` instead of :class:`threading.Thread`
(all internal ``pwntools`` threads use the former).
The context is also scope-aware by using the ``with`` keyword.
Examples:
>>> context.clear()
>>> context.update(os='linux') # doctest: +ELLIPSIS
>>> context.os == 'linux'
True
>>> context.arch = 'arm'
>>> vars(context) == {'arch': 'arm', 'bits': 32, 'endian': 'little', 'os': 'linux'}
True
>>> context.endian
'little'
>>> context.bits
32
>>> def nop():
... print pwnlib.asm.asm('nop').encode('hex')
>>> nop()
00f020e3
>>> with context.local(arch = 'i386'):
... nop()
90
>>> from pwnlib.context import Thread as PwnThread
>>> from threading import Thread as NormalThread
>>> with context.local(arch = 'mips'):
... pwnthread = PwnThread(target=nop)
... thread = NormalThread(target=nop)
>>> # Normal thread uses the default value for arch, 'i386'
>>> _=(thread.start(), thread.join())
90
>>> # Pwnthread uses the correct context from creation-time
>>> _=(pwnthread.start(), pwnthread.join())
00000000
>>> nop()
00f020e3
"""
#
# Use of 'slots' is a heavy-handed way to prevent accidents
# like 'context.architecture=' instead of 'context.arch='.
#
# Setting any properties on a ContextType object will throw an
# exception.
#
__slots__ = '_tls',
#: Default values for :class:`pwnlib.context.ContextType`
defaults = {
'adb_host': 'localhost',
'adb_port': 5037,
'arch': 'i386',
'aslr': True,
'binary': None,
'bits': 32,
'buffer_size': 4096,
'delete_corefiles': False,
'device': os.getenv('ANDROID_SERIAL', None) or None,
'endian': 'little',
'kernel': None,
'log_level': logging.INFO,
'log_file': _devnull(),
'log_console': sys.stdout,
'randomize': False,
'rename_corefiles': True,
'newline': '\n',
'noptrace': False,
'os': 'linux',
'proxy': None,
'signed': False,
'terminal': tuple(),
'timeout': Timeout.maximum,
}
#: Valid values for :meth:`pwnlib.context.ContextType.os`
oses = sorted(('linux','freebsd','windows','cgc','android'))
big_32 = {'endian': 'big', 'bits': 32}
big_64 = {'endian': 'big', 'bits': 64}
little_8 = {'endian': 'little', 'bits': 8}
little_16 = {'endian': 'little', 'bits': 16}
little_32 = {'endian': 'little', 'bits': 32}
little_64 = {'endian': 'little', 'bits': 64}
#: Keys are valid values for :meth:`pwnlib.context.ContextType.arch`.
#
#: Values are defaults which are set when
#: :attr:`pwnlib.context.ContextType.arch` is set
architectures = _longest({
'aarch64': little_64,
'alpha': little_64,
'avr': little_8,
'amd64': little_64,
'arm': little_32,
'cris': little_32,
'i386': little_32,
'ia64': big_64,
'm68k': big_32,
'mips': little_32,
'mips64': little_64,
'msp430': little_16,
'powerpc': big_32,
'powerpc64': big_64,
's390': big_32,
'sparc': big_32,
'sparc64': big_64,
'thumb': little_32,
'vax': little_32,
})
#: Valid values for :attr:`endian`
endiannesses = _longest({
'be': 'big',
'eb': 'big',
'big': 'big',
'le': 'little',
'el': 'little',
'little': 'little'
})
#: Valid string values for :attr:`signed`
signednesses = {
'unsigned': False,
'no': False,
'yes': True,
'signed': True
}
valid_signed = sorted(signednesses)
def __init__(self, **kwargs):
"""
Initialize the ContextType structure.
All keyword arguments are passed to :func:`update`.
"""
self._tls = _Tls_DictStack(_defaultdict(ContextType.defaults))
self.update(**kwargs)
def copy(self):
"""copy() -> dict
Returns a copy of the current context as a dictionary.
Examples:
>>> context.clear()
>>> context.os = 'linux'
>>> vars(context) == {'os': 'linux'}
True
"""
return self._tls.copy()
@property
def __dict__(self):
return self.copy()
def update(self, *args, **kwargs):
"""
Convenience function, which is shorthand for setting multiple
variables at once.
It is a simple shorthand such that::
context.update(os = 'linux', arch = 'arm', ...)
is equivalent to::
context.os = 'linux'
context.arch = 'arm'
...
The following syntax is also valid::
context.update({'os': 'linux', 'arch': 'arm'})
Arguments:
kwargs: Variables to be assigned in the environment.
Examples:
>>> context.clear()
>>> context.update(arch = 'i386', os = 'linux')
>>> context.arch, context.os
('i386', 'linux')
"""
for arg in args:
self.update(**arg)
for k,v in kwargs.items():
setattr(self,k,v)
def __repr__(self):
v = sorted("%s = %r" % (k,v) for k,v in self._tls._current.items())
return '%s(%s)' % (self.__class__.__name__, ', '.join(v))
def local(self, function=None, **kwargs):
"""local(**kwargs) -> context manager
Create a context manager for use with the ``with`` statement.
For more information, see the example below or PEP 343.
Arguments:
kwargs: Variables to be assigned in the new environment.
Returns:
ContextType manager for managing the old and new environment.
Examples:
>>> context.clear()
>>> context.timeout = 1
>>> context.timeout == 1
True
>>> print context.timeout
1.0
>>> with context.local(timeout = 2):
... print context.timeout
... context.timeout = 3
... print context.timeout
2.0
3.0
>>> print context.timeout
1.0
"""
class LocalContext(object):
def __enter__(a):
self._tls.push()
self.update(**{k:v for k,v in kwargs.items() if v is not None})
return self
def __exit__(a, *b, **c):
self._tls.pop()
def __call__(self, function, *a, **kw):
@functools.wraps(function)
def inner(*a, **kw):
with self:
return function(*a, **kw)
return inner
return LocalContext()
@property
def silent(self, function=None):
"""Disable all non-error logging within the enclosed scope.
"""
return self.local(function, log_level='error')
@property
def quiet(self, function=None):
"""Disables all non-error logging within the enclosed scope,
*unless* the debugging level is set to 'debug' or lower."""
level = 'error'
if context.log_level <= logging.DEBUG:
level = None
return self.local(function, log_level=level)
def quietfunc(self, function):
"""Similar to :attr:`quiet`, but wraps a whole function."""
@functools.wraps(function)
def wrapper(*a, **kw):
level = 'error'
if context.log_level <= logging.DEBUG:
level = None
with self.local(function, log_level=level):
return function(*a, **kw)
return wrapper
@property
def verbose(self):
"""Enable all logging within the enclosed scope.
"""
return self.local(log_level='debug')
def clear(self, *a, **kw):
"""
Clears the contents of the context.
All values are set to their defaults.
Arguments:
a: Arguments passed to ``update``
kw: Arguments passed to ``update``
Examples:
>>> # Default value
>>> context.clear()
>>> context.arch == 'i386'
True
>>> context.arch = 'arm'
>>> context.arch == 'i386'
False
>>> context.clear()
>>> context.arch == 'i386'
True
"""
self._tls._current.clear()
if a or kw:
self.update(*a, **kw)
@property
def native(self):
if context.os in ('android', 'cgc'):
return False
arch = context.arch
with context.local(arch = platform.machine()):
platform_arch = context.arch
if arch in ('i386', 'amd64') and platform_arch in ('i386', 'amd64'):
return True
return arch == platform_arch
@_validator
def arch(self, arch):
"""
Target binary architecture.
Allowed values are listed in :attr:`pwnlib.context.ContextType.architectures`.
Side Effects:
If an architecture is specified which also implies additional
attributes (e.g. 'amd64' implies 64-bit words, 'powerpc' implies
big-endian), these attributes will be set on the context if a
user has not already set a value.
The following properties may be modified.
- :attr:`bits`
- :attr:`endian`
Raises:
AttributeError: An invalid architecture was specified
Examples:
>>> context.clear()
>>> context.arch == 'i386' # Default architecture
True
>>> context.arch = 'mips'
>>> context.arch == 'mips'
True
>>> context.arch = 'doge' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: arch must be one of ['aarch64', ..., 'thumb']
>>> context.arch = 'ppc'
>>> context.arch == 'powerpc' # Aliased architecture
True
>>> context.clear()
>>> context.bits == 32 # Default value
True
>>> context.arch = 'amd64'
>>> context.bits == 64 # New value
True
Note that expressly setting :attr:`bits` means that we use
that value instead of the default
>>> context.clear()
>>> context.bits = 32
>>> context.arch = 'amd64'
>>> context.bits == 32
True
Setting the architecture can override the defaults for
both :attr:`endian` and :attr:`bits`
>>> context.clear()
>>> context.arch = 'powerpc64'
>>> vars(context) == {'arch': 'powerpc64', 'bits': 64, 'endian': 'big'}
True
"""
# Lowercase
arch = arch.lower()
# Attempt to perform convenience and legacy compatibility transformations.
# We have to make sure that x86_64 appears before x86 for this to work correctly.
transform = [('ppc64', 'powerpc64'),
('ppc', 'powerpc'),
('x86_64', 'amd64'),
('x86', 'i386'),
('i686', 'i386'),
('armeabi', 'arm'),
('arm64', 'aarch64')]
for k, v in transform:
if arch.startswith(k):
arch = v
break
try:
defaults = ContextType.architectures[arch]
except KeyError:
            raise AttributeError('arch must be one of %r' % sorted(ContextType.architectures))
for k,v in ContextType.architectures[arch].items():
if k not in self._tls:
self._tls[k] = v
return arch
@_validator
def aslr(self, aslr):
"""
ASLR settings for new processes.
If :const:`False`, attempt to disable ASLR in all processes which are
created via ``personality`` (``setarch -R``) and ``setrlimit``
(``ulimit -s unlimited``).
The ``setarch`` changes are lost if a ``setuid`` binary is executed.
"""
return bool(aslr)
@_validator
def kernel(self, arch):
"""
Target machine's kernel architecture.
Usually, this is the same as ``arch``, except when
running a 32-bit binary on a 64-bit kernel (e.g. i386-on-amd64).
        Even then, this doesn't matter much -- only when the segment
        registers need to be known.
"""
with context.local(arch=arch):
return context.arch
@_validator
def bits(self, bits):
"""
Target machine word size, in bits (i.e. the size of general purpose registers).
The default value is ``32``, but changes according to :attr:`arch`.
Examples:
>>> context.clear()
>>> context.bits == 32
True
>>> context.bits = 64
>>> context.bits == 64
True
>>> context.bits = -1 #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: bits must be > 0 (-1)
"""
bits = int(bits)
if bits <= 0:
raise AttributeError("bits must be > 0 (%r)" % bits)
return bits
@_validator
def binary(self, binary):
"""
        Infer target architecture, bit-width, and endianness from a binary file.
Data type is a :class:`pwnlib.elf.ELF` object.
Examples:
>>> context.clear()
>>> context.arch, context.bits
('i386', 32)
>>> context.binary = '/bin/bash'
>>> context.arch, context.bits
('amd64', 64)
>>> context.binary
ELF('/bin/bash')
"""
# Cyclic imports... sorry Idolf.
from pwnlib.elf import ELF
if not isinstance(binary, ELF):
binary = ELF(binary)
self.arch = binary.arch
self.bits = binary.bits
self.endian = binary.endian
return binary
@property
def bytes(self):
"""
Target machine word size, in bytes (i.e. the size of general purpose registers).
This is a convenience wrapper around ``bits / 8``.
Examples:
>>> context.bytes = 1
>>> context.bits == 8
True
>>> context.bytes = 0 #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: bits must be > 0 (0)
"""
return self.bits/8
@bytes.setter
def bytes(self, value):
self.bits = value*8
@_validator
def endian(self, endianness):
"""
Endianness of the target machine.
The default value is ``'little'``, but changes according to :attr:`arch`.
Raises:
AttributeError: An invalid endianness was provided
Examples:
>>> context.clear()
>>> context.endian == 'little'
True
>>> context.endian = 'big'
>>> context.endian
'big'
>>> context.endian = 'be'
>>> context.endian == 'big'
True
>>> context.endian = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: endian must be one of ['be', 'big', 'eb', 'el', 'le', 'little']
"""
endian = endianness.lower()
if endian not in ContextType.endiannesses:
raise AttributeError("endian must be one of %r" % sorted(ContextType.endiannesses))
return ContextType.endiannesses[endian]
@_validator
def log_level(self, value):
"""
Sets the verbosity of ``pwntools`` logging mechanism.
More specifically it controls the filtering of messages that happens
inside the handler for logging to the screen. So if you want e.g. log
all messages to a file, then this attribute makes no difference to you.
Valid values are specified by the standard Python ``logging`` module.
Default value is set to ``INFO``.
Examples:
>>> context.log_level = 'error'
>>> context.log_level == logging.ERROR
True
>>> context.log_level = 10
>>> context.log_level = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: log_level must be an integer or one of ['CRITICAL', 'DEBUG', 'ERROR', 'INFO', 'NOTSET', 'WARN', 'WARNING']
"""
# If it can be converted into an int, success
try: return int(value)
except ValueError: pass
# If it is defined in the logging module, success
try: return getattr(logging, value.upper())
except AttributeError: pass
# Otherwise, fail
level_names = filter(lambda x: isinstance(x,str), logging._levelNames)
permitted = sorted(level_names)
raise AttributeError('log_level must be an integer or one of %r' % permitted)
@_validator
def log_file(self, value):
r"""
Sets the target file for all logging output.
Works in a similar fashion to :attr:`log_level`.
Examples:
>>> context.log_file = 'foo.txt' #doctest: +ELLIPSIS
>>> log.debug('Hello!') #doctest: +ELLIPSIS
>>> with context.local(log_level='ERROR'): #doctest: +ELLIPSIS
... log.info('Hello again!')
>>> with context.local(log_file='bar.txt'):
... log.debug('Hello from bar!')
>>> log.info('Hello from foo!')
>>> file('foo.txt').readlines()[-3] #doctest: +ELLIPSIS
'...:DEBUG:...:Hello!\n'
>>> file('foo.txt').readlines()[-2] #doctest: +ELLIPSIS
'...:INFO:...:Hello again!\n'
>>> file('foo.txt').readlines()[-1] #doctest: +ELLIPSIS
'...:INFO:...:Hello from foo!\n'
>>> file('bar.txt').readlines()[-1] #doctest: +ELLIPSIS
'...:DEBUG:...:Hello from bar!\n'
"""
if isinstance(value, (str,unicode)):
modes = ('w', 'wb', 'a', 'ab')
# check if mode was specified as "[value],[mode]"
if ',' not in value:
value += ',a'
filename, mode = value.rsplit(',', 1)
value = open(filename, mode)
elif not isinstance(value, (file)):
raise AttributeError('log_file must be a file')
# Is this the same file we already have open?
# If so, don't re-print the banner.
if self.log_file and not isinstance(self.log_file, _devnull):
a = os.fstat(value.fileno()).st_ino
b = os.fstat(self.log_file.fileno()).st_ino
if a == b:
return self.log_file
iso_8601 = '%Y-%m-%dT%H:%M:%S'
lines = [
'=' * 78,
' Started at %s ' % time.strftime(iso_8601),
' sys.argv = [',
]
for arg in sys.argv:
lines.append(' %r,' % arg)
lines.append(' ]')
lines.append('=' * 78)
for line in lines:
value.write('=%-78s=\n' % line)
value.flush()
return value
@_validator
def log_console(self, stream):
"""
Sets the default logging console target.
Examples:
>>> context.log_level = 'warn'
>>> log.warn("Hello")
[!] Hello
>>> context.log_console=open('/dev/null', 'w')
>>> log.warn("Hello")
>>> context.clear()
"""
if isinstance(stream, str):
stream = open(stream, 'wt')
return stream
@property
def mask(self):
return (1 << self.bits) - 1
@_validator
def os(self, os):
"""
Operating system of the target machine.
The default value is ``linux``.
Allowed values are listed in :attr:`pwnlib.context.ContextType.oses`.
Examples:
>>> context.os = 'linux'
>>> context.os = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: os must be one of ['android', 'cgc', 'freebsd', 'linux', 'windows']
"""
os = os.lower()
if os not in ContextType.oses:
raise AttributeError("os must be one of %r" % ContextType.oses)
return os
@_validator
def randomize(self, r):
"""
Global flag that lots of things should be randomized.
"""
return bool(r)
@_validator
def signed(self, signed):
"""
Signed-ness for packing operation when it's not explicitly set.
Can be set to any non-string truthy value, or the specific string
values ``'signed'`` or ``'unsigned'`` which are converted into
:const:`True` and :const:`False` correspondingly.
Examples:
>>> context.signed
False
>>> context.signed = 1
>>> context.signed
True
>>> context.signed = 'signed'
>>> context.signed
True
>>> context.signed = 'unsigned'
>>> context.signed
False
>>> context.signed = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: signed must be one of ['no', 'signed', 'unsigned', 'yes'] or a non-string truthy value
"""
try: signed = ContextType.signednesses[signed]
except KeyError: pass
if isinstance(signed, str):
raise AttributeError('signed must be one of %r or a non-string truthy value' % sorted(ContextType.signednesses))
return bool(signed)
@_validator
def timeout(self, value=Timeout.default):
"""
Default amount of time to wait for a blocking operation before it times out,
specified in seconds.
The default value is to have an infinite timeout.
See :class:`pwnlib.timeout.Timeout` for additional information on
valid values.
"""
return Timeout(value).timeout
@_validator
def terminal(self, value):
"""
Default terminal used by :meth:`pwnlib.util.misc.run_in_new_terminal`.
Can be a string or an iterable of strings. In the latter case the first
entry is the terminal and the rest are default arguments.
"""
if isinstance(value, (str, unicode)):
return [value]
return value
@property
def abi(self):
return self._abi
@_validator
def proxy(self, proxy):
"""
Default proxy for all socket connections.
Accepts either a string (hostname or IP address) for a SOCKS5 proxy on
the default port, **or** a ``tuple`` passed to ``socks.set_default_proxy``,
e.g. ``(socks.SOCKS4, 'localhost', 1234)``.
>>> context.proxy = 'localhost' #doctest: +ELLIPSIS
>>> r=remote('google.com', 80)
Traceback (most recent call last):
...
ProxyConnectionError: Error connecting to SOCKS5 proxy localhost:1080: [Errno 111] Connection refused
>>> context.proxy = None
>>> r=remote('google.com', 80, level='error')
"""
if not proxy:
socket.socket = _original_socket
return None
if isinstance(proxy, str):
proxy = (socks.SOCKS5, proxy)
if not isinstance(proxy, collections.Iterable):
raise AttributeError('proxy must be a string hostname, or tuple of arguments for socks.set_default_proxy')
socks.set_default_proxy(*proxy)
socket.socket = socks.socksocket
return proxy
@_validator
def noptrace(self, value):
"""Disable all actions which rely on ptrace.
This is useful for switching between local exploitation with a debugger,
and remote exploitation (without a debugger).
This option can be set with the ``NOPTRACE`` command-line argument.
"""
return bool(value)
@_validator
def adb_host(self, value):
"""Sets the target host which is used for ADB.
This is useful for Android exploitation.
The default value is inherited from ANDROID_ADB_SERVER_HOST, or set
to the default 'localhost'.
"""
return str(value)
@_validator
def adb_port(self, value):
"""Sets the target port which is used for ADB.
This is useful for Android exploitation.
The default value is inherited from ANDROID_ADB_SERVER_PORT, or set
to the default 5037.
"""
return int(value)
@_validator
def device(self, device):
"""Sets the device being operated on.
"""
if isinstance(device, Device):
self.arch = device.arch or self.arch
self.bits = device.bits or self.bits
self.endian = device.endian or self.endian
self.os = device.os or self.os
elif isinstance(device, str):
device = Device(device)
elif device is not None:
raise AttributeError("device must be either a Device object or a serial number as a string")
return device
@property
def adb(self):
"""Returns an argument array for connecting to adb.
Unless ``$ADB_PATH`` is set, uses the default ``adb`` binary in ``$PATH``.
"""
ADB_PATH = os.environ.get('ADB_PATH', 'adb')
command = [ADB_PATH]
if self.adb_host != self.defaults['adb_host']:
command += ['-H', self.adb_host]
if self.adb_port != self.defaults['adb_port']:
command += ['-P', str(self.adb_port)]
if self.device:
command += ['-s', str(self.device)]
return command
@_validator
def buffer_size(self, size):
"""Internal buffer size to use for :class:`pwnlib.tubes.tube.tube` objects.
This is not the maximum size of the buffer, but this is the amount of data
which is passed to each raw ``read`` syscall (or equivalent).
"""
return int(size)
@property
def cache_dir(self):
"""Directory used for caching data.
Note:
May be either a path string, or :const:`None`.
Example:
>>> cache_dir = context.cache_dir
>>> cache_dir is not None
True
>>> os.chmod(cache_dir, 0o000)
>>> context.cache_dir is None
True
>>> os.chmod(cache_dir, 0o755)
>>> cache_dir == context.cache_dir
True
"""
home = os.path.expanduser('~')
if not os.access(home, os.W_OK):
return None
cache = os.path.join(home, '.pwntools-cache')
if not os.path.exists(cache):
try:
os.mkdir(cache)
except OSError:
return None
# Some wargames e.g. pwnable.kr have created dummy directories
# which cannot be modified by the user account (owned by root).
if not os.access(cache, os.W_OK):
return None
return cache
@_validator
def delete_corefiles(self, v):
"""Whether pwntools automatically deletes corefiles after exiting.
This only affects corefiles accessed via :attr:`.process.corefile`.
Default value is ``False``.
"""
return bool(v)
@_validator
def rename_corefiles(self, v):
"""Whether pwntools automatically renames corefiles.
This is useful for two things:
- Prevent corefiles from being overwritten, if ``kernel.core_pattern``
is something simple like ``"core"``.
- Ensure corefiles are generated, if ``kernel.core_pattern`` uses ``apport``,
which refuses to overwrite any existing files.
This only affects corefiles accessed via :attr:`.process.corefile`.
Default value is ``True``.
"""
return bool(v)
#*************************************************************************
# ALIASES
#*************************************************************************
#
# These fields are aliases for fields defined above, either for
# convenience or compatibility.
#
#*************************************************************************
def __call__(self, **kwargs):
"""
Alias for :meth:`pwnlib.context.ContextType.update`
"""
return self.update(**kwargs)
def reset_local(self):
"""
Deprecated. Use :meth:`clear`.
"""
self.clear()
@property
def endianness(self):
"""
Legacy alias for :attr:`endian`.
Examples:
>>> context.endian == context.endianness
True
"""
return self.endian
@endianness.setter
def endianness(self, value):
self.endian = value
@property
def sign(self):
"""
Alias for :attr:`signed`
"""
return self.signed
@sign.setter
def sign(self, value):
self.signed = value
@property
def signedness(self):
"""
Alias for :attr:`signed`
"""
return self.signed
@signedness.setter
def signedness(self, value):
self.signed = value
@property
def word_size(self):
"""
Alias for :attr:`bits`
"""
return self.bits
@word_size.setter
def word_size(self, value):
self.bits = value
Thread = Thread
#: Global :class:`.ContextType` object, used to store commonly-used pwntools settings.
#:
#: In most cases, the context is used to infer default variables values.
#: For example, :func:`.asm` can take an ``arch`` parameter as a
#: keyword argument.
#:
#: If it is not supplied, the ``arch`` specified by ``context`` is used instead.
#:
#: Consider it a shorthand to passing ``os=`` and ``arch=`` to every single
#: function call.
context = ContextType()
# Inherit default ADB values
if 'ANDROID_ADB_SERVER_HOST' in os.environ:
context.adb_host = os.environ.get('ANDROID_ADB_SERVER_HOST')
if 'ANDROID_ADB_SERVER_PORT' in os.environ:
context.adb_port = int(os.getenv('ANDROID_ADB_SERVER_PORT'))
def LocalContext(function):
"""
Wraps the specified function on a context.local() block, using kwargs.
Example:
>>> context.clear()
>>> @LocalContext
... def printArch():
... print(context.arch)
>>> printArch()
i386
>>> printArch(arch='arm')
arm
"""
@functools.wraps(function)
def setter(*a, **kw):
# Fast path to skip adding a Context frame
if not kw:
return function(*a)
with context.local(**{k:kw.pop(k) for k,v in kw.items() if isinstance(getattr(ContextType, k, None), property)}):
return function(*a, **kw)
return setter
# Read configuration options from the context section
def update_context_defaults(section):
# Circular imports FTW!
from pwnlib.util import safeeval
from pwnlib.log import getLogger
log = getLogger(__name__)
for key, value in section.items():
if key not in ContextType.defaults:
log.warn("Unknown configuration option %r in section %r" % (key, 'context'))
continue
if isinstance(ContextType.defaults[key], (str, unicode, tuple)):
value = safeeval.expr(value)
ContextType.defaults[key] = value
register_config('context', update_context_defaults)
|
client.py
|
"""A semi-synchronous Client for IPython parallel"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
try:
from collections.abc import Iterable
except ImportError: # py2
from collections import Iterable
import socket
from concurrent.futures import Future
from getpass import getpass
import json
import os
from pprint import pprint
import sys
from threading import Thread, Event, current_thread
import time
import types
import warnings
pjoin = os.path.join
import zmq
from zmq.eventloop.zmqstream import ZMQStream
from traitlets.config.configurable import MultipleInstanceError
from IPython.core.application import BaseIPythonApplication
from IPython.core.profiledir import ProfileDir, ProfileDirError
from IPython import get_ipython
from IPython.utils.capture import RichOutput
from IPython.utils.coloransi import TermColors
from jupyter_client.localinterfaces import localhost, is_local_ip
from IPython.paths import get_ipython_dir
from IPython.utils.path import compress_user
from ipython_genutils.py3compat import cast_bytes, string_types, xrange, iteritems
from traitlets import (
HasTraits, Instance, Unicode,
Dict, List, Bool, Set, Any
)
from decorator import decorator
from ipyparallel import Reference
from ipyparallel import error
from ipyparallel import util
from jupyter_client.session import Session
from ipyparallel import serialize
from ipyparallel.serialize import PrePickled
from ..util import ioloop
from .asyncresult import AsyncResult, AsyncHubResult
from .futures import MessageFuture, multi_future
from .view import DirectView, LoadBalancedView
#--------------------------------------------------------------------------
# Decorators for Client methods
#--------------------------------------------------------------------------
@decorator
def unpack_message(f, self, msg_parts):
"""Unpack a message before calling the decorated method."""
idents, msg = self.session.feed_identities(msg_parts, copy=False)
try:
msg = self.session.deserialize(msg, content=True, copy=False)
except:
self.log.error("Invalid Message", exc_info=True)
else:
if self.debug:
pprint(msg)
return f(self, msg)
#--------------------------------------------------------------------------
# Classes
#--------------------------------------------------------------------------
_no_connection_file_msg = """
Failed to connect because no Controller could be found.
Please double-check your profile and ensure that a cluster is running.
"""
class ExecuteReply(RichOutput):
"""wrapper for finished Execute results"""
def __init__(self, msg_id, content, metadata):
self.msg_id = msg_id
self._content = content
self.execution_count = content['execution_count']
self.metadata = metadata
# RichOutput overrides
@property
def source(self):
execute_result = self.metadata['execute_result']
if execute_result:
return execute_result.get('source', '')
@property
def data(self):
execute_result = self.metadata['execute_result']
if execute_result:
return execute_result.get('data', {})
return {}
@property
def _metadata(self):
execute_result = self.metadata['execute_result']
if execute_result:
return execute_result.get('metadata', {})
return {}
def display(self):
from IPython.display import publish_display_data
publish_display_data(self.data, self.metadata)
def _repr_mime_(self, mime):
if mime not in self.data:
return
data = self.data[mime]
if mime in self._metadata:
return data, self._metadata[mime]
else:
return data
def _repr_mimebundle_(self, *args, **kwargs):
data, md = self.data, self.metadata
if 'text/plain' in data:
data = data.copy()
data['text/plain'] = self._plaintext()
return data, md
def __getitem__(self, key):
return self.metadata[key]
def __getattr__(self, key):
if key not in self.metadata:
raise AttributeError(key)
return self.metadata[key]
def __repr__(self):
execute_result = self.metadata['execute_result'] or {'data':{}}
text_out = execute_result['data'].get('text/plain', '')
if len(text_out) > 32:
text_out = text_out[:29] + '...'
return "<ExecuteReply[%i]: %s>" % (self.execution_count, text_out)
def _plaintext(self):
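        # Format the text/plain execute_result with an "Out[engine:count]:"
        # prompt, colorized when the active IPython shell has colors enabled.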
execute_result = self.metadata['execute_result'] or {'data':{}}
text_out = execute_result['data'].get('text/plain', '')
if not text_out:
return ''
ip = get_ipython()
if ip is None:
colors = "NoColor"
else:
colors = ip.colors
if colors == "NoColor":
out = normal = ""
else:
out = TermColors.Red
normal = TermColors.Normal
if '\n' in text_out and not text_out.startswith('\n'):
# add newline for multiline reprs
text_out = '\n' + text_out
return u''.join([
out,
u'Out[%i:%i]: ' % (
self.metadata['engine_id'], self.execution_count
),
normal,
text_out,
])
def _repr_pretty_(self, p, cycle):
p.text(self._plaintext())
class Metadata(dict):
"""Subclass of dict for initializing metadata values.
Attribute access works on keys.
These objects have a strict set of keys - errors will raise if you try
to add new keys.
"""
def __init__(self, *args, **kwargs):
dict.__init__(self)
md = {'msg_id' : None,
'submitted' : None,
'started' : None,
'completed' : None,
'received' : None,
'engine_uuid' : None,
'engine_id' : None,
'follow' : None,
'after' : None,
'status' : None,
'execute_input' : None,
'execute_result' : None,
'error' : None,
'stdout' : '',
'stderr' : '',
'outputs' : [],
'data': {},
}
self.update(md)
self.update(dict(*args, **kwargs))
def __getattr__(self, key):
"""getattr aliased to getitem"""
if key in self:
return self[key]
else:
raise AttributeError(key)
def __setattr__(self, key, value):
"""setattr aliased to setitem, with strict"""
if key in self:
self[key] = value
else:
raise AttributeError(key)
def __setitem__(self, key, value):
"""strict static key enforcement"""
if key in self:
dict.__setitem__(self, key, value)
else:
raise KeyError(key)
def _is_future(f):
"""light duck-typing check for Futures"""
return hasattr(f, 'add_done_callback')
class Client(HasTraits):
"""A semi-synchronous client to an IPython parallel cluster
Parameters
----------
url_file : str
The path to ipcontroller-client.json.
This JSON file should contain all the information needed to connect to a cluster,
and is likely the only argument needed.
Connection information for the Hub's registration. If a json connector
file is given, then likely no further configuration is necessary.
[Default: use profile]
profile : bytes
The name of the Cluster profile to be used to find connector information.
If run from an IPython application, the default profile will be the same
as the running application, otherwise it will be 'default'.
cluster_id : str
String id to added to runtime files, to prevent name collisions when using
multiple clusters with a single profile simultaneously.
When set, will look for files named like: 'ipcontroller-<cluster_id>-client.json'
Since this is text inserted into filenames, typical recommendations apply:
Simple character strings are ideal, and spaces are not recommended (but
should generally work)
context : zmq.Context
Pass an existing zmq.Context instance, otherwise the client will create its own.
debug : bool
flag for lots of message printing for debug purposes
timeout : float
time (in seconds) to wait for connection replies from the Hub
[Default: 10]
Other Parameters
----------------
sshserver : str
A string of the form passed to ssh, i.e. 'server.tld' or 'user@server.tld:port'
If keyfile or password is specified, and this is not, it will default to
the ip given in addr.
sshkey : str; path to ssh private key file
This specifies a key to be used in ssh login, default None.
Regular default ssh keys will be used without specifying this argument.
password : str
Your ssh password to sshserver. Note that if this is left None,
you will be prompted for it if passwordless key based login is unavailable.
paramiko : bool
flag for whether to use paramiko instead of shell ssh for tunneling.
[default: True on win32, False else]
Attributes
----------
ids : list of int engine IDs
requesting the ids attribute always synchronizes
the registration state. To request ids without synchronization,
use semi-private _ids attributes.
history : list of msg_ids
a list of msg_ids, keeping track of all the execution
messages you have submitted in order.
outstanding : set of msg_ids
a set of msg_ids that have been submitted, but whose
results have not yet been received.
results : dict
a dict of all our results, keyed by msg_id
block : bool
determines default behavior when block not specified
in execution methods
"""
block = Bool(False)
outstanding = Set()
results = Instance('collections.defaultdict', (dict,))
metadata = Instance('collections.defaultdict', (Metadata,))
history = List()
debug = Bool(False)
_futures = Dict()
_output_futures = Dict()
_io_loop = Any()
_io_thread = Any()
profile=Unicode()
def _profile_default(self):
if BaseIPythonApplication.initialized():
# an IPython app *might* be running, try to get its profile
try:
return BaseIPythonApplication.instance().profile
except (AttributeError, MultipleInstanceError):
# could be a *different* subclass of config.Application,
# which would raise one of these two errors.
return u'default'
else:
return u'default'
_outstanding_dict = Instance('collections.defaultdict', (set,))
_ids = List()
_connected=Bool(False)
_ssh=Bool(False)
_context = Instance('zmq.Context', allow_none=True)
_config = Dict()
_engines=Instance(util.ReverseDict, (), {})
_query_socket=Instance('zmq.Socket', allow_none=True)
_control_socket=Instance('zmq.Socket', allow_none=True)
_iopub_socket=Instance('zmq.Socket', allow_none=True)
_notification_socket=Instance('zmq.Socket', allow_none=True)
_mux_socket=Instance('zmq.Socket', allow_none=True)
_task_socket=Instance('zmq.Socket', allow_none=True)
_task_scheme=Unicode()
_closed = False
def __new__(self, *args, **kw):
# don't raise on positional args
return HasTraits.__new__(self, **kw)
def __init__(self, url_file=None, profile=None, profile_dir=None, ipython_dir=None,
context=None, debug=False,
sshserver=None, sshkey=None, password=None, paramiko=None,
timeout=10, cluster_id=None, **extra_args
):
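        # Locate the controller's connection file (either the explicit url_file
        # or ipcontroller[-<cluster_id>]-client.json in the profile's security
        # directory, waiting up to `timeout` seconds for it to appear), then
        # load it and set up the session and query socket (optionally over SSH).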
if profile:
super(Client, self).__init__(debug=debug, profile=profile)
else:
super(Client, self).__init__(debug=debug)
if context is None:
context = zmq.Context.instance()
self._context = context
if 'url_or_file' in extra_args:
url_file = extra_args['url_or_file']
warnings.warn("url_or_file arg no longer supported, use url_file", DeprecationWarning)
if url_file and util.is_url(url_file):
raise ValueError("single urls cannot be specified, url-files must be used.")
self._setup_profile_dir(self.profile, profile_dir, ipython_dir)
no_file_msg = '\n'.join([
"You have attempted to connect to an IPython Cluster but no Controller could be found.",
"Please double-check your configuration and ensure that a cluster is running.",
])
if self._cd is not None:
if url_file is None:
if not cluster_id:
client_json = 'ipcontroller-client.json'
else:
client_json = 'ipcontroller-%s-client.json' % cluster_id
url_file = pjoin(self._cd.security_dir, client_json)
short = compress_user(url_file)
if not os.path.exists(url_file):
print("Waiting for connection file: %s" % short)
waiting_time = 0.
while waiting_time < timeout:
time.sleep(min(timeout-waiting_time, 1))
waiting_time += 1
if os.path.exists(url_file):
break
if not os.path.exists(url_file):
msg = '\n'.join([
"Connection file %r not found." % short,
no_file_msg,
])
raise IOError(msg)
if url_file is None:
raise IOError(no_file_msg)
if not os.path.exists(url_file):
# Connection file explicitly specified, but not found
raise IOError("Connection file %r not found. Is a controller running?" % \
compress_user(url_file)
)
with open(url_file) as f:
cfg = json.load(f)
self._task_scheme = cfg['task_scheme']
# sync defaults from args, json:
if sshserver:
cfg['ssh'] = sshserver
location = cfg.setdefault('location', None)
proto,addr = cfg['interface'].split('://')
addr = util.disambiguate_ip_address(addr, location)
cfg['interface'] = "%s://%s" % (proto, addr)
# turn interface,port into full urls:
for key in ('control', 'task', 'mux', 'iopub', 'notification', 'registration'):
cfg[key] = cfg['interface'] + ':%i' % cfg[key]
url = cfg['registration']
if location is not None and addr == localhost():
# location specified, and connection is expected to be local
location_ip = util.ip_for_host(location)
if not is_local_ip(location_ip) and not sshserver:
# load ssh from JSON *only* if the controller is not on
# this machine
sshserver=cfg['ssh']
if not is_local_ip(location_ip) and not sshserver and\
location != socket.gethostname():
# warn if no ssh specified, but SSH is probably needed
# This is only a warning, because the most likely cause
# is a local Controller on a laptop whose IP is dynamic
warnings.warn("""
Controller appears to be listening on localhost, but not on this machine.
If this is true, you should specify Client(...,sshserver='you@%s')
or instruct your controller to listen on an external IP.""" % location,
RuntimeWarning)
elif not sshserver:
# otherwise sync with cfg
sshserver = cfg['ssh']
self._config = cfg
self._ssh = bool(sshserver or sshkey or password)
if self._ssh and sshserver is None:
# default to ssh via localhost
sshserver = addr
if self._ssh and password is None:
from zmq.ssh import tunnel
if tunnel.try_passwordless_ssh(sshserver, sshkey, paramiko):
password=False
else:
password = getpass("SSH Password for %s: "%sshserver)
ssh_kwargs = dict(keyfile=sshkey, password=password, paramiko=paramiko)
# configure and construct the session
try:
extra_args['packer'] = cfg['pack']
extra_args['unpacker'] = cfg['unpack']
extra_args['key'] = cast_bytes(cfg['key'])
extra_args['signature_scheme'] = cfg['signature_scheme']
except KeyError as exc:
msg = '\n'.join([
"Connection file is invalid (missing '{}'), possibly from an old version of IPython.",
"If you are reusing connection files, remove them and start ipcontroller again."
])
raise ValueError(msg.format(exc))
self.session = Session(**extra_args)
self._query_socket = self._context.socket(zmq.DEALER)
if self._ssh:
from zmq.ssh import tunnel
tunnel.tunnel_connection(self._query_socket, cfg['registration'], sshserver,
timeout=timeout, **ssh_kwargs)
else:
self._query_socket.connect(cfg['registration'])
self.session.debug = self.debug
self._notification_handlers = {'registration_notification' : self._register_engine,
'unregistration_notification' : self._unregister_engine,
'shutdown_notification' : lambda msg: self.close(),
}
self._queue_handlers = {'execute_reply' : self._handle_execute_reply,
'apply_reply' : self._handle_apply_reply}
try:
self._connect(sshserver, ssh_kwargs, timeout)
except:
self.close(linger=0)
raise
# last step: setup magics, if we are in IPython:
ip = get_ipython()
if ip is None:
return
else:
if 'px' not in ip.magics_manager.magics:
# in IPython but we are the first Client.
# activate a default view for parallel magics.
self.activate()
def __del__(self):
"""cleanup sockets, but _not_ context."""
self.close()
def _setup_profile_dir(self, profile, profile_dir, ipython_dir):
if ipython_dir is None:
ipython_dir = get_ipython_dir()
if profile_dir is not None:
try:
self._cd = ProfileDir.find_profile_dir(profile_dir)
return
except ProfileDirError:
pass
elif profile is not None:
try:
self._cd = ProfileDir.find_profile_dir_by_name(
ipython_dir, profile)
return
except ProfileDirError:
pass
self._cd = None
def _update_engines(self, engines):
"""Update our engines dict and _ids from a dict of the form: {id:uuid}."""
for k,v in iteritems(engines):
eid = int(k)
if eid not in self._engines:
self._ids.append(eid)
self._engines[eid] = v
self._ids = sorted(self._ids)
if sorted(self._engines.keys()) != list(range(len(self._engines))) and \
self._task_scheme == 'pure' and self._task_socket:
self._stop_scheduling_tasks()
def _stop_scheduling_tasks(self):
"""Stop scheduling tasks because an engine has been unregistered
from a pure ZMQ scheduler.
"""
self._task_socket.close()
self._task_socket = None
msg = "An engine has been unregistered, and we are using pure " +\
"ZMQ task scheduling. Task farming will be disabled."
if self.outstanding:
msg += " If you were running tasks when this happened, " +\
"some `outstanding` msg_ids may never resolve."
warnings.warn(msg, RuntimeWarning)
def _build_targets(self, targets):
"""Turn valid target IDs or 'all' into two lists:
(uuids, int_ids), in that order.
"""
if not self._ids:
# flush notification socket if no engines yet, just in case
if not self.ids:
raise error.NoEnginesRegistered("Can't build targets without any engines")
if targets is None:
targets = self._ids
elif isinstance(targets, string_types):
if targets.lower() == 'all':
targets = self._ids
else:
raise TypeError("%r not valid str target, must be 'all'"%(targets))
elif isinstance(targets, int):
if targets < 0:
targets = self.ids[targets]
if targets not in self._ids:
raise IndexError("No such engine: %i"%targets)
targets = [targets]
if isinstance(targets, slice):
indices = list(range(len(self._ids))[targets])
ids = self.ids
targets = [ ids[i] for i in indices ]
if not isinstance(targets, (tuple, list, xrange)):
raise TypeError("targets by int/slice/collection of ints only, not %s"%(type(targets)))
return [cast_bytes(self._engines[t]) for t in targets], list(targets)
def _connect(self, sshserver, ssh_kwargs, timeout):
"""setup all our socket connections to the cluster. This is called from
__init__."""
# Maybe allow reconnecting?
if self._connected:
return
self._connected=True
def connect_socket(s, url):
if self._ssh:
from zmq.ssh import tunnel
return tunnel.tunnel_connection(s, url, sshserver, **ssh_kwargs)
else:
return s.connect(url)
self.session.send(self._query_socket, 'connection_request')
# use Poller because zmq.select has wrong units in pyzmq 2.1.7
poller = zmq.Poller()
poller.register(self._query_socket, zmq.POLLIN)
# poll expects milliseconds, timeout is seconds
evts = poller.poll(timeout*1000)
if not evts:
raise error.TimeoutError("Hub connection request timed out")
idents, msg = self.session.recv(self._query_socket, mode=0)
if self.debug:
pprint(msg)
content = msg['content']
# self._config['registration'] = dict(content)
cfg = self._config
if content['status'] == 'ok':
self._mux_socket = self._context.socket(zmq.DEALER)
connect_socket(self._mux_socket, cfg['mux'])
self._task_socket = self._context.socket(zmq.DEALER)
connect_socket(self._task_socket, cfg['task'])
self._notification_socket = self._context.socket(zmq.SUB)
self._notification_socket.setsockopt(zmq.SUBSCRIBE, b'')
connect_socket(self._notification_socket, cfg['notification'])
self._control_socket = self._context.socket(zmq.DEALER)
connect_socket(self._control_socket, cfg['control'])
self._iopub_socket = self._context.socket(zmq.SUB)
self._iopub_socket.setsockopt(zmq.SUBSCRIBE, b'')
connect_socket(self._iopub_socket, cfg['iopub'])
self._update_engines(dict(content['engines']))
else:
self._connected = False
tb = '\n'.join(content.get('traceback', []))
raise Exception("Failed to connect! %s" % tb)
self._start_io_thread()
#--------------------------------------------------------------------------
# handlers and callbacks for incoming messages
#--------------------------------------------------------------------------
def _unwrap_exception(self, content):
"""unwrap exception, and remap engine_id to int."""
e = error.unwrap_exception(content)
# print e.traceback
if e.engine_info:
e_uuid = e.engine_info['engine_uuid']
eid = self._engines[e_uuid]
e.engine_info['engine_id'] = eid
return e
def _extract_metadata(self, msg):
header = msg['header']
parent = msg['parent_header']
msg_meta = msg['metadata']
content = msg['content']
md = {'msg_id' : parent['msg_id'],
'received' : util.utcnow(),
'engine_uuid' : msg_meta.get('engine', None),
'follow' : msg_meta.get('follow', []),
'after' : msg_meta.get('after', []),
'status' : content['status'],
}
if md['engine_uuid'] is not None:
md['engine_id'] = self._engines.get(md['engine_uuid'], None)
if 'date' in parent:
md['submitted'] = parent['date']
if 'started' in msg_meta:
md['started'] = util._parse_date(msg_meta['started'])
if 'date' in header:
md['completed'] = header['date']
return md
def _register_engine(self, msg):
"""Register a new engine, and update our connection info."""
content = msg['content']
eid = content['id']
d = {eid : content['uuid']}
self._update_engines(d)
def _unregister_engine(self, msg):
"""Unregister an engine that has died."""
content = msg['content']
eid = int(content['id'])
if eid in self._ids:
self._ids.remove(eid)
uuid = self._engines.pop(eid)
self._handle_stranded_msgs(eid, uuid)
if self._task_socket and self._task_scheme == 'pure':
self._stop_scheduling_tasks()
def _handle_stranded_msgs(self, eid, uuid):
"""Handle messages known to be on an engine when the engine unregisters.
It is possible that this will fire prematurely - that is, an engine will
go down after completing a result, and the client will be notified
of the unregistration and later receive the successful result.
"""
outstanding = self._outstanding_dict[uuid]
for msg_id in list(outstanding):
if msg_id in self.results:
# we already have this result; don't overwrite it
continue
try:
raise error.EngineError("Engine %r died while running task %r"%(eid, msg_id))
except:
content = error.wrap_exception()
# build a fake message:
msg = self.session.msg('apply_reply', content=content)
msg['parent_header']['msg_id'] = msg_id
msg['metadata']['engine'] = uuid
self._handle_apply_reply(msg)
def _handle_execute_reply(self, msg):
"""Save the reply to an execute_request into our results.
Execute replies are produced by ``view.execute()``; most work goes through apply instead.
"""
parent = msg['parent_header']
msg_id = parent['msg_id']
future = self._futures.get(msg_id, None)
if msg_id not in self.outstanding:
if msg_id in self.history:
print("got stale result: %s"%msg_id)
else:
print("got unknown result: %s"%msg_id)
else:
self.outstanding.remove(msg_id)
content = msg['content']
header = msg['header']
# construct metadata:
md = self.metadata[msg_id]
md.update(self._extract_metadata(msg))
e_outstanding = self._outstanding_dict[md['engine_uuid']]
if msg_id in e_outstanding:
e_outstanding.remove(msg_id)
# construct result:
if content['status'] == 'ok':
self.results[msg_id] = ExecuteReply(msg_id, content, md)
elif content['status'] == 'aborted':
self.results[msg_id] = error.TaskAborted(msg_id)
# aborted tasks will not get output
out_future = self._output_futures.get(msg_id)
if out_future and not out_future.done():
out_future.set_result(None)
elif content['status'] == 'resubmitted':
# TODO: handle resubmission
pass
else:
self.results[msg_id] = self._unwrap_exception(content)
if content['status'] != 'ok' and not content.get('engine_info'):
# not an engine failure, don't expect output
out_future = self._output_futures.get(msg_id)
if out_future and not out_future.done():
out_future.set_result(None)
if future:
future.set_result(self.results[msg_id])
def _handle_apply_reply(self, msg):
"""Save the reply to an apply_request into our results."""
parent = msg['parent_header']
msg_id = parent['msg_id']
future = self._futures.get(msg_id, None)
if msg_id not in self.outstanding:
if msg_id in self.history:
print("got stale result: %s"%msg_id)
print(self.results[msg_id])
print(msg)
else:
print("got unknown result: %s"%msg_id)
else:
self.outstanding.remove(msg_id)
content = msg['content']
header = msg['header']
# construct metadata:
md = self.metadata[msg_id]
md.update(self._extract_metadata(msg))
e_outstanding = self._outstanding_dict[md['engine_uuid']]
if msg_id in e_outstanding:
e_outstanding.remove(msg_id)
# construct result:
if content['status'] == 'ok':
self.results[msg_id] = serialize.deserialize_object(msg['buffers'])[0]
elif content['status'] == 'aborted':
self.results[msg_id] = error.TaskAborted(msg_id)
out_future = self._output_futures.get(msg_id)
if out_future and not out_future.done():
out_future.set_result(None)
elif content['status'] == 'resubmitted':
# TODO: handle resubmission
pass
else:
self.results[msg_id] = self._unwrap_exception(content)
if content['status'] != 'ok' and not content.get('engine_info'):
# not an engine failure, don't expect output
out_future = self._output_futures.get(msg_id)
if out_future and not out_future.done():
out_future.set_result(None)
if future:
future.set_result(self.results[msg_id])
def _make_io_loop(self):
"""Make my IOLoop. Override with IOLoop.current to return"""
if 'asyncio' in sys.modules:
# tornado 5 on asyncio requires creating a new asyncio loop
import asyncio
try:
asyncio.get_event_loop()
except RuntimeError:
# no asyncio loop in this thread (e.g. a background thread); make one
asyncio.set_event_loop(asyncio.new_event_loop())
loop = ioloop.IOLoop()
loop.make_current()
return loop
def _stop_io_thread(self):
"""Stop my IO thread"""
if self._io_loop:
self._io_loop.add_callback(self._io_loop.stop)
if self._io_thread and self._io_thread is not current_thread():
self._io_thread.join()
def _setup_streams(self):
self._query_stream = ZMQStream(self._query_socket, self._io_loop)
self._query_stream.on_recv(self._dispatch_single_reply, copy=False)
self._control_stream = ZMQStream(self._control_socket, self._io_loop)
self._control_stream.on_recv(self._dispatch_single_reply, copy=False)
self._mux_stream = ZMQStream(self._mux_socket, self._io_loop)
self._mux_stream.on_recv(self._dispatch_reply, copy=False)
self._task_stream = ZMQStream(self._task_socket, self._io_loop)
self._task_stream.on_recv(self._dispatch_reply, copy=False)
self._iopub_stream = ZMQStream(self._iopub_socket, self._io_loop)
self._iopub_stream.on_recv(self._dispatch_iopub, copy=False)
self._notification_stream = ZMQStream(self._notification_socket, self._io_loop)
self._notification_stream.on_recv(self._dispatch_notification, copy=False)
def _start_io_thread(self):
"""Start IOLoop in a background thread."""
evt = Event()
self._io_thread = Thread(target=self._io_main, args=(evt,))
self._io_thread.daemon = True
self._io_thread.start()
# wait for the IOLoop to start
for i in range(10):
if evt.wait(1):
return
if not self._io_thread.is_alive():
raise RuntimeError("IO Loop failed to start")
else:
raise RuntimeError("Start event was never set. Maybe a problem in the IO thread.")
def _io_main(self, start_evt=None):
"""main loop for background IO thread"""
self._io_loop = self._make_io_loop()
self._setup_streams()
# signal that start has finished
# so that the main thread knows that all our attributes are defined
if start_evt:
start_evt.set()
self._io_loop.start()
self._io_loop.close()
@unpack_message
def _dispatch_single_reply(self, msg):
"""Dispatch single (non-execution) replies"""
msg_id = msg['parent_header'].get('msg_id', None)
future = self._futures.get(msg_id)
if future is not None:
future.set_result(msg)
@unpack_message
def _dispatch_notification(self, msg):
"""Dispatch notification messages"""
msg_type = msg['header']['msg_type']
handler = self._notification_handlers.get(msg_type, None)
if handler is None:
raise KeyError("Unhandled notification message type: %s" % msg_type)
else:
handler(msg)
@unpack_message
def _dispatch_reply(self, msg):
"""handle execution replies waiting in ZMQ queue."""
msg_type = msg['header']['msg_type']
handler = self._queue_handlers.get(msg_type, None)
if handler is None:
raise KeyError("Unhandled reply message type: %s" % msg_type)
else:
handler(msg)
@unpack_message
def _dispatch_iopub(self, msg):
"""handler for IOPub messages"""
parent = msg['parent_header']
if not parent or parent['session'] != self.session.session:
# ignore IOPub messages not from here
return
msg_id = parent['msg_id']
content = msg['content']
header = msg['header']
msg_type = msg['header']['msg_type']
if msg_type == 'status' and msg_id not in self.metadata:
# ignore status messages if they aren't mine
return
# init metadata:
md = self.metadata[msg_id]
if msg_type == 'stream':
name = content['name']
s = md[name] or ''
md[name] = s + content['text']
elif msg_type == 'error':
md.update({'error' : self._unwrap_exception(content)})
elif msg_type == 'execute_input':
md.update({'execute_input' : content['code']})
elif msg_type == 'display_data':
md['outputs'].append(content)
elif msg_type == 'execute_result':
md['execute_result'] = content
elif msg_type == 'data_message':
data, remainder = serialize.deserialize_object(msg['buffers'])
md['data'].update(data)
elif msg_type == 'status':
# idle message comes after all outputs
if content['execution_state'] == 'idle':
future = self._output_futures.get(msg_id)
if future and not future.done():
# TODO: should probably store actual outputs on the Future
future.set_result(None)
else:
# unhandled msg_type
pass
def _send(self, socket, msg_type, content=None, parent=None, ident=None,
buffers=None, track=False, header=None, metadata=None):
"""Send a message in the IO thread
returns msg object"""
if self._closed:
raise IOError("Connections have been closed.")
msg = self.session.msg(msg_type, content=content, parent=parent,
header=header, metadata=metadata)
msg_id = msg['header']['msg_id']
asyncresult = False
if msg_type in {'execute_request', 'apply_request'}:
asyncresult = True
# add future for output
self._output_futures[msg_id] = output = MessageFuture(msg_id)
# hook up metadata
output.metadata = self.metadata[msg_id]
self._futures[msg_id] = future = MessageFuture(msg_id, track=track)
futures = [future]
if asyncresult:
future.output = output
futures.append(output)
output.metadata['submitted'] = util.utcnow()
def cleanup(f):
"""Purge caches on Future resolution"""
self.results.pop(msg_id, None)
self._futures.pop(msg_id, None)
self._output_futures.pop(msg_id, None)
self.metadata.pop(msg_id, None)
multi_future(futures).add_done_callback(cleanup)
def _really_send():
sent = self.session.send(socket, msg, track=track, buffers=buffers, ident=ident)
if track:
future.tracker.set_result(sent['tracker'])
# hand off actual send to IO thread
self._io_loop.add_callback(_really_send)
return future
def _send_recv(self, *args, **kwargs):
"""Send a message in the IO thread and return its reply"""
future = self._send(*args, **kwargs)
future.wait()
return future.result()
#--------------------------------------------------------------------------
# len, getitem
#--------------------------------------------------------------------------
def __len__(self):
"""len(client) returns # of engines."""
return len(self.ids)
def __getitem__(self, key):
"""index access returns DirectView multiplexer objects
Must be int, slice, or list/tuple/xrange of ints"""
if not isinstance(key, (int, slice, tuple, list, xrange)):
raise TypeError("key by int/slice/iterable of ints only, not %s"%(type(key)))
else:
return self.direct_view(key)
def __iter__(self):
"""Since we define getitem, Client is iterable
but unless we also define __iter__, it won't work correctly unless engine IDs
start at zero and are continuous.
"""
for eid in self.ids:
yield self.direct_view(eid)
#--------------------------------------------------------------------------
# Begin public methods
#--------------------------------------------------------------------------
@property
def ids(self):
"""Always up-to-date ids property."""
# always copy:
return list(self._ids)
def activate(self, targets='all', suffix=''):
"""Create a DirectView and register it with IPython magics
Defines the magics `%px, %autopx, %pxresult, %%px`
Parameters
----------
targets: int, list of ints, or 'all'
The engines on which the view's magics will run
suffix: str [default: '']
The suffix, if any, for the magics. This allows you to have
multiple views associated with parallel magics at the same time.
e.g. ``rc.activate(targets=0, suffix='0')`` will give you
the magics ``%px0``, ``%pxresult0``, etc. for running magics just
on engine 0.
"""
view = self.direct_view(targets)
view.block = True
view.activate(suffix)
return view
def close(self, linger=None):
"""Close my zmq Sockets
If `linger`, set the zmq LINGER socket option,
which allows discarding of messages.
"""
if self._closed:
return
self._stop_io_thread()
snames = [ trait for trait in self.trait_names() if trait.endswith("socket") ]
for name in snames:
socket = getattr(self, name)
if socket is not None and not socket.closed:
if linger is not None:
socket.close(linger=linger)
else:
socket.close()
self._closed = True
def spin_thread(self, interval=1):
"""DEPRECATED, DOES NOTHING"""
warnings.warn("Client.spin_thread is deprecated now that IO is always in a thread", DeprecationWarning)
def stop_spin_thread(self):
"""DEPRECATED, DOES NOTHING"""
warnings.warn("Client.spin_thread is deprecated now that IO is always in a thread", DeprecationWarning)
def spin(self):
"""DEPRECATED, DOES NOTHING"""
warnings.warn("Client.spin is deprecated now that IO is in a thread", DeprecationWarning)
def _await_futures(self, futures, timeout):
"""Wait for a collection of futures"""
if not futures:
return True
event = Event()
if timeout and timeout < 0:
timeout = None
f = multi_future(futures)
f.add_done_callback(lambda f: event.set())
return event.wait(timeout)
def _futures_for_msgs(self, msg_ids):
"""Turn msg_ids into Futures
msg_ids not in futures dict are presumed done.
"""
futures = []
for msg_id in msg_ids:
f = self._futures.get(msg_id, None)
if f:
futures.append(f)
return futures
def wait(self, jobs=None, timeout=-1):
"""waits on one or more `jobs`, for up to `timeout` seconds.
Parameters
----------
jobs : int, str, or list of ints and/or strs, or one or more AsyncResult objects
ints are indices to self.history
strs are msg_ids
default: wait on all outstanding messages
timeout : float
a time in seconds, after which to give up.
default is -1, which means no timeout
Returns
-------
True : when all msg_ids are done
False : timeout reached, some msg_ids still outstanding
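Examples
--------
A usage sketch (assumes ``rc`` is a connected Client; purely illustrative)::

    ar = rc[:].apply_async(lambda: 42)
    if not rc.wait(ar, timeout=10):
        print('still outstanding:', rc.outstanding)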
"""
futures = []
if jobs is None:
if not self.outstanding:
return True
# make a copy, so that we aren't passing a mutable collection to _futures_for_msgs
theids = set(self.outstanding)
else:
if isinstance(jobs, string_types + (int, AsyncResult)) \
or not isinstance(jobs, Iterable):
jobs = [jobs]
theids = set()
for job in jobs:
if isinstance(job, int):
# index access
job = self.history[job]
elif isinstance(job, AsyncResult):
theids.update(job.msg_ids)
continue
elif _is_future(job):
futures.append(job)
continue
theids.add(job)
if not futures and not theids.intersection(self.outstanding):
return True
futures.extend(self._futures_for_msgs(theids))
return self._await_futures(futures, timeout)
def wait_interactive(self, jobs=None, interval=1., timeout=-1.):
"""Wait interactively for jobs
If no job is specified, will wait for all outstanding jobs to complete.
"""
if jobs is None:
# get futures for results
futures = [ f for f in self._futures.values() if hasattr(f, 'output') ]
ar = AsyncResult(self, futures, owner=False)
else:
ar = self._asyncresult_from_jobs(jobs, owner=False)
return ar.wait_interactive(interval=interval, timeout=timeout)
#--------------------------------------------------------------------------
# Control methods
#--------------------------------------------------------------------------
def clear(self, targets=None, block=None):
"""Clear the namespace in target(s)."""
block = self.block if block is None else block
targets = self._build_targets(targets)[0]
futures = []
for t in targets:
futures.append(self._send(self._control_stream, 'clear_request', content={}, ident=t))
if not block:
return multi_future(futures)
for future in futures:
future.wait()
msg = future.result()
if msg['content']['status'] != 'ok':
raise self._unwrap_exception(msg['content'])
def abort(self, jobs=None, targets=None, block=None):
"""Abort specific jobs from the execution queues of target(s).
This is a mechanism to prevent jobs that have already been submitted
from executing.
Parameters
----------
jobs : msg_id, list of msg_ids, or AsyncResult
The jobs to be aborted
If unspecified/None: abort all outstanding jobs.
"""
block = self.block if block is None else block
jobs = jobs if jobs is not None else list(self.outstanding)
targets = self._build_targets(targets)[0]
msg_ids = []
if isinstance(jobs, string_types + (AsyncResult,)):
jobs = [jobs]
bad_ids = [obj for obj in jobs if not isinstance(obj, string_types + (AsyncResult,))]
if bad_ids:
raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
for j in jobs:
if isinstance(j, AsyncResult):
msg_ids.extend(j.msg_ids)
else:
msg_ids.append(j)
content = dict(msg_ids=msg_ids)
futures = []
for t in targets:
futures.append(self._send(self._control_stream, 'abort_request',
content=content, ident=t))
if not block:
return multi_future(futures)
else:
for f in futures:
f.wait()
msg = f.result()
if msg['content']['status'] != 'ok':
raise self._unwrap_exception(msg['content'])
def shutdown(self, targets='all', restart=False, hub=False, block=None):
"""Terminates one or more engine processes, optionally including the hub.
Parameters
----------
targets: list of ints or 'all' [default: all]
Which engines to shutdown.
hub: bool [default: False]
Whether to include the Hub. hub=True implies targets='all'.
block: bool [default: self.block]
Whether to wait for clean shutdown replies or not.
restart: bool [default: False]
NOT IMPLEMENTED
whether to restart engines after shutting them down.
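Examples
--------
Illustrative sketches (assume ``rc`` is a connected Client)::

    rc.shutdown(targets=[2, 3])          # stop engines 2 and 3
    rc.shutdown(hub=True, block=True)    # stop all engines and the Hub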
"""
from ipyparallel.error import NoEnginesRegistered
if restart:
raise NotImplementedError("Engine restart is not yet implemented")
block = self.block if block is None else block
if hub:
targets = 'all'
try:
targets = self._build_targets(targets)[0]
except NoEnginesRegistered:
targets = []
futures = []
for t in targets:
futures.append(self._send(self._control_stream, 'shutdown_request',
content={'restart':restart},ident=t))
error = False
if block or hub:
for f in futures:
f.wait()
msg = f.result()
if msg['content']['status'] != 'ok':
error = self._unwrap_exception(msg['content'])
if hub:
# don't trigger close on shutdown notification, which will prevent us from receiving the reply
self._notification_handlers['shutdown_notification'] = lambda msg: None
msg = self._send_recv(self._query_stream, 'shutdown_request')
if msg['content']['status'] != 'ok':
error = self._unwrap_exception(msg['content'])
if not error:
self.close()
if error:
raise error
def become_dask(self, targets='all', port=0, nanny=False, scheduler_args=None, **worker_args):
"""Turn the IPython cluster into a dask.distributed cluster
Parameters
----------
targets: target spec (default: all)
Which engines to turn into dask workers.
port: int (default: random)
Which port
nanny: bool (default: False)
Whether to start workers as subprocesses instead of in the engine process.
Using a nanny allows restarting the worker processes via ``executor.restart``.
scheduler_args: dict
Keyword arguments (e.g. ip) to pass to the distributed.Scheduler constructor.
**worker_args:
Any additional keyword arguments (e.g. ncores) are passed to the distributed.Worker constructor.
Returns
-------
client : distributed.Client
A dask.distributed.Client connected to the dask cluster.
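Examples
--------
A usage sketch (assumes ``dask.distributed`` is installed and ``rc`` is a
connected Client; illustrative only)::

    executor = rc.become_dask(ncores=1)
    future = executor.submit(lambda x: x + 1, 10)
    print(future.result())    # 11
    rc.stop_dask()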
"""
import distributed
dview = self.direct_view(targets)
if scheduler_args is None:
scheduler_args = {}
else:
scheduler_args = dict(scheduler_args) # copy
# Start a Scheduler on the Hub:
reply = self._send_recv(self._query_stream, 'become_dask_request',
{'scheduler_args': scheduler_args},
)
if reply['content']['status'] != 'ok':
raise self._unwrap_exception(reply['content'])
distributed_info = reply['content']
# Start a Worker on the selected engines:
worker_args['ip'] = distributed_info['ip']
worker_args['port'] = distributed_info['port']
worker_args['nanny'] = nanny
# set default ncores=1, since that's how an IPython cluster is typically set up.
worker_args.setdefault('ncores', 1)
dview.apply_sync(util.become_dask_worker, **worker_args)
# Finally, return a Client connected to the Scheduler
try:
distributed_Client = distributed.Client
except AttributeError:
# For distributed pre-1.18.1
distributed_Client = distributed.Executor
client = distributed_Client('{ip}:{port}'.format(**distributed_info))
return client
def stop_dask(self, targets='all'):
"""Stop the distributed Scheduler and Workers started by become_dask.
Parameters
----------
targets: target spec (default: all)
Which engines to stop dask workers on.
"""
dview = self.direct_view(targets)
# Start a Scheduler on the Hub:
reply = self._send_recv(self._query_stream, 'stop_distributed_request')
if reply['content']['status'] != 'ok':
raise self._unwrap_exception(reply['content'])
# Finally, stop all the Workers on the engines
dview.apply_sync(util.stop_distributed_worker)
# aliases:
become_distributed = become_dask
stop_distributed = stop_dask
#--------------------------------------------------------------------------
# Execution related methods
#--------------------------------------------------------------------------
def _maybe_raise(self, result):
"""wrapper for maybe raising an exception if apply failed."""
if isinstance(result, error.RemoteError):
raise result
return result
def send_apply_request(self, socket, f, args=None, kwargs=None, metadata=None, track=False,
ident=None):
"""construct and send an apply message via a socket.
This is the principal method with which all engine execution is performed by views.
"""
if self._closed:
raise RuntimeError("Client cannot be used after its sockets have been closed")
# defaults:
args = args if args is not None else []
kwargs = kwargs if kwargs is not None else {}
metadata = metadata if metadata is not None else {}
# validate arguments
if not callable(f) and not isinstance(f, (Reference, PrePickled)):
raise TypeError("f must be callable, not %s"%type(f))
if not isinstance(args, (tuple, list)):
raise TypeError("args must be tuple or list, not %s"%type(args))
if not isinstance(kwargs, dict):
raise TypeError("kwargs must be dict, not %s"%type(kwargs))
if not isinstance(metadata, dict):
raise TypeError("metadata must be dict, not %s"%type(metadata))
bufs = serialize.pack_apply_message(f, args, kwargs,
buffer_threshold=self.session.buffer_threshold,
item_threshold=self.session.item_threshold,
)
future = self._send(socket, "apply_request", buffers=bufs, ident=ident,
metadata=metadata, track=track)
msg_id = future.msg_id
self.outstanding.add(msg_id)
if ident:
# possibly routed to a specific engine
if isinstance(ident, list):
ident = ident[-1]
if ident in self._engines.values():
# save for later, in case of engine death
self._outstanding_dict[ident].add(msg_id)
self.history.append(msg_id)
return future
def send_execute_request(self, socket, code, silent=True, metadata=None, ident=None):
"""construct and send an execute request via a socket.
"""
if self._closed:
raise RuntimeError("Client cannot be used after its sockets have been closed")
# defaults:
metadata = metadata if metadata is not None else {}
# validate arguments
if not isinstance(code, string_types):
raise TypeError("code must be text, not %s" % type(code))
if not isinstance(metadata, dict):
raise TypeError("metadata must be dict, not %s" % type(metadata))
content = dict(code=code, silent=bool(silent), user_expressions={})
future = self._send(socket, "execute_request", content=content, ident=ident,
metadata=metadata)
msg_id = future.msg_id
self.outstanding.add(msg_id)
if ident:
# possibly routed to a specific engine
if isinstance(ident, list):
ident = ident[-1]
if ident in self._engines.values():
# save for later, in case of engine death
self._outstanding_dict[ident].add(msg_id)
self.history.append(msg_id)
self.metadata[msg_id]['submitted'] = util.utcnow()
return future
#--------------------------------------------------------------------------
# construct a View object
#--------------------------------------------------------------------------
def load_balanced_view(self, targets=None, **kwargs):
"""construct a DirectView object.
If no arguments are specified, create a LoadBalancedView
using all engines.
Parameters
----------
targets: list,slice,int,etc. [default: use all engines]
The subset of engines across which to load-balance execution
kwargs: passed to LoadBalancedView
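Examples
--------
A usage sketch (assumes ``rc`` is a connected Client)::

    lview = rc.load_balanced_view()
    ar = lview.map_async(lambda x: x * x, range(8))
    print(ar.get())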
"""
if targets == 'all':
targets = None
if targets is not None:
targets = self._build_targets(targets)[1]
return LoadBalancedView(client=self, socket=self._task_stream, targets=targets,
**kwargs)
def executor(self, targets=None):
"""Construct a PEP-3148 Executor with a LoadBalancedView
Parameters
----------
targets: list,slice,int,etc. [default: use all engines]
The subset of engines across which to load-balance execution
Returns
-------
executor: Executor
The Executor object
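Examples
--------
A PEP-3148 style sketch (assumes ``rc`` is a connected Client)::

    ex = rc.executor()
    futures = [ex.submit(pow, 2, i) for i in range(4)]
    print([f.result() for f in futures])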
"""
return self.load_balanced_view(targets).executor
def direct_view(self, targets='all', **kwargs):
"""construct a DirectView object.
If no targets are specified, create a DirectView using all engines.
rc.direct_view('all') is distinguished from rc[:] in that 'all' will
evaluate the target engines at each execution, whereas rc[:] will connect to
all *current* engines, and that list will not change.
That is, 'all' will always use all engines, whereas rc[:] will not use
engines added after the DirectView is constructed.
Parameters
----------
targets: list,slice,int,etc. [default: use all engines]
The engines to use for the View
kwargs: passed to DirectView
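Examples
--------
Illustrative sketches (assume ``rc`` is a connected Client)::

    dv_all = rc.direct_view('all')   # lazily targets all current engines
    dv0 = rc.direct_view(0)          # a single engine
    dv_even = rc[::2]                # slice access: fixed engine list
    dv_all.apply_sync(lambda: 'hello')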
"""
single = isinstance(targets, int)
# allow 'all' to be lazily evaluated at each execution
if targets != 'all':
targets = self._build_targets(targets)[1]
if single:
targets = targets[0]
return DirectView(client=self, socket=self._mux_stream, targets=targets,
**kwargs)
#--------------------------------------------------------------------------
# Query methods
#--------------------------------------------------------------------------
def get_result(self, indices_or_msg_ids=None, block=None, owner=True):
"""Retrieve a result by msg_id or history index, wrapped in an AsyncResult object.
If the client already has the results, no request to the Hub will be made.
This is a convenient way to construct AsyncResult objects, which are wrappers
that include metadata about execution, and allow for awaiting results that
were not submitted by this Client.
It can also be a convenient way to retrieve the metadata associated with
blocking execution, since it always returns an AsyncResult, which carries
the execution metadata even when the call itself was blocking.
Examples
--------
::
In [10]: ar = client.get_result(client.history[-1])  # most recent task
In [11]: ar.get()
Parameters
----------
indices_or_msg_ids : integer history index, str msg_id, AsyncResult,
or a list of same.
The history indices or msg_ids of the results to be retrieved
block : bool
Whether to wait for the result to be done
owner : bool [default: True]
Whether this AsyncResult should own the result.
If so, calling `ar.get()` will remove data from the
client's result and metadata cache.
There should only be one owner of any given msg_id.
Returns
-------
AsyncResult
A single AsyncResult object will always be returned.
AsyncHubResult
A subclass of AsyncResult that retrieves results from the Hub
"""
block = self.block if block is None else block
if indices_or_msg_ids is None:
indices_or_msg_ids = -1
ar = self._asyncresult_from_jobs(indices_or_msg_ids, owner=owner)
if block:
ar.wait()
return ar
def resubmit(self, indices_or_msg_ids=None, metadata=None, block=None):
"""Resubmit one or more tasks.
in-flight tasks may not be resubmitted.
Parameters
----------
indices_or_msg_ids : integer history index, str msg_id, or list of either
The history indices or msg_ids of the results to be retrieved
block : bool
Whether to wait for the result to be done
Returns
-------
AsyncHubResult
A subclass of AsyncResult that retrieves results from the Hub
"""
block = self.block if block is None else block
if indices_or_msg_ids is None:
indices_or_msg_ids = -1
theids = self._msg_ids_from_jobs(indices_or_msg_ids)
content = dict(msg_ids = theids)
reply = self._send_recv(self._query_stream, 'resubmit_request', content)
content = reply['content']
if content['status'] != 'ok':
raise self._unwrap_exception(content)
mapping = content['resubmitted']
new_ids = [ mapping[msg_id] for msg_id in theids ]
ar = AsyncHubResult(self, new_ids)
if block:
ar.wait()
return ar
def result_status(self, msg_ids, status_only=True):
"""Check on the status of the result(s) of the apply request with `msg_ids`.
If status_only is False, then the actual results will be retrieved, else
only the status of the results will be checked.
Parameters
----------
msg_ids : list of msg_ids
if int:
Passed as index to self.history for convenience.
status_only : bool (default: True)
if False:
Retrieve the actual results of completed tasks.
Returns
-------
results : dict
There will always be the keys 'pending' and 'completed', which will
be lists of msg_ids that are incomplete or complete. If `status_only`
is False, then completed results will be keyed by their `msg_id`.
"""
theids = self._msg_ids_from_jobs(msg_ids)
completed = []
local_results = {}
# comment this block out to temporarily disable local shortcut:
for msg_id in theids:
if msg_id in self.results:
completed.append(msg_id)
local_results[msg_id] = self.results[msg_id]
theids.remove(msg_id)
if theids: # some not locally cached
content = dict(msg_ids=theids, status_only=status_only)
reply = self._send_recv(self._query_stream, "result_request", content=content)
content = reply['content']
if content['status'] != 'ok':
raise self._unwrap_exception(content)
buffers = reply['buffers']
else:
content = dict(completed=[],pending=[])
content['completed'].extend(completed)
if status_only:
return content
failures = []
# load cached results into result:
content.update(local_results)
# update cache with results:
for msg_id in sorted(theids):
if msg_id in content['completed']:
rec = content[msg_id]
parent = util.extract_dates(rec['header'])
header = util.extract_dates(rec['result_header'])
rcontent = rec['result_content']
iodict = rec['io']
if isinstance(rcontent, str):
rcontent = self.session.unpack(rcontent)
md = self.metadata[msg_id]
md_msg = dict(
content=rcontent,
parent_header=parent,
header=header,
metadata=rec['result_metadata'],
)
md.update(self._extract_metadata(md_msg))
if rec.get('received'):
md['received'] = util._parse_date(rec['received'])
md.update(iodict)
if rcontent['status'] == 'ok':
if header['msg_type'] == 'apply_reply':
res,buffers = serialize.deserialize_object(buffers)
elif header['msg_type'] == 'execute_reply':
res = ExecuteReply(msg_id, rcontent, md)
else:
raise KeyError("unhandled msg type: %r" % header['msg_type'])
else:
res = self._unwrap_exception(rcontent)
failures.append(res)
self.results[msg_id] = res
content[msg_id] = res
if len(theids) == 1 and failures:
raise failures[0]
error.collect_exceptions(failures, "result_status")
return content
def queue_status(self, targets='all', verbose=False):
"""Fetch the status of engine queues.
Parameters
----------
targets : int/str/list of ints/strs
the engines whose states are to be queried.
default : all
verbose : bool
Whether to return lengths only, or lists of ids for each element
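Examples
--------
Illustrative sketch (assumes ``rc`` is a connected Client; the keys shown
in the comment are indicative only)::

    rc.queue_status()    # e.g. {0: {'queue': 0, 'completed': 3, 'tasks': 0}, ...}
    rc.queue_status(0)   # status dict for engine 0 only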
"""
if targets == 'all':
# allow 'all' to be evaluated on the engine
engine_ids = None
else:
engine_ids = self._build_targets(targets)[1]
content = dict(targets=engine_ids, verbose=verbose)
reply = self._send_recv(self._query_stream, "queue_request", content=content)
content = reply['content']
status = content.pop('status')
if status != 'ok':
raise self._unwrap_exception(content)
content = util.int_keys(content)
if isinstance(targets, int):
return content[targets]
else:
return content
def _msg_ids_from_target(self, targets=None):
"""Build a list of msg_ids from the list of engine targets"""
if not targets: # needed as _build_targets otherwise uses all engines
return []
target_ids = self._build_targets(targets)[0]
return [md_id for md_id in self.metadata if self.metadata[md_id]["engine_uuid"] in target_ids]
def _msg_ids_from_jobs(self, jobs=None):
"""Given a 'jobs' argument, convert it to a list of msg_ids.
Can be either one or a list of:
- msg_id strings
- integer indices to this Client's history
- AsyncResult objects
"""
if not isinstance(jobs, (list, tuple, set, types.GeneratorType)):
jobs = [jobs]
msg_ids = []
for job in jobs:
if isinstance(job, int):
msg_ids.append(self.history[job])
elif isinstance(job, string_types):
msg_ids.append(job)
elif isinstance(job, AsyncResult):
msg_ids.extend(job.msg_ids)
else:
raise TypeError("Expected msg_id, int, or AsyncResult, got %r" % job)
return msg_ids
def _asyncresult_from_jobs(self, jobs=None, owner=False):
"""Construct an AsyncResult from msg_ids or asyncresult objects"""
if not isinstance(jobs, (list, tuple, set, types.GeneratorType)):
single = True
jobs = [jobs]
else:
single = False
futures = []
msg_ids = []
for job in jobs:
if isinstance(job, int):
job = self.history[job]
if isinstance(job, string_types):
if job in self._futures:
futures.append(job)
elif job in self.results:
f = MessageFuture(job)
f.set_result(self.results[job])
f.output = Future()
f.output.metadata = self.metadata[job]
f.output.set_result(None)
futures.append(f)
else:
msg_ids.append(job)
elif isinstance(job, AsyncResult):
if job._children:
futures.extend(job._children)
else:
msg_ids.extend(job.msg_ids)
else:
raise TypeError("Expected msg_id, int, or AsyncResult, got %r" % job)
if msg_ids:
if single:
msg_ids = msg_ids[0]
return AsyncHubResult(self, msg_ids, owner=owner)
else:
if single and futures:
futures = futures[0]
return AsyncResult(self, futures, owner=owner)
def purge_local_results(self, jobs=[], targets=[]):
"""Clears the client caches of results and their metadata.
Individual results can be purged by msg_id, or the entire
history of specific targets can be purged.
Use `purge_local_results('all')` to scrub everything from the Client's
results and metadata caches.
After this call all `AsyncResults` are invalid and should be discarded.
If you must "reget" the results, you can still do so by using
`client.get_result(msg_id)` or `client.get_result(asyncresult)`. This will
redownload the results from the hub if they are still available
(i.e. `client.purge_hub_results(...)` has not been called).
Parameters
----------
jobs : str or list of str or AsyncResult objects
the msg_ids whose results should be purged.
targets : int/list of ints
The engines, by integer ID, whose entire result histories are to be purged.
Raises
------
RuntimeError : if any of the tasks to be purged are still outstanding.
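Examples
--------
Illustrative sketches (assume ``rc`` is a connected Client and ``ar`` is a
finished AsyncResult)::

    rc.purge_local_results('all')         # clear every locally cached result
    rc.purge_local_results(jobs=ar)       # purge one AsyncResult's entries
    rc.purge_local_results(targets=[0])   # purge everything run on engine 0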
"""
if not targets and not jobs:
raise ValueError("Must specify at least one of `targets` and `jobs`")
if jobs == 'all':
if self.outstanding:
raise RuntimeError("Can't purge outstanding tasks: %s" % self.outstanding)
self.results.clear()
self.metadata.clear()
self._futures.clear()
self._output_futures.clear()
else:
msg_ids = set()
msg_ids.update(self._msg_ids_from_target(targets))
msg_ids.update(self._msg_ids_from_jobs(jobs))
still_outstanding = self.outstanding.intersection(msg_ids)
if still_outstanding:
raise RuntimeError("Can't purge outstanding tasks: %s" % still_outstanding)
for mid in msg_ids:
self.results.pop(mid, None)
self.metadata.pop(mid, None)
self._futures.pop(mid, None)
self._output_futures.pop(mid, None)
def purge_hub_results(self, jobs=[], targets=[]):
"""Tell the Hub to forget results.
Individual results can be purged by msg_id, or the entire
history of specific targets can be purged.
Use `purge_results('all')` to scrub everything from the Hub's db.
Parameters
----------
jobs : str or list of str or AsyncResult objects
the msg_ids whose results should be forgotten.
targets : int/str/list of ints/strs
The targets, by int_id, whose entire history is to be purged.
default : None
"""
if not targets and not jobs:
raise ValueError("Must specify at least one of `targets` and `jobs`")
if targets:
targets = self._build_targets(targets)[1]
# construct msg_ids from jobs
if jobs == 'all':
msg_ids = jobs
else:
msg_ids = self._msg_ids_from_jobs(jobs)
content = dict(engine_ids=targets, msg_ids=msg_ids)
reply = self._send_recv(self._query_stream, "purge_request", content=content)
content = reply['content']
if content['status'] != 'ok':
raise self._unwrap_exception(content)
def purge_results(self, jobs=[], targets=[]):
"""Clears the cached results from both the hub and the local client
Individual results can be purged by msg_id, or the entire
history of specific targets can be purged.
Use `purge_results('all')` to scrub every cached result from both the Hub's and
the Client's db.
Equivalent to calling both `purge_hub_results()` and `purge_local_results()` with
the same arguments.
Parameters
----------
jobs : str or list of str or AsyncResult objects
the msg_ids whose results should be forgotten.
targets : int/str/list of ints/strs
The targets, by int_id, whose entire history is to be purged.
default : None
"""
self.purge_local_results(jobs=jobs, targets=targets)
self.purge_hub_results(jobs=jobs, targets=targets)
def purge_everything(self):
"""Clears all content from previous Tasks from both the hub and the local client
In addition to calling `purge_results("all")` it also deletes the history and
other bookkeeping lists.
"""
self.purge_results("all")
self.history = []
self.session.digest_history.clear()
def hub_history(self):
"""Get the Hub's history
Just like the Client, the Hub has a history, which is a list of msg_ids.
This will contain the history of all clients, and, depending on configuration,
may contain history across multiple cluster sessions.
Any msg_id returned here is a valid argument to `get_result`.
Returns
-------
msg_ids : list of strs
list of all msg_ids, ordered by task submission time.
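Examples
--------
A usage sketch (assumes ``rc`` is a connected Client)::

    msg_ids = rc.hub_history()
    ar = rc.get_result(msg_ids[-3:])   # last three tasks, cluster-wide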
"""
reply = self._send_recv(self._query_stream, "history_request", content={})
content = reply['content']
if content['status'] != 'ok':
raise self._unwrap_exception(content)
else:
return content['history']
def db_query(self, query, keys=None):
"""Query the Hub's TaskRecord database
This will return a list of task record dicts that match `query`
Parameters
----------
query : mongodb query dict
The search dict. See mongodb query docs for details.
keys : list of strs [optional]
The subset of keys to be returned. The default is to fetch everything but buffers.
'msg_id' will *always* be included.
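Examples
--------
A usage sketch (assumes ``rc`` is a connected Client, ``ar`` is an
AsyncResult, and the Hub is using a queryable db backend)::

    records = rc.db_query({'msg_id': {'$in': ar.msg_ids}},
                          keys=['started', 'completed', 'engine_uuid'])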
"""
if isinstance(keys, string_types):
keys = [keys]
content = dict(query=query, keys=keys)
reply = self._send_recv(self._query_stream, "db_request", content=content)
content = reply['content']
if content['status'] != 'ok':
raise self._unwrap_exception(content)
records = content['records']
buffer_lens = content['buffer_lens']
result_buffer_lens = content['result_buffer_lens']
buffers = reply['buffers']
has_bufs = buffer_lens is not None
has_rbufs = result_buffer_lens is not None
for i,rec in enumerate(records):
# unpack datetime objects
for hkey in ('header', 'result_header'):
if hkey in rec:
rec[hkey] = util.extract_dates(rec[hkey])
for dtkey in ('submitted', 'started', 'completed', 'received'):
if dtkey in rec:
rec[dtkey] = util._parse_date(rec[dtkey])
# relink buffers
if has_bufs:
blen = buffer_lens[i]
rec['buffers'], buffers = buffers[:blen],buffers[blen:]
if has_rbufs:
blen = result_buffer_lens[i]
rec['result_buffers'], buffers = buffers[:blen],buffers[blen:]
return records
__all__ = [ 'Client' ]
|
build_environment.py
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
This module contains all routines related to setting up the package
build environment. All of this is set up by package.py just before
install() is called.
There are two parts to the build environment:
1. Python build environment (i.e. install() method)
This is how things are set up when install() is called. Spack
takes advantage of each package being in its own module by adding a
bunch of command-like functions (like configure(), make(), etc.) in
the package's module scope. This allows package writers to call
them all directly in Package.install() without writing 'self.'
everywhere. No, this isn't Pythonic. Yes, it makes the code more
readable and more like the shell script from which someone is
likely porting.
2. Build execution environment
This is the set of environment variables, like PATH, CC, CXX,
etc. that control the build. There are also a number of
environment variables used to pass information (like RPATHs and
other information about dependencies) to Spack's compiler wrappers.
All of these env vars are also set up here.
Skimming this module is a nice way to get acquainted with the types of
calls you can make from within the install() function.
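For example, a typical Autotools-style package's install() is roughly
(an illustrative sketch, not a real package)::

    def install(self, spec, prefix):
        configure('--prefix={0}'.format(prefix))
        make()
        make('install')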
"""
import inspect
import multiprocessing
import os
import shutil
import sys
import traceback
import types
from six import iteritems
from six import StringIO
import llnl.util.tty as tty
from llnl.util.tty.color import cescape, colorize
from llnl.util.filesystem import mkdirp, install, install_tree
from llnl.util.lang import dedupe
import spack.build_systems.cmake
import spack.build_systems.meson
import spack.config
import spack.main
import spack.paths
import spack.store
from spack.util.string import plural
from spack.util.environment import (
env_flag, filter_system_paths, get_path, is_system_path,
EnvironmentModifications, validate, preserve_environment)
from spack.util.environment import system_dirs
from spack.error import NoLibrariesError, NoHeadersError
from spack.util.executable import Executable
from spack.util.module_cmd import load_module, get_path_from_module
from spack.util.log_parse import parse_log_events, make_log_context
#
# This can be set by the user to globally disable parallel builds.
#
SPACK_NO_PARALLEL_MAKE = 'SPACK_NO_PARALLEL_MAKE'
#
# These environment variables are set by
# set_build_environment_variables and used to pass parameters to
# Spack's compiler wrappers.
#
SPACK_ENV_PATH = 'SPACK_ENV_PATH'
SPACK_INCLUDE_DIRS = 'SPACK_INCLUDE_DIRS'
SPACK_LINK_DIRS = 'SPACK_LINK_DIRS'
SPACK_RPATH_DIRS = 'SPACK_RPATH_DIRS'
SPACK_RPATH_DEPS = 'SPACK_RPATH_DEPS'
SPACK_LINK_DEPS = 'SPACK_LINK_DEPS'
SPACK_PREFIX = 'SPACK_PREFIX'
SPACK_INSTALL = 'SPACK_INSTALL'
SPACK_DEBUG = 'SPACK_DEBUG'
SPACK_SHORT_SPEC = 'SPACK_SHORT_SPEC'
SPACK_DEBUG_LOG_ID = 'SPACK_DEBUG_LOG_ID'
SPACK_DEBUG_LOG_DIR = 'SPACK_DEBUG_LOG_DIR'
SPACK_CCACHE_BINARY = 'SPACK_CCACHE_BINARY'
SPACK_SYSTEM_DIRS = 'SPACK_SYSTEM_DIRS'
# Platform-specific library suffix.
dso_suffix = 'dylib' if sys.platform == 'darwin' else 'so'
class MakeExecutable(Executable):
"""Special callable executable object for make so the user can specify
parallelism options on a per-invocation basis. Specifying
'parallel' to the call will override whatever the package's
global setting is, so you can either default to true or false and
override particular calls. Specifying 'jobs_env' to a particular
call will name an environment variable which will be set to the
parallelism level (without affecting the normal invocation with
-j).
Note that if the SPACK_NO_PARALLEL_MAKE env var is set it overrides
everything.
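Spack normally injects an instance of this class into the package module
as ``make``; typical calls then look like this (names and job counts are
illustrative)::

    make = MakeExecutable('make', jobs=8)
    make()                           # runs `make -j8`
    make('install', parallel=False)  # runs `make install` serially
    make(jobs_env='MAKE_JOBS')       # also exports MAKE_JOBS=8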
"""
def __init__(self, name, jobs):
super(MakeExecutable, self).__init__(name)
self.jobs = jobs
def __call__(self, *args, **kwargs):
"""parallel, and jobs_env from kwargs are swallowed and used here;
remaining arguments are passed through to the superclass.
"""
disable = env_flag(SPACK_NO_PARALLEL_MAKE)
parallel = (not disable) and kwargs.pop('parallel', self.jobs > 1)
if parallel:
args = ('-j{0}'.format(self.jobs),) + args
jobs_env = kwargs.pop('jobs_env', None)
if jobs_env:
# Caller wants us to set an environment variable to
# control the parallelism.
kwargs['extra_env'] = {jobs_env: str(self.jobs)}
return super(MakeExecutable, self).__call__(*args, **kwargs)
def clean_environment():
# Stuff in here sanitizes the build environment to eliminate
# anything the user has set that may interfere. We apply it immediately
# unlike the other functions so it doesn't overwrite what the modules load.
env = EnvironmentModifications()
# Remove these vars from the environment during build because they
# can affect how some packages find libraries. We want to make
# sure that builds never pull in unintended external dependencies.
env.unset('LD_LIBRARY_PATH')
env.unset('LIBRARY_PATH')
env.unset('CPATH')
env.unset('LD_RUN_PATH')
env.unset('DYLD_LIBRARY_PATH')
build_lang = spack.config.get('config:build_language')
if build_lang:
# Override language-related variables. This can be used to force
# English compiler messages etc., which allows parse_log_events to
# show useful matches.
env.set('LC_ALL', build_lang)
# Remove any macports installs from the PATH. The macports ld can
# cause conflicts with the built-in linker on el capitan. Solves
# assembler issues, e.g.:
# suffix or operands invalid for `movq'"
path = get_path('PATH')
for p in path:
if '/macports/' in p:
env.remove_path('PATH', p)
env.apply_modifications()
def set_compiler_environment_variables(pkg, env):
assert pkg.spec.concrete
compiler = pkg.compiler
spec = pkg.spec
# Set compiler variables used by CMake and autotools
assert all(key in compiler.link_paths for key in (
'cc', 'cxx', 'f77', 'fc'))
# Populate an object with the list of environment modifications
# and return it
# TODO : add additional kwargs for better diagnostics, like requestor,
# ttyout, ttyerr, etc.
link_dir = spack.paths.build_env_path
# Set SPACK compiler variables so that our wrapper knows what to call
if compiler.cc:
env.set('SPACK_CC', compiler.cc)
env.set('CC', os.path.join(link_dir, compiler.link_paths['cc']))
if compiler.cxx:
env.set('SPACK_CXX', compiler.cxx)
env.set('CXX', os.path.join(link_dir, compiler.link_paths['cxx']))
if compiler.f77:
env.set('SPACK_F77', compiler.f77)
env.set('F77', os.path.join(link_dir, compiler.link_paths['f77']))
if compiler.fc:
env.set('SPACK_FC', compiler.fc)
env.set('FC', os.path.join(link_dir, compiler.link_paths['fc']))
# Set SPACK compiler rpath flags so that our wrapper knows what to use
env.set('SPACK_CC_RPATH_ARG', compiler.cc_rpath_arg)
env.set('SPACK_CXX_RPATH_ARG', compiler.cxx_rpath_arg)
env.set('SPACK_F77_RPATH_ARG', compiler.f77_rpath_arg)
env.set('SPACK_FC_RPATH_ARG', compiler.fc_rpath_arg)
env.set('SPACK_LINKER_ARG', compiler.linker_arg)
# Check whether we want to force RPATH or RUNPATH
if spack.config.get('config:shared_linking') == 'rpath':
env.set('SPACK_DTAGS_TO_STRIP', compiler.enable_new_dtags)
env.set('SPACK_DTAGS_TO_ADD', compiler.disable_new_dtags)
else:
env.set('SPACK_DTAGS_TO_STRIP', compiler.disable_new_dtags)
env.set('SPACK_DTAGS_TO_ADD', compiler.enable_new_dtags)
# Set the target parameters that the compiler will add
isa_arg = spec.architecture.target.optimization_flags(compiler)
env.set('SPACK_TARGET_ARGS', isa_arg)
# Trap spack-tracked compiler flags as appropriate.
# env_flags are easy to accidentally override.
inject_flags = {}
env_flags = {}
build_system_flags = {}
for flag in spack.spec.FlagMap.valid_compiler_flags():
# Always convert flag_handler to function type.
# This avoids discrepancies in calling conventions between functions
# and methods, or between bound and unbound methods in python 2.
# We cannot effectively convert everything to a bound method, which
# would be the simpler solution.
if isinstance(pkg.flag_handler, types.FunctionType):
handler = pkg.flag_handler
else:
if sys.version_info >= (3, 0):
handler = pkg.flag_handler.__func__
else:
handler = pkg.flag_handler.im_func
injf, envf, bsf = handler(pkg, flag, spec.compiler_flags[flag])
inject_flags[flag] = injf or []
env_flags[flag] = envf or []
build_system_flags[flag] = bsf or []
# Place compiler flags as specified by flag_handler
for flag in spack.spec.FlagMap.valid_compiler_flags():
# Concreteness guarantees key safety here
if inject_flags[flag]:
# variables SPACK_<FLAG> inject flags through wrapper
var_name = 'SPACK_{0}'.format(flag.upper())
env.set(var_name, ' '.join(f for f in inject_flags[flag]))
if env_flags[flag]:
# implicit variables
env.set(flag.upper(), ' '.join(f for f in env_flags[flag]))
pkg.flags_to_build_system_args(build_system_flags)
env.set('SPACK_COMPILER_SPEC', str(spec.compiler))
env.set('SPACK_SYSTEM_DIRS', ':'.join(system_dirs))
compiler.setup_custom_environment(pkg, env)
return env
def set_build_environment_variables(pkg, env, dirty):
"""Ensure a clean install environment when we build packages.
This involves unsetting pesky environment variables that may
affect the build. It also involves setting environment variables
used by Spack's compiler wrappers.
Args:
pkg: The package we are building
env: The build environment
dirty (bool): Skip unsetting the user's environment settings
"""
# Gather information about various types of dependencies
build_deps = set(pkg.spec.dependencies(deptype=('build', 'test')))
link_deps = set(pkg.spec.traverse(root=False, deptype=('link')))
build_link_deps = build_deps | link_deps
rpath_deps = get_rpath_deps(pkg)
link_dirs = []
include_dirs = []
rpath_dirs = []
# The top-level package is always RPATHed. It hasn't been installed yet
# so the RPATHs are added unconditionally (e.g. even though lib64/ may
# not be created for the install).
for libdir in ['lib', 'lib64']:
lib_path = os.path.join(pkg.prefix, libdir)
rpath_dirs.append(lib_path)
# Set up link, include, RPATH directories that are passed to the
# compiler wrapper
for dep in link_deps:
if is_system_path(dep.prefix):
continue
query = pkg.spec[dep.name]
dep_link_dirs = list()
try:
dep_link_dirs.extend(query.libs.directories)
except NoLibrariesError:
tty.debug("No libraries found for {0}".format(dep.name))
for default_lib_dir in ['lib', 'lib64']:
default_lib_prefix = os.path.join(dep.prefix, default_lib_dir)
if os.path.isdir(default_lib_prefix):
dep_link_dirs.append(default_lib_prefix)
link_dirs.extend(dep_link_dirs)
if dep in rpath_deps:
rpath_dirs.extend(dep_link_dirs)
try:
include_dirs.extend(query.headers.directories)
except NoHeadersError:
tty.debug("No headers found for {0}".format(dep.name))
link_dirs = list(dedupe(filter_system_paths(link_dirs)))
include_dirs = list(dedupe(filter_system_paths(include_dirs)))
rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs)))
env.set(SPACK_LINK_DIRS, ':'.join(link_dirs))
env.set(SPACK_INCLUDE_DIRS, ':'.join(include_dirs))
env.set(SPACK_RPATH_DIRS, ':'.join(rpath_dirs))
build_prefixes = [dep.prefix for dep in build_deps]
build_link_prefixes = [dep.prefix for dep in build_link_deps]
# add run-time dependencies of direct build-time dependencies:
for build_dep in build_deps:
for run_dep in build_dep.traverse(deptype='run'):
build_prefixes.append(run_dep.prefix)
# Filter out system paths: ['/', '/usr', '/usr/local']
# These paths can be introduced into the build when an external package
# is added as a dependency. The problem with these paths is that they often
# contain hundreds of other packages installed in the same directory.
# If these paths come first, they can overshadow Spack installations.
build_prefixes = filter_system_paths(build_prefixes)
build_link_prefixes = filter_system_paths(build_link_prefixes)
# Add dependencies to CMAKE_PREFIX_PATH
env.set_path('CMAKE_PREFIX_PATH', build_link_prefixes)
# Set environment variables if specified for
# the given compiler
compiler = pkg.compiler
environment = compiler.environment
for command, variable in iteritems(environment):
if command == 'set':
for name, value in iteritems(variable):
env.set(name, value)
elif command == 'unset':
for name, _ in iteritems(variable):
env.unset(name)
elif command == 'prepend-path':
for name, value in iteritems(variable):
env.prepend_path(name, value)
elif command == 'append-path':
for name, value in iteritems(variable):
env.append_path(name, value)
if compiler.extra_rpaths:
extra_rpaths = ':'.join(compiler.extra_rpaths)
env.set('SPACK_COMPILER_EXTRA_RPATHS', extra_rpaths)
implicit_rpaths = compiler.implicit_rpaths()
if implicit_rpaths:
env.set('SPACK_COMPILER_IMPLICIT_RPATHS', ':'.join(implicit_rpaths))
# Add bin directories from dependencies to the PATH for the build.
for prefix in build_prefixes:
for dirname in ['bin', 'bin64']:
bin_dir = os.path.join(prefix, dirname)
if os.path.isdir(bin_dir):
env.prepend_path('PATH', bin_dir)
# Add spack build environment path with compiler wrappers first in
# the path. We add the compiler wrapper path, which includes default
# wrappers (cc, c++, f77, f90), AND a subdirectory containing
# compiler-specific symlinks. The latter ensures that builds that
# are sensitive to the *name* of the compiler see the right name when
# we're building with the wrappers.
#
# Conflicts on case-insensitive systems (like "CC" and "cc") are
# handled by putting one in the <build_env_path>/case-insensitive
# directory. Add that to the path too.
env_paths = []
compiler_specific = os.path.join(
spack.paths.build_env_path, pkg.compiler.name)
for item in [spack.paths.build_env_path, compiler_specific]:
env_paths.append(item)
ci = os.path.join(item, 'case-insensitive')
if os.path.isdir(ci):
env_paths.append(ci)
for item in env_paths:
env.prepend_path('PATH', item)
env.set_path(SPACK_ENV_PATH, env_paths)
# Working directory for the spack command itself, for debug logs.
if spack.config.get('config:debug'):
env.set(SPACK_DEBUG, 'TRUE')
env.set(SPACK_SHORT_SPEC, pkg.spec.short_spec)
env.set(SPACK_DEBUG_LOG_ID, pkg.spec.format('{name}-{hash:7}'))
env.set(SPACK_DEBUG_LOG_DIR, spack.main.spack_working_dir)
# Find ccache binary and hand it to build environment
if spack.config.get('config:ccache'):
ccache = Executable('ccache')
if not ccache:
raise RuntimeError("No ccache binary found in PATH")
env.set(SPACK_CCACHE_BINARY, ccache)
# Add any pkgconfig directories to PKG_CONFIG_PATH
for prefix in build_link_prefixes:
for directory in ('lib', 'lib64', 'share'):
pcdir = os.path.join(prefix, directory, 'pkgconfig')
if os.path.isdir(pcdir):
env.prepend_path('PKG_CONFIG_PATH', pcdir)
return env
def _set_variables_for_single_module(pkg, module):
"""Helper function to set module variables for single module."""
# Put a marker on this module so that it won't execute the body of this
# function again, since it is not needed
marker = '_set_run_already_called'
if getattr(module, marker, False):
return
jobs = spack.config.get('config:build_jobs') if pkg.parallel else 1
jobs = min(jobs, multiprocessing.cpu_count())
assert jobs is not None, "no default set for config:build_jobs"
m = module
m.make_jobs = jobs
# TODO: make these build deps that can be installed if not found.
m.make = MakeExecutable('make', jobs)
m.gmake = MakeExecutable('gmake', jobs)
m.scons = MakeExecutable('scons', jobs)
m.ninja = MakeExecutable('ninja', jobs)
# easy shortcut to os.environ
m.env = os.environ
# Find the configure script in the archive path
# Don't use which for this; we want to find it in the current dir.
m.configure = Executable('./configure')
m.meson = Executable('meson')
m.cmake = Executable('cmake')
m.ctest = MakeExecutable('ctest', jobs)
# Standard CMake arguments
m.std_cmake_args = spack.build_systems.cmake.CMakePackage._std_args(pkg)
m.std_meson_args = spack.build_systems.meson.MesonPackage._std_args(pkg)
# Put spack compiler paths in module scope.
link_dir = spack.paths.build_env_path
m.spack_cc = os.path.join(link_dir, pkg.compiler.link_paths['cc'])
m.spack_cxx = os.path.join(link_dir, pkg.compiler.link_paths['cxx'])
m.spack_f77 = os.path.join(link_dir, pkg.compiler.link_paths['f77'])
m.spack_fc = os.path.join(link_dir, pkg.compiler.link_paths['fc'])
# Emulate some shell commands for convenience
m.pwd = os.getcwd
m.cd = os.chdir
m.mkdir = os.mkdir
m.makedirs = os.makedirs
m.remove = os.remove
m.removedirs = os.removedirs
m.symlink = os.symlink
m.mkdirp = mkdirp
m.install = install
m.install_tree = install_tree
m.rmtree = shutil.rmtree
m.move = shutil.move
# Useful directories within the prefix are encapsulated in
# a Prefix object.
m.prefix = pkg.prefix
# Platform-specific library suffix.
m.dso_suffix = dso_suffix
def static_to_shared_library(static_lib, shared_lib=None, **kwargs):
compiler_path = kwargs.get('compiler', m.spack_cc)
compiler = Executable(compiler_path)
return _static_to_shared_library(pkg.spec.architecture, compiler,
static_lib, shared_lib, **kwargs)
m.static_to_shared_library = static_to_shared_library
# Put a marker on this module so that it won't execute the body of this
# function again, since it is not needed
setattr(m, marker, True)
def set_module_variables_for_package(pkg):
"""Populate the module scope of install() with some useful functions.
This makes things easier for package writers.
"""
# If a user makes their own package repo, e.g.
# spack.pkg.mystuff.libelf.Libelf, and they inherit from an existing class
# like spack.pkg.original.libelf.Libelf, then set the module variables
# for both classes so the parent class can still use them if it gets
# called. parent_class_modules includes pkg.module.
modules = parent_class_modules(pkg.__class__)
for mod in modules:
_set_variables_for_single_module(pkg, mod)
def _static_to_shared_library(arch, compiler, static_lib, shared_lib=None,
**kwargs):
"""
Converts a static library to a shared library. The static library has to
be built with PIC for the conversion to work.
Parameters:
static_lib (str): Path to the static library.
shared_lib (str): Path to the shared library. Default is to derive
from the static library's path.
Keyword arguments:
compiler (str): Path to the compiler. Default is spack_cc.
compiler_output: Where to print compiler output to.
arguments (str list): Additional arguments for the compiler.
version (str): Library version. Default is unspecified.
compat_version (str): Library compatibility version. Default is
version.
"""
compiler_output = kwargs.get('compiler_output', None)
arguments = kwargs.get('arguments', [])
version = kwargs.get('version', None)
compat_version = kwargs.get('compat_version', version)
if not shared_lib:
shared_lib = '{0}.{1}'.format(os.path.splitext(static_lib)[0],
dso_suffix)
compiler_args = []
# TODO: Compiler arguments should not be hardcoded but provided by
# the different compiler classes.
if 'linux' in arch:
soname = os.path.basename(shared_lib)
if compat_version:
soname += '.{0}'.format(compat_version)
compiler_args = [
'-shared',
'-Wl,-soname,{0}'.format(soname),
'-Wl,--whole-archive',
static_lib,
'-Wl,--no-whole-archive'
]
elif 'darwin' in arch:
install_name = shared_lib
if compat_version:
install_name += '.{0}'.format(compat_version)
compiler_args = [
'-dynamiclib',
'-install_name', '{0}'.format(install_name),
'-Wl,-force_load,{0}'.format(static_lib)
]
if compat_version:
compiler_args.extend(['-compatibility_version', '{0}'.format(
compat_version)])
if version:
compiler_args.extend(['-current_version', '{0}'.format(version)])
if len(arguments) > 0:
compiler_args.extend(arguments)
shared_lib_base = shared_lib
if version:
shared_lib += '.{0}'.format(version)
elif compat_version:
shared_lib += '.{0}'.format(compat_version)
compiler_args.extend(['-o', shared_lib])
# Create symlinks for version and compat_version
shared_lib_link = os.path.basename(shared_lib)
if version or compat_version:
os.symlink(shared_lib_link, shared_lib_base)
if compat_version and compat_version != version:
os.symlink(shared_lib_link, '{0}.{1}'.format(shared_lib_base,
compat_version))
return compiler(*compiler_args, output=compiler_output)
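# Illustrative sketch, not part of Spack: how a package author might call the
# module-scope `static_to_shared_library` helper that
# _set_variables_for_single_module() injects into the package module above.
# The archive name and version numbers below are hypothetical placeholders.
def _example_static_to_shared(pkg_module, prefix):
    # `pkg_module` is a package module after set_module_variables_for_package();
    # the injected helper is already bound to that package's spec and compiler.
    static = os.path.join(prefix, 'lib', 'libfoo.a')  # must be built with PIC
    return pkg_module.static_to_shared_library(
        static,
        version='1.2.3',        # produces libfoo.<dso_suffix>.1.2.3
        compat_version='1')     # also creates the compatibility symlink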
def get_rpath_deps(pkg):
"""Return immediate or transitive RPATHs depending on the package."""
if pkg.transitive_rpaths:
return [d for d in pkg.spec.traverse(root=False, deptype=('link'))]
else:
return pkg.spec.dependencies(deptype='link')
def get_rpaths(pkg):
"""Get a list of all the rpaths for a package."""
rpaths = [pkg.prefix.lib, pkg.prefix.lib64]
deps = get_rpath_deps(pkg)
rpaths.extend(d.prefix.lib for d in deps
if os.path.isdir(d.prefix.lib))
rpaths.extend(d.prefix.lib64 for d in deps
if os.path.isdir(d.prefix.lib64))
# Second module is our compiler mod name. We use that to get rpaths from
# module show output.
if pkg.compiler.modules and len(pkg.compiler.modules) > 1:
rpaths.append(get_path_from_module(pkg.compiler.modules[1]))
return rpaths
def get_std_cmake_args(pkg):
"""List of standard arguments used if a package is a CMakePackage.
Returns:
list of str: standard arguments that would be used if this
package were a CMakePackage instance.
Args:
pkg (PackageBase): package under consideration
Returns:
list of str: arguments for cmake
"""
return spack.build_systems.cmake.CMakePackage._std_args(pkg)
def get_std_meson_args(pkg):
"""List of standard arguments used if a package is a MesonPackage.
Returns:
list of str: standard arguments that would be used if this
package were a MesonPackage instance.
Args:
pkg (PackageBase): package under consideration
Returns:
list of str: arguments for meson
"""
return spack.build_systems.meson.MesonPackage._std_args(pkg)
def parent_class_modules(cls):
"""
Get list of superclass modules that descend from spack.package.PackageBase
Includes cls.__module__
"""
if (not issubclass(cls, spack.package.PackageBase) or
issubclass(spack.package.PackageBase, cls)):
return []
result = []
module = sys.modules.get(cls.__module__)
if module:
result = [module]
for c in cls.__bases__:
result.extend(parent_class_modules(c))
return result
def load_external_modules(pkg):
"""Traverse a package's spec DAG and load any external modules.
Traverse a package's dependencies and load any external modules
associated with them.
Args:
pkg (PackageBase): package to load deps for
"""
for dep in list(pkg.spec.traverse()):
if dep.external_module:
load_module(dep.external_module)
def setup_package(pkg, dirty):
"""Execute all environment setup routines."""
build_env = EnvironmentModifications()
if not dirty:
clean_environment()
set_compiler_environment_variables(pkg, build_env)
set_build_environment_variables(pkg, build_env, dirty)
pkg.architecture.platform.setup_platform_environment(pkg, build_env)
build_env.extend(
modifications_from_dependencies(pkg.spec, context='build')
)
if (not dirty) and (not build_env.is_unset('CPATH')):
tty.debug("A dependency has updated CPATH, this may lead pkg-config"
" to assume that the package is part of the system"
" includes and omit it when invoked with '--cflags'.")
set_module_variables_for_package(pkg)
pkg.setup_build_environment(build_env)
# Loading modules, in particular if they are meant to be used outside
# of Spack, can change environment variables that are relevant to the
# build of packages. To avoid a polluted environment, preserve the
# value of a few, selected, environment variables
# With the current ordering of environment modifications, this is strictly
# unnecessary. Modules affecting these variables will be overwritten anyway
with preserve_environment('CC', 'CXX', 'FC', 'F77'):
# All module loads that otherwise would belong in previous
# functions have to occur after the build_env object has its
# modifications applied. Otherwise the environment modifications
# could undo module changes, such as unsetting LD_LIBRARY_PATH
# after a module changes it.
for mod in pkg.compiler.modules:
# Fixes issue https://github.com/spack/spack/issues/3153
if os.environ.get("CRAY_CPU_TARGET") == "mic-knl":
load_module("cce")
load_module(mod)
if pkg.architecture.target.module_name:
load_module(pkg.architecture.target.module_name)
load_external_modules(pkg)
# Make sure nothing's strange about the Spack environment.
validate(build_env, tty.warn)
build_env.apply_modifications()
def modifications_from_dependencies(spec, context):
"""Returns the environment modifications that are required by
the dependencies of a spec and also applies modifications
to this spec's package at module scope, if need be.
Args:
spec (Spec): spec for which we want the modifications
context (str): either 'build' for build-time modifications or 'run'
for run-time modifications
"""
env = EnvironmentModifications()
pkg = spec.package
# Maps the context to deptype and method to be called
deptype_and_method = {
'build': (('build', 'link', 'test'),
'setup_dependent_build_environment'),
'run': (('link', 'run'), 'setup_dependent_run_environment')
}
deptype, method = deptype_and_method[context]
for dspec in spec.traverse(order='post', root=False, deptype=deptype):
dpkg = dspec.package
set_module_variables_for_package(dpkg)
# Allow dependencies to modify the module
dpkg.setup_dependent_package(pkg.module, spec)
getattr(dpkg, method)(env, spec)
return env
def fork(pkg, function, dirty, fake):
"""Fork a child process to do part of a spack build.
Args:
pkg (PackageBase): package whose environment we should set up the
forked process for.
function (callable): argless function to run in the child
process.
dirty (bool): If True, do NOT clean the environment before
building.
fake (bool): If True, skip package setup b/c it's not a real build
Usage::
def child_fun():
# do stuff
build_env.fork(pkg, child_fun)
Forked processes are run with the build environment set up by
spack.build_environment. This allows package authors to have full
control over the environment, etc. without affecting other builds
that might be executed in the same spack call.
If something goes wrong, the child process catches the error and
passes it to the parent wrapped in a ChildError. The parent is
expected to handle (or re-raise) the ChildError.
"""
def child_process(child_pipe, input_stream):
# We are in the child process. Python sets sys.stdin to
# open(os.devnull) to prevent our process and its parent from
# simultaneously reading from the original stdin. But, we assume
# that the parent process is not going to read from it till we
# are done with the child, so we undo Python's precaution.
if input_stream is not None:
sys.stdin = input_stream
try:
if not fake:
setup_package(pkg, dirty=dirty)
return_value = function()
child_pipe.send(return_value)
except StopIteration as e:
# StopIteration is used to stop installations
# before the final stage, mainly for debug purposes
tty.msg(e)
child_pipe.send(None)
except BaseException:
# catch ANYTHING that goes wrong in the child process
exc_type, exc, tb = sys.exc_info()
# Need to unwind the traceback in the child because traceback
# objects can't be sent to the parent.
tb_string = traceback.format_exc()
# build up some context from the offending package so we can
# show that, too.
package_context = get_package_context(tb)
build_log = None
if hasattr(pkg, 'log_path'):
build_log = pkg.log_path
# make a pickleable exception to send to parent.
msg = "%s: %s" % (exc_type.__name__, str(exc))
ce = ChildError(msg,
exc_type.__module__,
exc_type.__name__,
tb_string, build_log, package_context)
child_pipe.send(ce)
finally:
child_pipe.close()
parent_pipe, child_pipe = multiprocessing.Pipe()
input_stream = None
try:
# Forward sys.stdin when appropriate, to allow toggling verbosity
if sys.stdin.isatty() and hasattr(sys.stdin, 'fileno'):
input_stream = os.fdopen(os.dup(sys.stdin.fileno()))
p = multiprocessing.Process(
target=child_process, args=(child_pipe, input_stream))
p.start()
except InstallError as e:
e.pkg = pkg
raise
finally:
# Close the input stream in the parent process
if input_stream is not None:
input_stream.close()
child_result = parent_pipe.recv()
p.join()
# let the caller know which package went wrong.
if isinstance(child_result, InstallError):
child_result.pkg = pkg
# If the child process raised an error, print its output here rather
# than waiting until the call to SpackError.die() in main(). This
# allows exception handling output to be logged from within Spack.
# see spack.main.SpackCommand.
if isinstance(child_result, ChildError):
child_result.print_context()
raise child_result
return child_result
def get_package_context(traceback, context=3):
"""Return some context for an error message when the build fails.
Args:
traceback (traceback): A traceback from some exception raised during
install
context (int): Lines of context to show before and after the line
where the error happened
This function inspects the stack to find where we failed in the
package file, and it adds detailed context to the long_message
from there.
"""
def make_stack(tb, stack=None):
"""Tracebacks come out of the system in caller -> callee order. Return
an array in callee -> caller order so we can traverse it."""
if stack is None:
stack = []
if tb is not None:
make_stack(tb.tb_next, stack)
stack.append(tb)
return stack
stack = make_stack(traceback)
for tb in stack:
frame = tb.tb_frame
if 'self' in frame.f_locals:
# Find the first proper subclass of PackageBase.
obj = frame.f_locals['self']
if isinstance(obj, spack.package.PackageBase):
break
# We found obj, the Package implementation we care about.
# Point out the location in the install method where we failed.
lines = [
'{0}:{1:d}, in {2}:'.format(
inspect.getfile(frame.f_code),
frame.f_lineno - 1, # subtract 1 because f_lineno is 0-indexed
frame.f_code.co_name
)
]
# Build a message showing context in the install method.
sourcelines, start = inspect.getsourcelines(frame)
# Calculate lineno of the error relative to the start of the function.
# Subtract 1 because f_lineno is 0-indexed.
fun_lineno = frame.f_lineno - start - 1
start_ctx = max(0, fun_lineno - context)
sourcelines = sourcelines[start_ctx:fun_lineno + context + 1]
for i, line in enumerate(sourcelines):
is_error = start_ctx + i == fun_lineno
mark = '>> ' if is_error else ' '
# Add start to get lineno relative to start of file, not function.
marked = ' {0}{1:-6d}{2}'.format(
mark, start + start_ctx + i, line.rstrip())
if is_error:
marked = colorize('@R{%s}' % cescape(marked))
lines.append(marked)
return lines
class InstallError(spack.error.SpackError):
"""Raised by packages when a package fails to install.
    Any subclass of InstallError will be annotated by Spack with a
``pkg`` attribute on failure, which the caller can use to get the
package for which the exception was raised.
"""
class ChildError(InstallError):
"""Special exception class for wrapping exceptions from child processes
in Spack's build environment.
The main features of a ChildError are:
1. They're serializable, so when a child build fails, we can send one
of these to the parent and let the parent report what happened.
2. They have a ``traceback`` field containing a traceback generated
on the child immediately after failure. Spack will print this on
failure in lieu of trying to run sys.excepthook on the parent
process, so users will see the correct stack trace from a child.
3. They also contain context, which shows context in the Package
implementation where the error happened. This helps people debug
Python code in their packages. To get it, Spack searches the
stack trace for the deepest frame where ``self`` is in scope and
is an instance of PackageBase. This will generally find a useful
spot in the ``package.py`` file.
The long_message of a ChildError displays one of two things:
1. If the original error was a ProcessError, indicating a command
died during the build, we'll show context from the build log.
2. If the original error was any other type of error, we'll show
context from the Python code.
SpackError handles displaying the special traceback if we're in debug
mode with spack -d.
"""
# List of errors considered "build errors", for which we'll show log
# context instead of Python context.
build_errors = [('spack.util.executable', 'ProcessError')]
def __init__(self, msg, module, classname, traceback_string, build_log,
context):
super(ChildError, self).__init__(msg)
self.module = module
self.name = classname
self.traceback = traceback_string
self.build_log = build_log
self.context = context
@property
def long_message(self):
out = StringIO()
out.write(self._long_message if self._long_message else '')
if (self.module, self.name) in ChildError.build_errors:
# The error happened in some external executed process. Show
# the build log with errors or warnings highlighted.
if self.build_log and os.path.exists(self.build_log):
errors, warnings = parse_log_events(self.build_log)
nerr = len(errors)
nwar = len(warnings)
if nerr > 0:
# If errors are found, only display errors
out.write(
"\n%s found in build log:\n" % plural(nerr, 'error'))
out.write(make_log_context(errors))
elif nwar > 0:
# If no errors are found but warnings are, display warnings
out.write(
"\n%s found in build log:\n" % plural(nwar, 'warning'))
out.write(make_log_context(warnings))
else:
            # The error happened in the Python code, so try to show
# some context from the Package itself.
if self.context:
out.write('\n')
out.write('\n'.join(self.context))
out.write('\n')
if out.getvalue():
out.write('\n')
if self.build_log and os.path.exists(self.build_log):
out.write('See build log for details:\n')
out.write(' %s\n' % self.build_log)
return out.getvalue()
def __str__(self):
return self.message + self.long_message + self.traceback
def __reduce__(self):
"""__reduce__ is used to serialize (pickle) ChildErrors.
Return a function to reconstruct a ChildError, along with the
salient properties we'll need.
"""
return _make_child_error, (
self.message,
self.module,
self.name,
self.traceback,
self.build_log,
self.context)
def _make_child_error(msg, module, name, traceback, build_log, context):
"""Used by __reduce__ in ChildError to reconstruct pickled errors."""
return ChildError(msg, module, name, traceback, build_log, context)
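# Illustrative sketch, not part of Spack: __reduce__ above is what lets a
# ChildError raised in the forked build process travel back to the parent over
# the multiprocessing pipe (which pickles whatever is sent). The message, log
# path and context values below are hypothetical.
def _example_childerror_roundtrip():
    import pickle
    err = ChildError('make: *** [all] Error 2',
                     'spack.util.executable',   # module of the original error
                     'ProcessError',            # class name of the original error
                     'Traceback (most recent call last): ...',
                     '/tmp/spack-build.out',    # hypothetical build log path
                     ['>> 42   make()'])        # package context lines
    clone = pickle.loads(pickle.dumps(err))
    assert clone.name == err.name and clone.build_log == err.build_log
    return clone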
|
train_pg_f18.py
|
"""
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Michael Chang and Soroush Nasiriany
"""
import numpy as np
import tensorflow as tf
import gym
import logz
import os
import time
import inspect
from multiprocessing import Process
#============================================================================================#
# Utilities
#============================================================================================#
EPSILON = 1e-8
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
"""
Builds a feedforward neural network
arguments:
input_placeholder: placeholder variable for the state (batch_size, input_size)
output_size: size of the output layer
scope: variable scope of the network
n_layers: number of hidden layers
size: dimension of the hidden layer
activation: activation of the hidden layers
            output_activation: activation of the output layers
returns:
output placeholder of the network (the result of a forward pass)
Hint: use tf.layers.dense
"""
with tf.variable_scope(scope):
layer = input_placeholder
for _ in range(n_layers):
layer = tf.layers.dense(layer, size, activation=activation)
output_placeholder = tf.layers.dense(layer, output_size, output_activation)
return output_placeholder
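# Illustrative sketch, not part of the assignment: graph-mode usage of
# build_mlp for a toy 4-dimensional observation space and 2 discrete actions.
# The scope name and layer sizes are arbitrary placeholders.
def _example_build_mlp_usage():
    graph = tf.Graph()
    with graph.as_default():
        obs_ph = tf.placeholder(tf.float32, shape=[None, 4], name='toy_obs')
        # Two hidden layers of 64 tanh units, linear logits over 2 actions.
        logits = build_mlp(obs_ph, output_size=2, scope='toy_policy',
                           n_layers=2, size=64)
    return graph, obs_ph, logits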
def pathlength(path):
return len(path["reward"])
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
#============================================================================================#
# Policy Gradient
#============================================================================================#
class Agent(object):
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_return_args):
super(Agent, self).__init__()
self.ob_dim = computation_graph_args['ob_dim']
self.ac_dim = computation_graph_args['ac_dim']
self.discrete = computation_graph_args['discrete']
self.size = computation_graph_args['size']
self.n_layers = computation_graph_args['n_layers']
self.learning_rate = computation_graph_args['learning_rate']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.gamma = estimate_return_args['gamma']
self.reward_to_go = estimate_return_args['reward_to_go']
self.nn_baseline = estimate_return_args['nn_baseline']
self.normalize_advantages = estimate_return_args['normalize_advantages']
def init_tf_sess(self):
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
self.sess = tf.Session(config=tf_config)
self.sess.__enter__() # equivalent to `with self.sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def define_placeholders(self):
"""
            Placeholders for batch observations / actions / advantages in the policy gradient
loss function.
See Agent.build_computation_graph for notation
returns:
sy_ob_no: placeholder for observations
sy_ac_na: placeholder for actions
sy_adv_n: placeholder for advantages
"""
sy_ob_no = tf.placeholder(dtype=tf.float32, shape=[None, self.ob_dim], name='observation')
if self.discrete:
sy_ac_na = tf.placeholder(dtype=tf.int32, shape=[None], name='action')
else:
sy_ac_na = tf.placeholder(dtype=tf.float32, shape=[None, self.ac_dim], name='actions')
sy_adv_n = tf.placeholder(dtype=tf.float32, shape=[None], name='advantage')
return sy_ob_no, sy_ac_na, sy_adv_n
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def policy_forward_pass(self, sy_ob_no):
""" Constructs the symbolic operation for the policy network outputs,
which are the parameters of the policy distribution p(a|s)
arguments:
sy_ob_no: (batch_size, self.ob_dim)
returns:
the parameters of the policy.
if discrete, the parameters are the logits of a categorical distribution
over the actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous, the parameters are a tuple (mean, log_std) of a Gaussian
distribution over actions. log_std should just be a trainable
variable, not a network output.
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
Hint: use the 'build_mlp' function to output the logits (in the discrete case)
and the mean (in the continuous case).
Pass in self.n_layers for the 'n_layers' argument, and
pass in self.size for the 'size' argument.
"""
name_scope = "nn_policy"
if self.discrete:
sy_logits_na = build_mlp(sy_ob_no, self.ac_dim, name_scope, self.n_layers, self.size, activation=tf.nn.relu, output_activation=None)
return sy_logits_na
else:
sy_mean = build_mlp(sy_ob_no, self.ac_dim, name_scope, self.n_layers, self.size, activation=tf.nn.relu, output_activation=None)
sy_logstd = tf.Variable(tf.zeros(self.ac_dim), name='sy_logstd')
return (sy_mean, sy_logstd)
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def sample_action(self, policy_parameters):
""" Constructs a symbolic operation for stochastically sampling from the policy
distribution
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
returns:
sy_sampled_ac:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
Hint: for the continuous case, use the reparameterization trick:
The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
mu + sigma * z, z ~ N(0, I)
This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
"""
with tf.variable_scope('sampled_action'):
if self.discrete:
sy_logits_na = policy_parameters
sy_sampled_ac = tf.squeeze(tf.multinomial(sy_logits_na, 1), axis=1)
else:
sy_mean, sy_logstd = policy_parameters
sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * tf.random_normal(shape=tf.shape(sy_mean))
return sy_sampled_ac
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def get_log_prob(self, policy_parameters, sy_ac_na):
""" Constructs a symbolic operation for computing the log probability of a set of actions
that were actually taken according to the policy
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
sy_ac_na:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
returns:
sy_logprob_n: (batch_size)
Hint:
For the discrete case, use the log probability under a categorical distribution.
For the continuous case, use the log probability under a multivariate gaussian.
"""
if self.discrete:
sy_logits_na = policy_parameters
sy_logprob_n = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=sy_ac_na, logits=sy_logits_na)
else:
sy_mean, sy_logstd = policy_parameters
            # Negative log probability, matching the sign convention of the discrete
            # branch (softmax cross entropy); summing per-dimension log-probs is also
            # more numerically stable than taking log(prod(prob)).
            sy_logprob_n = -tf.reduce_sum(
                tf.distributions.Normal(sy_mean, tf.exp(sy_logstd)).log_prob(sy_ac_na), axis=1)
return sy_logprob_n
def build_computation_graph(self):
"""
Notes on notation:
Symbolic variables have the prefix sy_, to distinguish them from the numerical values
that are computed later in the function
Prefixes and suffixes:
ob - observation
ac - action
            _no - this tensor should have shape (batch size /n/, observation dim)
            _na - this tensor should have shape (batch size /n/, action dim)
            _n - this tensor should have shape (batch size /n/)
            Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
is None
----------------------------------------------------------------------------------
loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate
to get the policy gradient.
"""
self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()
# The policy takes in an observation and produces a distribution over the action space
self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)
# We can sample actions from this action distribution.
# This will be called in Agent.sample_trajectory() where we generate a rollout.
self.sy_sampled_ac = self.sample_action(self.policy_parameters)
# We can also compute the logprob of the actions that were actually taken by the policy
# This is used in the loss function.
self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)
#========================================================================================#
# ----------PROBLEM 2----------
# Loss Function and Training Operation
#========================================================================================#
        with tf.variable_scope('log_probability_weighted_by_advantage'):
            sy_weighted_logprob_n = tf.multiply(self.sy_logprob_n, self.sy_adv_n)
        with tf.variable_scope('loss'):
            # sy_logprob_n holds the negative log likelihood (see Agent.get_log_prob),
            # so minimizing this mean maximizes the advantage-weighted log probability,
            # i.e. the policy gradient objective.
            self.sy_loss = tf.reduce_mean(sy_weighted_logprob_n)
self.update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.sy_loss)
#========================================================================================#
# ----------PROBLEM 6----------
# Optional Baseline
#
# Define placeholders for targets, a loss function and an update op for fitting a
# neural network baseline. These will be used to fit the neural network baseline.
#========================================================================================#
if self.nn_baseline:
self.baseline_prediction = tf.squeeze(build_mlp(
self.sy_ob_no,
1,
"nn_baseline",
n_layers=self.n_layers,
size=self.size))
self.sy_target_n = tf.placeholder(dtype=tf.float32, shape=[None], name='reward_label')
self.baseline_loss = tf.losses.mean_squared_error(self.sy_target_n, self.baseline_prediction, scope='nn_baseline_loss')
self.baseline_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.baseline_loss)
def sample_trajectories(self, itr, env):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
            animate_this_episode = (len(paths) == 0 and (itr % 10 == 0) and self.animate)
path = self.sample_trajectory(env, animate_this_episode)
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > self.min_timesteps_per_batch:
break
return paths, timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode):
ob = env.reset()
obs, acs, rewards = [], [], []
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.1)
obs.append(ob)
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
ac, policy_parameters = self.sess.run([self.sy_sampled_ac, self.policy_parameters], feed_dict={self.sy_ob_no: ob[None, :]})
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > self.max_path_length:
break
path = {"observation" : np.array(obs, dtype=np.float32),
"reward" : np.array(rewards, dtype=np.float32),
"action" : np.array(acs, dtype=np.float32)}
return path
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
def sum_of_rewards(self, re_n):
"""
Monte Carlo estimation of the Q function.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
----------------------------------------------------------------------------------
Your code should construct numpy arrays for Q-values which will be used to compute
advantages (which will in turn be fed to the placeholder you defined in
Agent.define_placeholders).
Recall that the expression for the policy gradient PG is
PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
where
tau=(s_0, a_0, ...) is a trajectory,
Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
and b_t is a baseline which may depend on s_t.
You will write code for two cases, controlled by the flag 'reward_to_go':
Case 1: trajectory-based PG
(reward_to_go = False)
Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
                  the entire trajectory (regardless of which time step the Q-value should be for).
For this case, the policy gradient estimator is
E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
where
Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
Thus, you should compute
Q_t = Ret(tau)
Case 2: reward-to-go PG
(reward_to_go = True)
Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
from time step t. Thus, you should compute
Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
like the 'ob_no' and 'ac_na' above.
"""
q_n = []
if self.reward_to_go:
for path_i in re_n:
reversed_path = np.flip(path_i)
reward_i = []
curr_reward = 0
for r in reversed_path:
curr_reward = curr_reward * self.gamma + r
reward_i.append(curr_reward)
reward_i.reverse()
q_n += reward_i
        else:  # full-trajectory reward: at every time step the gradient is weighted by the full discounted reward of the episode.
for path_i in re_n:
reversed_path = np.flip(path_i)
curr_reward = 0
for r in reversed_path:
curr_reward = curr_reward * self.gamma + r
q_n += [curr_reward for i in range(len(path_i))]
return np.array(q_n)
def compute_advantage(self, ob_no, q_n):
"""
Computes advantages by (possibly) subtracting a baseline from the estimated Q values
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
returns:
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Computing Baselines
#====================================================================================#
if self.nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current batch of Q-values. (Goes with Hint
            #            #bl2 in Agent.update_parameters.)
b_n = self.sess.run(self.baseline_prediction, feed_dict={self.sy_ob_no: ob_no})
b_n = (b_n - b_n.mean()) / (b_n.std() + EPSILON) * q_n.std() + q_n.mean()
adv_n = q_n - b_n
else:
adv_n = q_n.copy()
return adv_n
def estimate_return(self, ob_no, re_n):
"""
Estimates the returns over a set of trajectories.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
q_n = self.sum_of_rewards(re_n)
adv_n = self.compute_advantage(ob_no, q_n)
#====================================================================================#
# ----------PROBLEM 3----------
# Advantage Normalization
#====================================================================================#
if self.normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
adv_n = (adv_n - adv_n.mean()) / (adv_n.std() + EPSILON)
return q_n, adv_n
def update_parameters(self, ob_no, ac_na, q_n, adv_n):
"""
Update the parameters of the policy and (possibly) the neural network baseline,
which is trained to approximate the value function.
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
ac_na: shape: (sum_of_path_lengths).
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
returns:
nothing
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Optimizing Neural Network Baseline
#====================================================================================#
if self.nn_baseline:
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 in
# Agent.compute_advantage.)
target_n = (q_n - q_n.mean()) / (q_n.std() + EPSILON)
_, target_loss = self.sess.run([self.baseline_update_op, self.baseline_loss], feed_dict={self.sy_ob_no: ob_no, self.sy_target_n: target_n})
#====================================================================================#
# ----------PROBLEM 3----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
_, loss, policy_parameters, logprob_n = self.sess.run([self.update_op, self.sy_loss, self.policy_parameters, self.sy_logprob_n],
feed_dict={self.sy_ob_no: ob_no, self.sy_ac_na: ac_na, self.sy_adv_n: adv_n})
return loss
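# Illustrative sketch, not used by train_PG: a standalone NumPy version of the
# per-path discounted reward-to-go computed inside Agent.sum_of_rewards, handy
# for sanity-checking the loop above.
def _discounted_reward_to_go(rewards, gamma):
    q = np.zeros(len(rewards))
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        q[t] = running
    return q
# Example: _discounted_reward_to_go([1., 1., 1.], gamma=0.5) -> [1.75, 1.5, 1.]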
def train_PG(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
max_path_length,
learning_rate,
reward_to_go,
animate,
logdir,
normalize_advantages,
nn_baseline,
seed,
n_layers,
size):
start = time.time()
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
env = gym.make(env_name)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
# Is this env continuous, or self.discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# Initialize Agent
#========================================================================================#
computation_graph_args = {
'n_layers': n_layers,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'discrete': discrete,
'size': size,
'learning_rate': learning_rate,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_return_args = {
'gamma': gamma,
'reward_to_go': reward_to_go,
'nn_baseline': nn_baseline,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args, sample_trajectory_args, estimate_return_args)
# build computation graph
agent.build_computation_graph()
# tensorflow: config, session, variable initialization
agent.init_tf_sess()
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
re_n = [path["reward"] for path in paths]
q_n, adv_n = agent.estimate_return(ob_no, re_n)
agent.update_parameters(ob_no, ac_na, q_n, adv_n)
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=2)
parser.add_argument('--size', '-s', type=int, default=64)
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
processes = []
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
size=args.size
)
# # Awkward hacky process runs, because Tensorflow does not like
# # repeatedly calling train_PG in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
processes.append(p)
# if you comment in the line below, then the loop will block
# until this process finishes
# p.join()
for p in processes:
p.join()
if __name__ == "__main__":
main()
|
main.py
|
import select
from queue import Empty
from game.chunkthread import *
from game.world import *
from kademlia.node import Node
import socket, sys, json
class DHTThread: # todo: make this gracefully die/integrate it into the select stuff
def __init__(self, socket, dht : DHTServer):
self.socket = socket
self.dht = dht
self.thread = threading.Thread(target=self.mainloop)
self.thread.setDaemon(True)
self.thread.start()
def mainloop(self):
while True:
data = self.socket.recv(1024)
if data:
msg = json.loads(data[1:].decode())
if data[0] == 0:
msg = tuple(msg)
#print("Chunk location query")
                    future = asyncio.run_coroutine_threadsafe(self.dht.get_chunk(msg), self.dht.loop)
addr = future.result()
if addr is None:
#print("address invalid/chunk doesn't exist")
                        future = asyncio.run_coroutine_threadsafe(self.dht.generate_chunk(msg), self.dht.loop)
addr = future.result()
self.socket.send(addr.encode() + b'\n')
elif data[0] == 1:
name = msg["name"]
print(f"Looking for {name}")
future = asyncio.run_coroutine_threadsafe(self.dht.get_player(name), self.dht.loop)
player = future.result()
if player:
x,y,z = player["pos"]
self.socket.send(json.dumps({"x":x,"y":y,"z":z}).encode())
else:
self.socket.send(json.dumps({"x":0,"y":32,"z":0}).encode())
else:
self.socket.close()
return
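# Illustrative helpers, not used by the server loop below: DHTThread above
# expects a one-byte message type followed by a JSON payload. Type 0 asks which
# address serves a chunk coordinate (the reply is the address bytes terminated
# by b'\n'); type 1 asks for a player's position (the reply is a JSON object
# with "x", "y", "z"). These builders only assemble the request bytes; sending
# and receiving over a socket is left to the caller.
def build_chunk_query(chunk_coord):
    return bytes([0]) + json.dumps(list(chunk_coord)).encode()
def build_player_query(name):
    return bytes([1]) + json.dumps({"name": name}).encode()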
if len(sys.argv) < 3:
print("Usage: <command> <bind ip> <base port> [-i <id>] [-b <bootstrap address> <bootstrap port> <bootstrap id>]")
sys.exit()
bind_ip = sys.argv[1]
base_port = int(sys.argv[2])
id = None
if "-i" in sys.argv:
id = int(sys.argv[1+sys.argv.index("-i")])
dht = DHTServer((bind_ip, base_port), id=id)
dht_ready = threading.Event()
def ctrl_loop():
dht_ready.wait()
ss = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ss.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
ss.bind(('0.0.0.0', base_port + 1))
ss.setblocking(0)
ss.listen(5)
chunks = {}
loaded = {}
clients = {}
print("initialising game server")
t = time.monotonic()
while True:
if time.monotonic() - t > 3600:
for coord in chunks:
asyncio.run_coroutine_threadsafe(dht.republish_chunk(coord, (bind_ip, base_port + 1)), dht.loop)
t = time.monotonic()
sockets = list(clients.keys()) + [ss]
for s in sockets:
if s.fileno() == -1:
s.close()
                clients.pop(s, None)
try:
readable, writable, exceptional = select.select(sockets, list(filter(lambda x: x in clients and len(clients[x].to_send) > 0, sockets)), sockets, 10)
for r in readable:
if r == ss:
s, addr = ss.accept()
s.setblocking(1)
data = s.recv(1024)
msg = json.loads(data.decode())
if msg["type"] == "connect":
chunk_coord = tuple(msg["chunk"])
player = msg["player"]
print(f"player {player} is connecting to chunk at {chunk_coord}")
if chunk_coord not in chunks: # if chunk doesn't exist
s.send(b'no')
s.close()
else:
if chunk_coord not in loaded: # if chunk not loaded then load it
loaded[chunk_coord] = ChunkThread(dht, chunks[chunk_coord])
s.send(b'ok') # start normal game comms
s.setblocking(0)
client = Client(ClientType.PLAYER, loaded[chunk_coord], msg["player"], s)
loaded[chunk_coord].add_client(client) # register to chunk
clients[s] = client
elif msg["type"] == "generate":
chunk_coord = tuple(msg["chunk"])
if chunk_coord not in chunks:
chunks[chunk_coord] = Chunk(*chunk_coord)
s.send(b'ok')
elif msg["type"] == "dht":
DHTThread(s, dht)
s.send(b'ok')
elif msg["type"] == "ping":
s.send(b'pong')
else:
data = r.recv(1024)
if data:
client = clients[r]
for c in [data[i:i+1] for i in range(len(data))]:
if c == b'\n':
client.recv(client)
else:
client.buf += c
else:
if clients[r].chunk_thread.remove_client(clients[r]):
clients[r].chunk_thread.stop()
del loaded[clients[r].chunk_thread.chunk.location]
del clients[r]
if r in writable:
writable.remove(r)
if r in exceptional:
exceptional.remove(r)
r.close()
for w in writable:
client = clients[w]
if len(client.to_send) < 1: continue
data = client.to_send.popleft()
sent = w.send(data)
if sent != len(data):
client.to_send.appendleft(data[sent:])
for e in exceptional:
if clients[e].chunk_thread.remove_client(clients[e]):
clients[e].chunk_thread.stop()
del loaded[clients[e].chunk_thread.chunk.location]
del clients[e]
e.close()
except OSError:
print(list(map(lambda x: x.fileno(), sockets)))
except ValueError:
print(list(map(lambda x: x.fileno(), sockets)))
game_server_ctrl_thread = threading.Thread(target=ctrl_loop)
game_server_ctrl_thread.setDaemon(True)
game_server_ctrl_thread.start()
async def run():
print("initialising DHT")
if "-b" in sys.argv: # have supplied bootstrap but not ID
await dht.run(bootstrap=Node(int(sys.argv[sys.argv.index("-b")+3]), (sys.argv[sys.argv.index("-b")+1], int(sys.argv[sys.argv.index("-b")+2]))))
else:
await dht.run()
dht_ready.set()
while True:
await asyncio.sleep(3600)
asyncio.run(run())
|
artists.py
|
from flask import Blueprint, request, make_response, render_template, session, redirect, url_for, g, flash
import re
from ...utils.utils import make_template, count_to_pages, get_offset_from_url_query, get_value
from ...internals.database.database import get_cursor
from ...lib.artist import get_artist, get_artist_post_count, get_top_artists_by_faves, get_count_of_artists_faved, get_top_artists_by_recent_faves, get_count_of_artists_recently_faved, get_artist_search_results, get_recently_indexed_artists, get_artist_count, delete_artist, add_artist_to_dnp_list
from ...lib.post import get_artist_posts_for_listing, is_post_flagged, get_artist_post_search_results
from ...lib.favorites import is_artist_favorited
from ...lib.account import load_account, is_admin
from ...utils.flask_thread import FlaskThread
artists = Blueprint('artists', __name__)
@artists.route('/artists', methods=['GET', 'POST'])
def get_list():
query = request.args.get('query')
service = request.args.get('service')
offset = get_offset_from_url_query()
if query is None and service is None:
query = ''
service = ''
if query is not None:
query = query.strip()
(results, total_count) = get_artist_search_results(query, service, offset, 25)
g.data['display'] = 'search results'
g.data['results'] = results
g.data['max_pages'] = count_to_pages(total_count)
return make_template('artist_list_search.html', 200)
@artists.route('/artists/popular')
def get_popular():
offset = get_offset_from_url_query()
g.data['display'] = 'most popular artists'
g.data['results'] = get_top_artists_by_faves(offset, 25)
g.data['max_pages'] = count_to_pages(get_count_of_artists_faved())
return make_template('artist_list_search.html', 200)
@artists.route('/artists/trending')
def get_trending():
offset = get_offset_from_url_query()
g.data['display'] = 'trending artists'
g.data['results'] = get_top_artists_by_recent_faves(offset, 25)
g.data['max_pages'] = count_to_pages(get_count_of_artists_recently_faved())
return make_template('artist_list_search.html', 200)
@artists.route('/artists/recent')
def get_recent():
offset = get_offset_from_url_query()
g.data['display'] = 'recently added artists'
g.data['results'] = get_recently_indexed_artists(offset, 25)
g.data['max_pages'] = count_to_pages(get_artist_count())
return make_template('artist_list_search.html', 200)
@artists.route('/artists/<service>/<artist_id>')
def get(service, artist_id):
offset = get_offset_from_url_query()
query = request.args.get('query')
artist = get_artist(artist_id)
if artist is None:
return redirect(url_for('artists.get_list'))
is_favorited = False
account = load_account()
if account is not None:
is_favorited = is_artist_favorited(account['id'], artist_id)
g.data['is_admin'] = is_admin(account)
if query is not None:
query = query.strip()
(posts, total_count) = ([], 0)
if query is None:
(posts, total_count) = get_artist_post_page(artist_id, offset)
else:
(posts, total_count) = get_artist_post_search_results(query, artist_id, offset)
g.data['results'] = posts
g.data['artist'] = artist
g.data['max_pages'] = count_to_pages(total_count)
g.data['artist']['is_favorited'] = is_favorited
g.data['artist']['display_data'] = make_artist_display_data(artist)
return make_template('artist/artist.html', 200)
@artists.route('/artists/delete/<artist_id>', methods=['POST'])
def delete(artist_id):
account = load_account()
if account is None:
return '', 403
if not is_admin(account):
return '', 403
add_artist_to_dnp_list(artist_id)
FlaskThread(target=delete_artist, args=(artist_id,)).start()
flash(f'Starting deletion of artist {artist_id}. If the artist has a lot of posts, it may take a while to delete them.')
return redirect(url_for('artists.get_list'))
def get_artist_post_page(artist_id, offset):
posts = get_artist_posts_for_listing(artist_id, offset, 'published desc')
total_count = get_artist_post_count(artist_id)
return (posts, total_count)
def make_artist_display_data(artist):
data = {}
if artist['service'] == 'patreon':
data['service'] = 'Patreon'
data['href'] = f'https://www.patreon.com/user?u={artist["service_id"]}'
elif artist['service'] == 'fanbox':
data['service'] = 'Fanbox'
data['href'] = f'https://www.pixiv.net/fanbox/creator/{artist["service_id"]}'
elif artist['service'] == 'gumroad':
data['service'] = 'Gumroad'
data['href'] = f'https://gumroad.com/{artist["service_id"]}'
elif artist['service'] == 'subscribestar':
data['service'] = 'SubscribeStar'
data['href'] = f'https://subscribestar.adult/{artist["service_id"]}'
elif artist['service'] == 'fantia':
data['service'] = 'Fantia'
data['href'] = f'https://fantia.jp/fanclubs/{artist["service_id"]}'
return data
|
random_shuffle_queue_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.Queue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class RandomShuffleQueueTest(test.TestCase):
def setUp(self):
# Useful for debugging when a test times out.
super(RandomShuffleQueueTest, self).setUp()
tf_logging.error("Starting: %s", self._testMethodName)
def tearDown(self):
super(RandomShuffleQueueTest, self).tearDown()
tf_logging.error("Finished: %s", self._testMethodName)
def testEnqueue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
self.assertAllEqual(0, q.size().eval())
enqueue_op.run()
self.assertAllEqual(1, q.size().eval())
def testEnqueueWithShape(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=tensor_shape.TensorShape([3, 2]))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
self.assertAllEqual(1, q.size().eval())
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
def testEnqueueManyWithShape(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(
10, 5, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertAllEqual(4, q.size().eval())
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, shapes=tensor_shape.TensorShape([3]))
q2.enqueue(([1, 2, 3],))
q2.enqueue_many(([[1, 2, 3]],))
def testScalarShapes(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (1,)])
q.enqueue_many([[1, 2, 3, 4], [[5], [6], [7], [8]]]).run()
q.enqueue([9, [10]]).run()
dequeue_t = q.dequeue()
results = []
for _ in range(2):
a, b = sess.run(dequeue_t)
results.append((a, b))
a, b = sess.run(q.dequeue_many(3))
for i in range(3):
results.append((a[i], b[i]))
self.assertItemsEqual([(1, [5]), (2, [6]), (3, [7]), (4, [8]), (9, [10])],
results)
def testParallelEnqueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
vals = [dequeued_t.eval() for _ in xrange(len(elems))]
self.assertItemsEqual(elems, vals)
def testEnqueueAndBlockingDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(3, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO (mrry): Figure out how to do this without sleeping. id:4265
# https://github.com/imdone/tensorflow/issues/4263
time.sleep(0.1)
for enqueue_op in enqueue_ops:
sess.run(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(sess.run(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, results)
def testMultiEnqueueAndDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
results = []
for _ in xrange(len(elems)):
x, y = sess.run(dequeued_t)
results.append((x, y))
self.assertItemsEqual(elems, results)
def testQueueSizeEmpty(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
self.assertEqual(0, q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual([1], size.eval())
dequeued_t.op.run()
self.assertEqual([0], size.eval())
def testEnqueueMany(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + elems, results)
def testEmptyEnqueueMany(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual(0, size_t.eval())
enqueue_op.run()
self.assertEqual(0, size_t.eval())
def testEmptyDequeueMany(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueUpTo(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueManyWithNoShape(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((constant_op.constant(
[10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_many(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
dequeued_t.eval()
enqueue_op.run()
# RandomShuffleQueue does not make any attempt to support DequeueMany
# with unspecified shapes, even if a shape could be inferred from the
# elements enqueued.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
dequeued_t.eval()
def testEmptyDequeueUpToWithNoShape(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((constant_op.constant(
[10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_up_to(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
dequeued_t.eval()
enqueue_op.run()
# RandomShuffleQueue does not make any attempt to support DequeueUpTo
# with unspecified shapes, even if a shape could be inferred from the
# elements enqueued.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
dequeued_t.eval()
def testMultiEnqueueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
float_val, int_val = sess.run(dequeued_t)
results.append((float_val, [int_val[0], int_val[1]]))
expected = list(zip(float_elems, int_elems)) * 2
self.assertItemsEqual(expected, results)
def testDequeueMany(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(5)
enqueue_op.run()
results = dequeued_t.eval().tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testDequeueUpToNoBlocking(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(5)
enqueue_op.run()
results = dequeued_t.eval().tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testMultiDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
results = []
float_val, int_val = sess.run(dequeued_t)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_t)
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_single_t)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
results.append((float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_single_t)
results.append((float_val, int_val.tolist()))
self.assertItemsEqual(zip(float_elems, int_elems), results)
def testMultiDequeueUpToNoBlocking(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_up_to(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
results = []
float_val, int_val = sess.run(dequeued_t)
# dequeue_up_to has undefined shape.
self.assertEqual([None], dequeued_t[0].get_shape().as_list())
self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list())
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_t)
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_single_t)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
results.append((float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_single_t)
results.append((float_val, int_val.tolist()))
self.assertItemsEqual(zip(float_elems, int_elems), results)
def testHighDimension(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.int32, (
(4, 4, 4, 4)))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertItemsEqual(dequeued_t.eval().tolist(), elems.tolist())
def testParallelEnqueueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
sess.run(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpToRandomPartition(self):
with self.test_session() as sess:
dequeue_sizes = [random.randint(50, 150) for _ in xrange(10)]
total_elements = sum(dequeue_sizes)
q = data_flow_ops.RandomShuffleQueue(
total_elements, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in xrange(total_elements)]
enqueue_op = q.enqueue_many((elems,))
dequeue_ops = [q.dequeue_up_to(size) for size in dequeue_sizes]
enqueue_op.run()
# Dequeue random number of items in parallel on 10 threads.
dequeued_elems = []
def dequeue(dequeue_op):
dequeued_elems.extend(sess.run(dequeue_op))
threads = []
for dequeue_op in dequeue_ops:
threads.append(self.checkedThread(target=dequeue, args=(dequeue_op,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO (mrry): Figure out how to do this without sleeping. id:3843
# https://github.com/imdone/tensorflow/issues/3842
time.sleep(0.1)
sess.run(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO (mrry): Figure out how to do this without sleeping. id:3288
# https://github.com/imdone/tensorflow/issues/3287
time.sleep(0.1)
sess.run(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.test_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.RandomShuffleQueue(total_count, 0, dtypes_lib.int32, (
(),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueUpToWithTensorParameter(self):
with self.test_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.RandomShuffleQueue(total_count, 0, dtypes_lib.int32, (
(),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesUpTo
# that number of elements.
dequeued_t = q.dequeue_up_to(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 2, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
results = [dequeued_t.eval() for _ in elems]
expected = [[elem] for elem in elems]
self.assertItemsEqual(expected, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
dequeued_t.eval()
def testBlockingDequeueFromClosedQueue(self):
with self.test_session() as sess:
min_size = 2
q = data_flow_ops.RandomShuffleQueue(10, min_size, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
results = []
# Manually dequeue until we hit min_size.
results.append(sess.run(dequeued_t))
results.append(sess.run(dequeued_t))
def blocking_dequeue():
results.append(sess.run(dequeued_t))
results.append(sess.run(dequeued_t))
self.assertItemsEqual(elems, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=blocking_dequeue)
dequeue_thread.start()
time.sleep(0.1)
# The dequeue thread blocked when it hit the min_size requirement.
self.assertEqual(len(results), 2)
close_op.run()
dequeue_thread.join()
# Once the queue is closed, the min_size requirement is lifted.
self.assertEqual(len(results), 4)
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
close_op = q.close()
dequeued_t = q.dequeue()
finished = [] # Needs to be a mutable type
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
finished.append(True)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO (mrry): Figure out how to do this without sleeping. id:3329
# https://github.com/imdone/tensorflow/issues/3328
time.sleep(0.1)
self.assertEqual(len(finished), 0)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(finished), 1)
def testBlockingDequeueManyFromClosedQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
progress = [] # Must be mutable
def dequeue():
self.assertItemsEqual(elems, sess.run(dequeued_t))
progress.append(1)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
progress.append(2)
self.assertEqual(len(progress), 0)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO (mrry): Figure out how to do this without sleeping. id:3792
# https://github.com/imdone/tensorflow/issues/3791
for _ in range(100):
time.sleep(0.01)
if len(progress) == 1:
break
self.assertEqual(len(progress), 1)
time.sleep(0.01)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(progress), 2)
def testBlockingDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
results = []
def dequeue():
results.extend(sess.run(dequeued_t))
self.assertEquals(3, len(results))
results.extend(sess.run(dequeued_t))
self.assertEquals(4, len(results))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO (mrry): Figure out how to do this without sleeping. id:4266
# https://github.com/imdone/tensorflow/issues/4264
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertItemsEqual(results, elems)
def testBlockingDequeueUpToSmallerThanMinAfterDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
capacity=10,
min_after_dequeue=2,
dtypes=dtypes_lib.float32,
shapes=((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
results = []
def dequeue():
results.extend(sess.run(dequeued_t))
self.assertEquals(3, len(results))
# min_after_dequeue is 2, we ask for 3 elements, and we end up only
# getting the remaining 1.
results.extend(sess.run(dequeued_t))
self.assertEquals(4, len(results))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO (mrry): Figure out how to do this without sleeping. id:3845
# https://github.com/imdone/tensorflow/issues/3844
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertItemsEqual(results, elems)
def testBlockingDequeueManyFromClosedQueueWithElementsRemaining(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue_many(q.size())
enqueue_op.run()
results = []
def dequeue():
results.extend(sess.run(dequeued_t))
self.assertEqual(len(results), 3)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
        # Although the last dequeue failed, we want to ensure that it returns
        # any elements that it had potentially reserved to dequeue. Thus the
        # next cleanup should return a single element.
results.extend(sess.run(cleanup_dequeue_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO (mrry): Figure out how to do this without sleeping. id:3292
# https://github.com/imdone/tensorflow/issues/3291
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(results), 4)
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO (mrry): Figure out how to do this without sleeping. id:3331
# https://github.com/imdone/tensorflow/issues/3330
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO (mrry): Figure out how to do this without sleeping. id:3794
# https://github.com/imdone/tensorflow/issues/3793
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 4, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO (mrry): Figure out how to do this without sleeping. id:4267
# https://github.com/imdone/tensorflow/issues/4265
time.sleep(0.1)
results = []
for _ in elems:
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0], results)
# There wasn't room for 50.0 in the queue when the first element was
# dequeued.
self.assertNotEqual(50.0, results[0])
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO (mrry): Figure out how to do this without sleeping. id:3847
# https://github.com/imdone/tensorflow/issues/3846
time.sleep(0.1)
results = []
for _ in elems:
time.sleep(0.01)
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0, 60.0], results)
# There wasn't room for 50.0 or 60.0 in the queue when the first
# element was dequeued.
self.assertNotEqual(50.0, results[0])
self.assertNotEqual(60.0, results[0])
# Similarly for 60.0 and the second element.
self.assertNotEqual(60.0, results[1])
thread.join()
def testBlockingEnqueueToClosedQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed since it will complete
# before the queue is closed.
sess.run(blocking_enqueue_op)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
sess.run(blocking_enqueue_op)
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
# The close_op should run after the first blocking_enqueue_op has blocked.
# TODO (mrry): Figure out how to do this without sleeping. id:3294
# https://github.com/imdone/tensorflow/issues/3293
time.sleep(0.1)
def blocking_close():
sess.run(close_op)
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
# Wait for the close op to block before unblocking the enqueue.
# TODO (mrry): Figure out how to do this without sleeping. id:3333
# https://github.com/imdone/tensorflow/issues/3332
time.sleep(0.1)
results = []
# Dequeue to unblock the first blocking_enqueue_op, after which the
# close will complete.
results.append(dequeued_t.eval())
self.assertTrue(results[0] in elems)
thread2.join()
thread1.join()
def testBlockingEnqueueManyToClosedQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
size_t = q.size()
enqueue_op.run()
self.assertEqual(size_t.eval(), 3)
def blocking_enqueue():
# This will block until the dequeue after the close.
sess.run(blocking_enqueue_op)
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
      # The first blocking_enqueue_op of blocking_enqueue has enqueued 1 of 2
      # elements, and is blocked waiting for one more element to be dequeued.
for i in range(50):
queue_size = size_t.eval()
if queue_size == 4:
break
elif i == 49:
self.fail(
"Blocking enqueue op did not execute within the expected time.")
time.sleep(0.1)
def blocking_close():
sess.run(close_op)
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
# Unblock the first blocking_enqueue_op in blocking_enqueue.
q.dequeue().eval()
thread2.join()
thread1.join()
# At this point the close operation will complete, so the next enqueue
# will fail.
with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
sess.run(blocking_enqueue_op)
def testSharedQueueSameSession(self):
with self.test_session():
q1 = data_flow_ops.RandomShuffleQueue(
1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
# TensorFlow TestCase adds a default graph seed (=87654321). We check if
# the seed computed from the default graph seed is reproduced.
seed = 887634792
q2 = data_flow_ops.RandomShuffleQueue(
1,
0,
dtypes_lib.float32, ((),),
shared_name="shared_queue",
seed=seed)
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
self.assertEqual(q2.dequeue().eval(), 10.0)
self.assertEqual(q1_size_t.eval(), 0)
self.assertEqual(q2_size_t.eval(), 0)
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
self.assertEqual(q1.dequeue().eval(), 20.0)
self.assertEqual(q1_size_t.eval(), 0)
self.assertEqual(q2_size_t.eval(), 0)
def testSharedQueueSameSessionGraphSeedNone(self):
with self.test_session():
q1 = data_flow_ops.RandomShuffleQueue(
1,
0,
dtypes_lib.float32, ((),),
shared_name="shared_queue",
seed=98765432)
q1.enqueue((10.0,)).run()
# If both graph and op seeds are not provided, the default value must be
# used, and in case a shared queue is already created, the second queue op
# must accept any previous seed value.
random_seed.set_random_seed(None)
q2 = data_flow_ops.RandomShuffleQueue(
1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
def testIncompatibleSharedQueueErrors(self):
with self.test_session():
q_a_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_a")
q_a_2 = data_flow_ops.RandomShuffleQueue(
15, 5, dtypes_lib.float32, shared_name="q_a")
q_a_1.queue_ref.op.run()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.op.run()
q_b_1 = data_flow_ops.RandomShuffleQueue(
10, 0, dtypes_lib.float32, shared_name="q_b")
q_b_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_b")
q_b_1.queue_ref.op.run()
with self.assertRaisesOpError("min_after_dequeue"):
q_b_2.queue_ref.op.run()
q_c_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_c")
q_c_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, shared_name="q_c")
q_c_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_c_2.queue_ref.op.run()
q_d_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_d")
q_d_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.op.run()
q_e_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_e")
q_e_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.op.run()
q_f_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_f")
q_f_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_f")
q_f_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_f_2.queue_ref.op.run()
q_g_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_g")
q_g_2 = data_flow_ops.RandomShuffleQueue(
10, 5, (dtypes_lib.float32, dtypes_lib.int32), shared_name="q_g")
q_g_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_g_2.queue_ref.op.run()
q_h_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, seed=12, shared_name="q_h")
q_h_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, seed=21, shared_name="q_h")
q_h_1.queue_ref.op.run()
with self.assertRaisesOpError("random seeds"):
q_h_2.queue_ref.op.run()
def testSelectQueue(self):
with self.test_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(
data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.RandomShuffleQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.test_session():
q1 = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
q2 = data_flow_ops.RandomShuffleQueue(15, 0, dtypes_lib.float32)
enq_q = data_flow_ops.RandomShuffleQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_many_op)
def _blockingDequeueUpTo(self, sess, dequeue_up_to_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_up_to_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(enqueue_many_op)
def testResetOfBlockingOperation(self):
with self.test_session() as sess:
q_empty = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, (
(),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
dequeue_up_to_op = q_empty.dequeue_up_to(1)
q_full = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(
self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(
self._blockingDequeueMany, args=(sess, dequeue_many_op)),
self.checkedThread(
self._blockingDequeueUpTo, args=(sess, dequeue_up_to_op)),
self.checkedThread(
self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(
self._blockingEnqueueMany, args=(sess, enqueue_many_op))
]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testDequeueManyInDifferentOrders(self):
with self.test_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_many(5)
deq2 = q2.dequeue_many(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueUpToInDifferentOrders(self):
with self.test_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_up_to(5)
deq2 = q2.dequeue_up_to(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueInDifferentOrders(self):
with self.test_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue()
deq2 = q2.dequeue()
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
for _ in range(5):
results[0].append(deq1.eval())
results[1].append(deq2.eval())
q1.close().run()
q2.close().run()
for _ in range(5):
results[2].append(deq1.eval())
results[3].append(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testBigEnqueueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
sess.run(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertItemsEqual(elem, results)
def testBigDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(2, 0, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(sess.run(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO (mrry): Figure out how to do this without sleeping. id:3796
# https://github.com/imdone/tensorflow/issues/3795
time.sleep(0.1)
self.assertEqual(len(results), 0)
sess.run(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertItemsEqual(elem, results)
if __name__ == "__main__":
test.main()
|
test_vrf.py
|
import sys
import time
import threading
import Queue
import yaml
import json
import random
import re
import logging
from collections import OrderedDict
from natsort import natsorted
from netaddr import IPNetwork
from functools import partial
import pytest
from ptf_runner import ptf_runner
from common.utilities import wait_until
"""
During vrf testing, a basic vrf configuration needs to be set up before any tests run
and cleaned up after all tests finish. Each of these two tasks should be performed only once.
A module-scoped fixture `setup_vrf` is added to accomplish the setup/cleanup tasks.
We want to use the ansible_adhoc/testbed fixtures during the setup/cleanup stages, but
1. Injecting fixtures into xunit-style setup/teardown functions is not supported for
   now (https://github.com/pytest-dev/pytest/issues/5289).
2. Calling a fixture function directly is deprecated.
So we prefer a fixture over xunit-style setup/teardown functions.
"""
# global variables
g_vars = {}
# helper functions
def get_vlan_members(vlan_name, cfg_facts):
tmp_member_list = []
for m in cfg_facts['VLAN_MEMBER'].keys():
v, port = m.split('|')
if vlan_name == v:
tmp_member_list.append(port)
return natsorted(tmp_member_list)
def get_pc_members(portchannel_name, cfg_facts):
tmp_member_list = []
for m in cfg_facts['PORTCHANNEL_MEMBER'].keys():
pc, port = m.split('|')
if portchannel_name == pc:
tmp_member_list.append(port)
return natsorted(tmp_member_list)
def get_intf_ips(interface_name, cfg_facts):
prefix_to_intf_table_map = {
'Vlan': 'VLAN_INTERFACE',
'PortChannel': 'PORTCHANNEL_INTERFACE',
'Ethernet': 'INTERFACE',
'Loopback': 'LOOPBACK_INTERFACE'
}
intf_table_name = None
ip_facts = {
'ipv4': [],
'ipv6': []
}
for pfx, t_name in prefix_to_intf_table_map.iteritems():
if pfx in interface_name:
intf_table_name = t_name
break
if intf_table_name is None:
return ip_facts
for intf in cfg_facts[intf_table_name]:
if '|' in intf:
if_name, ip = intf.split('|')
if if_name == interface_name:
ip = IPNetwork(ip)
if ip.version == 4:
ip_facts['ipv4'].append(ip)
else:
ip_facts['ipv6'].append(ip)
return ip_facts
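# Example of the structure returned by get_intf_ips (values are illustrative only):
#   {'ipv4': [IPNetwork('192.168.0.1/21')], 'ipv6': [IPNetwork('fc00:192::1/117')]}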
def get_cfg_facts(duthost):
tmp_facts = json.loads(duthost.shell("sonic-cfggen -d --print-data")['stdout']) # return config db contents(running-config)
port_name_list_sorted = natsorted(tmp_facts['PORT'].keys())
port_index_map = {}
for idx, val in enumerate(port_name_list_sorted):
port_index_map[val] = idx
tmp_facts['config_port_indices'] = port_index_map
return tmp_facts
def get_host_facts(duthost):
return duthost.setup()['ansible_facts']
def get_vrf_intfs(cfg_facts):
intf_tables = ['INTERFACE', 'PORTCHANNEL_INTERFACE', 'VLAN_INTERFACE', 'LOOPBACK_INTERFACE']
vrf_intfs = {}
for table in intf_tables:
for intf, attrs in cfg_facts.get(table, {}).iteritems():
if '|' not in intf:
vrf = attrs['vrf_name']
if vrf not in vrf_intfs:
vrf_intfs[vrf] = {}
vrf_intfs[vrf][intf] = get_intf_ips(intf, cfg_facts)
return vrf_intfs
def get_vrf_ports(cfg_facts):
'''
    :return: (vrf_intf_member_port_indices, vrf_member_port_indices)
'''
vlan_member = cfg_facts['VLAN_MEMBER'].keys()
pc_member = cfg_facts['PORTCHANNEL_MEMBER'].keys()
member = vlan_member + pc_member
vrf_intf_member_port_indices = {}
vrf_member_port_indices = {}
vrf_intfs = get_vrf_intfs(cfg_facts)
for vrf, intfs in vrf_intfs.iteritems():
vrf_intf_member_port_indices[vrf] = {}
vrf_member_port_indices[vrf] = []
for intf in intfs:
vrf_intf_member_port_indices[vrf][intf] = natsorted(
[ cfg_facts['config_port_indices'][m.split('|')[1]] for m in filter(lambda m: intf in m, member) ]
)
vrf_member_port_indices[vrf].extend(vrf_intf_member_port_indices[vrf][intf])
vrf_member_port_indices[vrf] = natsorted(vrf_member_port_indices[vrf])
return vrf_intf_member_port_indices, vrf_member_port_indices
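# Example of the two returned structures (interface names and port indices are illustrative only):
#   vrf_intf_member_port_indices = {'Vrf1': {'Vlan1000': [0, 1], 'PortChannel0001': [28, 29]}}
#   vrf_member_port_indices      = {'Vrf1': [0, 1, 28, 29]}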
def ex_ptf_runner(ptf_runner, exc_queue, **kwargs):
'''
    With this simple wrapper function, we can use a Queue to store the
    exception info and check it later in the main thread.
Example:
refer to test 'test_vrf_swss_warm_reboot'
'''
try:
ptf_runner(**kwargs)
except Exception:
exc_queue.put(sys.exc_info())
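# Illustrative usage of ex_ptf_runner (a sketch only; the test name and parameters
# below are assumptions -- the real call site is test 'test_vrf_swss_warm_reboot'):
#
#   exc_que = Queue.Queue()
#   kwargs = {'ptf_runner': ptf_runner, 'exc_queue': exc_que,
#             'host': ptfhost, 'testdir': 'ptftests', 'testname': 'vrf_test.FibTest',
#             'params': {...}, 'log_file': '/tmp/vrf_fib.log'}
#   t = threading.Thread(target=ex_ptf_runner, kwargs=kwargs)
#   t.start()
#   # ... trigger the warm reboot while traffic is being verified ...
#   t.join()
#   if exc_que.qsize() != 0:
#       exc_type, exc_obj, exc_trace = exc_que.get()
#       raise exc_obj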
def finalize_warmboot(duthost, comp_list=None, retry=30, interval=5):
'''
    Check whether components have finished warm reboot (i.e. reached the 'reconciled' state).
    Returns the list of components that did not reconcile within retry * interval seconds.
'''
DEFAULT_COMPONENT_LIST = ['orchagent', 'neighsyncd']
EXP_STATE = 'reconciled'
comp_list = comp_list or DEFAULT_COMPONENT_LIST
# wait up to $retry * $interval secs
for _ in range(retry):
        for comp in list(comp_list):  # iterate over a copy, since reconciled components are removed below
state = duthost.shell('/usr/bin/redis-cli -n 6 hget "WARM_RESTART_TABLE|{}" state'.format(comp), module_ignore_errors=True)['stdout']
logging.info("{} : {}".format(comp, state))
if EXP_STATE == state:
comp_list.remove(comp)
if len(comp_list) == 0:
break
time.sleep(interval)
logging.info("Slept {} seconds!".format(interval))
return comp_list
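# Typical usage (sketch only): after a warm reboot has been issued, wait for
# reconciliation and fail if any component is still pending, e.g.
#   pending = finalize_warmboot(duthost)
#   assert len(pending) == 0, "Some components did not reconcile: {}".format(pending)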
def check_interface_status(duthost, up_ports):
intf_facts = duthost.interface_facts(up_ports=up_ports)['ansible_facts']
if len(intf_facts['ansible_interface_link_down_ports']) != 0:
logging.info("Some ports went down: {} ...".format(intf_facts['ansible_interface_link_down_ports']))
return False
return True
def check_bgp_peer_state(duthost, vrf, peer_ip, expected_state):
peer_info = json.loads(duthost.shell("vtysh -c 'show bgp vrf {} neighbors {} json'".format(vrf, peer_ip))['stdout'])
logging.debug("Vrf {} bgp peer {} infos: {}".format(vrf, peer_ip, peer_info))
try:
peer_state = peer_info[peer_ip].get('bgpState', 'Unknown')
    except Exception:
peer_state = 'Unknown'
if peer_state != expected_state:
logging.info("Vrf {} bgp peer {} is {}, exptected {}!".format(vrf, peer_ip, peer_state, expected_state))
return False
return True
def check_bgp_facts(duthost, cfg_facts):
result = {}
for neigh in cfg_facts['BGP_NEIGHBOR']:
if '|' not in neigh:
vrf = 'default'
peer_ip = neigh
else:
vrf, peer_ip = neigh.split('|')
result[(vrf, peer_ip)] = check_bgp_peer_state(duthost, vrf, peer_ip, expected_state='Established')
return all(result.values())
# FIXME later may move to "common.reboot"
#
# The reason to introduce a new 'reboot' here is the difference in the
# 'localhost' fixture between the two 'reboot' functions:
# 'common.reboot' requests *ansible_fixtures.localhost*,
# but this one requests *common.devices.Localhost*.
def reboot(duthost, localhost, timeout=120, basic_check=True):
duthost.shell("nohup reboot &")
dut_ip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).address
logging.info('waiting for dut to go down')
res = localhost.wait_for(host=dut_ip,
port=22,
state="stopped",
delay=10,
timeout=timeout,
module_ignore_errors=True)
if res.is_failed:
raise Exception('DUT did not shutdown in {}s'.format(timeout))
logging.info('waiting for dut to startup')
res = localhost.wait_for(host=dut_ip,
port=22,
state="started",
delay=10,
timeout=timeout,
module_ignore_errors=True)
if res.is_failed:
raise Exception('DUT did not startup in {}s'.format(timeout))
# Basic check after reboot
if basic_check:
assert wait_until(timeout, 10, duthost.critical_services_fully_started), \
"All critical services should fully started!{}".format(duthost.critical_services)
def setup_vrf_cfg(duthost, localhost, cfg_facts):
'''
setup vrf configuration on dut before test suite
'''
# FIXME
# For vrf testing, we should create a new vrf topology
    # which might be named 't0-vrf' and deployed with minigraph templates.
    #
    # But currently the vrf-related schema is not properly defined in minigraph,
    # so we generate and deploy a basic vrf configuration from a vrf jinja2 template;
    # later this should move to minigraph or a better mechanism (VRF and BGP CLI).
from copy import deepcopy
cfg_t0 = deepcopy(cfg_facts)
cfg_t0.pop('config_port_indices', None)
# get members from Vlan1000, and move half of them to Vlan2000 in vrf basic cfg
ports = get_vlan_members('Vlan1000', cfg_facts)
vlan_ports = {'Vlan1000': ports[:len(ports)/2],
'Vlan2000': ports[len(ports)/2:]}
extra_vars = {'cfg_t0': cfg_t0,
'vlan_ports': vlan_ports}
duthost.host.options['variable_manager'].extra_vars.update(extra_vars)
# backup config_db.json
duthost.shell("mv /etc/sonic/config_db.json /etc/sonic/config_db.json.bak")
duthost.template(src="vrf/vrf_config_db.j2", dest="/tmp/config_db_vrf.json")
duthost.shell("cp /tmp/config_db_vrf.json /etc/sonic/config_db.json")
# FIXME use a better way to load config
reboot(duthost, localhost)
def cleanup_vrf_cfg(duthost, localhost):
'''
teardown after test suite
'''
# recover config_db.json
duthost.shell("cp /etc/sonic/config_db.json.bak /etc/sonic/config_db.json")
duthost.shell("rm /etc/sonic/config_db.json.bak")
# FIXME use a better way to load config
reboot(duthost, localhost)
def setup_vlan_peer(duthost, ptfhost, cfg_facts):
'''
setup vlan peer ip addresses on peer port(ptf).
Example:
vid local-port peer-port peer-macvlan-dev peer-namespace peer-ip
Vlan1000 Ethernet1 eth1 e1mv1 ns1000 192.168.0.2/21
FC00:192::2/117
Vlan2000 Ethernet13 eth13 e13mv1 ns2000 192.168.0.2/21
FC00:192::2/117
'''
vlan_peer_ips = {}
vlan_peer_vrf2ns_map = {}
for vlan in cfg_facts['VLAN'].keys():
ns = 'ns' + vlan.strip('Vlan')
vrf = cfg_facts['VLAN_INTERFACE'][vlan]['vrf_name']
vlan_peer_vrf2ns_map[vrf] = ns
vlan_port = get_vlan_members(vlan, cfg_facts)[0]
vlan_peer_port = cfg_facts['config_port_indices'][vlan_port]
# deploy peer namespace on ptf
ptfhost.shell("ip netns add {}".format(ns))
# bind port to namespace
ptfhost.shell("ip link add e{}mv1 link eth{} type macvlan mode bridge".format(vlan_peer_port, vlan_peer_port))
ptfhost.shell("ip link set e{}mv1 netns {}".format(vlan_peer_port, ns))
ptfhost.shell("ip netns exec {} ip link set dev e{}mv1 up".format(ns, vlan_peer_port))
# setup peer ip on ptf
if (vrf, vlan_peer_port) not in vlan_peer_ips:
vlan_peer_ips[(vrf, vlan_peer_port)] = {'ipv4': [], 'ipv6': []}
vlan_ips = get_intf_ips(vlan, cfg_facts)
for ver, ips in vlan_ips.iteritems():
for ip in ips:
neigh_ip = IPNetwork("{}/{}".format(ip.ip+1, ip.prefixlen))
ptfhost.shell("ip netns exec {} ip address add {} dev e{}mv1".format(ns, neigh_ip, vlan_peer_port))
# ping to trigger neigh resolving
ping_cmd = 'ping' if neigh_ip.version ==4 else 'ping6'
duthost.shell("{} -I {} {} -c 1 -f -W1".format(ping_cmd, vrf, neigh_ip.ip), module_ignore_errors=True)
vlan_peer_ips[(vrf, vlan_peer_port)][ver].append(neigh_ip)
return vlan_peer_ips, vlan_peer_vrf2ns_map
def cleanup_vlan_peer(ptfhost, vlan_peer_vrf2ns_map):
for vrf, ns in vlan_peer_vrf2ns_map.iteritems():
ptfhost.shell("ip netns del {}".format(ns))
def gen_vrf_fib_file(vrf, testbed, ptfhost, dst_intfs, \
render_file, limited_podset_number=10, limited_tor_number=10):
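    # Render vrf/vrf_fib.j2 on the PTF host to produce a FIB info file for the given vrf: routes towards
    # dst_intfs, with podset/tor numbers capped to bound the number of generated entries.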
extra_vars = {
'testbed_type': testbed['topo']['name'],
'props': g_vars['props'],
'intf_member_indices': g_vars['vrf_intf_member_port_indices'][vrf],
'dst_intfs': dst_intfs,
'limited_podset_number': limited_podset_number,
'limited_tor_number': limited_tor_number
}
ptfhost.host.options['variable_manager'].extra_vars.update(extra_vars)
ptfhost.template(src="vrf/vrf_fib.j2", dest=render_file)
def gen_vrf_neigh_file(vrf, ptfhost, render_file):
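    # Render vrf/vrf_neigh.j2 on the PTF host from the vrf's interface ips and member port indices.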
extra_vars = {
'intf_member_indices': g_vars['vrf_intf_member_port_indices'][vrf],
'intf_ips': g_vars['vrf_intfs'][vrf]
}
ptfhost.host.options['variable_manager'].extra_vars.update(extra_vars)
ptfhost.template(src="vrf/vrf_neigh.j2", dest=render_file)
# fixtures
@pytest.fixture(scope="module")
def host_facts(duthost):
return get_host_facts(duthost)
@pytest.fixture(scope="module")
def cfg_facts(duthost):
return get_cfg_facts(duthost)
@pytest.fixture(scope="module", autouse=True)
def setup_vrf(testbed, duthost, ptfhost, localhost, host_facts):
# --------------------- setup -----------------------
## Setup ptf
ptfhost.script("scripts/change_mac.sh")
ptfhost.copy(src="ptftests", dest="/root")
## Setup dut
duthost.critical_services = ["swss", "syncd", "database", "teamd", "bgp"] # Don't care about 'pmon' and 'lldp' here
cfg_t0 = get_cfg_facts(duthost) # generate cfg_facts for t0 topo
setup_vrf_cfg(duthost, localhost, cfg_t0)
cfg_facts = get_cfg_facts(duthost) # generate cfg_facts for t0-vrf topo, should not use cfg_facts fixture here.
duthost.shell("sonic-clear arp")
duthost.shell("sonic-clear nd")
duthost.shell("sonic-clear fdb all")
## Setup global variables
global g_vars
with open("../ansible/vars/topo_{}.yml".format(testbed['topo']['name']), 'r') as fh:
g_vars['topo_properties'] = yaml.safe_load(fh)
g_vars['props'] = g_vars['topo_properties']['configuration_properties']['common']
g_vars['vlan_peer_ips'], g_vars['vlan_peer_vrf2ns_map'] = setup_vlan_peer(duthost, ptfhost, cfg_facts)
g_vars['vrf_intfs'] = get_vrf_intfs(cfg_facts)
g_vars['vrf_intf_member_port_indices'], g_vars['vrf_member_port_indices'] = get_vrf_ports(cfg_facts)
# --------------------- Testing -----------------------
yield
# --------------------- Teardown -----------------------
cleanup_vlan_peer(ptfhost, g_vars['vlan_peer_vrf2ns_map'])
cleanup_vrf_cfg(duthost, localhost)
@pytest.fixture
def partial_ptf_runner(request, ptfhost, testbed, host_facts):
def _partial_ptf_runner(testname, **kwargs):
params = {'testbed_type': testbed['topo']['name'],
'router_mac': host_facts['ansible_Ethernet0']['macaddress']}
params.update(kwargs)
ptf_runner(host=ptfhost,
testdir="ptftests",
platform_dir="ptftests",
testname=testname,
params=params,
log_file="/tmp/{}.{}.log".format(request.cls.__name__, request.function.__name__))
return _partial_ptf_runner
# tests
class TestVrfCreateAndBind():
def test_vrf_in_kernel(self, duthost, cfg_facts):
# verify vrf in kernel
res = duthost.shell("ip link show type vrf | grep Vrf")
for vrf in cfg_facts['VRF'].keys():
assert vrf in res['stdout'], "%s should be created in kernel!" % vrf
for vrf, intfs in g_vars['vrf_intfs'].iteritems():
for intf in intfs:
res = duthost.shell("ip link show %s" % intf)
assert vrf in res['stdout'], "The master dev of interface %s should be %s !" % (intf, vrf)
def test_vrf_in_appl_db(self, duthost, cfg_facts):
# verify vrf in app_db
for vrf in cfg_facts['VRF'].keys():
res = duthost.shell("redis-cli -n 0 keys VRF_TABLE:%s" % vrf)
assert vrf in res['stdout'], "%s should be added in APPL_DB!" % vrf
for vrf, intfs in g_vars['vrf_intfs'].iteritems():
for intf in intfs:
res = duthost.shell("redis-cli -n 0 hgetall \"INTF_TABLE:%s\"" % intf)
assert vrf in res['stdout'], "The vrf of interface %s should be %s !" % (intf, vrf)
def test_vrf_in_asic_db(self, duthost, cfg_facts):
# verify vrf in asic_db
vrf_count = len(cfg_facts['VRF'].keys()) + 1 # plus default virtual router
res = duthost.shell("redis-cli -n 1 keys *VIRTUAL_ROUTER*")
assert len(res['stdout_lines']) == vrf_count
class TestVrfNeigh():
def test_ping_lag_neigh(self, duthost, cfg_facts):
for neigh in cfg_facts['BGP_NEIGHBOR']:
if '|' not in neigh:
continue
vrf, neigh_ip = neigh.split('|')
if IPNetwork(neigh_ip).version == 4:
ping_cmd = 'ping'
else:
ping_cmd = 'ping6'
cmd = "{} {} -I {} -c 3 -f".format(ping_cmd, neigh_ip, vrf)
duthost.shell(cmd)
def test_ping_vlan_neigh(self, duthost):
for (vrf, _), neigh_ips in g_vars['vlan_peer_ips'].iteritems():
for ver, ips in neigh_ips.iteritems():
ping_cmd = 'ping' if ver == 'ipv4' else 'ping6'
for ip in ips:
duthost.shell("{} {} -c 3 -I {} -f".format(ping_cmd, ip.ip, vrf))
def test_vrf1_neigh_ip_fwd(self, ptfhost, partial_ptf_runner):
gen_vrf_neigh_file('Vrf1', ptfhost, render_file="/tmp/vrf1_neigh.txt")
partial_ptf_runner(
testname="vrf_test.FwdTest",
fwd_info="/tmp/vrf1_neigh.txt",
src_ports=g_vars['vrf_member_port_indices']['Vrf1']
)
def test_vrf2_neigh_ip_fwd(self, ptfhost, partial_ptf_runner):
gen_vrf_neigh_file('Vrf2', ptfhost, render_file="/tmp/vrf2_neigh.txt")
partial_ptf_runner(
testname="vrf_test.FwdTest",
fwd_info="/tmp/vrf2_neigh.txt",
src_ports=g_vars['vrf_member_port_indices']['Vrf2']
)
class TestVrfFib():
@pytest.fixture(scope="class", autouse=True)
def setup_fib_test(self, ptfhost, testbed):
gen_vrf_fib_file('Vrf1', testbed, ptfhost,
dst_intfs=['PortChannel0001', 'PortChannel0002'],
render_file='/tmp/vrf1_fib.txt')
gen_vrf_fib_file('Vrf2', testbed, ptfhost,
dst_intfs=['PortChannel0003', 'PortChannel0004'],
render_file='/tmp/vrf2_fib.txt')
def test_show_bgp_summary(self, duthost, cfg_facts):
props = g_vars['props']
route_count = props['podset_number'] * props['tor_number'] * props['tor_subnet_number']
for vrf in cfg_facts['VRF']:
bgp_summary_string = duthost.shell("show bgp vrf {} summary json".format(vrf))['stdout']
bgp_summary = json.loads(bgp_summary_string)
for info in bgp_summary.itervalues():
for peer, attr in info['peers'].iteritems():
prefix_count = attr['prefixReceivedCount']
                    assert int(prefix_count) == route_count, "%s should have received %s route prefixes!" % (peer, route_count)
def test_vrf1_fib(self, partial_ptf_runner):
partial_ptf_runner(
testname="vrf_test.FibTest",
fib_info="/tmp/vrf1_fib.txt",
src_ports=g_vars['vrf_member_port_indices']['Vrf1']
)
def test_vrf2_fib(self, partial_ptf_runner):
partial_ptf_runner(
testname="vrf_test.FibTest",
fib_info="/tmp/vrf2_fib.txt",
src_ports=g_vars['vrf_member_port_indices']['Vrf2']
)
class TestVrfIsolation():
@pytest.fixture(scope="class", autouse=True)
def setup_vrf_isolation(self, ptfhost, testbed):
gen_vrf_fib_file('Vrf1', testbed, ptfhost,
dst_intfs=['PortChannel0001', 'PortChannel0002'],
render_file='/tmp/vrf1_fib.txt')
gen_vrf_fib_file('Vrf2', testbed, ptfhost,
dst_intfs=['PortChannel0003', 'PortChannel0004'],
render_file='/tmp/vrf2_fib.txt')
gen_vrf_neigh_file('Vrf1', ptfhost, render_file="/tmp/vrf1_neigh.txt")
gen_vrf_neigh_file('Vrf2', ptfhost, render_file="/tmp/vrf2_neigh.txt")
def test_neigh_isolate_vrf1_from_vrf2(self, partial_ptf_runner):
# send packets from Vrf1
partial_ptf_runner(
testname="vrf_test.FwdTest",
fwd_info="/tmp/vrf2_neigh.txt",
pkt_action='drop',
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
def test_neigh_isolate_vrf2_from_vrf1(self, partial_ptf_runner):
# send packets from Vrf2
partial_ptf_runner(
testname="vrf_test.FwdTest",
fwd_info="/tmp/vrf1_neigh.txt",
pkt_action='drop',
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000']
)
def test_fib_isolate_vrf1_from_vrf2(self, partial_ptf_runner):
# send packets from Vrf1
partial_ptf_runner(
testname="vrf_test.FibTest",
fib_info="/tmp/vrf2_fib.txt",
pkt_action='drop',
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
def test_fib_isolate_vrf2_from_vrf1(self, partial_ptf_runner):
# send packets from Vrf2
partial_ptf_runner(
testname="vrf_test.FibTest",
fib_info="/tmp/vrf1_fib.txt",
pkt_action='drop',
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000']
)
class TestVrfAclRedirect():
c_vars = {}
@pytest.fixture(scope="class", autouse=True)
def setup_acl_redirect(self, duthost, cfg_facts):
# -------- Setup ----------
# make sure neighs from Vlan2000 are resolved
vlan_peer_port = g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000'][0]
vlan_neigh_ip = g_vars['vlan_peer_ips'][('Vrf2', vlan_peer_port)]['ipv4'][0]
duthost.shell("ping {} -I {} -c 3 -f".format(vlan_neigh_ip.ip, 'Vrf2'))
vrf_intf_ports = g_vars['vrf_intf_member_port_indices']
src_ports = [vrf_intf_ports['Vrf1']['Vlan1000'][0]]
dst_ports = [vrf_intf_ports['Vrf1']['PortChannel0001']]
pc1_intf_ips = get_intf_ips('PortChannel0001', cfg_facts)
pc1_v4_neigh_ips = [ str(ip.ip+1) for ip in pc1_intf_ips['ipv4'] ]
pc1_v6_neigh_ips = [ str(ip.ip+1) for ip in pc1_intf_ips['ipv6'] ]
pc2_if_name = 'PortChannel0002'
pc2_if_ips = get_intf_ips(pc2_if_name, cfg_facts)
pc2_v4_neigh_ips = [ (pc2_if_name, str(ip.ip+1)) for ip in pc2_if_ips['ipv4'] ]
pc2_v6_neigh_ips = [ (pc2_if_name, str(ip.ip+1)) for ip in pc2_if_ips['ipv6'] ]
pc4_if_name = 'PortChannel0004'
pc4_if_ips = get_intf_ips(pc4_if_name, cfg_facts)
pc4_v4_neigh_ips = [ (pc4_if_name, str(ip.ip+1)) for ip in pc4_if_ips['ipv4'] ]
pc4_v6_neigh_ips = [ (pc4_if_name, str(ip.ip+1)) for ip in pc4_if_ips['ipv6'] ]
redirect_dst_ips = pc2_v4_neigh_ips + pc4_v4_neigh_ips
redirect_dst_ipv6s = pc2_v6_neigh_ips + pc4_v6_neigh_ips
redirect_dst_ports = []
redirect_dst_ports.append(vrf_intf_ports['Vrf1'][pc2_if_name])
redirect_dst_ports.append(vrf_intf_ports['Vrf2'][pc4_if_name])
self.c_vars['src_ports'] = src_ports
self.c_vars['dst_ports'] = dst_ports
self.c_vars['redirect_dst_ports'] = redirect_dst_ports
self.c_vars['pc1_v4_neigh_ips'] = pc1_v4_neigh_ips
self.c_vars['pc1_v6_neigh_ips'] = pc1_v6_neigh_ips
# load acl redirect configuration
extra_vars = {
'src_port': get_vlan_members('Vlan1000', cfg_facts)[0],
'redirect_dst_ips': redirect_dst_ips,
'redirect_dst_ipv6s': redirect_dst_ipv6s
}
duthost.host.options['variable_manager'].extra_vars.update(extra_vars)
duthost.template(src="vrf/vrf_acl_redirect.j2", dest="/tmp/vrf_acl_redirect.json")
duthost.shell("config load -y /tmp/vrf_acl_redirect.json")
# -------- Testing ----------
yield
# -------- Teardown ----------
duthost.shell("redis-cli -n 4 del 'ACL_RULE|VRF_ACL_REDIRECT_V4|rule1'")
duthost.shell("redis-cli -n 4 del 'ACL_RULE|VRF_ACL_REDIRECT_V6|rule1'")
duthost.shell("redis-cli -n 4 del 'ACL_TABLE|VRF_ACL_REDIRECT_V4'")
duthost.shell("redis-cli -n 4 del 'ACL_TABLE|VRF_ACL_REDIRECT_V6'")
def test_origin_ports_recv_no_pkts_v4(self, partial_ptf_runner):
# verify origin dst ports should not receive packets any more
partial_ptf_runner(
testname="vrf_test.FwdTest",
pkt_action='drop',
src_ports=self.c_vars['src_ports'],
dst_ports=self.c_vars['dst_ports'],
dst_ips=self.c_vars['pc1_v4_neigh_ips']
)
def test_origin_ports_recv_no_pkts_v6(self, partial_ptf_runner):
# verify origin dst ports should not receive packets any more
partial_ptf_runner(
testname="vrf_test.FwdTest",
pkt_action='drop',
src_ports=self.c_vars['src_ports'],
dst_ports=self.c_vars['dst_ports'],
dst_ips=self.c_vars['pc1_v6_neigh_ips']
)
def test_redirect_to_new_ports_v4(self, partial_ptf_runner):
        # verify redirect ports should receive packets
partial_ptf_runner(
testname="vrf_test.FwdTest",
src_ports=self.c_vars['src_ports'],
dst_ports=self.c_vars['redirect_dst_ports'],
test_balancing=True,
balancing_test_times=1000,
balancing_test_ratio=1.0, # test redirect balancing
dst_ips=self.c_vars['pc1_v4_neigh_ips']
)
def test_redirect_to_new_ports_v6(self, partial_ptf_runner):
        # verify redirect ports should receive packets
partial_ptf_runner(
testname="vrf_test.FwdTest",
src_ports=self.c_vars['src_ports'],
dst_ports=self.c_vars['redirect_dst_ports'],
test_balancing=True,
balancing_test_times=1000,
balancing_test_ratio=1.0, # test redirect balancing
dst_ips=self.c_vars['pc1_v6_neigh_ips']
)
class TestVrfLoopbackIntf():
c_vars = {}
announce_prefix = '10.10.10.0/26'
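    # Prefix announced from the exabgp speaker on the PTF; the bgp-with-loopback test expects each vrf to receive it.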
@pytest.fixture(scope="class", autouse=True)
def setup_vrf_loopback(self, ptfhost, cfg_facts, testbed):
# -------- Setup ----------
lb0_ip_facts = get_intf_ips('Loopback0', cfg_facts)
vlan1000_ip_facts = get_intf_ips('Vlan1000', cfg_facts)
lb2_ip_facts = get_intf_ips('Loopback2', cfg_facts)
vlan2000_ip_facts = get_intf_ips('Vlan2000', cfg_facts)
self.c_vars['lb0_ip_facts'] = lb0_ip_facts
self.c_vars['lb2_ip_facts'] = lb2_ip_facts
self.c_vars['vlan1000_ip_facts'] = vlan1000_ip_facts
self.c_vars['vlan2000_ip_facts'] = vlan2000_ip_facts
# deploy routes to loopback
for ver, ips in lb0_ip_facts.iteritems():
for vlan_ip in vlan1000_ip_facts[ver]:
nexthop = vlan_ip.ip
break
for ip in ips:
ptfhost.shell("ip netns exec {} ip route add {} nexthop via {} ".format(g_vars['vlan_peer_vrf2ns_map']['Vrf1'], ip, nexthop))
for ver, ips in lb2_ip_facts.iteritems():
for vlan_ip in vlan2000_ip_facts[ver]:
nexthop = vlan_ip.ip
break
for ip in ips:
ptfhost.shell("ip netns exec {} ip route add {} nexthop via {} ".format(g_vars['vlan_peer_vrf2ns_map']['Vrf2'], ip, nexthop))
# -------- Testing ----------
yield
# -------- Teardown ----------
        # routes on ptf could be flushed when the vrfs are removed
pass
def test_ping_vrf1_loopback(self, ptfhost, duthost):
for ver, ips in self.c_vars['lb0_ip_facts'].iteritems():
for ip in ips:
if ip.version == 4:
                    # FIXME Within a vrf, ping(4) currently does not support using
                    # an ip of a loopback interface as the source (it complains 'Cannot assign
                    # requested address'). An alternative is to ping the loopback address
                    # from the ptf
ptfhost.shell("ip netns exec {} ping {} -c 3 -f -W2".format(g_vars['vlan_peer_vrf2ns_map']['Vrf1'], ip.ip))
else:
neigh_ip6 = self.c_vars['vlan1000_ip_facts']['ipv6'][0].ip + 1
duthost.shell("ping6 {} -I Vrf1 -I {} -c 3 -f -W2".format(neigh_ip6, ip.ip))
def test_ping_vrf2_loopback(self, ptfhost, duthost):
for ver, ips in self.c_vars['lb2_ip_facts'].iteritems():
for ip in ips:
if ip.version == 4:
                    # FIXME Within a vrf, ping(4) currently does not support using
                    # an ip of a loopback interface as the source (it complains 'Cannot assign
                    # requested address'). An alternative is to ping the loopback address
                    # from the ptf
ptfhost.shell("ip netns exec {} ping {} -c 3 -f -W2".format(g_vars['vlan_peer_vrf2ns_map']['Vrf2'], ip.ip))
else:
neigh_ip6 = self.c_vars['vlan2000_ip_facts']['ipv6'][0].ip + 1
duthost.shell("ping6 {} -I Vrf2 -I {} -c 3 -f -W2".format(neigh_ip6, ip.ip))
@pytest.fixture
def setup_bgp_with_loopback(self, duthost, ptfhost, cfg_facts):
# ----------- Setup ----------------
# FIXME Create a dummy bgp session.
        # Workaround to overcome the bgp socket issue:
        # when there are only vrf bgp sessions and
        # net.ipv4.tcp_l3mdev_accept=1, bgpd(7.0) does
        # not create a bgp socket for the sessions.
duthost.shell("vtysh -c 'config terminal' -c 'router bgp 65444'")
# vrf1 args, vrf2 use the same as vrf1
peer_range = IPNetwork(cfg_facts['BGP_PEER_RANGE']['BGPSLBPassive']['ip_range'][0])
ptf_speaker_ip = IPNetwork("{}/{}".format(peer_range[1], peer_range.prefixlen))
vlan_port = get_vlan_members('Vlan1000', cfg_facts)[0]
vlan_peer_port = cfg_facts['config_port_indices'][vlan_port]
ptf_direct_ip = g_vars['vlan_peer_ips'][('Vrf1', vlan_peer_port)]['ipv4'][0]
# add route to ptf_speaker_ip
for (vrf, vlan_peer_port), ips in g_vars['vlan_peer_ips'].iteritems():
nh = ips['ipv4'][0].ip
duthost.shell("vtysh -c 'configure terminal' -c 'ip route {} {} vrf {}'".format(peer_range, nh , vrf))
duthost.shell("ping {} -I {} -c 3 -f -W2".format(nh, vrf))
# add speaker ips to ptf macvlan ports
for vrf, vlan_peer_port in g_vars['vlan_peer_ips']:
ns = g_vars['vlan_peer_vrf2ns_map'][vrf]
ptfhost.shell("ip netns exec {} ip address add {} dev e{}mv1".format(ns, ptf_speaker_ip, vlan_peer_port))
res = duthost.shell("sonic-cfggen -m -d -y /etc/sonic/constants.yml -v \"constants.deployment_id_asn_map[DEVICE_METADATA['localhost']['deployment_id']]\"")
bgp_speaker_asn = res['stdout']
exabgp_dir = "/root/exabgp"
ptfhost.file(path=exabgp_dir, state="directory")
extra_vars = {
'exabgp_dir': exabgp_dir,
'announce_prefix': self.announce_prefix,
'peer_asn' : cfg_facts['DEVICE_METADATA']['localhost']['bgp_asn'],
'my_asn' : bgp_speaker_asn,
'speaker_ip': ptf_speaker_ip.ip,
'direct_ip' : ptf_direct_ip.ip,
'namespace' : g_vars['vlan_peer_vrf2ns_map'].values(),
'lo_addr' : get_intf_ips('Loopback0', cfg_facts)['ipv4'][0].ip
}
ptfhost.host.options['variable_manager'].extra_vars.update(extra_vars)
ptfhost.template(src="vrf/bgp_speaker/config.j2", dest="%s/%s" % (exabgp_dir, 'config.ini'))
# deploy start script
ptfhost.template(src="vrf/bgp_speaker/start.j2", dest="%s/%s" % (exabgp_dir, 'start.sh'), mode="u+rwx")
# kill exabgp if any
ptfhost.shell("pkill exabgp || true")
# start exabgp instance
ptfhost.shell("bash %s/start.sh" % exabgp_dir)
# ensure exabgp started
ptfhost.shell("pgrep exabgp")
# make sure routes announced to bgp neighbors
time.sleep(10)
# -------- Testing ----------
yield
# -------- Teardown ---------
# del route to ptf_speaker_ip on dut
for (vrf, vlan_peer_port), ips in g_vars['vlan_peer_ips'].iteritems():
duthost.shell("vtysh -c 'configure terminal' -c 'no ip route {} {} vrf {}'".format(peer_range, ips['ipv4'][0], vrf))
# kill exabgp
ptfhost.shell("pkill exabgp || true")
# del speaker ips from ptf ports
for vrf, vlan_peer_port in g_vars['vlan_peer_ips']:
ns = g_vars['vlan_peer_vrf2ns_map'][vrf]
ptfhost.shell("ip netns exec {} ip address del {} dev e{}mv1".format(ns, ptf_speaker_ip, vlan_peer_port))
        # FIXME workaround to overcome the bgp socket issue
#duthost.shell("vtysh -c 'config terminal' -c 'no router bgp 65444'")
@pytest.mark.usefixtures('setup_bgp_with_loopback')
def test_bgp_with_loopback(self, duthost, cfg_facts):
peer_range = IPNetwork(cfg_facts['BGP_PEER_RANGE']['BGPSLBPassive']['ip_range'][0])
ptf_speaker_ip = IPNetwork("{}/{}".format(peer_range[1], peer_range.prefixlen))
for vrf in cfg_facts['VRF']:
bgp_info = json.loads(duthost.shell("vtysh -c 'show bgp vrf {} summary json'".format(vrf))['stdout'])
route_info = duthost.shell("vtysh -c 'show bgp vrf {} ipv4 {}'".format(vrf, self.announce_prefix))
# Verify bgp sessions are established
assert bgp_info['ipv4Unicast']['peers'][str(ptf_speaker_ip.ip)]['state'] == 'Established', \
"Bgp peer {} should be Established!".format(ptf_speaker_ip.ip)
# Verify accepted prefixes of the dynamic neighbors are correct
assert bgp_info['ipv4Unicast']['peers'][str(ptf_speaker_ip.ip)]['prefixReceivedCount'] == 1
class TestVrfWarmReboot():
@pytest.fixture(scope="class", autouse=True)
def setup_vrf_warm_reboot(self, ptfhost, testbed):
# -------- Setup ----------
gen_vrf_fib_file('Vrf1', testbed, ptfhost,
dst_intfs=['PortChannel0001', 'PortChannel0002'],
render_file='/tmp/vrf1_fib.txt',
limited_podset_number=50,
limited_tor_number=16)
# -------- Testing ----------
yield
# -------- Teardown ----------
        # FIXME Might need a cold reboot if the test failed?
pass
def test_vrf_swss_warm_reboot(self, duthost, cfg_facts, partial_ptf_runner):
# enable swss warm-reboot
duthost.shell("config warm_restart enable swss")
exc_que = Queue.Queue()
params = {
'ptf_runner': partial_ptf_runner,
            'exc_queue': exc_que,  # used to store exception info
'testname': 'vrf_test.FibTest',
'fib_info': "/tmp/vrf1_fib.txt",
'src_ports': g_vars['vrf_member_port_indices']['Vrf1']
}
traffic_in_bg = threading.Thread(target=ex_ptf_runner, kwargs=params)
# send background traffic
traffic_in_bg.start()
logging.info("Start transmiting packets...")
# start swss warm-reboot
duthost.shell("service swss restart")
logging.info("Warm reboot swss...")
# wait until background traffic finished
traffic_in_bg.join()
logging.info("Transmit done.")
passed = True
if exc_que.qsize() != 0:
passed = False
exc_type, exc_obj, exc_trace = exc_que.get()
assert passed == True, "Traffic Test Failed \n {}".format(str(exc_obj))
# wait until components finish reconcile
tbd_comp_list = finalize_warmboot(duthost)
assert len(tbd_comp_list) == 0, \
"Some components didn't finish reconcile: {} ...".format(tbd_comp_list)
# basic check after warm reboot
assert wait_until(300, 20, duthost.critical_services_fully_started), \
"All critical services should fully started!{}".format(duthost.critical_services)
up_ports = [p for p, v in cfg_facts['PORT'].items() if v.get('admin_status', None) == 'up' ]
assert wait_until(300, 20, check_interface_status, duthost, up_ports), \
"All interfaces should be up!"
def test_vrf_system_warm_reboot(self, duthost, cfg_facts, partial_ptf_runner):
exc_que = Queue.Queue()
params = {
'ptf_runner': partial_ptf_runner,
            'exc_queue': exc_que,  # used to store exception info
'testname': 'vrf_test.FibTest',
'fib_info': "/tmp/vrf1_fib.txt",
'src_ports': g_vars['vrf_member_port_indices']['Vrf1']
}
traffic_in_bg = threading.Thread(target=ex_ptf_runner, kwargs=params)
# send background traffic
traffic_in_bg.start()
logging.info("Start transmiting packets...")
# start system warm-reboot
duthost.shell("nohup warm-reboot >/dev/null 2>&1 &")
logging.info("Warm reboot ...")
# wait until background traffic finished
traffic_in_bg.join()
logging.info("Transmit done.")
passed = True
if exc_que.qsize() != 0:
passed = False
exc_type, exc_obj, exc_trace = exc_que.get()
assert passed == True, "Test Failed: \n Exception infos => {}".format(str(exc_obj))
# wait until components finish reconcile
comp_list = ['orchagent', 'neighsyncd', 'bgp']
tbd_comp_list = finalize_warmboot(duthost, comp_list=comp_list)
assert len(tbd_comp_list) == 0, "Some components didn't finish reconcile: {} ...".format(tbd_comp_list)
# basic check after warm reboot
assert wait_until(300, 20, duthost.critical_services_fully_started), "Not all critical services are fully started"
up_ports = [p for p, v in cfg_facts['PORT'].items() if v.get('admin_status', None) == 'up' ]
assert wait_until(300, 20, check_interface_status, duthost, up_ports), "Not all interfaces are up"
class TestVrfCapacity():
VRF_CAPACITY = 1000
# limit the number of vrfs to be covered to limit script execution time
TEST_COUNT = 100
src_base_vid = 2000
dst_base_vid = 3000
ipnet1 = IPNetwork("192.1.1.0/31")
ipnet2 = IPNetwork("192.2.1.0/31")
vrf_name_tpl = "Vrf_cap_{}"
sub_if_name_tpl = "e{}.v{}" # should not include 'eth'
route_prefix = "200.200.200.0/24"
cleanup_method = 'reboot' # reboot or remove
@pytest.fixture(scope="class")
def vrf_count(self, request):
vrf_capacity = request.config.option.vrf_capacity or self.VRF_CAPACITY # get cmd line option value, use default if none
return vrf_capacity - 3 # minus global(default) VRF and Vrf1/Vrf2
@pytest.fixture(scope="class")
def random_vrf_list(self, vrf_count, request):
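        # Randomly sample up to vrf_test_count (default TEST_COUNT) vrf indices so only a subset of the
        # configured vrfs is exercised, keeping script execution time bounded.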
test_count = request.config.option.vrf_test_count or self.TEST_COUNT # get cmd line option value, use default if none
return sorted(random.sample(xrange(1, vrf_count+1), min(test_count, vrf_count)))
@pytest.fixture(scope="class", autouse=True)
def setup_vrf_capacity(self, duthost, ptfhost, localhost, cfg_facts, vrf_count, random_vrf_list, request):
"""
        Setup $VRF_CAPACITY (minus the global VRF and Vrf1/Vrf2) vrfs,
        2 vlan interfaces per vrf,
        1 ip address per vlan interface,
        1 static route per vrf, pointing $route_prefix (200.200.200.0/24) at vlan_2's neighbor ip,
        and use the 2nd member port of Vlan1000/2000 as the trunk port.
Example:
VRF RIFs Vlan_Member_Port IP Neighbor_IP(on PTF) Static_Route
Vrf_Cap_1 Vlan2001 Ethernet2 192.1.1.0/31 192.1.1.1/31 ip route 200.200.200.0/24 192.2.1.1 vrf Vrf_Cap_1
Vlan3001 Ethernet14 192.2.1.0/31 192.2.1.1/31
Vrf_Cap_2 Vlan2002 Ethernet2 192.1.1.2/31 192.1.1.3/31 ip route 200.200.200.0/24 192.2.1.3 vrf Vrf_Cap_2
Vlan3002 Ethernet14 192.2.1.2/31 192.2.1.3/31
...
"""
# -------- Setup ----------
duthost.shell("logger -p INFO -- '-------- {} start!!! ---------'".format(request.cls.__name__))
# increase ipv4 neigh threshold to 2k
duthost.shell("sysctl -w net.ipv4.neigh.default.gc_thresh3=2048")
        # use the 2nd member port of Vlan1000/Vlan2000 as the trunk port
dut_port1 = get_vlan_members('Vlan1000', cfg_facts)[1]
dut_port2 = get_vlan_members('Vlan2000', cfg_facts)[1]
ptf_port1 = g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000'][1]
ptf_port2 = g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000'][1]
# base ip range to be assigned to vlan rif
ip1 = self.ipnet1
ip2 = self.ipnet2
# setup $vrf_count vrfs on dut
dut_extra_vars = {
'vrf_count': vrf_count,
'src_base_vid': self.src_base_vid,
'dst_base_vid': self.dst_base_vid,
'vrf_name_tpl': self.vrf_name_tpl,
'ip1': ip1,
'ip2': ip2,
'dut_port1': dut_port1,
'dut_port2': dut_port2,
'route_prefix': self.route_prefix,
'op_code': 'add'
}
duthost.host.options['variable_manager'].extra_vars.update(dut_extra_vars)
cfg_attrs_map = OrderedDict()
        # In the worst case (1k vrfs, 2k rifs), removing a vlan could take 60~80ms
        # ("VlanMgr::removeHostVlan ip link del Vlan{{vlan_id}} && bridge vlan del vid {{vlan_id}} dev Bridge self" takes most of the time)
        # So wait up to 5(s) + 80(ms) * 2(vlans per vrf) * vrf_count when removing vlans
cfg_attrs_map['vlan'] = {'add_sleep_time': 2, 'remove_sleep_time': 5 + 0.08 * 2 * vrf_count}
        # In the worst case (1k vrfs, 2k rifs), removing a vlan member from a vlan could take 160~220ms
        # ("vlanmgrd::removeHostVlanMember /sbin/bridge vlan show dev <devname>" takes most of the time)
        # So wait up to 5(s) + 220(ms) * 2(2 vlan members per vrf) * vrf_count
cfg_attrs_map['vlan_member'] = {'add_sleep_time': 2, 'remove_sleep_time': 5 + 0.2 * 2 * vrf_count}
        # In the worst case (1k vrfs, 2k rifs), removing a vrf could take 6~10ms
        # So wait up to 5(s) + 10(ms) * vrf_count when removing vrfs
cfg_attrs_map['vrf'] = {'add_sleep_time': 2, 'remove_sleep_time': 5 + 0.01 * vrf_count}
        # In the worst case (1k vrfs, 2k rifs), removing a rif could take 30~40ms
        # ("IntfMgr::getIntfIpCount ip address show <alias> master <vrfName>" takes most of the time)
        # So wait up to 5(s) + 40(ms) * 2(rifs per vrf) * vrf_count when removing rifs
cfg_attrs_map['vrf_intf'] = {'add_sleep_time': 2, 'remove_sleep_time': 5 + 0.04 * 2 * vrf_count}
cfg_attrs_map['vlan_intf'] = {'add_sleep_time': 2, 'remove_sleep_time': 5}
for cfg_name, attrs in cfg_attrs_map.iteritems():
src_template = 'vrf/vrf_capacity_{}_cfg.j2'.format(cfg_name)
render_file = '/tmp/vrf_capacity_{}_cfg.json'.format(cfg_name)
duthost.template(src=src_template, dest=render_file)
duthost.shell("sonic-cfggen -j {} --write-to-db".format(render_file))
time.sleep(attrs['add_sleep_time'])
# setup static routes
duthost.template(src='vrf/vrf_capacity_route_cfg.j2', dest='/tmp/vrf_capacity_route_cfg.sh', mode="0755")
duthost.shell("/tmp/vrf_capacity_route_cfg.sh")
# setup peer ip addresses on ptf
ptf_extra_vars = {
'vrf_count': vrf_count,
'src_base_vid': self.src_base_vid,
'dst_base_vid': self.dst_base_vid,
'sub_if_name_tpl': self.sub_if_name_tpl,
'ip1': ip1,
'ip2': ip2,
'ptf_port1': ptf_port1,
'ptf_port2': ptf_port2,
'random_vrf_list': random_vrf_list
}
ptfhost.host.options['variable_manager'].extra_vars.update(ptf_extra_vars)
ptfhost.template(src='vrf/vrf_capacity_ptf_cfg.j2', dest='/tmp/vrf_capacity_ptf_cfg.sh', mode="0755")
ptfhost.shell('/tmp/vrf_capacity_ptf_cfg.sh')
        # ping to trigger neigh resolving and also activate the static routes
dut_extra_vars.update({
'random_vrf_list': random_vrf_list,
'count': 1,
'timeout': 1
})
duthost.host.options['variable_manager'].extra_vars.update(dut_extra_vars)
duthost.template(src='vrf/vrf_capacity_ping.j2', dest='/tmp/vrf_capacity_neigh_learning.sh', mode="0755")
duthost.shell('/tmp/vrf_capacity_neigh_learning.sh', module_ignore_errors=True)
# wait for route/neigh entries apply to asic
time.sleep(5)
# -------- Testing ----------
yield
# -------- Teardown ----------
# remove cfg on ptf
ptfhost.shell("ip address flush dev eth{}".format(ptf_port1))
ptfhost.shell("ip address flush dev eth{}".format(ptf_port2))
ptfhost.template(src='vrf/vrf_capacity_del_ptf_cfg.j2', dest='/tmp/vrf_capacity_del_ptf_cfg.sh', mode="0755")
ptfhost.shell('/tmp/vrf_capacity_del_ptf_cfg.sh')
duthost.shell("config interface startup {}".format(dut_port1))
duthost.shell("config interface startup {}".format(dut_port2))
# remove cfg on dut
if self.cleanup_method == 'reboot':
reboot(duthost, localhost)
else:
duthost.shell("config interface shutdown {}".format(dut_port1))
duthost.shell("config interface shutdown {}".format(dut_port2))
# flush macs, arps and neighbors
duthost.shell("sonic-clear arp")
duthost.shell("sonic-clear fdb all")
# remove static routes
dut_extra_vars['op_code'] = 'del'
duthost.host.options['variable_manager'].extra_vars.update(dut_extra_vars)
duthost.template(src='vrf/vrf_capacity_route_cfg.j2', dest='/tmp/vrf_capacity_route_cfg.sh', mode="0755")
duthost.shell('/tmp/vrf_capacity_route_cfg.sh')
# remove ip addr, intf, vrf, vlan member, vlan cfgs
for cfg_name, attrs in reversed(cfg_attrs_map.items()):
src_template = 'vrf/vrf_capacity_{}_cfg.j2'.format(cfg_name)
render_file = '/tmp/vrf_capacity_del_{}_cfg.json'.format(cfg_name)
duthost.template(src=src_template, dest=render_file)
duthost.shell("sonic-cfggen -j {} --write-to-db".format(render_file))
time.sleep(attrs['remove_sleep_time'])
duthost.shell("logger -p INFO -- '-------- {} end!!! ---------'".format(request.cls.__name__))
def test_ping(self, duthost, random_vrf_list):
dut_extra_vars = {
'vrf_name_tpl': self.vrf_name_tpl,
'random_vrf_list': random_vrf_list,
'ip1': self.ipnet1,
'ip2': self.ipnet2
}
duthost.host.options['variable_manager'].extra_vars.update(dut_extra_vars)
duthost.template(src='vrf/vrf_capacity_ping.j2', dest='/tmp/vrf_capacity_ping.sh', mode="0755")
duthost.shell('/tmp/vrf_capacity_ping.sh')
def test_ip_fwd(self, partial_ptf_runner, random_vrf_list):
ptf_port1 = g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000'][1]
ptf_port2 = g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000'][1]
dst_ips = [str(IPNetwork(self.route_prefix)[1])]
partial_ptf_runner(
testname="vrf_test.CapTest",
src_ports=[ptf_port1],
dst_ports=[[ptf_port2]],
dst_ips=dst_ips,
random_vrf_list=random_vrf_list,
src_base_vid=self.src_base_vid,
dst_base_vid=self.dst_base_vid
)
class TestVrfUnbindIntf():
c_vars = {
'rebind_intf': True # rebind interface during teardown stage
}
@pytest.fixture(scope="class", autouse=True)
def setup_vrf_unbindintf(self, duthost, ptfhost, testbed, cfg_facts):
# -------- Setup ----------
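        # Unbind PortChannel0001 from its vrf; its ip addresses, neighbors and routes are expected to be flushed.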
duthost.shell("config interface vrf unbind PortChannel0001")
# wait for neigh/route flush
time.sleep(5)
# -------- Testing ----------
yield
# -------- Teardown ----------
if self.c_vars['rebind_intf']:
self.rebind_intf(duthost)
wait_until(120, 10, check_bgp_facts, duthost, cfg_facts)
def rebind_intf(self, duthost):
duthost.shell("config interface vrf bind PortChannel0001 Vrf1")
for ver, ips in g_vars['vrf_intfs']['Vrf1']['PortChannel0001'].iteritems():
for ip in ips:
duthost.shell("config interface ip add PortChannel0001 {}".format(ip))
@pytest.fixture(scope='class')
def setup_vrf_rebind_intf(self, duthost, cfg_facts):
self.rebind_intf(duthost)
self.c_vars['rebind_intf'] = False # Mark to skip rebind interface during teardown
# check bgp session state after rebind
assert wait_until(120, 10, check_bgp_facts, duthost, cfg_facts), \
"Bgp sessions should be re-estabalished after Portchannel0001 rebind to Vrf"
def test_pc1_ip_addr_flushed(self, duthost):
ip_addr_show = duthost.shell("ip addr show PortChannel0001")['stdout']
for ver, ips in g_vars['vrf_intfs']['Vrf1']['PortChannel0001'].iteritems():
for ip in ips:
assert str(ip) not in ip_addr_show, "The ip addresses on PortChannel0001 should be flushed after unbind from vrf."
def test_pc1_neigh_flushed(self, duthost):
# verify ipv4
show_arp = duthost.shell("show arp")['stdout']
assert 'PortChannel0001' not in show_arp, "The arps on PortChannel0001 should be flushed after unbind from vrf."
# FIXME
        # ipv6 neighbors do not seem to be flushed by the kernel when ipv6 addresses are removed
        # from an interface, so the ipv6 neighbor flush check is commented out.
# # verify ipv6
# show_ndp = duthost.shell("show ndp")['stdout']
# assert 'PortChannel0001' not in show_ndp, "The neighbors on PortChannel0001 should be flushed after unbind from vrf."
def test_pc1_neigh_flushed_by_traffic(self, partial_ptf_runner):
pc1_neigh_ips = []
for ver, ips in g_vars['vrf_intfs']['Vrf1']['PortChannel0001'].iteritems():
for ip in ips:
pc1_neigh_ips.append(str(ip.ip+1))
partial_ptf_runner(
testname="vrf_test.FwdTest",
pkt_action='drop',
dst_ips=pc1_neigh_ips,
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000'],
dst_ports=[g_vars['vrf_intf_member_port_indices']['Vrf1']['PortChannel0001']],
ipv4=True,
ipv6=False
)
def test_pc1_routes_flushed(self, ptfhost, testbed, partial_ptf_runner):
gen_vrf_fib_file('Vrf1', testbed, ptfhost,
dst_intfs=['PortChannel0001'],
render_file="/tmp/unbindvrf_fib_1.txt")
# Send packet from downlink to uplink, port channel1 should no longer receive any packets
partial_ptf_runner(
testname="vrf_test.FibTest",
pkt_action='drop',
fib_info="/tmp/unbindvrf_fib_1.txt",
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
def test_pc2_neigh(self, partial_ptf_runner):
pc2_neigh_ips = []
for ver, ips in g_vars['vrf_intfs']['Vrf1']['PortChannel0002'].iteritems():
for ip in ips:
pc2_neigh_ips.append(str(ip.ip+1))
partial_ptf_runner(
testname="vrf_test.FwdTest",
pkt_action='fwd',
dst_ips=pc2_neigh_ips,
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000'],
dst_ports=[g_vars['vrf_intf_member_port_indices']['Vrf1']['PortChannel0002']]
)
def test_pc2_fib(self, ptfhost, testbed, partial_ptf_runner):
gen_vrf_fib_file('Vrf1', testbed, ptfhost,
dst_intfs=['PortChannel0002'],
render_file="/tmp/unbindvrf_fib_2.txt")
partial_ptf_runner(
testname="vrf_test.FibTest",
fib_info="/tmp/unbindvrf_fib_2.txt",
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
@pytest.mark.usefixtures('setup_vrf_rebind_intf')
def test_pc1_neigh_after_rebind(self, partial_ptf_runner):
pc1_neigh_ips = []
for ver, ips in g_vars['vrf_intfs']['Vrf1']['PortChannel0001'].iteritems():
for ip in ips:
pc1_neigh_ips.append(str(ip.ip+1))
partial_ptf_runner(
testname="vrf_test.FwdTest",
pkt_action='fwd',
dst_ips=pc1_neigh_ips,
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000'],
dst_ports=[g_vars['vrf_intf_member_port_indices']['Vrf1']['PortChannel0001']],
ipv4=True,
ipv6=False
)
@pytest.mark.usefixtures('setup_vrf_rebind_intf')
def test_vrf1_fib_after_rebind(self, ptfhost, testbed, partial_ptf_runner):
gen_vrf_fib_file('Vrf1', testbed, ptfhost,
dst_intfs=['PortChannel0001', 'PortChannel0002'],
render_file='/tmp/rebindvrf_vrf1_fib.txt')
partial_ptf_runner(
testname="vrf_test.FibTest",
fib_info="/tmp/rebindvrf_vrf1_fib.txt",
src_ports=g_vars['vrf_member_port_indices']['Vrf1']
)
class TestVrfDeletion():
c_vars = {
'restore_vrf': True
}
def restore_vrf(self, duthost):
duthost.shell("config vrf add Vrf1")
for intf, ip_facts in g_vars['vrf_intfs']['Vrf1'].iteritems():
duthost.shell("config interface vrf bind %s Vrf1" % intf)
for ver, ips in ip_facts.iteritems():
for ip in ips:
duthost.shell("config interface ip add {} {}".format(intf, ip))
@pytest.fixture(scope="class", autouse=True)
def setup_vrf_deletion(self, duthost, ptfhost, testbed, cfg_facts):
# -------- Setup ----------
gen_vrf_fib_file('Vrf1', testbed, ptfhost,
dst_intfs=['PortChannel0001', 'PortChannel0002'],
render_file="/tmp/vrf1_fib.txt")
gen_vrf_fib_file('Vrf2', testbed, ptfhost,
dst_intfs=['PortChannel0003', 'PortChannel0004'],
render_file="/tmp/vrf2_fib.txt")
gen_vrf_neigh_file('Vrf1', ptfhost, render_file="/tmp/vrf1_neigh.txt")
gen_vrf_neigh_file('Vrf2', ptfhost, render_file="/tmp/vrf2_neigh.txt")
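        # Delete Vrf1; its interfaces, neighbors and routes should be removed while Vrf2 keeps forwarding.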
duthost.shell("config vrf del Vrf1")
# -------- Testing ----------
yield
# -------- Teardown ----------
if self.c_vars['restore_vrf']:
self.restore_vrf(duthost)
wait_until(120, 10, check_bgp_facts, duthost, cfg_facts)
@pytest.fixture(scope='class')
def setup_vrf_restore(self, duthost, cfg_facts):
self.restore_vrf(duthost)
self.c_vars['restore_vrf'] = False # Mark to skip restore vrf during teardown
# check bgp session state after restore
assert wait_until(120, 10, check_bgp_facts, duthost, cfg_facts), \
"Bgp sessions should be re-estabalished after restore Vrf1"
def test_pc1_ip_addr_flushed(self, duthost):
show_interfaces = duthost.shell("show ip interfaces")['stdout']
assert 'PortChannel0001' not in show_interfaces, "The ip addr of PortChannel0001 should be flushed after Vrf1 is deleted."
def test_pc2_ip_addr_flushed(self, duthost):
show_interfaces = duthost.shell("show ip interfaces")['stdout']
assert 'PortChannel0002' not in show_interfaces, "The ip addr of PortChannel0002 should be flushed after Vrf1 is deleted."
def test_vlan1000_ip_addr_flushed(self, duthost):
show_interfaces = duthost.shell("show ip interfaces")['stdout']
assert 'Vlan1000' not in show_interfaces, "The ip addr of Vlan1000 should be flushed after Vrf1 is deleted."
def test_loopback0_ip_addr_flushed(self, duthost):
show_interfaces = duthost.shell("show ip interfaces")['stdout']
assert 'Loopback0' not in show_interfaces, "The ip addr of Loopback0 should be flushed after Vrf1 is deleted."
def test_vrf1_neighs_flushed(self, duthost):
ip_neigh_show = duthost.shell("ip neigh show vrf Vrf1", module_ignore_errors=True)['stdout']
assert '' == ip_neigh_show, "The neighbors on Vrf1 should be flushed after Vrf1 is deleted."
def test_vrf1_neighs_flushed_by_traffic(self, partial_ptf_runner):
partial_ptf_runner(
testname="vrf_test.FwdTest",
pkt_action='drop',
fwd_info="/tmp/vrf1_neigh.txt",
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
def test_vrf1_routes_flushed(self, partial_ptf_runner):
partial_ptf_runner(
testname="vrf_test.FibTest",
pkt_action='drop',
fib_info="/tmp/vrf1_fib.txt",
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
def test_vrf2_neigh(self, partial_ptf_runner):
partial_ptf_runner(
testname="vrf_test.FwdTest",
fwd_info="/tmp/vrf2_neigh.txt",
src_ports= g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000']
)
def test_vrf2_fib(self, partial_ptf_runner):
partial_ptf_runner(
testname="vrf_test.FibTest",
fib_info="/tmp/vrf2_fib.txt",
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf2']['Vlan2000']
)
@pytest.mark.usefixtures('setup_vrf_restore')
def test_vrf1_neigh_after_restore(self, partial_ptf_runner):
partial_ptf_runner(
testname="vrf_test.FwdTest",
fwd_info="/tmp/vrf1_neigh.txt",
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
@pytest.mark.usefixtures('setup_vrf_restore')
    def test_vrf1_fib_after_restore(self, partial_ptf_runner):
partial_ptf_runner(
testname="vrf_test.FibTest",
fib_info="/tmp/vrf1_fib.txt",
src_ports=g_vars['vrf_intf_member_port_indices']['Vrf1']['Vlan1000']
)
|
WayScript_bot.py
|
import telebot
import json
from threading import Thread, Event
bot = telebot.AsyncTeleBot("<BOT_TOKEN>")  # placeholder token; AsyncTeleBot requires the bot token string
stop_event = Event()
commands_sent = []
google_assistant_reply = []
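# commands_sent tracks requests that are still waiting for a reply;
# google_assistant_reply collects the result strings handed back to the assistant.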
chat_id = 511021111
variables = {'Targets_Commands': 'pc laptop wakeup'}
def send_given_request(target, command):
    """Send a request to the next point in the chain and record it as awaiting a reply."""
request = {"target": target, "command": command}
message = json.dumps(request)
commands_sent.append({key: request[key] for key in ("target", "command")})
    message_sent = bot.send_message(chat_id, message).wait()
    bot.delete_message(message_sent.chat.id, message_sent.message_id)
# print("Command send: ", message)
def wait_for_request():
"""Function returns answer of request."""
while commands_sent:
updates = bot.get_updates(offset=(bot.last_update_id + 1), timeout=1)
bot.process_new_updates(updates)
if stop_event.is_set():
break
@bot.message_handler(content_types=['text'])
def get_reply(message):
if message.chat.id == chat_id:
reply = json.loads(message.text)
commands_sent.remove({key: reply[key] for key in ("target", "command")})
# print("Command replyed: ", reply)
# getting program reply for google assistant to say
if reply["result"]:
google_assistant_reply.append(reply["result"])
else:
        bot.send_message(message.chat.id, "Sorry, it's a private bot.")
def main():
"""Function gets google assistant request, handle it and returns answer."""
    tokens = set(variables['Targets_Commands'].split(' '))
    targets, commands = tokens & {'pc', 'laptop'}, tokens - {'pc', 'laptop'}
for target in targets:
for command in commands:
            send_given_request(target, command)
action_thread = Thread(target=wait_for_request)
action_thread.start()
action_thread.join(timeout=20)
stop_event.set()
if not google_assistant_reply:
google_assistant_reply.append("Sorry, server is unreachable.")
print(google_assistant_reply)
variables[ "Google_assistant_answers" ] = google_assistant_reply
if __name__ == '__main__':
main()
|
job_test.py
|
""""""
import os
import unittest
import re
from threading import Thread
from django.test import TestCase
from norc.core.models import Job, JobNode, Dependency, Instance, Schedule
from norc.norc_utils import wait_until, log, testing
class JobTest(TestCase):
def queue_items(self):
items = []
while self.queue.count() > 0:
items.append(self.queue.pop())
return items
def _start_instance(self, instance):
try:
instance.start()
except SystemExit:
pass
def setUp(self):
self.queue = testing.make_queue()
self.tasks = [testing.make_task('JobTask%s' % i) for i in range(6)]
self.job = Job.objects.create(name='TestJob')
self.nodes = [JobNode.objects.create(task=self.tasks[i], job=self.job)
for i in range(6)]
n = self.nodes
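        # The dependencies below form a DAG: 0->{2,3}, 1->4, 2->{3,5}, 3->5, 4->5;
        # nodes 0 and 1 are the roots and node 5 can only run last.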
Dependency.objects.create(parent=n[0], child=n[2])
Dependency.objects.create(parent=n[0], child=n[3])
Dependency.objects.create(parent=n[1], child=n[4])
Dependency.objects.create(parent=n[2], child=n[3])
Dependency.objects.create(parent=n[2], child=n[5])
Dependency.objects.create(parent=n[3], child=n[5])
Dependency.objects.create(parent=n[4], child=n[5])
def test_job(self):
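        # Schedule the job once, start the job instance in a background thread, then pop and run the
        # queued node instances level by level, checking the expected frontier of runnable nodes each time.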
schedule = Schedule.create(self.job, self.queue, 1)
instance = Instance.objects.create(task=self.job, schedule=schedule)
# instance.log = log.Log(os.devnull)
self.thread = Thread(target=instance.start)
self.thread.start()
wait_until(lambda: self.queue.count() == 2, 2)
self.assertEqual(set([i.item.node for i in self.queue.items.all()]),
set([self.nodes[0], self.nodes[1]]))
for i in self.queue_items():
self._start_instance(i)
self.assertEqual(set([i.item.node for i in self.queue.items.all()]),
set([self.nodes[2], self.nodes[4]]))
for i in self.queue_items():
self._start_instance(i)
self.assertEqual(set([i.item.node for i in self.queue.items.all()]),
set([self.nodes[3]]))
for i in self.queue_items():
self._start_instance(i)
self.assertEqual(set([i.item.node for i in self.queue.items.all()]),
set([self.nodes[5]]))
for i in self.queue_items():
self._start_instance(i)
self.thread.join(2)
self.assertFalse(self.thread.isAlive())
|
server.py
|
import socket
import threading
from HW1.Q2.Model.Message import Message
from HW1.Q2.Model.Account import Account
from HW1.Q2.Model.Channel import Channel
from HW1.Q2.Model.Group import Group
from HW1.Q2.constants import *
# Locks for thread safety
clients_lock = threading.Lock()
accounts_lock = threading.Lock()
channels_lock = threading.Lock()
groups_lock = threading.Lock()
private_message_lock = threading.Lock()
clients: dict[str, socket.socket] = {}
accounts: list[Account] = []
channels: list[Channel] = []
groups: list[Group] = []
# Private messages are stored in a dictionary. Key: (id1 , id2) -> Value: List of Messages between id1 and id2
private_messages: dict[tuple[str, str], list[Message]] = {}
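# The key is the sorted pair of account ids, so ("alice", "bob") and ("bob", "alice") map to the same conversation.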
# Send Command / Error / Success Message to Client
def send_command(connection: socket.socket, cmd: str):
connection.send(COMMANDS[cmd].encode(ENCODING))
def notify_channel_group(cg: "Channel | Group", msg: Message, is_channel: bool):
intro = "New Message in Channel" if is_channel else "New Message in Group"
k = clients.keys()
for account in cg.members:
if account in k:
clients[account].send(f"{intro} {cg.id}\n{msg}".encode(ENCODING))
# Send all messages that are stored in 'messages' list to the client
def send_all_message(connection: socket.socket, messages: list[Message]):
all_msg = ""
for msg in messages:
all_msg += (str(msg) + "\n")
    # Because messages can be very long, their total size is sent to the client first,
    # so the client knows how many bytes to expect.
send_len = len(all_msg) + 2
send_length_msg = COMMANDS[SEND_ALL_MESSAGE_PROTOCOL] + " " + str(send_len)
connection.send(send_length_msg.encode(ENCODING))
connection.send(all_msg.encode(ENCODING))
def id_exist_in_list(element_id, array):
return any([x for x in array if x.id == element_id])
def client_exists(element_id):
return element_id in clients
def handle_client(connection: socket.socket, address):
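    # Each client connection is served in its own thread: first the account handshake, then a loop that
    # dispatches every received line to at most one of the regex-based handlers below.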
print(f"[CONNECTED] {address}")
account_id = make_or_find_account(connection)
try:
while True:
data = connection.recv(1024).decode(ENCODING)
            # Each of these functions checks a regex match, so at most one of them will actually handle the data.
success = create_group(account_id, connection, data) \
or create_channel(account_id, connection, data) \
or join_group(account_id, connection, data) \
or join_channel(account_id, connection, data) \
or view_channel_message(account_id, connection, data) \
or send_group_or_pv_message(account_id, connection, data) \
or view_group_message(account_id, connection, data) \
or view_private_message(account_id, connection, data)
if not success:
send_command(connection, INVALID_COMMAND)
except (socket.error, socket.herror, socket.gaierror):
with clients_lock:
clients.pop(account_id)
connection.close()
def send_group_or_pv_message(account_id, connection, data):
result = SEND_PV_OR_GROUP_REGEX.match(data)
if result:
group_user_channel_id = result.group(1)
msg_str = result.group(2)
msg = Message(account_id, msg_str)
if id_exist_in_list(group_user_channel_id, groups):
send_group_msg(account_id, connection, group_user_channel_id, msg)
elif id_exist_in_list(group_user_channel_id, accounts):
send_private_message(account_id, connection, group_user_channel_id, msg)
elif id_exist_in_list(group_user_channel_id, channels):
send_channel_message(account_id, connection, group_user_channel_id, msg)
else:
send_command(connection, NO_SUCH_GROUP_OR_USER_OR_CHANNEL)
return True
return False
def send_channel_message(account_id, connection, channel_id, msg):
channel = [x for x in channels if x.id == channel_id][0]
if account_id == channel.owner_id:
channel.messages.append(msg)
send_command(connection, CHANNEL_MESSAGE_SUCCESS)
        notify_channel_group(channel, msg, True)
else:
send_command(connection, CHANNEL_WRITE_INVALID_PERMISSION)
def send_private_message(account_id, connection, another_account_id, msg):
acc = [x for x in accounts if x.id == another_account_id][0]
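    # Normalize the conversation key so that (a, b) and (b, a) refer to the same message list.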
key = tuple(sorted((account_id, acc.id)))
if key not in private_messages.keys():
private_messages[key] = []
private_messages[key].append(msg)
send_command(connection, PRIVATE_MESSAGE_SUCCESS)
def send_group_msg(account_id, connection, group_id, msg):
grp = [x for x in groups if x.id == group_id][0]
if account_id in grp.members:
grp.messages.append(msg)
send_command(connection, GROUP_MESSAGE_SUCCESS)
notify_channel_group(grp, msg, False)
else:
send_command(connection, GROUP_WRITE_INVALID_PERMISSION)
def view_util(account_id, connection, data, regex_pattern, arr, not_sub_cmd, not_exist_cmd):
result = regex_pattern.match(data)
if result:
element_id = result.group(1)
if id_exist_in_list(element_id, arr):
element = [x for x in arr if x.id == element_id][0]
if account_id in element.members:
send_all_message(connection, element.messages)
else:
send_command(connection, not_sub_cmd)
else:
send_command(connection, not_exist_cmd)
return True
return False
def view_group_message(account_id, connection, data):
return view_util(account_id, connection, data, VIEW_GROUP_REGEX, groups, NOT_SUBSCRIBED_TO_GROUP, NO_SUCH_GROUP)
def view_channel_message(account_id, connection, data):
return view_util(account_id, connection, data, VIEW_CHANNEL_REGEX, channels, NOT_SUBSCRIBED_TO_CHANNEL,
NO_SUCH_CHANNEL)
def view_private_message(account_id, connection, data):
result = VIEW_PV_REGEX.match(data)
if result:
acc_id = result.group(1)
if id_exist_in_list(acc_id, accounts):
acc = [x for x in accounts if x.id == acc_id][0]
key = tuple(sorted((account_id, acc.id)))
if key in private_messages:
send_all_message(connection, private_messages[key])
else:
send_command(connection, NO_PV_BETWEEN_THESE_USERS)
else:
send_command(connection, NO_SUCH_USER)
return True
return False
def join_util(account_id, connection, data, regex_pattern, arr, already_join_cmd, success_cmd, not_exist_cmd):
result = regex_pattern.match(data)
if result:
element_id = result.group(1)
if id_exist_in_list(element_id, arr):
element = [x for x in arr if x.id == element_id][0]
if account_id in element.members:
send_command(connection, already_join_cmd)
else:
element.members.append(account_id)
send_command(connection, success_cmd)
else:
send_command(connection, not_exist_cmd)
return True
return False
def join_channel(account_id, connection, data):
return join_util(account_id, connection, data, JOIN_CHANNEL_REGEX, channels, CHANNEL_ALREADY_JOINED, CHANNEL_JOIN,
NO_SUCH_CHANNEL)
def join_group(account_id, connection, data):
return join_util(account_id, connection, data, JOIN_GROUP_REGEX, groups, GROUP_ALREADY_JOINED, GROUP_JOIN,
NO_SUCH_GROUP)
def create_channel(account_id, connection, data):
result = CREATE_CHANNEL_REGEX.match(data)
if result:
channel_id = result.group(1)
if not id_exist_in_list(channel_id, accounts) and not id_exist_in_list(channel_id, groups) \
and not id_exist_in_list(channel_id, channels):
channel = Channel(channel_id, account_id)
with channels_lock:
channels.append(channel)
send_command(connection, CHANNEL_CREATED)
else:
send_command(connection, ACCOUNT_GROUP_CHANNEL_ALREADY_EXIST)
return True
return False
def create_group(account_id, connection, data):
result = CREATE_GROUP_REGEX.match(data)
if result:
group_id = result.group(1)
if not id_exist_in_list(group_id, accounts) and not id_exist_in_list(group_id, groups) and not id_exist_in_list(
group_id, channels):
group = Group(group_id, account_id)
with groups_lock:
groups.append(group)
send_command(connection, GROUP_CREATED)
else:
send_command(connection, ACCOUNT_GROUP_CHANNEL_ALREADY_EXIST)
return True
else:
return False
# This function checks whether an online user with that id already exists. If it does, the client is asked to enter
# another id. If the account exists but is not online, the client is matched to it. Otherwise a new account is created.
# Note that uniqueness is also checked against channel and group ids.
def make_or_find_account(connection):
need_to_repeat = True
account_id = ""
while need_to_repeat:
send_command(connection, USER_ID_REQ)
data = connection.recv(1024)
if not data:
connection.close()
account_id = data.decode(ENCODING)
# Uniqueness
if not client_exists(account_id) and not id_exist_in_list(account_id, channels) and not id_exist_in_list(
account_id, groups):
need_to_repeat = False
else:
send_command(connection, ACCOUNT_GROUP_CHANNEL_ALREADY_EXIST)
# Check if account exist or need to be created.
account_exist = False
if id_exist_in_list(account_id, accounts):
account = [x for x in accounts if x.id == account_id][0]
account_exist = True
else:
account = Account(account_id)
with clients_lock:
clients[account.id] = connection
# If account doesn't exist, append it to 'accounts' list.
if not account_exist:
with accounts_lock:
accounts.append(account)
if not account_exist:
send_command(connection, ACCOUNT_CREATE_SUCCESS)
else:
send_command(connection, CONNECTED_TO_ALREADY_EXIST_ACCOUNT)
return account_id
if __name__ == '__main__':
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORT))
s.listen()
while True:
conn, addr = s.accept()
t = threading.Thread(target=handle_client, args=(conn, addr))
t.start()
|
train.py
|
#!/usr/bin/env python
from __future__ import division
"""
Portions Copyright (c) Microsoft Corporation
"""
"""
Main training workflow
"""
import argparse
import glob
import os
import random
import signal
import time
import torch
from pytorch_pretrained_bert import BertConfig
import bertsum.distributed as distributed
from bertsum.models import data_loader, model_builder
from bertsum.models.data_loader import load_dataset
from bertsum.models.model_builder import Summarizer
from bertsum.models.trainer import build_trainer
from bertsum.others.logging import logger, init_logger
model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers','encoder','ff_actv', 'use_interval','rnn_size']
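# Hyperparameters that are restored from a checkpoint's saved options before validating, testing or resuming training.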
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def multi_main(args):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
device_id = i
procs.append(mp.Process(target=run, args=(args,
device_id, error_queue,), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, device_id, error_queue):
""" run process """
setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
try:
gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
print('gpu_rank %d' %gpu_rank)
if gpu_rank != args.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
train(args,device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def wait_and_validate(args, device_id):
timestep = 0
if (args.test_all):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
xent = validate(args, device_id, cp, step)
xent_lst.append((xent, cp))
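            # Stop scanning once the best (lowest xent) checkpoint is more than 10 checkpoints old.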
max_step = xent_lst.index(min(xent_lst))
if (i - max_step > 10):
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
logger.info('PPL %s' % str(xent_lst))
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
test(args, device_id, cp, step)
else:
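        # Watch model_path for new checkpoints: validate and test each newly finished one, otherwise poll again later.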
while (True):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (not os.path.getsize(cp) > 0):
time.sleep(60)
continue
if (time_of_cp > timestep):
timestep = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
validate(args, device_id, cp, step)
test(args, device_id, cp, step)
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (time_of_cp > timestep):
continue
else:
time.sleep(300)
def validate(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
config = BertConfig.from_json_file(args.bert_config_path)
model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
model.load_cp(checkpoint)
model.eval()
    valid_iter = data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=False)
trainer = build_trainer(args, device_id, model, None)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def test(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
config = BertConfig.from_json_file(args.bert_config_path)
model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
model.load_cp(checkpoint)
model.eval()
test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=True)
trainer = build_trainer(args, device_id, model, None)
trainer.test(test_iter,step)
def baseline(args, device_id, test_iter, cal_lead=False, cal_oracle=False,):
trainer = build_trainer(args, device_id, None, None)
#
if (cal_lead):
trainer.test(test_iter, 0, cal_lead=True)
elif (cal_oracle):
trainer.test(test_iter, 0, cal_oracle=True)
def train(args, device_id):
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % device_id)
logger.info('Device %s' % device)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
def train_iter_fct():
return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
shuffle=True, is_test=True)
model = Summarizer(args, device, load_pretrained_bert=True)
if args.train_from != '':
logger.info('Loading checkpoint from %s' % args.train_from)
checkpoint = torch.load(args.train_from,
map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
model.load_cp(checkpoint)
optim = model_builder.build_optim(args, model, checkpoint)
else:
optim = model_builder.build_optim(args, model, None)
logger.info(model)
trainer = build_trainer(args, device_id, model, optim)
trainer.train(train_iter_fct, args.train_steps)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-encoder", default='classifier', type=str, choices=['classifier','transformer','rnn','baseline'])
parser.add_argument("-mode", default='train', type=str, choices=['lead', 'oracle', 'train','validate','test'])
parser.add_argument("-bert_data_path", default='../bert_data/cnndm')
parser.add_argument("-model_path", default='../models/')
parser.add_argument("-result_path", default='../results/cnndm')
parser.add_argument("-temp_dir", default='../temp')
parser.add_argument("-bert_config_path", default='../bert_config_uncased_base.json')
parser.add_argument("-batch_size", default=1000, type=int)
parser.add_argument("-use_interval", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-hidden_size", default=128, type=int)
parser.add_argument("-ff_size", default=512, type=int)
parser.add_argument("-heads", default=4, type=int)
parser.add_argument("-inter_layers", default=2, type=int)
parser.add_argument("-rnn_size", default=512, type=int)
parser.add_argument("-param_init", default=0, type=float)
parser.add_argument("-param_init_glorot", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-dropout", default=0.1, type=float)
parser.add_argument("-optim", default='adam', type=str)
parser.add_argument("-lr", default=1, type=float)
parser.add_argument("-beta1", default= 0.9, type=float)
parser.add_argument("-beta2", default=0.999, type=float)
parser.add_argument("-decay_method", default='', type=str)
parser.add_argument("-warmup_steps", default=8000, type=int)
parser.add_argument("-max_grad_norm", default=0, type=float)
parser.add_argument("-save_checkpoint_steps", default=5, type=int)
parser.add_argument("-accum_count", default=1, type=int)
parser.add_argument("-world_size", default=1, type=int)
parser.add_argument("-report_every", default=1, type=int)
parser.add_argument("-train_steps", default=1000, type=int)
parser.add_argument("-recall_eval", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument('-visible_gpus', default='-1', type=str)
parser.add_argument('-gpu_ranks', default='0', type=str)
parser.add_argument('-log_file', default='../logs/cnndm.log')
parser.add_argument('-dataset', default='')
parser.add_argument('-seed', default=666, type=int)
parser.add_argument("-test_all", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument("-test_from", default='')
parser.add_argument("-train_from", default='')
parser.add_argument("-report_rouge", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-block_trigram", type=str2bool, nargs='?', const=True, default=True)
args = parser.parse_args()
args.gpu_ranks = [int(i) for i in args.gpu_ranks.split(',')]
def __map_gpu_ranks(gpu_ranks):
#gpu_ranks_list=gpu_ranks.split(',')
gpu_ranks_list = gpu_ranks
print(gpu_ranks_list)
gpu_ranks_map = {}
for i, rank in enumerate(gpu_ranks_list):
gpu_ranks_map[int(rank)]=i
return gpu_ranks_map
args.gpu_ranks_map = __map_gpu_ranks(args.gpu_ranks)
os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpus
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
device_id = 0 if device == "cuda" else -1
print(args)
if(args.world_size>1):
print(args)
multi_main(args)
elif (args.mode == 'train'):
train(args, device_id)
elif (args.mode == 'validate'):
wait_and_validate(args, device_id)
elif (args.mode == 'lead'):
print(args)
test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=True)
baseline(args, device_id, test_iter, cal_lead=True)
elif (args.mode == 'oracle'):
test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=True)
baseline(args, device_id, test_iter, cal_oracle=True)
elif (args.mode == 'test'):
cp = args.test_from
        try:
            step = int(cp.split('.')[-2].split('_')[-1])
        except (IndexError, ValueError):
            step = 0
test(args, device_id, cp, step)
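
# Example invocations (illustrative only; the paths and hyper-parameter values below
# are assumptions for demonstration, not values taken from this repository):
#
#   python train.py -mode train -encoder transformer -dropout 0.1 \
#       -bert_data_path ../bert_data/cnndm -model_path ../models/bert_transformer \
#       -visible_gpus 0 -gpu_ranks 0 -world_size 1 -log_file ../logs/bert_transformer.log
#
#   python train.py -mode validate -test_all True -bert_data_path ../bert_data/cnndm \
#       -model_path ../models/bert_transformer -visible_gpus 0 -gpu_ranks 0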
|
views.py
|
from wsgiref.util import request_uri
from django.http import HttpResponse, StreamingHttpResponse
from django.shortcuts import render
from django.views.decorators import gzip
from django.contrib.auth.decorators import login_required
from cameras.models import Camera
import cv2
import threading
from timeit import default_timer as timer
from win10toast import ToastNotifier
class VideoCamera(object):
def __init__(self, source = 0, camera = 'Camera 1'):
self.camera = camera
self.net = cv2.dnn_DetectionModel('yolov4-tiny-cctv.cfg', 'yolov4-tiny-cctv_best.weights')
self.net.setInputSize(416, 416)
self.net.setInputScale(1.0 / 255)
self.net.setInputSwapRB(True)
self.source = source
self.video = cv2.VideoCapture(self.source)
(self.grabbed, self.frame) = self.video.read()
self.classes = []
self.confidences = []
self.boxes = []
self.names = ['knife', 'pistol']
        self.toast = ToastNotifier()
        self.last_notification = None
        self.running = True
        self.thread = threading.Thread(target=self.update, args=())
        self.thread.start()
        self.notification_thread = threading.Thread(target=self.notify, args=())
        self.notification_thread.start()
    def __del__(self):
        self.stop()

    def stop(self):
        # threading.Thread has no stop() method; signal the worker loops via a
        # flag and release the capture device instead.
        self.running = False
        self.video.release()
def notify(self):
        while self.running:
if len(self.classes) > 0:
print("Attention!", "A " + str(self.names[self.classes[0]]) + " found by the system at " + self.camera + ".")
current_time = timer()
                if (self.last_notification is None or current_time - self.last_notification > 3) and (len(self.confidences) > 0 and self.confidences[0] >= 0.5):
self.toast = ToastNotifier()
self.toast.show_toast("Attention!", "A " + str(self.names[self.classes[0]]) + " found by the system at " + self.camera + ".")
self.last_notification = current_time
if self.last_notification is None:
self.last_notification = timer()
def update(self):
while self.video.isOpened():
(self.grabbed, self.frame) = self.video.read()
def get_frame(self):
# _, jpg = cv2.imencode('.jpg', self.frame)
# return jpg.tobytes()
if self.grabbed and int(self.video.get(cv2.CAP_PROP_POS_FRAMES)) % 12 == 0:
classes, confidences, boxes = self.detect(self.frame)
self.classes = classes
self.confidences = confidences
self.boxes = boxes
if self.grabbed:
current_frame = self.draw(self.frame)
_, jpeg = cv2.imencode('.jpg', current_frame)
return jpeg.tobytes()
self.video.release()
def detect(self, image):
return self.net.detect(image, confThreshold=0.3, nmsThreshold=0.4)
def draw(self, image):
for i in range(len(self.classes)):
class_name = self.names[self.classes[i]]
confidence = self.confidences[i]
box = self.boxes[i]
label = '%.2f' % confidence
label = '%s: %s' % (class_name, label)
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX, 0.5, 1)
left, top, width, height = box
top = max(top, labelSize[1])
cv2.rectangle(image, box, color=(0, 255, 0), thickness=5)
cv2.rectangle(image, (left, top - labelSize[1]), (left + labelSize[0], top + baseLine), (255, 255, 255), cv2.FILLED)
cv2.putText(image, label, (left, top), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0))
return image
import time
def gen(camera: VideoCamera, request):
if request.path != '/stream/video_feed/':
camera.stop()
while request.path == '/stream/video_feed/':
print(time.time())
frame = camera.get_frame()
yield(b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
def my_yielder(frame):
yield(b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
@login_required
@gzip.gzip_page
# Create your views here.
def index(request):
cameras = Camera.objects.all()[:15]
return render(request, 'stream/index.html', { 'cameras': cameras })
def video_feed(request):
cam = VideoCamera(request.GET['url'], request.GET['camera'])
if request.path != '/stream/video_feed/':
cam.stop()
if cam is not None:
while request.path == '/stream/video_feed/':
return StreamingHttpResponse(gen(cam, request), content_type='multipart/x-mixed-replace; boundary=frame')
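
# Illustrative sketch (an assumption, not taken from the original project) of the URL
# configuration these views expect, given the hard-coded '/stream/video_feed/' path
# checks above and the 'stream/index.html' template:
#
#     # stream/urls.py
#     from django.urls import path
#     from . import views
#
#     app_name = 'stream'
#     urlpatterns = [
#         path('', views.index, name='index'),
#         path('video_feed/', views.video_feed, name='video_feed'),
#     ]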
|
__init__.py
|
'''
Slave classes responsible for distributing requests to replicas, proposers, ...
'''
from rexfw import Parcel
class Slave(object):
def __init__(self, replicas, comm):
'''
Default slave class
:param replicas: a dict of replicas with their names as keys
:type replicas: dict
:param comm: a communicator object to communicate with the master object
:type comm: :class:`.AbstractCommunicator`
'''
self.replicas = replicas
self._comm = comm
def _listen(self):
'''
Runs an infinite loop, polling for messages and passing them on to their
destination, which currently is only a single replica
'''
while True:
parcel = self._receive_parcel()
            if parcel.receiver in self.replicas:
result = self.replicas[parcel.receiver].process_request(parcel.data)
if result == -1:
break
else:
raise ValueError("Replica '{}' not found.".format(parcel.receiver))
def listen(self):
'''
Starts a thread and runs the infinite loop (_listen)
'''
from threading import Thread
self._thread = Thread(target=self._listen)
self._thread.start()
def _receive_parcel(self):
'''
Uses the communicator to receive a :class:`.Parcel` from any source
'''
return self._comm.recv(source='all')
class UDCountsSlave(object):
def __init__(self, replicas, comm, sim_path):
self.replicas = replicas
self._comm = comm
self._sim_path = sim_path
def _listen(self):
while True:
parcel = self._receive_parcel()
            if parcel.receiver in self.replicas:
result = self.replicas[parcel.receiver].process_request(parcel.data)
if result == -1:
import numpy
                        replica_name = next(iter(self.replicas.values())).name
numpy.save(self._sim_path + 'statistics/up_down_counts_{}.npy'.format(replica_name),
self.replicas[replica_name].up_down_counts)
break
else:
raise ValueError("Replica '{}' not found.".format(parcel.receiver))
def listen(self):
from threading import Thread
self._thread = Thread(target=self._listen)
self._thread.start()
def _receive_parcel(self):
return self._comm.recv(source='all')
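
# Minimal usage sketch (illustrative only, not part of the package): a Slave needs a
# dict of replicas plus any communicator object exposing recv(source='all') that
# returns Parcel instances. The names below are hypothetical placeholders.
#
#     replicas = {'replica1': replica1}   # objects implementing process_request()
#     comm = MyCommunicator()             # an AbstractCommunicator implementation
#     slave = Slave(replicas, comm)
#     slave.listen()                      # non-blocking; the loop runs in a thread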
|
etapa9-MultiplosProcessos-3.py
|
import multiprocessing, time, random
def somaProc(q1,q2):
lista = q1.get()
soma = 0
for i in lista:
soma = soma + i
q2.put(soma)
if __name__ == "__main__":
N = int(input("Entre com o tamanho do vetor:"))
# Captura tempo inicial
t_inicio = float(time.time())
#Gera lista com valores aleatórios
lista = []
for i in range(N):
lista.append(random.randint(-50,51))
NProc = 8
q_entrada = multiprocessing.Queue()
q_saida = multiprocessing.Queue()
lista_proc = []
for i in range(NProc):
        ini = i * int(N / NProc)
fim = (i + 1) * int(N / NProc)
q_entrada.put(lista[ini:fim])
p = multiprocessing.Process(target=somaProc, args=(q_entrada,q_saida,))
p.start()
lista_proc.append(p)
for p in lista_proc:
p.join()
soma = 0
for i in range(0,NProc):
soma = soma + q_saida.get()
    # Capture the end time
    t_fim = float(time.time())
    # Print the result and the execution time
    print(f"Sum: {soma}")
    print(f"Total time: {t_fim - t_inicio}")
|
connector.py
|
from __future__ import print_function
import threading
from io import TextIOBase
import json
import grpc
import connector_service_pb2
import connector_service_pb2_grpc
import wrappers_pb2
import empty_pb2
from connector_types import PingResponseType, TestRequestListenerBase, TestType
from profiling_configuration import ProfilingConfiguration
from os import path
import sys
import re
import queue
import time
__copyright__ = "Copyright 2020 Profilence"
__license__ = "Apache License, Version 2.0"
class Connector(object):
""" Driver class for connecting to Profilence ZETA test service """
log = None
@staticmethod
def _log(level, message):
if Connector.log is not None:
Connector.log(level, message)
def __init__(self, host, port):
""" Creates a new Connector instance and connects to the service
Parameters:
host: The service address
port: The service port
"""
self._channel = grpc.insecure_channel('%s:%d' % (host, port))
self._blockingStub = connector_service_pb2_grpc.ConnectorServiceStub(self._channel)
self._device_log_entry_queue = queue.Queue(maxsize=0)
self._shutdown = False
self._device_log_sender = threading.Thread(target=self._start_device_log_listener, daemon=False)
self._device_log_sender.start()
def _start_device_log_listener(self):
try:
self._blockingStub.LogDevice(self._read_device_log_entries())
except grpc.RpcError:
pass
    def _read_device_log_entries(self):
        while not self._shutdown:
            try:
                entry = self._device_log_entry_queue.get(block=True, timeout=5)
            except queue.Empty:
                # No log entry arrived within the timeout; keep polling until shutdown.
                continue
            if entry:
                yield entry
def _wait_device_log_queue(self):
while not self._shutdown and not self._device_log_entry_queue.empty():
time.sleep(0.05)
def shutdown(self):
""" Shuts down the connection """
self._shutdown = True
self._device_log_entry_queue.put(None)
self._device_log_sender.join(5)
self._channel.close()
def ping(self):
""" Pings the service
Returns:
True on success; False otherwise
"""
value = 0
try:
value = self._blockingStub.Ping(connector_service_pb2.PingMessage()).result
except grpc.RpcError as e:
self._log(2, 'RPC failed: %s' % str(e))
return value
def start_run_with_recommended_settings(self, run_name, set_name, project, version, primary_device_serial,
primary_device_type, secondary_device_serial, secondary_device_type, tags):
""" Requests the service for a new test run with recommended profiling settings
Parameters:
run_name (str): Name of the test run
set_name (str): Name of the test set
project (str): Name of the project under test
version (str): Version of the project
primary_device_serial (str): Identifier of the primary DUT
primary_device_type (str): Type of the primary DUT
secondary_device_serial (str): Identifier of the secondary DUT
secondary_device_type (str): Type of the secondary DUT
tags (dict): Tags for the test run
Returns:
Test run id (str) on success; None otherwise
"""
return self.start_run(run_name,
set_name,
project,
version,
primary_device_serial,
primary_device_type,
secondary_device_serial,
secondary_device_type,
None,
tags)
def start_run(self, run_name, set_name, project, version, primary_device_serial, primary_device_type,
secondary_device_serial, secondary_device_type, profiling_settings, tags, run_id=None):
""" Requests the service for a new test run
Parameters:
run_name (str): Name of the test run
set_name (str): Name of the test set
project (str): Name of the project under test
version (str): Version of the project
primary_device_serial (str): Identifier of the primary DUT
primary_device_type (str): Type of the primary DUT
secondary_device_serial (str): Identifier of the secondary DUT
secondary_device_type (str): Type of the secondary DUT
profiling_settings (str/TextIOBase): Profiling settings as JSON string, or a file handle to JSON file
tags (dict): Tags for the test run
run_id (str): ID of the test run if the run is requested by the server
Returns:
Test run id (str) on success; None otherwise
"""
if profiling_settings and isinstance(profiling_settings, ProfilingConfiguration):
profiling_settings = profiling_settings.to_json()
elif profiling_settings and isinstance(profiling_settings, TextIOBase):
try:
                profiling_settings = profiling_settings.read()
                json.loads(profiling_settings)  # validate that the content is valid JSON
except IOError as ioe:
self._log(2, 'Profiling settings read failed: %s' % str(ioe))
return
except json.decoder.JSONDecodeError as jsone:
self._log(2, 'Invalid JSON file: %s' % str(jsone))
return
request = connector_service_pb2.StartRunRequest()
request.run_name = run_name or ''
request.set_name = set_name or ''
request.project = project or ''
request.version = version or ''
request.primary_device_serial = primary_device_serial or ''
request.secondary_device_serial = secondary_device_serial or ''
request.primary_device_type = primary_device_type or ''
request.secondary_device_type = secondary_device_type or ''
request.profiling_settings = profiling_settings or ''
request.tags.update(tags or {})
        request.run_id = run_id or ''
try:
response = self._blockingStub.StartRun(request)
return response.run_id
except grpc.RpcError as e:
self._log(2, 'RPC failed: %s' % str(e))
return None
def on_use_case_start(self, run_id, use_case_name, use_case_id, test_case_group_name=None, test_set_name=None,
test_type=TestType.NORMAL, target_process=None, requirement_id=None):
""" Called to notify the service about start of a new use/test case
Parameters:
run_id (str): ID of the test run
use_case_name (str): Name of the use/test case
use_case_id (str): ID of the use/test case
test_case_group_name (str): Name of the group the use case belongs to (optional)
test_set_name (str): Name of the test suite the use case belongs to (optional)
test_type (int): Type of the use case: Normal, Precondition, PostCondition (Normal by default)
target_process (str): Name of a process to monitor more closely during the use/test case (optional)
requirement_id (str): ID of requirement this use/test case verifies (optional)
Returns:
            True if the notification was sent successfully; otherwise False
"""
if run_id is None or len(run_id.strip()) == 0:
return False
use_case_name_invalid = use_case_name is None or len(use_case_name.strip()) == 0
use_case_id_invalid = use_case_id is None or len(use_case_id.strip()) == 0
if use_case_name_invalid and use_case_id_invalid:
return False
request = connector_service_pb2.UseCaseStartRequest()
request.run_id = run_id
request.use_case_name = use_case_name or ''
request.use_case_id = use_case_id or ''
request.test_case_group_name = test_case_group_name or ''
request.test_set_name = test_set_name or ''
request.test_type = test_type
request.target_process = target_process or ''
request.requirement_id = requirement_id or ''
try:
self._blockingStub.OnUseCaseStart(request)
return True
except grpc.RpcError as e:
self._log(2, 'RPC failed: %s' % str(e))
return False
def on_use_case_end(self, run_id, result, active_run_time, fail_cause, reset_intended):
""" Called to notify the service about end of the use/test case
Parameters:
run_id (str): ID of the test run
result (bool): Result of the use/test case (True=Pass; False=Failure)
active_run_time (long): Time (milliseconds) used active testing during the test
fail_cause (str): Fail cause, if any
reset_intended (bool): True if a DUT reset was caused by the test; otherwise False
Returns:
            True if the notification was sent successfully; otherwise False
"""
if run_id is None or len(run_id.strip()) == 0:
return False
request = connector_service_pb2.UseCaseEndRequest()
request.run_id = run_id
request.result = result
request.activeRunTime = int(active_run_time)
request.fail_cause = fail_cause or ''
request.reset_intended = reset_intended
try:
self._blockingStub.OnUseCaseEnd(request)
return True
except grpc.RpcError as e:
self._log(2, 'RPC failed: %s' % str(e))
return False
def on_log_step(self, run_id, step_name, result, screenshot, step_type):
""" Called to notify the service about a new step within the use/test case
Parameters:
run_id (str): ID of the test run
step_name (str): Step name/description
result (bool): True if the step passed as expected; otherwise False
screenshot (bool/bytes/str): True for requesting the service to take a screenshot;
image bytes or path to image file for sending a local screenshot for
the service. If requesting the service to take a screenshot, make sure
it's supported for the DUT platform by the service.
step_type (enum): Value of step type. Values can be found in connector_service_pb2.py under "StepType"
Returns:
            True if the notification was sent successfully; otherwise False
"""
take_screenshot = isinstance(screenshot, bool) and screenshot is True
screenshot_bytes = None
if not take_screenshot:
if isinstance(screenshot, bytes) and len(screenshot):
screenshot_bytes = screenshot
elif isinstance(screenshot, str):
screenshot_bytes = get_bytes_from_file(screenshot)
        return self._on_log_step(run_id, step_name, result, take_screenshot, screenshot_bytes, step_type)
def _on_log_step(self, run_id, step_name, result, take_screenshot, screenshot_bytes, step_type):
if run_id is None or len(run_id.strip()) == 0:
return False
if not take_screenshot and (not screenshot_bytes or len(screenshot_bytes) == 0) and (
step_name is None or len(step_name.strip()) == 0):
return False
request = connector_service_pb2.LogStepRequest()
request.run_id = run_id
request.step_name = step_name or ''
request.result = result
request.take_screenshot = take_screenshot
        if step_type is None:
step_type = 1
request.stepType = step_type
if screenshot_bytes:
request.screenshot_bytes = screenshot_bytes
try:
self._blockingStub.OnLogStep(request)
return True
except grpc.RpcError as e:
self._log(2, 'RPC failed: %s' % str(e))
return False
def log_trace(self, run_id, data):
""" Send a line or internal/execution log to the service
Parameters:
run_id (str): ID of the test run
data (str): Log line
Returns:
            True if the notification was sent successfully; otherwise False
"""
if run_id is None or len(run_id.strip()) == 0:
return False
if data is None or len(data.strip()) == 0:
return False
request = connector_service_pb2.LogTraceRequest()
request.run_id = run_id
request.data = data
try:
self._blockingStub.LogTrace(request)
return True
except grpc.RpcError as e:
self._log(2, 'RPC failed: %s' % str(e))
return False
def notify_reset(self, run_id, timestamp, reset_type, reset_reasons, system_properties_after):
""" Notify the service about a reset detected in the DUT
Parameters:
run_id (str): ID of the test run
            timestamp (float): Time (milliseconds after epoch) when the reset took place
reset_type (int): Type of the reset (use ResetType enum)
reset_reasons (dict): Reset reasons if known
            system_properties_after (dict): DUT's system properties after it recovered from the reset
Returns:
            True if the notification was sent successfully; otherwise False
"""
if run_id is None or len(run_id.strip()) == 0:
return False
request = connector_service_pb2.ResetEntry()
request.run_id = run_id
request.timestamp = timestamp
request.type = reset_type
request.reasons.update(reset_reasons or {})
request.properties.update(system_properties_after or {})
try:
self._blockingStub.NotifyReset(request)
return True
except grpc.RpcError as e:
self._log(2, 'RPC failed: %s' % str(e))
return False
def notify_event(self, run_id, timestamp, event_type, is_system_process, name, process, exception_type, data_lines):
""" Notify the service about an event detected in the DUT
Parameters:
run_id (str): ID of the test run
            timestamp (float): Time (milliseconds after epoch) when the event took place
event_type (int): Type of the event (use EventType enum)
is_system_process (bool): True if caused by system process; False otherwise
name (str): Name of the event dump
process (str): Name of the process created/caused the event
exception_type (str): Description of the event: e.g. 'OutOfMemoryException'
data_lines (list[str]): Event data
Returns:
            True if the notification was sent successfully; otherwise False
"""
if run_id is None or len(run_id.strip()) == 0:
return False
request = connector_service_pb2.EventEntry()
request.run_id = run_id
request.timestamp = timestamp
request.type = event_type
request.is_system_process = is_system_process
request.name = name or ''
request.process = process or ''
request.exception_type = exception_type or ''
if data_lines and len(data_lines):
request.data.extend(data_lines)
try:
self._blockingStub.NotifyEvent(request)
return True
except grpc.RpcError as e:
self._log(2, 'RPC failed: %s' % str(e))
return False
def create_time_series(self, run_id, series_id, series_name, group, y_axis_name,
unit, series_type, namespace, process, description):
""" Initialize a new time series chart
Parameters:
run_id (str): ID of the test run
series_id (str): Unique ID of the series. This ID will be used when sending the values for the series
series_name (str): Name of the series
group (str): Group name for the series
y_axis_name (str): Name of the Y-axis
unit (str): Unit of the data
series_type (int): Type of the series (use SeriesType enum)
namespace (str): Process/package namespace (only for process specific charts)
process (str): Name of the process (only for process specific charts)
description (str): Description for the series
Returns:
            True if the data was sent successfully; otherwise False
"""
if run_id is None or len(run_id.strip()) == 0:
return False
if series_id is None or len(series_id.strip()) == 0:
return False
request = connector_service_pb2.DynamicSeriesInformation()
request.run_id = run_id
request.series_id = series_id
request.series_name = series_name or ''
request.group = group or ''
request.y_axis_name = y_axis_name or ''
request.unit = unit or ''
request.type = series_type
request.namespace = namespace or ''
request.process = process or ''
request.description = description or ''
try:
self._blockingStub.CreateTimeSeries(request)
return True
except grpc.RpcError as e:
self._log(2, 'RPC failed: %s' % str(e))
return False
def update_single_system_series(self, run_id, series_id, timestamp, value):
""" Update system singe series data
Parameters:
run_id (str): ID of the test run
series_id (str): Unique ID of the series.
            timestamp (float): X-value. Time (milliseconds after epoch)
value (float): Y-value
Returns:
            True if the data was sent successfully; otherwise False
"""
if run_id is None or len(run_id.strip()) == 0:
return False
if series_id is None or len(series_id.strip()) == 0:
return False
request = connector_service_pb2.DynamicSingleSeriesUpdate()
request.run_id = run_id
request.series_id = series_id
request.timestamp = timestamp
request.value = value
try:
self._blockingStub.UpdateSingleSystemSeries(request)
return True
except grpc.RpcError as e:
self._log(2, 'RPC failed: %s' % str(e))
return False
def update_composite_system_series(self, run_id, series_id, timestamp, values):
""" Update system composite series data
Parameters:
run_id (str): ID of the test run
series_id (str): Unique ID of the series.
            timestamp (float): X-value. Time (milliseconds after epoch)
values (dict[str, float]): Y-value per column
Returns:
            True if the data was sent successfully; otherwise False
"""
if run_id is None or len(run_id.strip()) == 0:
return False
if series_id is None or len(series_id.strip()) == 0:
return False
request = connector_service_pb2.DynamicCompositeSeriesUpdate()
request.run_id = run_id
request.series_id = series_id
request.timestamp = timestamp
request.values.update(values or {})
try:
self._blockingStub.UpdateCompositeSystemSeries(request)
return True
except grpc.RpcError as e:
self._log(2, 'RPC failed: %s' % str(e))
return False
def update_single_process_series(self, run_id, series_id, timestamp, package, process, value, pid=None):
""" Update process specific single series data
Parameters:
run_id (str): ID of the test run
series_id (str): Unique ID of the series.
            timestamp (float): X-value. Time (milliseconds after epoch)
package (str): Namespace of the package
process (str): Name of the process
value (float): Y-value
pid (int): ID of the process (optional)
Returns:
            True if the data was sent successfully; otherwise False
"""
if run_id is None or len(run_id.strip()) == 0:
return False
if series_id is None or len(series_id.strip()) == 0:
return False
request = connector_service_pb2.DynamicProcessSingleSeriesUpdate()
request.run_id = run_id
request.series_id = series_id
request.timestamp = timestamp
request.package = package or ''
request.process = process or ''
request.value = value
if pid is not None:
pid_value = wrappers_pb2.Int32Value()
pid_value.value = pid
            request.pid.CopyFrom(pid_value)
try:
self._blockingStub.UpdateSingleProcessSeries(request)
return True
except grpc.RpcError as e:
self._log(2, 'RPC failed: %s' % str(e))
return False
def update_composite_process_series(self, run_id, series_id, timestamp, package, process, values, pid=None):
""" Update process specific composite series data
Parameters:
run_id (str): ID of the test run
series_id (str): Unique ID of the series.
            timestamp (float): X-value. Time (milliseconds after epoch)
package (str): Namespace of the package
process (str): Name of the process
values (dict[str, float]): Y-value per column
pid (int): ID of the process (optional)
Returns:
            True if the data was sent successfully; otherwise False
"""
if run_id is None or len(run_id.strip()) == 0:
return False
if series_id is None or len(series_id.strip()) == 0:
return False
request = connector_service_pb2.DynamicProcessCompositeSeriesUpdate()
request.run_id = run_id
request.series_id = series_id
request.timestamp = timestamp
request.package = package or ''
request.process = process or ''
request.values.update(values or {})
if pid is not None:
pid_value = wrappers_pb2.Int32Value()
pid_value.value = pid
            request.pid.CopyFrom(pid_value)
try:
self._blockingStub.UpdateCompositeProcessSeries(request)
return True
except grpc.RpcError as e:
self._log(2, 'RPC failed: %s' % str(e))
return False
def on_device_log(self, run_id, timestamp, log_priority, source_buffer_type, tag, data):
""" Route a device log line to service
Parameters:
run_id (str): ID of the test run
            timestamp (float): Time (milliseconds after epoch)
log_priority (int): Priority of the log message (use LogPriority enum)
source_buffer_type (int): Source buffer of the log message (use SourceBuffer enum)
tag (str): Tag of the message
data (str): Message data
Returns:
            True if the data was buffered successfully; otherwise False
"""
        return self.__on_device_log_impl(run_id, 1, timestamp, log_priority, source_buffer_type, tag, data)
def on_device2_log(self, run_id, timestamp, log_priority, source_buffer_type, tag, data):
""" Route a device2 log line to service
Parameters:
run_id (str): ID of the test run
            timestamp (float): Time (milliseconds after epoch)
log_priority (int): Priority of the log message (use LogPriority enum)
source_buffer_type (int): Source buffer of the log message (use SourceBuffer enum)
tag (str): Tag of the message
data (str): Message data
Returns:
            True if the data was buffered successfully; otherwise False
"""
        return self.__on_device_log_impl(run_id, 2, timestamp, log_priority, source_buffer_type, tag, data)
def __on_device_log_impl(self, run_id, device_index, timestamp, log_priority, source_buffer_type, tag, data):
if run_id is None or len(run_id.strip()) == 0:
return False
entry = connector_service_pb2.DeviceLogEntry()
entry.run_id = run_id
entry.device_index = device_index
entry.timestamp = timestamp
entry.priority = log_priority
entry.source_buffer = source_buffer_type
entry.tag = tag or ''
entry.data = data or ''
try:
self._device_log_entry_queue.put(entry)
return True
except Exception as e:
self._log(2, 'RPC failed: %s' % str(e))
return False
def stop_run(self, run_id, discard_results=False):
""" Requests the service to stop an ongoing test run
Parameters:
run_id: ID of the test run
            discard_results: True to discard all the test data
Returns:
            True if the notification was sent successfully; otherwise False
"""
if run_id is None or len(run_id.strip()) == 0:
return False
request = connector_service_pb2.StopRunRequest()
request.run_id = run_id
request.discard_results = discard_results
try:
self._wait_device_log_queue()
self._blockingStub.StopRun(request)
return True
except grpc.RpcError as e:
self._log(2, 'RPC failed: %s' % str(e))
return False
# For implementing a test node for the test cloud -->
def subscribe_to_test_requests(self, listener):
""" Subscribe to test requests
Parameters:
listener (TestRequestListenerBase): Listener handler for asynchronous requests
"""
if isinstance(listener, TestRequestListenerBase):
threading.Thread(target=self._subscribe_to_test_requests, args=[listener], daemon=False).start()
def _subscribe_to_test_requests(self, listener):
if isinstance(listener, TestRequestListenerBase):
try:
for request in self._blockingStub.SubscribeToTestRequests(empty_pb2.Empty()):
if request and hasattr(request, 'request_type'):
try:
rt = request.request_type
if rt == 1:
start_req = connector_service_pb2.TestStartRequest()
start_req.ParseFromString(request.payload)
listener.on_test_start_requested(start_req)
elif rt == 2:
stop_req = connector_service_pb2.TestStopRequest()
stop_req.ParseFromString(request.payload)
listener.on_test_stop_requested(stop_req)
except Exception as ex:
listener.on_error(ex)
listener.on_completed()
except grpc.RpcError as e:
listener.on_error(e)
self._log(2, 'RPC failed: %s' % str(e))
def respond_to_test_request(self, run_id, status, result, fail_cause, log):
""" Send response to a test request
Parameters:
run_id (str): ID of the local test run
status (str): Status
result (bool): True if test run was executed without issues
fail_cause (str): Fail cause if any
log (list[str]): Tool log
Returns:
True if response was successfully sent to service; otherwise False
"""
if run_id is None or len(run_id.strip()) == 0:
return False
resp = connector_service_pb2.TestStartResponse()
resp.run_id = run_id
resp.status = status or ''
resp.result = result
resp.fail_cause = fail_cause or ''
if log and len(log):
resp.log.extend(log)
try:
self._blockingStub.RespondToTestRequest(resp)
return True
except grpc.RpcError as e:
self._log(2, 'RPC failed: %s' % str(e))
return False
def add_node(self, node_id, pool, node_type, variables):
""" Establishes a test node for the service (test farm)
Parameters:
node_id (str): Unique ID of the node
pool (str): Name of the pool/group this node belongs to
node_type (str): Type of the node
variables (str): Variables
Return:
True if request was successfully sent to service; otherwise False
"""
if node_id is None or len(node_id.strip()) == 0:
return False
request = connector_service_pb2.NodeAdded()
request.node_id = node_id
request.pool = pool or ''
request.type = node_type or ''
request.variables = variables or '{}'
try:
self._blockingStub.AddNode(request)
return True
except grpc.RpcError as e:
self._log(2, 'RPC failed: %s' % str(e))
return False
def remove_node(self, node_id):
""" Removes the test node from the service (test farm)
Parameters:
node_id: Unique ID of the node
Returns:
True if request was successfully sent to service; otherwise False
"""
if node_id is None or len(node_id.strip()) == 0:
return False
request = connector_service_pb2.NodeRemoved()
request.node_id = node_id
try:
self._blockingStub.RemoveNode(request)
return True
except grpc.RpcError as e:
self._log(2, 'RPC failed: %s' % str(e))
return False
def update_node(self, node_id, current_use_case, run_state, current_run_id, node_state, pool, variables):
""" Updates the service with information from an existing test node
Parameters:
node_id (str): Unique ID of the node
current_use_case (str): Name of current use case
run_state (int): State of the run
current_run_id (str): ID of the current run
node_state (int): State of the node
pool (str): Name of the pool/group this node belongs to
variables (str): Variables
Returns:
True if request was successfully sent to service; otherwise False
"""
if node_id is None or len(node_id.strip()) == 0:
return False
request = connector_service_pb2.NodeUpdated()
request.node_id = node_id
if current_use_case and len(current_use_case.strip()):
request.current_use_case.value = current_use_case
if current_run_id and len(current_run_id.strip()):
request.current_run_id.value = current_run_id
if pool and len(pool.strip()):
request.pool.value = pool
if variables and len(variables.strip()):
request.variables.value = variables
if node_state is not None:
request.node_state.value = node_state
if run_state is not None:
request.run_state.value = run_state
try:
self._blockingStub.UpdateNode(request)
return True
except grpc.RpcError as e:
self._log(2, 'RPC failed: %s' % str(e))
return False
def ping_run(self, run_id):
""" Ping Run
Parameters:
run_id (str): ID of the local test run
Returns:
True if request was successfully sent to service; otherwise False
"""
if run_id is None or len(run_id.strip()) == 0:
return False
request = connector_service_pb2.PingRunRequest()
request.run_id = run_id
try:
self._blockingStub.PingRun(request)
return True
except grpc.RpcError as e:
self._log(2, 'RPC failed: %s' % str(e))
return False
# noinspection PyBroadException
def get_bytes_from_file(filename):
    if filename is not None and path.exists(filename):
        try:
            with open(filename, "rb") as f:
                return f.read()
        except Exception:
            pass
    return None
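
# Illustrative sketch (never called from this module) of a typical reporting lifecycle
# with the Connector. The host/port arguments and every literal name, serial and tag
# below are made-up placeholders, not values known to the real service.
def _example_run_lifecycle(host, port):
    client = Connector(host, port)
    try:
        run_id = client.start_run_with_recommended_settings(
            'smoke run', 'smoke set', 'demo project', '1.0',
            'SERIAL123', 'phone', None, None, {'branch': 'main'})
        if run_id:
            client.on_use_case_start(run_id, 'open settings', 'UC-1')
            client.on_log_step(run_id, 'tap settings icon', True, False, 1)
            client.on_use_case_end(run_id, True, 1500, None, False)
            client.stop_run(run_id)
    finally:
        client.shutdown()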
if __name__ == '__main__':
    if len(sys.argv) > 1:
args = sys.argv[1:]
p = re.compile('([^:]+):([0-9]+)')
address = None
port = None
if len(args) >= 1 and (p.match(args[0])):
m = p.match(args[0])
address = m.group(1)
port = int(m.group(2))
elif len(args) >= 2 and re.search('[0-9]+', args[1]):
address = args[0]
port = int(args[1])
        else:
            print('Invalid parameters; give address and port for ping test: [address]:[port]')
            sys.exit(1)
Connector.log = lambda x, y: print(y)
print('Connecting to %s:%d ...' % (address, port))
client = None
try:
client = Connector(address, port)
print('Pinging ..')
if client.ping() == PingResponseType.OK:
print('Successfully pinged the service')
else:
print('Failed to ping the service')
finally:
if client:
print('Shutting down ...')
client.shutdown()
else:
print('Invalid parameters; give address and port for ping test: [address]:[port]')
|
server.py
|
import asyncio
import multiprocessing
import os
import sys
import traceback
from collections import deque
from functools import partial
from inspect import isawaitable
from signal import SIG_IGN, SIGINT, SIGTERM, Signals
from signal import signal as signal_func
from socket import SO_REUSEADDR, SOL_SOCKET, socket
from time import time
from httptools import HttpRequestParser # type: ignore
from httptools.parser.errors import HttpParserError # type: ignore
from sanic.compat import Header, ctrlc_workaround_for_windows
from sanic.exceptions import (
HeaderExpectationFailed,
InvalidUsage,
PayloadTooLarge,
RequestTimeout,
ServerError,
ServiceUnavailable,
)
from sanic.log import access_logger, logger
from sanic.request import EXPECT_HEADER, Request, StreamBuffer
from sanic.response import HTTPResponse
try:
import uvloop # type: ignore
if not isinstance(asyncio.get_event_loop_policy(), uvloop.EventLoopPolicy):
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
pass
OS_IS_WINDOWS = os.name == "nt"
class Signal:
stopped = False
# HTTP protocol implementation
class HttpProtocol(asyncio.Protocol):
"""
This class provides a basic HTTP implementation of the sanic framework.
"""
    # Restrict the attributes that can be added to instances of this class
__slots__ = (
# app
"app",
# event loop, connection
"loop",
"transport",
"connections",
"signal",
# request params
"parser",
"request",
"url",
"headers",
# request config
"request_handler",
"request_timeout",
"response_timeout",
"keep_alive_timeout",
"request_max_size",
"request_buffer_queue_size",
"request_class",
"is_request_stream",
"error_handler",
# enable or disable access log purpose
"access_log",
# connection management
"_total_request_size",
"_request_timeout_handler",
"_response_timeout_handler",
"_keep_alive_timeout_handler",
"_last_request_time",
"_last_response_time",
"_is_stream_handler",
"_not_paused",
"_request_handler_task",
"_request_stream_task",
"_keep_alive",
"_header_fragment",
"state",
"_body_chunks",
)
def __init__(
self,
*,
loop,
app,
signal=Signal(),
connections=None,
state=None,
**kwargs,
):
asyncio.set_event_loop(loop)
self.loop = loop
        # Only pass the event loop explicitly on Python < 3.7; on newer versions the
        # "loop" argument is deprecated.
deprecated_loop = self.loop if sys.version_info < (3, 7) else None
self.app = app
self.transport = None
self.request = None
self.parser = None
self.url = None
self.headers = None
self.signal = signal
self.access_log = self.app.config.ACCESS_LOG
self.connections = connections if connections is not None else set()
self.request_handler = self.app.handle_request
self.error_handler = self.app.error_handler
self.request_timeout = self.app.config.REQUEST_TIMEOUT
self.request_buffer_queue_size = (
self.app.config.REQUEST_BUFFER_QUEUE_SIZE
)
self.response_timeout = self.app.config.RESPONSE_TIMEOUT
self.keep_alive_timeout = self.app.config.KEEP_ALIVE_TIMEOUT
self.request_max_size = self.app.config.REQUEST_MAX_SIZE
self.request_class = self.app.request_class or Request
self.is_request_stream = self.app.is_request_stream
self._is_stream_handler = False
        # Notify concurrent tasks: asyncio.Event, like threading.Event, lets multiple
        # consumers wait for something to happen instead of polling a sentinel value.
self._not_paused = asyncio.Event(loop=deprecated_loop)
self._total_request_size = 0
self._request_timeout_handler = None
self._response_timeout_handler = None
self._keep_alive_timeout_handler = None
self._last_request_time = None
self._last_response_time = None
self._request_handler_task = None
self._request_stream_task = None
self._keep_alive = self.app.config.KEEP_ALIVE
self._header_fragment = b""
self.state = state if state else {}
if "requests_count" not in self.state:
self.state["requests_count"] = 0
self._not_paused.set()
self._body_chunks = deque()
@property
def keep_alive(self):
"""
Check if the connection needs to be kept alive based on the params
attached to the `_keep_alive` attribute, :attr:`Signal.stopped`
and :func:`HttpProtocol.parser.should_keep_alive`
        :return: ``True`` if the connection is to be kept alive; ``False`` otherwise
"""
return (
self._keep_alive
and not self.signal.stopped
and self.parser.should_keep_alive()
)
# -------------------------------------------- #
# Connection
# -------------------------------------------- #
def connection_made(self, transport):
self.connections.add(self)
self._request_timeout_handler = self.loop.call_later(
self.request_timeout, self.request_timeout_callback
)
self.transport = transport
self._last_request_time = time()
def connection_lost(self, exc):
self.connections.discard(self)
if self._request_handler_task:
self._request_handler_task.cancel()
if self._request_stream_task:
self._request_stream_task.cancel()
if self._request_timeout_handler:
self._request_timeout_handler.cancel()
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
if self._keep_alive_timeout_handler:
self._keep_alive_timeout_handler.cancel()
def pause_writing(self):
self._not_paused.clear()
def resume_writing(self):
self._not_paused.set()
def request_timeout_callback(self):
# See the docstring in the RequestTimeout exception, to see
# exactly what this timeout is checking for.
# Check if elapsed time since request initiated exceeds our
# configured maximum request timeout value
time_elapsed = time() - self._last_request_time
if time_elapsed < self.request_timeout:
time_left = self.request_timeout - time_elapsed
self._request_timeout_handler = self.loop.call_later(
time_left, self.request_timeout_callback
)
else:
if self._request_stream_task:
self._request_stream_task.cancel()
if self._request_handler_task:
self._request_handler_task.cancel()
self.write_error(RequestTimeout("Request Timeout"))
def response_timeout_callback(self):
# Check if elapsed time since response was initiated exceeds our
# configured maximum request timeout value
time_elapsed = time() - self._last_request_time
if time_elapsed < self.response_timeout:
time_left = self.response_timeout - time_elapsed
self._response_timeout_handler = self.loop.call_later(
time_left, self.response_timeout_callback
)
else:
if self._request_stream_task:
self._request_stream_task.cancel()
if self._request_handler_task:
self._request_handler_task.cancel()
self.write_error(ServiceUnavailable("Response Timeout"))
def keep_alive_timeout_callback(self):
"""
Check if elapsed time since last response exceeds our configured
maximum keep alive timeout value and if so, close the transport
pipe and let the response writer handle the error.
:return: None
"""
time_elapsed = time() - self._last_response_time
if time_elapsed < self.keep_alive_timeout:
time_left = self.keep_alive_timeout - time_elapsed
self._keep_alive_timeout_handler = self.loop.call_later(
time_left, self.keep_alive_timeout_callback
)
else:
logger.debug("KeepAlive Timeout. Closing connection.")
self.transport.close()
self.transport = None
# -------------------------------------------- #
# Parsing
# -------------------------------------------- #
def data_received(self, data):
# Check for the request itself getting too large and exceeding
# memory limits
self._total_request_size += len(data)
if self._total_request_size > self.request_max_size:
self.write_error(PayloadTooLarge("Payload Too Large"))
# Create parser if this is the first time we're receiving data
if self.parser is None:
assert self.request is None
self.headers = []
self.parser = HttpRequestParser(self)
# requests count
self.state["requests_count"] = self.state["requests_count"] + 1
# Parse request chunk or close connection
try:
self.parser.feed_data(data)
except HttpParserError:
message = "Bad Request"
if self.app.debug:
message += "\n" + traceback.format_exc()
self.write_error(InvalidUsage(message))
def on_url(self, url):
if not self.url:
self.url = url
else:
self.url += url
def on_header(self, name, value):
self._header_fragment += name
if value is not None:
if (
self._header_fragment == b"Content-Length"
and int(value) > self.request_max_size
):
self.write_error(PayloadTooLarge("Payload Too Large"))
try:
value = value.decode()
except UnicodeDecodeError:
value = value.decode("latin_1")
self.headers.append(
(self._header_fragment.decode().casefold(), value)
)
self._header_fragment = b""
def on_headers_complete(self):
self.request = self.request_class(
url_bytes=self.url,
headers=Header(self.headers),
version=self.parser.get_http_version(),
method=self.parser.get_method().decode(),
transport=self.transport,
app=self.app,
)
# Remove any existing KeepAlive handler here,
# It will be recreated if required on the new request.
if self._keep_alive_timeout_handler:
self._keep_alive_timeout_handler.cancel()
self._keep_alive_timeout_handler = None
if self.request.headers.get(EXPECT_HEADER):
self.expect_handler()
if self.is_request_stream:
self._is_stream_handler = self.app.router.is_stream_handler(
self.request
)
if self._is_stream_handler:
self.request.stream = StreamBuffer(
self.request_buffer_queue_size
)
self.execute_request_handler()
def expect_handler(self):
"""
Handler for Expect Header.
"""
expect = self.request.headers.get(EXPECT_HEADER)
if self.request.version == "1.1":
if expect.lower() == "100-continue":
self.transport.write(b"HTTP/1.1 100 Continue\r\n\r\n")
else:
self.write_error(
HeaderExpectationFailed(f"Unknown Expect: {expect}")
)
def on_body(self, body):
if self.is_request_stream and self._is_stream_handler:
# body chunks can be put into asyncio.Queue out of order if
# multiple tasks put concurrently and the queue is full in python
# 3.7. so we should not create more than one task putting into the
# queue simultaneously.
self._body_chunks.append(body)
if (
not self._request_stream_task
or self._request_stream_task.done()
):
self._request_stream_task = self.loop.create_task(
self.stream_append()
)
else:
self.request.body_push(body)
async def body_append(self, body):
if (
self.request is None
or self._request_stream_task is None
or self._request_stream_task.cancelled()
):
return
if self.request.stream.is_full():
self.transport.pause_reading()
await self.request.stream.put(body)
self.transport.resume_reading()
else:
await self.request.stream.put(body)
async def stream_append(self):
while self._body_chunks:
body = self._body_chunks.popleft()
if self.request.stream.is_full():
self.transport.pause_reading()
await self.request.stream.put(body)
self.transport.resume_reading()
else:
await self.request.stream.put(body)
def on_message_complete(self):
# Entire request (headers and whole body) is received.
# We can cancel and remove the request timeout handler now.
if self._request_timeout_handler:
self._request_timeout_handler.cancel()
self._request_timeout_handler = None
if self.is_request_stream and self._is_stream_handler:
self._body_chunks.append(None)
if (
not self._request_stream_task
or self._request_stream_task.done()
):
self._request_stream_task = self.loop.create_task(
self.stream_append()
)
return
self.request.body_finish()
self.execute_request_handler()
def execute_request_handler(self):
"""
Invoke the request handler defined by the
:func:`sanic.app.Sanic.handle_request` method
:return: None
"""
self._response_timeout_handler = self.loop.call_later(
self.response_timeout, self.response_timeout_callback
)
self._last_request_time = time()
self._request_handler_task = self.loop.create_task(
self.request_handler(
self.request, self.write_response, self.stream_response
)
)
# -------------------------------------------- #
# Responding
# -------------------------------------------- #
def log_response(self, response):
"""
        Helper method provided to enable the logging of responses when
        :attr:`HttpProtocol.access_log` is enabled.
:param response: Response generated for the current request
:type response: :class:`sanic.response.HTTPResponse` or
:class:`sanic.response.StreamingHTTPResponse`
:return: None
"""
if self.access_log:
extra = {"status": getattr(response, "status", 0)}
if isinstance(response, HTTPResponse):
extra["byte"] = len(response.body)
else:
extra["byte"] = -1
extra["host"] = "UNKNOWN"
if self.request is not None:
if self.request.ip:
extra["host"] = f"{self.request.ip}:{self.request.port}"
extra["request"] = f"{self.request.method} {self.request.url}"
else:
extra["request"] = "nil"
access_logger.info("", extra=extra)
def write_response(self, response):
"""
Writes response content synchronously to the transport.
"""
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
try:
keep_alive = self.keep_alive
self.transport.write(
response.output(
self.request.version, keep_alive, self.keep_alive_timeout
)
)
self.log_response(response)
except AttributeError:
logger.error(
"Invalid response object for url %s, "
"Expected Type: HTTPResponse, Actual Type: %s",
self.url,
type(response),
)
self.write_error(ServerError("Invalid response type"))
except RuntimeError:
if self.app.debug:
logger.error(
"Connection lost before response written @ %s",
self.request.ip,
)
keep_alive = False
except Exception as e:
self.bail_out(f"Writing response failed, connection closed {e!r}")
finally:
if not keep_alive:
self.transport.close()
self.transport = None
else:
self._keep_alive_timeout_handler = self.loop.call_later(
self.keep_alive_timeout, self.keep_alive_timeout_callback
)
self._last_response_time = time()
self.cleanup()
async def drain(self):
await self._not_paused.wait()
async def push_data(self, data):
self.transport.write(data)
async def stream_response(self, response):
"""
Streams a response to the client asynchronously. Attaches
the transport to the response so the response consumer can
write to the response as needed.
"""
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
try:
keep_alive = self.keep_alive
response.protocol = self
await response.stream(
self.request.version, keep_alive, self.keep_alive_timeout
)
self.log_response(response)
except AttributeError:
logger.error(
"Invalid response object for url %s, "
"Expected Type: HTTPResponse, Actual Type: %s",
self.url,
type(response),
)
self.write_error(ServerError("Invalid response type"))
except RuntimeError:
if self.app.debug:
logger.error(
"Connection lost before response written @ %s",
self.request.ip,
)
keep_alive = False
except Exception as e:
self.bail_out(f"Writing response failed, connection closed {e!r}")
finally:
if not keep_alive:
self.transport.close()
self.transport = None
else:
self._keep_alive_timeout_handler = self.loop.call_later(
self.keep_alive_timeout, self.keep_alive_timeout_callback
)
self._last_response_time = time()
self.cleanup()
def write_error(self, exception):
# An error _is_ a response.
# Don't throw a response timeout, when a response _is_ given.
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
response = None
try:
response = self.error_handler.response(self.request, exception)
version = self.request.version if self.request else "1.1"
self.transport.write(response.output(version))
except RuntimeError:
if self.app.debug:
logger.error(
"Connection lost before error written @ %s",
self.request.ip if self.request else "Unknown",
)
except Exception as e:
self.bail_out(
f"Writing error failed, connection closed {e!r}",
from_error=True,
)
finally:
if self.parser and (
self.keep_alive or getattr(response, "status", 0) == 408
):
self.log_response(response)
try:
self.transport.close()
except AttributeError:
logger.debug("Connection lost before server could close it.")
def bail_out(self, message, from_error=False):
"""
        If the transport pipes are closed and the sanic app encounters
an error while writing data to the transport pipe, we log the error
with proper details.
:param message: Error message to display
:param from_error: If the bail out was invoked while handling an
exception scenario.
:type message: str
:type from_error: bool
:return: None
"""
if from_error or self.transport is None or self.transport.is_closing():
logger.error(
"Transport closed @ %s and exception "
"experienced during error handling",
(
self.transport.get_extra_info("peername")
if self.transport is not None
else "N/A"
),
)
logger.debug("Exception:", exc_info=True)
else:
self.write_error(ServerError(message))
logger.error(message)
def cleanup(self):
"""This is called when KeepAlive feature is used,
it resets the connection in order for it to be able
to handle receiving another request on the same connection."""
self.parser = None
self.request = None
self.url = None
self.headers = None
self._request_handler_task = None
self._request_stream_task = None
self._total_request_size = 0
self._is_stream_handler = False
def close_if_idle(self):
"""Close the connection if a request is not being sent or received
:return: boolean - True if closed, false if staying open
"""
if not self.parser and self.transport is not None:
self.transport.close()
return True
return False
def close(self):
"""
Force close the connection.
"""
if self.transport is not None:
self.transport.close()
self.transport = None
def trigger_events(events, loop):
"""Trigger event callbacks (functions or async)
:param events: one or more sync or async functions to execute
:param loop: event loop
"""
    # Run each callback; await the result if it is awaitable (e.g. the "after_start" events)
for event in events:
result = event(loop)
if isawaitable(result):
loop.run_until_complete(result)
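
# Small illustrative example (not from sanic itself): trigger_events accepts both plain
# callables and coroutine functions; awaitable results are run to completion on the
# given loop. The callback names below are hypothetical.
#
#     def log_started(loop):
#         print("server started")
#
#     async def warm_up_cache(loop):
#         await asyncio.sleep(0)
#
#     trigger_events([log_started, warm_up_cache], asyncio.new_event_loop())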
class AsyncioServer:
"""
Wraps an asyncio server with functionality that might be useful to
a user who needs to manage the server lifecycle manually.
"""
__slots__ = (
"loop",
"serve_coro",
"_after_start",
"_before_stop",
"_after_stop",
"server",
"connections",
)
def __init__(
self,
loop,
serve_coro,
connections,
after_start,
before_stop,
after_stop,
):
# Note, Sanic already called "before_server_start" events
# before this helper was even created. So we don't need it here.
self.loop = loop
self.serve_coro = serve_coro
self._after_start = after_start
self._before_stop = before_stop
self._after_stop = after_stop
self.server = None
self.connections = connections
def after_start(self):
"""Trigger "after_server_start" events"""
        # i.e. run each "after_server_start" callback on the loop,
        # awaiting any that return coroutines
trigger_events(self._after_start, self.loop)
def before_stop(self):
"""Trigger "before_server_stop" events"""
trigger_events(self._before_stop, self.loop)
def after_stop(self):
"""Trigger "after_server_stop" events"""
trigger_events(self._after_stop, self.loop)
def is_serving(self):
if self.server:
return self.server.is_serving()
return False
def wait_closed(self):
if self.server:
return self.server.wait_closed()
def close(self):
if self.server:
self.server.close()
coro = self.wait_closed()
task = asyncio.ensure_future(coro, loop=self.loop)
return task
def start_serving(self):
if self.server:
try:
return self.server.start_serving()
except AttributeError:
raise NotImplementedError(
"server.start_serving not available in this version "
"of asyncio or uvloop."
)
def serve_forever(self):
if self.server:
try:
return self.server.serve_forever()
except AttributeError:
raise NotImplementedError(
"server.serve_forever not available in this version "
"of asyncio or uvloop."
)
def __await__(self):
"""Starts the asyncio server, returns AsyncServerCoro"""
task = asyncio.ensure_future(self.serve_coro)
while not task.done():
yield
self.server = task.result()
return self
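# Illustrative sketch (not part of the original module): with run_async=True,
# serve() returns an AsyncioServer instead of running the loop itself, so the
# caller can drive the lifecycle manually. `app` is assumed to be a Sanic
# application instance supplied by the caller; host and port are placeholders.
def _example_manual_lifecycle(app, host="127.0.0.1", port=8000):
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    asyncio_server = serve(
        host=host,
        port=port,
        app=app,
        loop=loop,
        run_async=True,
        after_start=(),
        before_stop=(),
        after_stop=(),
    )
    async def start():
        # Awaiting the AsyncioServer starts the wrapped asyncio server.
        return await asyncio_server
    loop.run_until_complete(start())
    asyncio_server.after_start()
    try:
        loop.run_forever()
    finally:
        asyncio_server.before_stop()
        loop.run_until_complete(asyncio_server.close())
        asyncio_server.after_stop()
        loop.close()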
def serve(
host,
port,
app,
before_start=None,
after_start=None,
before_stop=None,
after_stop=None,
ssl=None,
sock=None,
reuse_port=False,
loop=None,
protocol=HttpProtocol,
backlog=100,
register_sys_signals=True,
run_multiple=False,
run_async=False,
connections=None,
signal=Signal(),
state=None,
asyncio_server_kwargs=None,
):
"""Start asynchronous HTTP Server on an individual process.
:param host: Address to host on
:param port: Port to host on
:param before_start: function to be executed before the server starts
listening. Takes arguments `app` instance and `loop`
:param after_start: function to be executed after the server starts
listening. Takes arguments `app` instance and `loop`
:param before_stop: function to be executed when a stop signal is
received before it is respected. Takes arguments
`app` instance and `loop`
:param after_stop: function to be executed when a stop signal is
received after it is respected. Takes arguments
`app` instance and `loop`
:param ssl: SSLContext
:param sock: Socket for the server to accept connections from
:param reuse_port: `True` for multiple workers
:param loop: asyncio compatible event loop
:param run_async: bool: Do not create a new event loop for the server,
and return an AsyncServer object rather than running it
:param asyncio_server_kwargs: key-value args for asyncio/uvloop
create_server method
:return: Nothing
"""
if not run_async:
# create new event_loop after fork
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if app.debug:
loop.set_debug(app.debug)
app.asgi = False
connections = connections if connections is not None else set()
    # functools.partial pre-binds these keyword arguments to the protocol class,
    # returning a simpler factory that the event loop can call per connection.
server = partial(
protocol,
loop=loop,
connections=connections,
signal=signal,
app=app,
state=state,
)
asyncio_server_kwargs = (
asyncio_server_kwargs if asyncio_server_kwargs else {}
)
server_coroutine = loop.create_server(
server,
host,
port,
ssl=ssl,
reuse_port=reuse_port,
sock=sock,
backlog=backlog,
**asyncio_server_kwargs,
)
if run_async:
return AsyncioServer(
loop=loop,
serve_coro=server_coroutine,
connections=connections,
after_start=after_start,
before_stop=before_stop,
after_stop=after_stop,
)
    # Run each before_start listener on the loop
trigger_events(before_start, loop)
try:
http_server = loop.run_until_complete(server_coroutine)
except BaseException:
logger.exception("Unable to start server")
return
trigger_events(after_start, loop)
# Ignore SIGINT when run_multiple
    # signal_func is an alias for signal.signal
if run_multiple:
signal_func(SIGINT, SIG_IGN)
# Register signals for graceful termination
if register_sys_signals:
if OS_IS_WINDOWS:
ctrlc_workaround_for_windows(app)
else:
for _signal in [SIGTERM] if run_multiple else [SIGINT, SIGTERM]:
loop.add_signal_handler(_signal, app.stop)
pid = os.getpid()
try:
logger.info("Starting worker [%s]", pid)
loop.run_forever()
finally:
logger.info("Stopping worker [%s]", pid)
# Run the on_stop function if provided
trigger_events(before_stop, loop)
# Wait for event loop to finish and all connections to drain
http_server.close()
loop.run_until_complete(http_server.wait_closed())
# Complete all tasks on the loop
signal.stopped = True
for connection in connections:
connection.close_if_idle()
        # Graceful shutdown timeout: honor graceful_shutdown_timeout instead of
        # letting connections hang forever, and roughly track the elapsed time.
graceful = app.config.GRACEFUL_SHUTDOWN_TIMEOUT
start_shutdown = 0
while connections and (start_shutdown < graceful):
loop.run_until_complete(asyncio.sleep(0.1))
start_shutdown = start_shutdown + 0.1
# Force close non-idle connection after waiting for
# graceful_shutdown_timeout
coros = []
for conn in connections:
if hasattr(conn, "websocket") and conn.websocket:
coros.append(conn.websocket.close_connection())
else:
conn.close()
_shutdown = asyncio.gather(*coros)
loop.run_until_complete(_shutdown)
trigger_events(after_stop, loop)
loop.close()
def serve_multiple(server_settings, workers):
"""Start multiple server processes simultaneously. Stop on interrupt
and terminate signals, and drain connections when complete.
:param server_settings: kw arguments to be passed to the serve function
:param workers: number of workers to launch
:return:
"""
server_settings["reuse_port"] = True
server_settings["run_multiple"] = True
# Handling when custom socket is not provided.
if server_settings.get("sock") is None:
sock = socket()
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
sock.bind((server_settings["host"], server_settings["port"]))
sock.set_inheritable(True)
server_settings["sock"] = sock
server_settings["host"] = None
server_settings["port"] = None
processes = []
def sig_handler(signal, frame):
logger.info("Received signal %s. Shutting down.", Signals(signal).name)
for process in processes:
os.kill(process.pid, SIGTERM)
signal_func(SIGINT, lambda s, f: sig_handler(s, f))
signal_func(SIGTERM, lambda s, f: sig_handler(s, f))
mp = multiprocessing.get_context("fork")
for _ in range(workers):
process = mp.Process(target=serve, kwargs=server_settings)
process.daemon = True
process.start()
processes.append(process)
for process in processes:
process.join()
# the above processes will block this until they're stopped
for process in processes:
process.terminate()
server_settings.get("sock").close()
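# Illustrative sketch (not part of the original module): serve_multiple is
# normally invoked by Sanic's app.run(workers=N). The settings below mirror
# serve()'s keyword arguments; `app` is assumed to be a Sanic application
# instance, the host/port values are placeholders, and a platform with fork
# support is required.
def _example_serve_multiple(app):
    server_settings = {
        "host": "0.0.0.0",
        "port": 8000,
        "app": app,
        "protocol": HttpProtocol,
        "backlog": 100,
    }
    serve_multiple(server_settings, workers=multiprocessing.cpu_count())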
|
layer_artist.py
|
import os
import uuid
import time
import shutil
import tempfile
import matplotlib
from glue.core import Data, Subset
from glue.viewers.common.layer_artist import LayerArtist
from .layer_state import OpenSpaceLayerState
from .utils import get_point_data
from threading import Thread
from matplotlib.colors import ColorConverter
to_rgb = ColorConverter.to_rgb
to_hex = matplotlib.colors.to_hex
__all__ = ['OpenSpaceLayerArtist', 'protocol_version']
# TODO move this to later
# TODO make this image selectable by user
TEXTURE_ORIGIN = os.path.abspath(os.path.join(os.path.dirname(__file__), 'halo.png'))
TEXTURE = tempfile.mktemp(suffix='.png')
shutil.copy(TEXTURE_ORIGIN, TEXTURE)
# Time to wait after sending websocket message
WAIT_TIME = 0.01
protocol_version = "1.0"
continue_listening = True
will_send_message = True
has_luminosity_data = False
has_velocity_data = False
class OpenSpaceLayerArtist(LayerArtist):
_layer_state_cls = OpenSpaceLayerState
def __init__(self, viewer, *args, **kwargs):
super(OpenSpaceLayerArtist, self).__init__(*args, **kwargs)
self._viewer = viewer
self.state.add_global_callback(self._on_attribute_change)
self._viewer_state.add_global_callback(self._on_attribute_change)
self._uuid = None
self._display_name = None
self._state = None
self.threadCommsRx = Thread(target=self.request_listen)
self.threadCommsRx.daemon = True
self.threadCommsRx.start()
@property
def sock(self):
return self._viewer.socket
def _on_attribute_change(self, **kwargs):
global will_send_message
force = kwargs.get('force', False)
if self.sock is None:
return
if self._viewer_state.lon_att is None or self._viewer_state.lat_att is None:
return
changed = self.pop_changed_properties()
if len(changed) == 0 and not force:
return
# If properties update in Glue, send message to OS with new values
if self._uuid is not None:
if will_send_message is False:
return
message_type = ""
subject = ""
length_of_subject = ""
identifier = self._uuid
length_of_identifier = str(len(identifier))
if "alpha" in changed:
message_type = "UPOP"
                # Round to 7 decimals so length_of_value stays a single digit,
                # since OpenSpace reads length_of_value as one byte of the subject
value = str(round(self.state.alpha, 7))
length_of_value = str(len(value))
subject = length_of_identifier + identifier + length_of_value + value
length_of_subject = str(format(len(subject), "09"))
elif "color" in changed:
message_type = "UPCO"
value = str(to_rgb(self.state.color))
length_of_value = str(len(value))
subject = length_of_identifier + identifier + length_of_value + value
length_of_subject = str(format(len(subject), "09"))
elif "size" in changed:
message_type = "UPSI"
value = str(self.state.size)
length_of_value = str(len(value))
subject = length_of_identifier + identifier + length_of_value + value
length_of_subject = str(format(len(subject), "09"))
elif "visible" in changed:
message_type = "TOVI"
if self.state.visible is False:
value = "F"
elif self.state.visible is True:
value = "T"
else:
return
subject = length_of_identifier + identifier + value
length_of_subject = str(format(len(subject), "09"))
# Send the correct message to OpenSpace
if subject:
message = protocol_version + message_type + length_of_subject + subject
self.sock.send(bytes(message, 'utf-8'))
print('Message sent: ', message)
time.sleep(WAIT_TIME)
self.redraw()
return
# On reselect of subset data, remove old scene graph node and resend data
if isinstance(self.state.layer, Subset):
state = self.state.layer.subset_state
if state is not self._state:
self._state = state
self.remove_scene_graph_node()
self.send_point_data()
self.redraw()
return
self.clear()
# Store state of subset to track changes from reselection of subset
if isinstance(self.state.layer, Subset):
self._state = self.state.layer.subset_state
self.send_point_data()
self.redraw()
# Create and send a message including the point data to OpenSpace
def send_point_data(self):
# Create string with coordinates for point data
try:
message_type = "PDAT"
# Create a random identifier
self._uuid = str(uuid.uuid4())
if isinstance(self.state.layer, Data):
self._display_name = self.state.layer.label
else:
self._display_name = self.state.layer.label + ' (' + self.state.layer.data.label + ')'
identifier = self._uuid
length_of_identifier = str(len(identifier))
color = str(to_rgb(self.state.color))
length_of_color = str(len(color))
opacity = str(round(self.state.alpha, 7))
length_of_opacity = str(len(opacity))
gui_name = self._display_name
length_of_gui = str(len(gui_name))
size = str(self.state.size)
length_of_size = str(len(size))
point_data = get_point_data(self.state.layer,
self._viewer_state.lon_att,
self._viewer_state.lat_att,
alternative_attribute=self._viewer_state.alt_att,
alternative_unit=self._viewer_state.alt_unit,
frame=self._viewer_state.frame)
subject = (
length_of_identifier + identifier +
length_of_color + color +
length_of_opacity + opacity +
length_of_size + size +
length_of_gui + gui_name +
point_data
)
length_of_subject = str(format(len(subject), "09"))
message = protocol_version + message_type + length_of_subject + subject
self.sock.send(bytes(message, 'utf-8'))
# Wait for a short time to avoid sending too many messages in quick succession
time.sleep(WAIT_TIME)
except Exception as exc:
print(str(exc))
return
def remove_scene_graph_node(self):
# Create and send "Remove Scene Graph Node" message to OS
message_type = "RSGN"
subject = self._uuid
length_of_subject = str(format(len(subject), "09"))
message = protocol_version + message_type + length_of_subject + subject
self.sock.send(bytes(message, 'utf-8'))
        print('Message sent: ', message)
# Wait for a short time to avoid sending too many messages in quick succession
time.sleep(WAIT_TIME)
def request_listen(self):
global continue_listening
print("Starting request_listen")
while continue_listening:
while self.sock is None:
                time.sleep(1.0)
self.receive_message()
time.sleep(0.1)
def receive_message(self):
global will_send_message
try:
message_received = self.sock.recv(4096).decode('ascii')
        except Exception:
return
print('Received message from socket: ', message_received)
# Start and end are message offsets
start = 0
end = 4
message_type = message_received[start:end]
start += 4
end += 4
length_of_subject = int(message_received[start: end])
start += 4
end += length_of_subject
subject = message_received[start:end]
# Resetting message offsets to read from subject
start = 0
end = 2
length_of_identifier = int(subject[start:end])
start += 2
end += length_of_identifier
identifier = subject[start:end]
start += length_of_identifier
if message_type == "UPCO":
end += 2
else:
end += 1
for layer in self._viewer.layers:
if layer._uuid == identifier:
# Update Color
if message_type == "UPCO":
length_of_value = int(subject[start:end])
start = end
end += length_of_value
# Value is received in this format: (redValue, greenValue, blueValue)
UPCO_string_value = subject[start + 1:end - 1] # Don't include ( and )
UPCO_len_string_value = len(UPCO_string_value)
x = 0
red = ""
while UPCO_string_value[x] != ",": # first value in string is before first ","
red += UPCO_string_value[x]
x += 1
r = float(red)
x += 1
green = ""
while UPCO_string_value[x] != ",": # second value in string is before second ","
green += UPCO_string_value[x]
x += 1
g = float(green)
x += 1
blue = ""
                    for y in range(x, UPCO_len_string_value):  # third value is the rest of the string
                        blue += UPCO_string_value[y]
b = float(blue)
UPCO_value = to_hex([r, g, b])
will_send_message = False
layer.state.color = UPCO_value
break
# Update Opacity
if message_type == "UPOP":
length_of_value = int(subject[start:end])
start = end
end += length_of_value
UPOP_value = float(subject[start:end])
will_send_message = False
layer.state.alpha = UPOP_value
break
# Update Size
if message_type == "UPSI":
length_of_value = int(subject[start:end])
start = end
end += length_of_value
UPSI_value = float(subject[start:end])
will_send_message = False
layer.state.size = UPSI_value
break
# Toggle Visibility
if message_type == "TOVI":
TOVI_value = subject[start]
will_send_message = False
if TOVI_value == "F":
layer.state.visible = False
else:
layer.state.visible = True
break
break
time.sleep(WAIT_TIME)
will_send_message = True
self.redraw()
def clear(self):
if self.sock is None:
return
if self._uuid is None:
return
self.remove_scene_graph_node()
self._uuid = None
self.redraw()
def update(self):
if self.sock is None:
return
self._on_attribute_change(force=True)
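# Illustrative sketch (not part of the original plugin): shows the wire framing
# used by the update messages above. A message consists of the protocol version,
# a 4-character message type, a 9-digit zero-padded subject length and the
# subject; inside the subject, each field is prefixed with its own length.
# The identifier/new_alpha arguments are hypothetical example values.
def _example_frame_opacity_update(identifier, new_alpha):
    message_type = "UPOP"
    # Round to 7 decimals so the value's length stays a single digit.
    value = str(round(new_alpha, 7))
    subject = str(len(identifier)) + identifier + str(len(value)) + value
    return protocol_version + message_type + format(len(subject), "09") + subject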
|
run_experiments.py
|
#!/usr/bin/python
import subprocess
import threading
import multiprocessing
conf_str_template = '''init_cwnd: 12
max_cwnd: 15
retx_timeout: 45e-06
queue_size: 36864
propagation_delay: 0.0000002
bandwidth: 40000000000.0
queue_type: {0}
flow_type: 2
num_flow: 100000
flow_trace: CDF_aditya.txt
cut_through: 1
mean_flow_size: 0
load_balancing: 0
preemptive_queue: 0
big_switch: 0
host_type: 1
traffic_imbalance: 0
load: 0.6
reauth_limit: 3
magic_trans_slack: 1.1
magic_delay_scheduling: 1
use_flow_trace: 0
smooth_cdf: 1
burst_at_beginning: 0
capability_timeout: 1.5
capability_resend_timeout: 9
capability_initial: 8
capability_window: 8
capability_window_timeout: 25
ddc: 0
ddc_cpu_ratio: 0.33
ddc_mem_ratio: 0.33
ddc_disk_ratio: 0.34
ddc_normalize: 2
ddc_type: 0
deadline: 0
schedule_by_deadline: 0
avg_deadline: 0.0001
capability_third_level: 1
capability_fourth_level: 0
magic_inflate: 1
interarrival_cdf: none
num_host_types: 13
permutation_tm: 1
srpt_mode: {1}
flow_split_mode: {2}
congestion_compress: {3}
pq_mode: {4}
srpt_with_fair: {5}
'''
template = './simulator 1 conf_{0}_split={1}_compres={2}.txt > result_{0}_split={1}_compres={2}.txt'
def getNumLines(trace):
out = subprocess.check_output('wc -l {}'.format(trace), shell=True)
return int(out.split()[0])
def run_exp(rw, semaphore):
semaphore.acquire()
print template.format(*rw)
subprocess.call(template.format(*rw), shell=True)
semaphore.release()
queue_types = [2, 2, 2, 2, 2, 3, 3, 3, 3, 4]
srpt_mode = [10, 20, 30,30,30, 10, 30, 0, 0, 0]
pq_mode = [0,0,0,0,0,30,30,20,10,0]
srf = [0,0,0,1,2,0,0,0,0,0]
names = ["srpt", "wrong_srpt", "lazy_srpt" , "lazy_srpt_fair", "lazy_srpt_and_wrong", "srpt_pq", "lazy_srpt_pq", "fifo", "fairness", "sergei_fairness"]
threads = []
semaphore = threading.Semaphore(multiprocessing.cpu_count())
comr_scale = 0.05
for comr in range(1, 11):
for split_mode in range(5, 6):
for i in range(0, 10):
            rv = comr_scale * comr
conf_str = conf_str_template.format(queue_types[i], srpt_mode[i], split_mode, rv, pq_mode[i], srf[i])
conf_file = "conf_{0}_split={1}_compres={2}.txt".format(names[i], split_mode, rv)
with open(conf_file, 'w') as f:
print conf_file
f.write(conf_str)
threads.append(threading.Thread(target=run_exp, args=((names[i], split_mode, rv), semaphore)))
print '\n'
print '\n'
[t.start() for t in threads]
[t.join() for t in threads]
print 'finished', len(threads), 'experiments'
|